code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def create_scheduler_file(scheduler: str, job: Job) -> str:
    """Substitute values into a template scheduler file.

    Args:
        scheduler: Name of the batch scheduler; "SLURM" and "PBS" are
            supported (compared case-insensitively).
        job: Job whose commands and scheduler options are rendered into
            the submission script.

    Returns:
        The complete scheduler submission script as a string.

    Raises:
        ValueError: If ``scheduler`` is not a supported scheduler.
    """
    logger.debug("Create Scheduler File Function")
    if job.scheduler_options is None:
        scheduler_options: Dict[str, Any] = {}
    else:
        # Deep copy so the caller's options dict is not mutated by the
        # "setup" deletion below.
        scheduler_options = deepcopy(job.scheduler_options)
    try:
        setup_string = parse_setup(scheduler_options["setup"])
        del scheduler_options["setup"]
    except KeyError:
        # No custom setup commands were supplied.
        setup_string = ""
    # Create header
    header_string = create_header_string(scheduler, **scheduler_options)
    header_string += get_array_string(scheduler, len(job))
    if scheduler.upper() == "SLURM":
        workdir = r"$SLURM_SUBMIT_DIR"
        array_index = r"$SLURM_ARRAY_TASK_ID"
    elif scheduler.upper() == "PBS":
        workdir = r"$PBS_O_WORKDIR"
        array_index = r"$PBS_ARRAY_INDEX"
    else:
        # Previously an unrecognised scheduler fell through both branches and
        # triggered an opaque UnboundLocalError on the return statement below.
        raise ValueError(
            "Unsupported scheduler {!r}; expected 'SLURM' or 'PBS'".format(scheduler)
        )
    return header_string + SCHEDULER_TEMPLATE.format(
        workdir=workdir,
        command_list=job.as_bash_array(),
        setup=setup_string,
        array_index=array_index,
    )
def function[create_scheduler_file, parameter[scheduler, job]]: constant[Substitute values into a template scheduler file.] call[name[logger].debug, parameter[constant[Create Scheduler File Function]]] if compare[name[job].scheduler_options is constant[None]] begin[:] <ast.AnnAssign object at 0x7da18c4cf010> <ast.Try object at 0x7da18c4ce500> variable[header_string] assign[=] call[name[create_header_string], parameter[name[scheduler]]] <ast.AugAssign object at 0x7da18f09f250> if compare[call[name[scheduler].upper, parameter[]] equal[==] constant[SLURM]] begin[:] variable[workdir] assign[=] constant[$SLURM_SUBMIT_DIR] variable[array_index] assign[=] constant[$SLURM_ARRAY_TASK_ID] return[binary_operation[name[header_string] + call[name[SCHEDULER_TEMPLATE].format, parameter[]]]]
keyword[def] identifier[create_scheduler_file] ( identifier[scheduler] : identifier[str] , identifier[job] : identifier[Job] )-> identifier[str] : literal[string] identifier[logger] . identifier[debug] ( literal[string] ) keyword[if] identifier[job] . identifier[scheduler_options] keyword[is] keyword[None] : identifier[scheduler_options] : identifier[Dict] [ identifier[str] , identifier[Any] ]={} keyword[else] : identifier[scheduler_options] = identifier[deepcopy] ( identifier[job] . identifier[scheduler_options] ) keyword[try] : identifier[setup_string] = identifier[parse_setup] ( identifier[scheduler_options] [ literal[string] ]) keyword[del] identifier[scheduler_options] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[setup_string] = literal[string] identifier[header_string] = identifier[create_header_string] ( identifier[scheduler] ,** identifier[scheduler_options] ) identifier[header_string] += identifier[get_array_string] ( identifier[scheduler] , identifier[len] ( identifier[job] )) keyword[if] identifier[scheduler] . identifier[upper] ()== literal[string] : identifier[workdir] = literal[string] identifier[array_index] = literal[string] keyword[elif] identifier[scheduler] . identifier[upper] ()== literal[string] : identifier[workdir] = literal[string] identifier[array_index] = literal[string] keyword[return] identifier[header_string] + identifier[SCHEDULER_TEMPLATE] . identifier[format] ( identifier[workdir] = identifier[workdir] , identifier[command_list] = identifier[job] . identifier[as_bash_array] (), identifier[setup] = identifier[setup_string] , identifier[array_index] = identifier[array_index] , )
def create_scheduler_file(scheduler: str, job: Job) -> str: """Substitute values into a template scheduler file.""" logger.debug('Create Scheduler File Function') if job.scheduler_options is None: scheduler_options: Dict[str, Any] = {} # depends on [control=['if'], data=[]] else: scheduler_options = deepcopy(job.scheduler_options) try: setup_string = parse_setup(scheduler_options['setup']) del scheduler_options['setup'] # depends on [control=['try'], data=[]] except KeyError: setup_string = '' # depends on [control=['except'], data=[]] # Create header header_string = create_header_string(scheduler, **scheduler_options) header_string += get_array_string(scheduler, len(job)) if scheduler.upper() == 'SLURM': workdir = '$SLURM_SUBMIT_DIR' array_index = '$SLURM_ARRAY_TASK_ID' # depends on [control=['if'], data=[]] elif scheduler.upper() == 'PBS': workdir = '$PBS_O_WORKDIR' array_index = '$PBS_ARRAY_INDEX' # depends on [control=['if'], data=[]] return header_string + SCHEDULER_TEMPLATE.format(workdir=workdir, command_list=job.as_bash_array(), setup=setup_string, array_index=array_index)
def link(g: Graph, subject: Node, predicate: URIRef) -> Tuple[Optional[URIRef], Optional[URIRef]]:
    """ Return the link URI and link type for subject and predicate

    :param g: graph context
    :param subject: subject of link
    :param predicate: link predicate
    :return: URI and optional type URI. URI is None if not a link
    """
    link_node = g.value(subject, predicate)
    if not link_node:
        # No value at all for this predicate -- not a link.
        return None, None
    target = g.value(link_node, FHIR.link)
    if not target:
        # The node exists but carries no fhir:link -- not a link.
        return None, None
    return target, g.value(target, RDF.type)
def function[link, parameter[g, subject, predicate]]: constant[ Return the link URI and link type for subject and predicate :param g: graph context :param subject: subject of linke :param predicate: link predicate :return: URI and optional type URI. URI is None if not a link ] variable[link_node] assign[=] call[name[g].value, parameter[name[subject], name[predicate]]] if name[link_node] begin[:] variable[l] assign[=] call[name[g].value, parameter[name[link_node], name[FHIR].link]] if name[l] begin[:] variable[typ] assign[=] call[name[g].value, parameter[name[l], name[RDF].type]] return[tuple[[<ast.Name object at 0x7da18dc9bd60>, <ast.Name object at 0x7da18dc9b8b0>]]] return[tuple[[<ast.Constant object at 0x7da18dc9a770>, <ast.Constant object at 0x7da18dc9bc70>]]]
keyword[def] identifier[link] ( identifier[g] : identifier[Graph] , identifier[subject] : identifier[Node] , identifier[predicate] : identifier[URIRef] )-> identifier[Tuple] [ identifier[Optional] [ identifier[URIRef] ], identifier[Optional] [ identifier[URIRef] ]]: literal[string] identifier[link_node] = identifier[g] . identifier[value] ( identifier[subject] , identifier[predicate] ) keyword[if] identifier[link_node] : identifier[l] = identifier[g] . identifier[value] ( identifier[link_node] , identifier[FHIR] . identifier[link] ) keyword[if] identifier[l] : identifier[typ] = identifier[g] . identifier[value] ( identifier[l] , identifier[RDF] . identifier[type] ) keyword[return] identifier[l] , identifier[typ] keyword[return] keyword[None] , keyword[None]
def link(g: Graph, subject: Node, predicate: URIRef) -> Tuple[Optional[URIRef], Optional[URIRef]]: """ Return the link URI and link type for subject and predicate :param g: graph context :param subject: subject of linke :param predicate: link predicate :return: URI and optional type URI. URI is None if not a link """ link_node = g.value(subject, predicate) if link_node: l = g.value(link_node, FHIR.link) if l: typ = g.value(l, RDF.type) return (l, typ) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return (None, None)
def get_dataarg(args): """Retrieve the world 'data' argument from a set of input parameters. """ for i, arg in enumerate(args): if is_nested_config_arg(arg): return i, arg elif is_std_config_arg(arg): return i, {"config": arg} elif isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]): return i, arg[0] raise ValueError("Did not find configuration or data object in arguments: %s" % args)
def function[get_dataarg, parameter[args]]: constant[Retrieve the world 'data' argument from a set of input parameters. ] for taget[tuple[[<ast.Name object at 0x7da1b170a770>, <ast.Name object at 0x7da1b17089a0>]]] in starred[call[name[enumerate], parameter[name[args]]]] begin[:] if call[name[is_nested_config_arg], parameter[name[arg]]] begin[:] return[tuple[[<ast.Name object at 0x7da1b1709870>, <ast.Name object at 0x7da1b170ab60>]]] <ast.Raise object at 0x7da1b1710430>
keyword[def] identifier[get_dataarg] ( identifier[args] ): literal[string] keyword[for] identifier[i] , identifier[arg] keyword[in] identifier[enumerate] ( identifier[args] ): keyword[if] identifier[is_nested_config_arg] ( identifier[arg] ): keyword[return] identifier[i] , identifier[arg] keyword[elif] identifier[is_std_config_arg] ( identifier[arg] ): keyword[return] identifier[i] ,{ literal[string] : identifier[arg] } keyword[elif] identifier[isinstance] ( identifier[arg] ,( identifier[list] , identifier[tuple] )) keyword[and] identifier[is_nested_config_arg] ( identifier[arg] [ literal[int] ]): keyword[return] identifier[i] , identifier[arg] [ literal[int] ] keyword[raise] identifier[ValueError] ( literal[string] % identifier[args] )
def get_dataarg(args): """Retrieve the world 'data' argument from a set of input parameters. """ for (i, arg) in enumerate(args): if is_nested_config_arg(arg): return (i, arg) # depends on [control=['if'], data=[]] elif is_std_config_arg(arg): return (i, {'config': arg}) # depends on [control=['if'], data=[]] elif isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]): return (i, arg[0]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] raise ValueError('Did not find configuration or data object in arguments: %s' % args)
def user_stats_api(request, provider):
    """ Get statistics for selected Edookit users

    key: api key
    since: time as timestamp - get stats changed since
    """
    # Reject the request unless a valid API key for this provider is given.
    if 'key' not in request.GET or provider not in settings.USER_STATS_API_KEY \
            or request.GET['key'] != settings.USER_STATS_API_KEY[provider]:
        return HttpResponse('Unauthorized', status=401)
    since = None
    if 'since' in request.GET:
        # 'since' arrives as a Unix timestamp string; fromtimestamp yields a
        # naive local-time datetime -- NOTE(review): confirm callers expect
        # server-local time rather than UTC.
        since = datetime.datetime.fromtimestamp(int(request.GET['since']))
    social_users = list(UserSocialAuth.objects.filter(provider=provider).select_related('user'))
    # Map internal user ids to their social-auth records so the provider's
    # external uid can be reported instead of the internal id.
    user_map = {u.user.id: u for u in social_users}
    stats = UserStat.objects.get_user_stats(
        [u.user for u in social_users], lang=None, since=since, recalculate=False)
    data = {"users": []}
    for user, s in stats.items():
        # 'user' is whatever key get_user_stats returns; presumably the
        # internal user id, since it indexes user_map -- verify against the
        # UserStat manager.
        data["users"].append({
            "user_id": user_map[user].uid,
            "concepts": s,
        })
    return render_json(request, data, template='concepts_json.html',
                       help_text=user_stats_bulk.__doc__)
def function[user_stats_api, parameter[request, provider]]: constant[ Get statistics for selected Edookit users key: api key since: time as timestamp - get stats changed since ] if <ast.BoolOp object at 0x7da20c76d270> begin[:] return[call[name[HttpResponse], parameter[constant[Unauthorized]]]] variable[since] assign[=] constant[None] if compare[constant[since] in name[request].GET] begin[:] variable[since] assign[=] call[name[datetime].datetime.fromtimestamp, parameter[call[name[int], parameter[call[name[request].GET][constant[since]]]]]] variable[social_users] assign[=] call[name[list], parameter[call[call[name[UserSocialAuth].objects.filter, parameter[]].select_related, parameter[constant[user]]]]] variable[user_map] assign[=] <ast.DictComp object at 0x7da20c76c4c0> variable[stats] assign[=] call[name[UserStat].objects.get_user_stats, parameter[<ast.ListComp object at 0x7da20c76fc40>]] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c76db70>], [<ast.List object at 0x7da20c76e800>]] for taget[tuple[[<ast.Name object at 0x7da20c76d000>, <ast.Name object at 0x7da20c76e7d0>]]] in starred[call[name[stats].items, parameter[]]] begin[:] call[call[name[data]][constant[users]].append, parameter[dictionary[[<ast.Constant object at 0x7da20c76eef0>, <ast.Constant object at 0x7da20c76f6a0>], [<ast.Attribute object at 0x7da20c76d690>, <ast.Name object at 0x7da20c76eda0>]]]] return[call[name[render_json], parameter[name[request], name[data]]]]
keyword[def] identifier[user_stats_api] ( identifier[request] , identifier[provider] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[request] . identifier[GET] keyword[or] identifier[provider] keyword[not] keyword[in] identifier[settings] . identifier[USER_STATS_API_KEY] keyword[or] identifier[request] . identifier[GET] [ literal[string] ]!= identifier[settings] . identifier[USER_STATS_API_KEY] [ identifier[provider] ]: keyword[return] identifier[HttpResponse] ( literal[string] , identifier[status] = literal[int] ) identifier[since] = keyword[None] keyword[if] literal[string] keyword[in] identifier[request] . identifier[GET] : identifier[since] = identifier[datetime] . identifier[datetime] . identifier[fromtimestamp] ( identifier[int] ( identifier[request] . identifier[GET] [ literal[string] ])) identifier[social_users] = identifier[list] ( identifier[UserSocialAuth] . identifier[objects] . identifier[filter] ( identifier[provider] = identifier[provider] ). identifier[select_related] ( literal[string] )) identifier[user_map] ={ identifier[u] . identifier[user] . identifier[id] : identifier[u] keyword[for] identifier[u] keyword[in] identifier[social_users] } identifier[stats] = identifier[UserStat] . identifier[objects] . identifier[get_user_stats] ([ identifier[u] . identifier[user] keyword[for] identifier[u] keyword[in] identifier[social_users] ], identifier[lang] = keyword[None] , identifier[since] = identifier[since] , identifier[recalculate] = keyword[False] ) identifier[data] ={ literal[string] :[]} keyword[for] identifier[user] , identifier[s] keyword[in] identifier[stats] . identifier[items] (): identifier[data] [ literal[string] ]. identifier[append] ({ literal[string] : identifier[user_map] [ identifier[user] ]. 
identifier[uid] , literal[string] : identifier[s] , }) keyword[return] identifier[render_json] ( identifier[request] , identifier[data] , identifier[template] = literal[string] , identifier[help_text] = identifier[user_stats_bulk] . identifier[__doc__] )
def user_stats_api(request, provider): """ Get statistics for selected Edookit users key: api key since: time as timestamp - get stats changed since """ if 'key' not in request.GET or provider not in settings.USER_STATS_API_KEY or request.GET['key'] != settings.USER_STATS_API_KEY[provider]: return HttpResponse('Unauthorized', status=401) # depends on [control=['if'], data=[]] since = None if 'since' in request.GET: since = datetime.datetime.fromtimestamp(int(request.GET['since'])) # depends on [control=['if'], data=[]] social_users = list(UserSocialAuth.objects.filter(provider=provider).select_related('user')) user_map = {u.user.id: u for u in social_users} stats = UserStat.objects.get_user_stats([u.user for u in social_users], lang=None, since=since, recalculate=False) data = {'users': []} for (user, s) in stats.items(): data['users'].append({'user_id': user_map[user].uid, 'concepts': s}) # depends on [control=['for'], data=[]] return render_json(request, data, template='concepts_json.html', help_text=user_stats_bulk.__doc__)
def _set_global_auto_bandwidth(self, v, load=False):
    """
    Setter method for global_auto_bandwidth, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/global_auto_bandwidth (container)
    If this variable is read-only (config: false) in the source YANG file, then _set_global_auto_bandwidth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_auto_bandwidth() directly.
    """
    # Values carrying their own YANG type converter are normalized first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the incoming value in the generated container type; this
        # validates it against the YANG schema metadata below.
        t = YANGDynClass(v, base=global_auto_bandwidth.global_auto_bandwidth, is_container='container', presence=True, yang_name="global-auto-bandwidth", rest_name="auto-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable Auto-bandwdith feature globally', u'hidden': u'full', u'alt-name': u'auto-bandwidth'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected type.
        raise ValueError({
            'error-string': """global_auto_bandwidth must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=global_auto_bandwidth.global_auto_bandwidth, is_container='container', presence=True, yang_name="global-auto-bandwidth", rest_name="auto-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable Auto-bandwdith feature globally', u'hidden': u'full', u'alt-name': u'auto-bandwidth'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
        })
    # Name-mangled instance attribute holding the validated container.
    self.__global_auto_bandwidth = t
    # Notify the instance, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def function[_set_global_auto_bandwidth, parameter[self, v, load]]: constant[ Setter method for global_auto_bandwidth, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/global_auto_bandwidth (container) If this variable is read-only (config: false) in the source YANG file, then _set_global_auto_bandwidth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_auto_bandwidth() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18bcc8d60> name[self].__global_auto_bandwidth assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_global_auto_bandwidth] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[global_auto_bandwidth] . identifier[global_auto_bandwidth] , identifier[is_container] = literal[string] , identifier[presence] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__global_auto_bandwidth] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_global_auto_bandwidth(self, v, load=False): """ Setter method for global_auto_bandwidth, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/global_auto_bandwidth (container) If this variable is read-only (config: false) in the source YANG file, then _set_global_auto_bandwidth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_auto_bandwidth() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=global_auto_bandwidth.global_auto_bandwidth, is_container='container', presence=True, yang_name='global-auto-bandwidth', rest_name='auto-bandwidth', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable Auto-bandwdith feature globally', u'hidden': u'full', u'alt-name': u'auto-bandwidth'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'global_auto_bandwidth must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=global_auto_bandwidth.global_auto_bandwidth, is_container=\'container\', presence=True, yang_name="global-auto-bandwidth", rest_name="auto-bandwidth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Enable Auto-bandwdith feature globally\', u\'hidden\': u\'full\', u\'alt-name\': u\'auto-bandwidth\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__global_auto_bandwidth = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def map_metabolites_to_structures(metabolites, compartments):
    """
    Map metabolites from the identifier namespace to structural space.

    Metabolites who lack structural annotation (InChI or InChIKey) are
    ignored.

    Parameters
    ----------
    metabolites : iterable
        The cobra.Metabolites to map.
    compartments : iterable
        The different compartments to consider. Structures are treated
        separately for each compartment.

    Returns
    -------
    dict
        A mapping from a cobra.Metabolite to its compartment specific
        structure index.

    """
    # TODO (Moritz Beber): Consider SMILES?
    structure_keys = ("inchikey", "inchi")
    met2mol = {}
    # One list of structure groups per compartment.
    compartment_groups = {c: [] for c in compartments}
    for met in metabolites:
        structures = set()
        for key in structure_keys:
            value = met.annotation.get(key)
            if value is not None:
                structures.add(value)
        # Metabolites without any structural annotation are left unmapped.
        if not structures:
            continue
        groups = compartment_groups[met.compartment]
        for index, group in enumerate(groups):
            if structures & group:
                # Shares a structure with this group: merge and map to the
                # group index (hashable and cheap to compare later).
                group.update(structures)
                met2mol[met] = "{}-{}".format(met.compartment, index)
                break
        if met not in met2mol:
            # No overlap found: start a new group; its index is the
            # current length of the group list.
            met2mol[met] = "{}-{}".format(met.compartment, len(groups))
            groups.append(structures)
    return met2mol
def function[map_metabolites_to_structures, parameter[metabolites, compartments]]: constant[ Map metabolites from the identifier namespace to structural space. Metabolites who lack structural annotation (InChI or InChIKey) are ignored. Parameters ---------- metabolites : iterable The cobra.Metabolites to map. compartments : iterable The different compartments to consider. Structures are treated separately for each compartment. Returns ------- dict A mapping from a cobra.Metabolite to its compartment specific structure index. ] variable[unique_identifiers] assign[=] list[[<ast.Constant object at 0x7da1b06673a0>, <ast.Constant object at 0x7da1b0667b50>]] variable[met2mol] assign[=] dictionary[[], []] variable[molecules] assign[=] <ast.DictComp object at 0x7da1b0666f50> for taget[name[met]] in starred[name[metabolites]] begin[:] variable[ann] assign[=] list[[]] for taget[name[key]] in starred[name[unique_identifiers]] begin[:] variable[mol] assign[=] call[name[met].annotation.get, parameter[name[key]]] if compare[name[mol] is_not constant[None]] begin[:] call[name[ann].append, parameter[name[mol]]] if compare[call[name[len], parameter[name[ann]]] equal[==] constant[0]] begin[:] continue variable[ann] assign[=] call[name[set], parameter[name[ann]]] variable[mols] assign[=] call[name[molecules]][name[met].compartment] for taget[tuple[[<ast.Name object at 0x7da1b06d3700>, <ast.Name object at 0x7da1b06d1060>]]] in starred[call[name[enumerate], parameter[name[mols]]]] begin[:] if compare[call[name[len], parameter[binary_operation[name[ann] <ast.BitAnd object at 0x7da2590d6b60> name[mol_group]]]] greater[>] constant[0]] begin[:] call[name[mol_group].update, parameter[name[ann]]] call[name[met2mol]][name[met]] assign[=] call[constant[{}-{}].format, parameter[name[met].compartment, name[i]]] break if compare[name[met] <ast.NotIn object at 0x7da2590d7190> name[met2mol]] begin[:] call[name[met2mol]][name[met]] assign[=] call[constant[{}-{}].format, 
parameter[name[met].compartment, call[name[len], parameter[name[mols]]]]] call[name[mols].append, parameter[name[ann]]] return[name[met2mol]]
keyword[def] identifier[map_metabolites_to_structures] ( identifier[metabolites] , identifier[compartments] ): literal[string] identifier[unique_identifiers] =[ literal[string] , literal[string] ] identifier[met2mol] ={} identifier[molecules] ={ identifier[c] :[] keyword[for] identifier[c] keyword[in] identifier[compartments] } keyword[for] identifier[met] keyword[in] identifier[metabolites] : identifier[ann] =[] keyword[for] identifier[key] keyword[in] identifier[unique_identifiers] : identifier[mol] = identifier[met] . identifier[annotation] . identifier[get] ( identifier[key] ) keyword[if] identifier[mol] keyword[is] keyword[not] keyword[None] : identifier[ann] . identifier[append] ( identifier[mol] ) keyword[if] identifier[len] ( identifier[ann] )== literal[int] : keyword[continue] identifier[ann] = identifier[set] ( identifier[ann] ) identifier[mols] = identifier[molecules] [ identifier[met] . identifier[compartment] ] keyword[for] identifier[i] , identifier[mol_group] keyword[in] identifier[enumerate] ( identifier[mols] ): keyword[if] identifier[len] ( identifier[ann] & identifier[mol_group] )> literal[int] : identifier[mol_group] . identifier[update] ( identifier[ann] ) identifier[met2mol] [ identifier[met] ]= literal[string] . identifier[format] ( identifier[met] . identifier[compartment] , identifier[i] ) keyword[break] keyword[if] identifier[met] keyword[not] keyword[in] identifier[met2mol] : identifier[met2mol] [ identifier[met] ]= literal[string] . identifier[format] ( identifier[met] . identifier[compartment] , identifier[len] ( identifier[mols] )) identifier[mols] . identifier[append] ( identifier[ann] ) keyword[return] identifier[met2mol]
def map_metabolites_to_structures(metabolites, compartments): """ Map metabolites from the identifier namespace to structural space. Metabolites who lack structural annotation (InChI or InChIKey) are ignored. Parameters ---------- metabolites : iterable The cobra.Metabolites to map. compartments : iterable The different compartments to consider. Structures are treated separately for each compartment. Returns ------- dict A mapping from a cobra.Metabolite to its compartment specific structure index. """ # TODO (Moritz Beber): Consider SMILES? unique_identifiers = ['inchikey', 'inchi'] met2mol = {} molecules = {c: [] for c in compartments} for met in metabolites: ann = [] for key in unique_identifiers: mol = met.annotation.get(key) if mol is not None: ann.append(mol) # depends on [control=['if'], data=['mol']] # depends on [control=['for'], data=['key']] # Ignore metabolites without the required information. if len(ann) == 0: continue # depends on [control=['if'], data=[]] ann = set(ann) # Compare with other structures in the same compartment. mols = molecules[met.compartment] for (i, mol_group) in enumerate(mols): if len(ann & mol_group) > 0: mol_group.update(ann) # We map to the index of the group because it is hashable and # cheaper to compare later. met2mol[met] = '{}-{}'.format(met.compartment, i) break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if met not in met2mol: # The length of the list corresponds to the 0-index after appending. met2mol[met] = '{}-{}'.format(met.compartment, len(mols)) mols.append(ann) # depends on [control=['if'], data=['met', 'met2mol']] # depends on [control=['for'], data=['met']] return met2mol
def competitions_data_download_file(self, id, file_name, **kwargs):  # noqa: E501
    """Download competition data file  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.competitions_data_download_file(id, file_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: Competition name (required)
    :param str file_name: Competition name (required)
    :return: Result
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the
    # *_with_http_info variant with identical arguments; it returns a
    # request thread when async_req is set and the payload otherwise, so a
    # single delegation covers both cases.
    return self.competitions_data_download_file_with_http_info(id, file_name, **kwargs)  # noqa: E501
def function[competitions_data_download_file, parameter[self, id, file_name]]: constant[Download competition data file # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.competitions_data_download_file(id, file_name, async_req=True) >>> result = thread.get() :param async_req bool :param str id: Competition name (required) :param str file_name: Competition name (required) :return: Result If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].competitions_data_download_file_with_http_info, parameter[name[id], name[file_name]]]]
keyword[def] identifier[competitions_data_download_file] ( identifier[self] , identifier[id] , identifier[file_name] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[competitions_data_download_file_with_http_info] ( identifier[id] , identifier[file_name] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[competitions_data_download_file_with_http_info] ( identifier[id] , identifier[file_name] ,** identifier[kwargs] ) keyword[return] identifier[data]
def competitions_data_download_file(self, id, file_name, **kwargs): # noqa: E501 'Download competition data file # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.competitions_data_download_file(id, file_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str id: Competition name (required)\n :param str file_name: Competition name (required)\n :return: Result\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.competitions_data_download_file_with_http_info(id, file_name, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.competitions_data_download_file_with_http_info(id, file_name, **kwargs) # noqa: E501 return data
def analyze(self, scratch, **kwargs):
    """Run and return the results from the BroadcastReceive plugin.

    Returns a dict ``{'broadcast': results}`` where ``results`` maps issue
    categories ('never broadcast', 'never received', 'dynamic broadcast',
    'parallel broadcasts', 'multiple receivers with delay', 'success') to
    sets of event names (sprite names for 'dynamic broadcast').
    """
    all_scripts = list(self.iter_scripts(scratch))
    results = defaultdict(set)
    broadcast = dict((x, self.get_broadcast_events(x))  # Events by script
                     for x in all_scripts)
    correct = self.get_receive(all_scripts)
    results['never broadcast'] = set(correct.keys())
    for script, events in broadcast.items():
        # Iterate over a snapshot: dynamic broadcasts are deleted from
        # `events` inside the loop, and deleting while iterating the live
        # keys view raises RuntimeError on Python 3.
        for event in list(events):
            if event is True:  # Remove dynamic broadcasts
                results['dynamic broadcast'].add(script.morph.name)
                del events[event]
            elif event in correct:
                results['never broadcast'].discard(event)
            else:
                results['never received'].add(event)
    # Remove events from correct dict that were never broadcast.
    # Snapshot the keys first -- entries are deleted during iteration.
    for event in list(correct):
        if event in results['never broadcast']:
            del correct[event]
    # Find scripts that have more than one broadcast event on any possible
    # execution path through the program
    # TODO: Permit mutually exclusive broadcasts
    for events in broadcast.values():
        if len(events) > 1:
            # Deleting from `correct` here is safe: we iterate `events`.
            for event in events:
                if event in correct:
                    results['parallel broadcasts'].add(event)
                    del correct[event]
    # Find events that have two (or more) receivers in which one of the
    # receivers has a "delay" block.  Snapshot items() -- entries may be
    # deleted from `correct` while iterating.
    for event, scripts in list(correct.items()):
        if len(scripts) > 1:
            for script in scripts:
                for _, _, block in self.iter_blocks(script.blocks):
                    if block.type.shape == 'stack':
                        results['multiple receivers with delay'].add(event)
                        if event in correct:
                            del correct[event]
    results['success'] = set(correct.keys())
    return {'broadcast': results}
def function[analyze, parameter[self, scratch]]: constant[Run and return the results from the BroadcastReceive plugin.] variable[all_scripts] assign[=] call[name[list], parameter[call[name[self].iter_scripts, parameter[name[scratch]]]]] variable[results] assign[=] call[name[defaultdict], parameter[name[set]]] variable[broadcast] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18f58fc40>]] variable[correct] assign[=] call[name[self].get_receive, parameter[name[all_scripts]]] call[name[results]][constant[never broadcast]] assign[=] call[name[set], parameter[call[name[correct].keys, parameter[]]]] for taget[tuple[[<ast.Name object at 0x7da18f58f490>, <ast.Name object at 0x7da18f58db10>]]] in starred[call[name[broadcast].items, parameter[]]] begin[:] for taget[name[event]] in starred[call[name[events].keys, parameter[]]] begin[:] if compare[name[event] is constant[True]] begin[:] call[call[name[results]][constant[dynamic broadcast]].add, parameter[name[script].morph.name]] <ast.Delete object at 0x7da18f58c220> for taget[name[event]] in starred[call[name[correct].keys, parameter[]]] begin[:] if compare[name[event] in call[name[results]][constant[never broadcast]]] begin[:] <ast.Delete object at 0x7da18f58e4a0> for taget[name[events]] in starred[call[name[broadcast].values, parameter[]]] begin[:] if compare[call[name[len], parameter[name[events]]] greater[>] constant[1]] begin[:] for taget[name[event]] in starred[name[events]] begin[:] if compare[name[event] in name[correct]] begin[:] call[call[name[results]][constant[parallel broadcasts]].add, parameter[name[event]]] <ast.Delete object at 0x7da18f58f6d0> for taget[tuple[[<ast.Name object at 0x7da18f58e3e0>, <ast.Name object at 0x7da18f58c610>]]] in starred[call[name[correct].items, parameter[]]] begin[:] if compare[call[name[len], parameter[name[scripts]]] greater[>] constant[1]] begin[:] for taget[name[script]] in starred[name[scripts]] begin[:] for taget[tuple[[<ast.Name object at 
0x7da1b0ff0700>, <ast.Name object at 0x7da1b0ff18d0>, <ast.Name object at 0x7da1b0ff3400>]]] in starred[call[name[self].iter_blocks, parameter[name[script].blocks]]] begin[:] if compare[name[block].type.shape equal[==] constant[stack]] begin[:] call[call[name[results]][constant[multiple receivers with delay]].add, parameter[name[event]]] if compare[name[event] in name[correct]] begin[:] <ast.Delete object at 0x7da1b0ff2770> call[name[results]][constant[success]] assign[=] call[name[set], parameter[call[name[correct].keys, parameter[]]]] return[dictionary[[<ast.Constant object at 0x7da20c76ed70>], [<ast.Name object at 0x7da20c76e380>]]]
keyword[def] identifier[analyze] ( identifier[self] , identifier[scratch] ,** identifier[kwargs] ): literal[string] identifier[all_scripts] = identifier[list] ( identifier[self] . identifier[iter_scripts] ( identifier[scratch] )) identifier[results] = identifier[defaultdict] ( identifier[set] ) identifier[broadcast] = identifier[dict] (( identifier[x] , identifier[self] . identifier[get_broadcast_events] ( identifier[x] )) keyword[for] identifier[x] keyword[in] identifier[all_scripts] ) identifier[correct] = identifier[self] . identifier[get_receive] ( identifier[all_scripts] ) identifier[results] [ literal[string] ]= identifier[set] ( identifier[correct] . identifier[keys] ()) keyword[for] identifier[script] , identifier[events] keyword[in] identifier[broadcast] . identifier[items] (): keyword[for] identifier[event] keyword[in] identifier[events] . identifier[keys] (): keyword[if] identifier[event] keyword[is] keyword[True] : identifier[results] [ literal[string] ]. identifier[add] ( identifier[script] . identifier[morph] . identifier[name] ) keyword[del] identifier[events] [ identifier[event] ] keyword[elif] identifier[event] keyword[in] identifier[correct] : identifier[results] [ literal[string] ]. identifier[discard] ( identifier[event] ) keyword[else] : identifier[results] [ literal[string] ]. identifier[add] ( identifier[event] ) keyword[for] identifier[event] keyword[in] identifier[correct] . identifier[keys] (): keyword[if] identifier[event] keyword[in] identifier[results] [ literal[string] ]: keyword[del] identifier[correct] [ identifier[event] ] keyword[for] identifier[events] keyword[in] identifier[broadcast] . identifier[values] (): keyword[if] identifier[len] ( identifier[events] )> literal[int] : keyword[for] identifier[event] keyword[in] identifier[events] : keyword[if] identifier[event] keyword[in] identifier[correct] : identifier[results] [ literal[string] ]. 
identifier[add] ( identifier[event] ) keyword[del] identifier[correct] [ identifier[event] ] keyword[for] identifier[event] , identifier[scripts] keyword[in] identifier[correct] . identifier[items] (): keyword[if] identifier[len] ( identifier[scripts] )> literal[int] : keyword[for] identifier[script] keyword[in] identifier[scripts] : keyword[for] identifier[_] , identifier[_] , identifier[block] keyword[in] identifier[self] . identifier[iter_blocks] ( identifier[script] . identifier[blocks] ): keyword[if] identifier[block] . identifier[type] . identifier[shape] == literal[string] : identifier[results] [ literal[string] ]. identifier[add] ( identifier[event] ) keyword[if] identifier[event] keyword[in] identifier[correct] : keyword[del] identifier[correct] [ identifier[event] ] identifier[results] [ literal[string] ]= identifier[set] ( identifier[correct] . identifier[keys] ()) keyword[return] { literal[string] : identifier[results] }
def analyze(self, scratch, **kwargs): """Run and return the results from the BroadcastReceive plugin.""" all_scripts = list(self.iter_scripts(scratch)) results = defaultdict(set) # Events by script broadcast = dict(((x, self.get_broadcast_events(x)) for x in all_scripts)) correct = self.get_receive(all_scripts) results['never broadcast'] = set(correct.keys()) for (script, events) in broadcast.items(): for event in events.keys(): if event is True: # Remove dynamic broadcasts results['dynamic broadcast'].add(script.morph.name) del events[event] # depends on [control=['if'], data=['event']] elif event in correct: results['never broadcast'].discard(event) # depends on [control=['if'], data=['event']] else: results['never received'].add(event) # depends on [control=['for'], data=['event']] # depends on [control=['for'], data=[]] # remove events from correct dict that were never broadcast for event in correct.keys(): if event in results['never broadcast']: del correct[event] # depends on [control=['if'], data=['event']] # depends on [control=['for'], data=['event']] # Find scripts that have more than one broadcast event on any possible # execution path through the program # TODO: Permit mutually exclusive broadcasts for events in broadcast.values(): if len(events) > 1: for event in events: if event in correct: results['parallel broadcasts'].add(event) del correct[event] # depends on [control=['if'], data=['event', 'correct']] # depends on [control=['for'], data=['event']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['events']] # Find events that have two (or more) receivers in which one of the # receivers has a "delay" block for (event, scripts) in correct.items(): if len(scripts) > 1: for script in scripts: for (_, _, block) in self.iter_blocks(script.blocks): if block.type.shape == 'stack': results['multiple receivers with delay'].add(event) if event in correct: del correct[event] # depends on [control=['if'], data=['event', 'correct']] # 
depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['script']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] results['success'] = set(correct.keys()) return {'broadcast': results}
def new(sequence_count, start_time, process_id, box_id): """Make new GlobalID :param sequence_count: sequence count :type sequence_count: :class:`int` :param start_time: start date time of server (must be after 2005-01-01) :type start_time: :class:`str`, :class:`datetime` :param process_id: process id :type process_id: :class:`int` :param box_id: box id :type box_id: :class:`int` :return: Global ID integer :rtype: :class:`int` """ if not isinstance(start_time, datetime): start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S') start_time_seconds = int((start_time - datetime(2005, 1, 1)).total_seconds()) return (box_id << 54) | (process_id << 50) | (start_time_seconds << 20) | sequence_count
def function[new, parameter[sequence_count, start_time, process_id, box_id]]: constant[Make new GlobalID :param sequence_count: sequence count :type sequence_count: :class:`int` :param start_time: start date time of server (must be after 2005-01-01) :type start_time: :class:`str`, :class:`datetime` :param process_id: process id :type process_id: :class:`int` :param box_id: box id :type box_id: :class:`int` :return: Global ID integer :rtype: :class:`int` ] if <ast.UnaryOp object at 0x7da1b23142b0> begin[:] variable[start_time] assign[=] call[name[datetime].strptime, parameter[name[start_time], constant[%Y-%m-%d %H:%M:%S]]] variable[start_time_seconds] assign[=] call[name[int], parameter[call[binary_operation[name[start_time] - call[name[datetime], parameter[constant[2005], constant[1], constant[1]]]].total_seconds, parameter[]]]] return[binary_operation[binary_operation[binary_operation[binary_operation[name[box_id] <ast.LShift object at 0x7da2590d69e0> constant[54]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[process_id] <ast.LShift object at 0x7da2590d69e0> constant[50]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[name[start_time_seconds] <ast.LShift object at 0x7da2590d69e0> constant[20]]] <ast.BitOr object at 0x7da2590d6aa0> name[sequence_count]]]
keyword[def] identifier[new] ( identifier[sequence_count] , identifier[start_time] , identifier[process_id] , identifier[box_id] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[start_time] , identifier[datetime] ): identifier[start_time] = identifier[datetime] . identifier[strptime] ( identifier[start_time] , literal[string] ) identifier[start_time_seconds] = identifier[int] (( identifier[start_time] - identifier[datetime] ( literal[int] , literal[int] , literal[int] )). identifier[total_seconds] ()) keyword[return] ( identifier[box_id] << literal[int] )|( identifier[process_id] << literal[int] )|( identifier[start_time_seconds] << literal[int] )| identifier[sequence_count]
def new(sequence_count, start_time, process_id, box_id): """Make new GlobalID :param sequence_count: sequence count :type sequence_count: :class:`int` :param start_time: start date time of server (must be after 2005-01-01) :type start_time: :class:`str`, :class:`datetime` :param process_id: process id :type process_id: :class:`int` :param box_id: box id :type box_id: :class:`int` :return: Global ID integer :rtype: :class:`int` """ if not isinstance(start_time, datetime): start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S') # depends on [control=['if'], data=[]] start_time_seconds = int((start_time - datetime(2005, 1, 1)).total_seconds()) return box_id << 54 | process_id << 50 | start_time_seconds << 20 | sequence_count
def exists(self): """ Checks if item already exists in database """ self_object = self.query.filter_by(id=self.id).first() if self_object is None: return False return True
def function[exists, parameter[self]]: constant[ Checks if item already exists in database ] variable[self_object] assign[=] call[call[name[self].query.filter_by, parameter[]].first, parameter[]] if compare[name[self_object] is constant[None]] begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[exists] ( identifier[self] ): literal[string] identifier[self_object] = identifier[self] . identifier[query] . identifier[filter_by] ( identifier[id] = identifier[self] . identifier[id] ). identifier[first] () keyword[if] identifier[self_object] keyword[is] keyword[None] : keyword[return] keyword[False] keyword[return] keyword[True]
def exists(self): """ Checks if item already exists in database """ self_object = self.query.filter_by(id=self.id).first() if self_object is None: return False # depends on [control=['if'], data=[]] return True
def switch_tutorial(self, action): """ Tutorial forms """ if action == 'background': self.parentApp.change_form('TUTORIALBACKGROUND') elif action == 'terminology': self.parentApp.change_form('TUTORIALTERMINOLOGY') elif action == 'setup': self.parentApp.change_form('TUTORIALGETTINGSETUP') elif action == 'starting_tools': self.parentApp.change_form('TUTORIALSTARTINGCORES') elif action == 'adding_tools': self.parentApp.change_form('TUTORIALADDINGPLUGINS') elif action == 'adding_files': self.parentApp.change_form('TUTORIALADDINGFILES') elif action == 'basic_troubleshooting': self.parentApp.change_form('TUTORIALTROUBLESHOOTING') return
def function[switch_tutorial, parameter[self, action]]: constant[ Tutorial forms ] if compare[name[action] equal[==] constant[background]] begin[:] call[name[self].parentApp.change_form, parameter[constant[TUTORIALBACKGROUND]]] return[None]
keyword[def] identifier[switch_tutorial] ( identifier[self] , identifier[action] ): literal[string] keyword[if] identifier[action] == literal[string] : identifier[self] . identifier[parentApp] . identifier[change_form] ( literal[string] ) keyword[elif] identifier[action] == literal[string] : identifier[self] . identifier[parentApp] . identifier[change_form] ( literal[string] ) keyword[elif] identifier[action] == literal[string] : identifier[self] . identifier[parentApp] . identifier[change_form] ( literal[string] ) keyword[elif] identifier[action] == literal[string] : identifier[self] . identifier[parentApp] . identifier[change_form] ( literal[string] ) keyword[elif] identifier[action] == literal[string] : identifier[self] . identifier[parentApp] . identifier[change_form] ( literal[string] ) keyword[elif] identifier[action] == literal[string] : identifier[self] . identifier[parentApp] . identifier[change_form] ( literal[string] ) keyword[elif] identifier[action] == literal[string] : identifier[self] . identifier[parentApp] . identifier[change_form] ( literal[string] ) keyword[return]
def switch_tutorial(self, action): """ Tutorial forms """ if action == 'background': self.parentApp.change_form('TUTORIALBACKGROUND') # depends on [control=['if'], data=[]] elif action == 'terminology': self.parentApp.change_form('TUTORIALTERMINOLOGY') # depends on [control=['if'], data=[]] elif action == 'setup': self.parentApp.change_form('TUTORIALGETTINGSETUP') # depends on [control=['if'], data=[]] elif action == 'starting_tools': self.parentApp.change_form('TUTORIALSTARTINGCORES') # depends on [control=['if'], data=[]] elif action == 'adding_tools': self.parentApp.change_form('TUTORIALADDINGPLUGINS') # depends on [control=['if'], data=[]] elif action == 'adding_files': self.parentApp.change_form('TUTORIALADDINGFILES') # depends on [control=['if'], data=[]] elif action == 'basic_troubleshooting': self.parentApp.change_form('TUTORIALTROUBLESHOOTING') # depends on [control=['if'], data=[]] return
def snapshot_statistics(self): """ Take a snapshot of request/borrow/sampled count for reporting back to X-Ray back-end by ``TargetPoller`` and reset those counters. """ with self._lock: stats = { 'request_count': self.request_count, 'borrow_count': self.borrow_count, 'sampled_count': self.sampled_count, } self._reset_statistics() return stats
def function[snapshot_statistics, parameter[self]]: constant[ Take a snapshot of request/borrow/sampled count for reporting back to X-Ray back-end by ``TargetPoller`` and reset those counters. ] with name[self]._lock begin[:] variable[stats] assign[=] dictionary[[<ast.Constant object at 0x7da1b26aea10>, <ast.Constant object at 0x7da1b26adff0>, <ast.Constant object at 0x7da1b26ae140>], [<ast.Attribute object at 0x7da1b26ae470>, <ast.Attribute object at 0x7da1b26ac850>, <ast.Attribute object at 0x7da1b26adde0>]] call[name[self]._reset_statistics, parameter[]] return[name[stats]]
keyword[def] identifier[snapshot_statistics] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[_lock] : identifier[stats] ={ literal[string] : identifier[self] . identifier[request_count] , literal[string] : identifier[self] . identifier[borrow_count] , literal[string] : identifier[self] . identifier[sampled_count] , } identifier[self] . identifier[_reset_statistics] () keyword[return] identifier[stats]
def snapshot_statistics(self): """ Take a snapshot of request/borrow/sampled count for reporting back to X-Ray back-end by ``TargetPoller`` and reset those counters. """ with self._lock: stats = {'request_count': self.request_count, 'borrow_count': self.borrow_count, 'sampled_count': self.sampled_count} self._reset_statistics() return stats # depends on [control=['with'], data=[]]
def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): """ Parameters: - db_name - tbl_name - part_vals - max_parts """ self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) return self.recv_get_partitions_ps()
def function[get_partitions_ps, parameter[self, db_name, tbl_name, part_vals, max_parts]]: constant[ Parameters: - db_name - tbl_name - part_vals - max_parts ] call[name[self].send_get_partitions_ps, parameter[name[db_name], name[tbl_name], name[part_vals], name[max_parts]]] return[call[name[self].recv_get_partitions_ps, parameter[]]]
keyword[def] identifier[get_partitions_ps] ( identifier[self] , identifier[db_name] , identifier[tbl_name] , identifier[part_vals] , identifier[max_parts] ): literal[string] identifier[self] . identifier[send_get_partitions_ps] ( identifier[db_name] , identifier[tbl_name] , identifier[part_vals] , identifier[max_parts] ) keyword[return] identifier[self] . identifier[recv_get_partitions_ps] ()
def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts): """ Parameters: - db_name - tbl_name - part_vals - max_parts """ self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts) return self.recv_get_partitions_ps()
def find_jump_targets(self, debug): """ Detect all offsets in a byte code which are jump targets where we might insert a COME_FROM instruction. Return the list of offsets. Return the list of offsets. An instruction can be jumped to in from multiple instructions. """ code = self.code n = len(code) self.structs = [{'type': 'root', 'start': 0, 'end': n-1}] # All loop entry points self.loops = [] # Map fixed jumps to their real destination self.fixed_jumps = {} self.except_targets = {} self.ignore_if = set() self.build_statement_indices() self.else_start = {} # Containers filled by detect_control_flow() self.not_continue = set() self.return_end_ifs = set() self.setup_loop_targets = {} # target given setup_loop offset self.setup_loops = {} # setup_loop offset given target targets = {} for i, inst in enumerate(self.insts): offset = inst.offset op = inst.opcode # Determine structures and fix jumps in Python versions # since 2.3 self.detect_control_flow(offset, targets, i) if inst.has_arg: label = self.fixed_jumps.get(offset) oparg = inst.arg if (self.version >= 3.6 and self.code[offset] == self.opc.EXTENDED_ARG): j = xdis.next_offset(op, self.opc, offset) next_offset = xdis.next_offset(op, self.opc, j) else: next_offset = xdis.next_offset(op, self.opc, offset) if label is None: if op in self.opc.hasjrel and op != self.opc.FOR_ITER: label = next_offset + oparg elif op in self.opc.hasjabs: if op in self.jump_if_pop: if oparg > offset: label = oparg if label is not None and label != -1: targets[label] = targets.get(label, []) + [offset] elif op == self.opc.END_FINALLY and offset in self.fixed_jumps: label = self.fixed_jumps[offset] targets[label] = targets.get(label, []) + [offset] pass pass # for loop # DEBUG: if debug in ('both', 'after'): import pprint as pp pp.pprint(self.structs) return targets
def function[find_jump_targets, parameter[self, debug]]: constant[ Detect all offsets in a byte code which are jump targets where we might insert a COME_FROM instruction. Return the list of offsets. Return the list of offsets. An instruction can be jumped to in from multiple instructions. ] variable[code] assign[=] name[self].code variable[n] assign[=] call[name[len], parameter[name[code]]] name[self].structs assign[=] list[[<ast.Dict object at 0x7da204566c20>]] name[self].loops assign[=] list[[]] name[self].fixed_jumps assign[=] dictionary[[], []] name[self].except_targets assign[=] dictionary[[], []] name[self].ignore_if assign[=] call[name[set], parameter[]] call[name[self].build_statement_indices, parameter[]] name[self].else_start assign[=] dictionary[[], []] name[self].not_continue assign[=] call[name[set], parameter[]] name[self].return_end_ifs assign[=] call[name[set], parameter[]] name[self].setup_loop_targets assign[=] dictionary[[], []] name[self].setup_loops assign[=] dictionary[[], []] variable[targets] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da2045679d0>, <ast.Name object at 0x7da204565990>]]] in starred[call[name[enumerate], parameter[name[self].insts]]] begin[:] variable[offset] assign[=] name[inst].offset variable[op] assign[=] name[inst].opcode call[name[self].detect_control_flow, parameter[name[offset], name[targets], name[i]]] if name[inst].has_arg begin[:] variable[label] assign[=] call[name[self].fixed_jumps.get, parameter[name[offset]]] variable[oparg] assign[=] name[inst].arg if <ast.BoolOp object at 0x7da204567280> begin[:] variable[j] assign[=] call[name[xdis].next_offset, parameter[name[op], name[self].opc, name[offset]]] variable[next_offset] assign[=] call[name[xdis].next_offset, parameter[name[op], name[self].opc, name[j]]] if compare[name[label] is constant[None]] begin[:] if <ast.BoolOp object at 0x7da204564b80> begin[:] variable[label] assign[=] binary_operation[name[next_offset] + name[oparg]] if 
<ast.BoolOp object at 0x7da2045653c0> begin[:] call[name[targets]][name[label]] assign[=] binary_operation[call[name[targets].get, parameter[name[label], list[[]]]] + list[[<ast.Name object at 0x7da2045647c0>]]] pass if compare[name[debug] in tuple[[<ast.Constant object at 0x7da20c6e76a0>, <ast.Constant object at 0x7da20c6e6da0>]]] begin[:] import module[pprint] as alias[pp] call[name[pp].pprint, parameter[name[self].structs]] return[name[targets]]
keyword[def] identifier[find_jump_targets] ( identifier[self] , identifier[debug] ): literal[string] identifier[code] = identifier[self] . identifier[code] identifier[n] = identifier[len] ( identifier[code] ) identifier[self] . identifier[structs] =[{ literal[string] : literal[string] , literal[string] : literal[int] , literal[string] : identifier[n] - literal[int] }] identifier[self] . identifier[loops] =[] identifier[self] . identifier[fixed_jumps] ={} identifier[self] . identifier[except_targets] ={} identifier[self] . identifier[ignore_if] = identifier[set] () identifier[self] . identifier[build_statement_indices] () identifier[self] . identifier[else_start] ={} identifier[self] . identifier[not_continue] = identifier[set] () identifier[self] . identifier[return_end_ifs] = identifier[set] () identifier[self] . identifier[setup_loop_targets] ={} identifier[self] . identifier[setup_loops] ={} identifier[targets] ={} keyword[for] identifier[i] , identifier[inst] keyword[in] identifier[enumerate] ( identifier[self] . identifier[insts] ): identifier[offset] = identifier[inst] . identifier[offset] identifier[op] = identifier[inst] . identifier[opcode] identifier[self] . identifier[detect_control_flow] ( identifier[offset] , identifier[targets] , identifier[i] ) keyword[if] identifier[inst] . identifier[has_arg] : identifier[label] = identifier[self] . identifier[fixed_jumps] . identifier[get] ( identifier[offset] ) identifier[oparg] = identifier[inst] . identifier[arg] keyword[if] ( identifier[self] . identifier[version] >= literal[int] keyword[and] identifier[self] . identifier[code] [ identifier[offset] ]== identifier[self] . identifier[opc] . identifier[EXTENDED_ARG] ): identifier[j] = identifier[xdis] . identifier[next_offset] ( identifier[op] , identifier[self] . identifier[opc] , identifier[offset] ) identifier[next_offset] = identifier[xdis] . identifier[next_offset] ( identifier[op] , identifier[self] . 
identifier[opc] , identifier[j] ) keyword[else] : identifier[next_offset] = identifier[xdis] . identifier[next_offset] ( identifier[op] , identifier[self] . identifier[opc] , identifier[offset] ) keyword[if] identifier[label] keyword[is] keyword[None] : keyword[if] identifier[op] keyword[in] identifier[self] . identifier[opc] . identifier[hasjrel] keyword[and] identifier[op] != identifier[self] . identifier[opc] . identifier[FOR_ITER] : identifier[label] = identifier[next_offset] + identifier[oparg] keyword[elif] identifier[op] keyword[in] identifier[self] . identifier[opc] . identifier[hasjabs] : keyword[if] identifier[op] keyword[in] identifier[self] . identifier[jump_if_pop] : keyword[if] identifier[oparg] > identifier[offset] : identifier[label] = identifier[oparg] keyword[if] identifier[label] keyword[is] keyword[not] keyword[None] keyword[and] identifier[label] !=- literal[int] : identifier[targets] [ identifier[label] ]= identifier[targets] . identifier[get] ( identifier[label] ,[])+[ identifier[offset] ] keyword[elif] identifier[op] == identifier[self] . identifier[opc] . identifier[END_FINALLY] keyword[and] identifier[offset] keyword[in] identifier[self] . identifier[fixed_jumps] : identifier[label] = identifier[self] . identifier[fixed_jumps] [ identifier[offset] ] identifier[targets] [ identifier[label] ]= identifier[targets] . identifier[get] ( identifier[label] ,[])+[ identifier[offset] ] keyword[pass] keyword[pass] keyword[if] identifier[debug] keyword[in] ( literal[string] , literal[string] ): keyword[import] identifier[pprint] keyword[as] identifier[pp] identifier[pp] . identifier[pprint] ( identifier[self] . identifier[structs] ) keyword[return] identifier[targets]
def find_jump_targets(self, debug): """ Detect all offsets in a byte code which are jump targets where we might insert a COME_FROM instruction. Return the list of offsets. Return the list of offsets. An instruction can be jumped to in from multiple instructions. """ code = self.code n = len(code) self.structs = [{'type': 'root', 'start': 0, 'end': n - 1}] # All loop entry points self.loops = [] # Map fixed jumps to their real destination self.fixed_jumps = {} self.except_targets = {} self.ignore_if = set() self.build_statement_indices() self.else_start = {} # Containers filled by detect_control_flow() self.not_continue = set() self.return_end_ifs = set() self.setup_loop_targets = {} # target given setup_loop offset self.setup_loops = {} # setup_loop offset given target targets = {} for (i, inst) in enumerate(self.insts): offset = inst.offset op = inst.opcode # Determine structures and fix jumps in Python versions # since 2.3 self.detect_control_flow(offset, targets, i) if inst.has_arg: label = self.fixed_jumps.get(offset) oparg = inst.arg if self.version >= 3.6 and self.code[offset] == self.opc.EXTENDED_ARG: j = xdis.next_offset(op, self.opc, offset) next_offset = xdis.next_offset(op, self.opc, j) # depends on [control=['if'], data=[]] else: next_offset = xdis.next_offset(op, self.opc, offset) if label is None: if op in self.opc.hasjrel and op != self.opc.FOR_ITER: label = next_offset + oparg # depends on [control=['if'], data=[]] elif op in self.opc.hasjabs: if op in self.jump_if_pop: if oparg > offset: label = oparg # depends on [control=['if'], data=['oparg']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['op']] # depends on [control=['if'], data=['label']] if label is not None and label != -1: targets[label] = targets.get(label, []) + [offset] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif op == self.opc.END_FINALLY and offset in self.fixed_jumps: label = self.fixed_jumps[offset] targets[label] = 
targets.get(label, []) + [offset] pass # depends on [control=['if'], data=[]] pass # for loop # depends on [control=['for'], data=[]] # DEBUG: if debug in ('both', 'after'): import pprint as pp pp.pprint(self.structs) # depends on [control=['if'], data=[]] return targets
def get_file_copy_job(cls, id, api=None): """ Retrieve file copy async job :param id: Async job identifier :param api: Api instance :return: """ id = Transform.to_async_job(id) api = api if api else cls._API async_job = api.get( url=cls._URL['get_file_copy_job'].format(id=id) ).json() return AsyncJob(api=api, **async_job)
def function[get_file_copy_job, parameter[cls, id, api]]: constant[ Retrieve file copy async job :param id: Async job identifier :param api: Api instance :return: ] variable[id] assign[=] call[name[Transform].to_async_job, parameter[name[id]]] variable[api] assign[=] <ast.IfExp object at 0x7da18bc73370> variable[async_job] assign[=] call[call[name[api].get, parameter[]].json, parameter[]] return[call[name[AsyncJob], parameter[]]]
keyword[def] identifier[get_file_copy_job] ( identifier[cls] , identifier[id] , identifier[api] = keyword[None] ): literal[string] identifier[id] = identifier[Transform] . identifier[to_async_job] ( identifier[id] ) identifier[api] = identifier[api] keyword[if] identifier[api] keyword[else] identifier[cls] . identifier[_API] identifier[async_job] = identifier[api] . identifier[get] ( identifier[url] = identifier[cls] . identifier[_URL] [ literal[string] ]. identifier[format] ( identifier[id] = identifier[id] ) ). identifier[json] () keyword[return] identifier[AsyncJob] ( identifier[api] = identifier[api] ,** identifier[async_job] )
def get_file_copy_job(cls, id, api=None): """ Retrieve file copy async job :param id: Async job identifier :param api: Api instance :return: """ id = Transform.to_async_job(id) api = api if api else cls._API async_job = api.get(url=cls._URL['get_file_copy_job'].format(id=id)).json() return AsyncJob(api=api, **async_job)
def has_style(node): """Tells us if node element has defined styling. :Args: - node (:class:`ooxml.doc.Element`): Element :Returns: True or False """ elements = ['b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind', 'superscript', 'subscript', 'small_caps'] return any([True for elem in elements if elem in node.rpr])
def function[has_style, parameter[node]]: constant[Tells us if node element has defined styling. :Args: - node (:class:`ooxml.doc.Element`): Element :Returns: True or False ] variable[elements] assign[=] list[[<ast.Constant object at 0x7da20c6c4b20>, <ast.Constant object at 0x7da20c6c7400>, <ast.Constant object at 0x7da20c6c4280>, <ast.Constant object at 0x7da20c6c6320>, <ast.Constant object at 0x7da20c6c4250>, <ast.Constant object at 0x7da20c6c6590>, <ast.Constant object at 0x7da20c6c4df0>, <ast.Constant object at 0x7da20c6c4ee0>, <ast.Constant object at 0x7da20c6c7b20>, <ast.Constant object at 0x7da20c6c7820>, <ast.Constant object at 0x7da20c6c7af0>]] return[call[name[any], parameter[<ast.ListComp object at 0x7da20c6c5330>]]]
keyword[def] identifier[has_style] ( identifier[node] ): literal[string] identifier[elements] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[return] identifier[any] ([ keyword[True] keyword[for] identifier[elem] keyword[in] identifier[elements] keyword[if] identifier[elem] keyword[in] identifier[node] . identifier[rpr] ])
def has_style(node): """Tells us if node element has defined styling. :Args: - node (:class:`ooxml.doc.Element`): Element :Returns: True or False """ elements = ['b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind', 'superscript', 'subscript', 'small_caps'] return any([True for elem in elements if elem in node.rpr])
def get_stat(path, filename): ''' get stat ''' return os.stat(os.path.join(path, filename))
def function[get_stat, parameter[path, filename]]: constant[ get stat ] return[call[name[os].stat, parameter[call[name[os].path.join, parameter[name[path], name[filename]]]]]]
keyword[def] identifier[get_stat] ( identifier[path] , identifier[filename] ): literal[string] keyword[return] identifier[os] . identifier[stat] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[filename] ))
def get_stat(path, filename): """ get stat """ return os.stat(os.path.join(path, filename))
def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None ) -> np.ndarray: """Mask image after optionally casting its type. Parameters ---------- image Image to mask. Can include time as the last dimension. mask Mask to apply. Must have the same shape as the image data. data_type Type to cast image to. Returns ------- np.ndarray Masked image. Raises ------ ValueError Image data and masks have different shapes. """ image_data = image.get_data() if image_data.shape[:3] != mask.shape: raise ValueError("Image data and mask have different shapes.") if data_type is not None: cast_data = image_data.astype(data_type) else: cast_data = image_data return cast_data[mask]
def function[mask_image, parameter[image, mask, data_type]]: constant[Mask image after optionally casting its type. Parameters ---------- image Image to mask. Can include time as the last dimension. mask Mask to apply. Must have the same shape as the image data. data_type Type to cast image to. Returns ------- np.ndarray Masked image. Raises ------ ValueError Image data and masks have different shapes. ] variable[image_data] assign[=] call[name[image].get_data, parameter[]] if compare[call[name[image_data].shape][<ast.Slice object at 0x7da1b0746980>] not_equal[!=] name[mask].shape] begin[:] <ast.Raise object at 0x7da1b0746b30> if compare[name[data_type] is_not constant[None]] begin[:] variable[cast_data] assign[=] call[name[image_data].astype, parameter[name[data_type]]] return[call[name[cast_data]][name[mask]]]
keyword[def] identifier[mask_image] ( identifier[image] : identifier[SpatialImage] , identifier[mask] : identifier[np] . identifier[ndarray] , identifier[data_type] : identifier[type] = keyword[None] )-> identifier[np] . identifier[ndarray] : literal[string] identifier[image_data] = identifier[image] . identifier[get_data] () keyword[if] identifier[image_data] . identifier[shape] [: literal[int] ]!= identifier[mask] . identifier[shape] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[data_type] keyword[is] keyword[not] keyword[None] : identifier[cast_data] = identifier[image_data] . identifier[astype] ( identifier[data_type] ) keyword[else] : identifier[cast_data] = identifier[image_data] keyword[return] identifier[cast_data] [ identifier[mask] ]
def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type=None) -> np.ndarray: """Mask image after optionally casting its type. Parameters ---------- image Image to mask. Can include time as the last dimension. mask Mask to apply. Must have the same shape as the image data. data_type Type to cast image to. Returns ------- np.ndarray Masked image. Raises ------ ValueError Image data and masks have different shapes. """ image_data = image.get_data() if image_data.shape[:3] != mask.shape: raise ValueError('Image data and mask have different shapes.') # depends on [control=['if'], data=[]] if data_type is not None: cast_data = image_data.astype(data_type) # depends on [control=['if'], data=['data_type']] else: cast_data = image_data return cast_data[mask]
def trace(string): """ Implement the ``trace`` profile specified in :rfc:`4505`. """ check_prohibited_output( string, ( stringprep.in_table_c21, stringprep.in_table_c22, stringprep.in_table_c3, stringprep.in_table_c4, stringprep.in_table_c5, stringprep.in_table_c6, stringprep.in_table_c8, stringprep.in_table_c9, ) ) check_bidi(string) return string
def function[trace, parameter[string]]: constant[ Implement the ``trace`` profile specified in :rfc:`4505`. ] call[name[check_prohibited_output], parameter[name[string], tuple[[<ast.Attribute object at 0x7da18f7239a0>, <ast.Attribute object at 0x7da18f723100>, <ast.Attribute object at 0x7da18f722140>, <ast.Attribute object at 0x7da18f720850>, <ast.Attribute object at 0x7da18f7210c0>, <ast.Attribute object at 0x7da18f7200a0>, <ast.Attribute object at 0x7da18f723fa0>, <ast.Attribute object at 0x7da18f7212d0>]]]] call[name[check_bidi], parameter[name[string]]] return[name[string]]
keyword[def] identifier[trace] ( identifier[string] ): literal[string] identifier[check_prohibited_output] ( identifier[string] , ( identifier[stringprep] . identifier[in_table_c21] , identifier[stringprep] . identifier[in_table_c22] , identifier[stringprep] . identifier[in_table_c3] , identifier[stringprep] . identifier[in_table_c4] , identifier[stringprep] . identifier[in_table_c5] , identifier[stringprep] . identifier[in_table_c6] , identifier[stringprep] . identifier[in_table_c8] , identifier[stringprep] . identifier[in_table_c9] , ) ) identifier[check_bidi] ( identifier[string] ) keyword[return] identifier[string]
def trace(string): """ Implement the ``trace`` profile specified in :rfc:`4505`. """ check_prohibited_output(string, (stringprep.in_table_c21, stringprep.in_table_c22, stringprep.in_table_c3, stringprep.in_table_c4, stringprep.in_table_c5, stringprep.in_table_c6, stringprep.in_table_c8, stringprep.in_table_c9)) check_bidi(string) return string
def remove_bounding_box(self): """ Removes bounding box """ if hasattr(self, '_box_object'): actor = self.bounding_box_actor self.bounding_box_actor = None del self._box_object self.remove_actor(actor, reset_camera=False)
def function[remove_bounding_box, parameter[self]]: constant[ Removes bounding box ] if call[name[hasattr], parameter[name[self], constant[_box_object]]] begin[:] variable[actor] assign[=] name[self].bounding_box_actor name[self].bounding_box_actor assign[=] constant[None] <ast.Delete object at 0x7da18f00cf10> call[name[self].remove_actor, parameter[name[actor]]]
keyword[def] identifier[remove_bounding_box] ( identifier[self] ): literal[string] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[actor] = identifier[self] . identifier[bounding_box_actor] identifier[self] . identifier[bounding_box_actor] = keyword[None] keyword[del] identifier[self] . identifier[_box_object] identifier[self] . identifier[remove_actor] ( identifier[actor] , identifier[reset_camera] = keyword[False] )
def remove_bounding_box(self): """ Removes bounding box """ if hasattr(self, '_box_object'): actor = self.bounding_box_actor self.bounding_box_actor = None del self._box_object self.remove_actor(actor, reset_camera=False) # depends on [control=['if'], data=[]]
async def async_get_sensor_log(self, index: int) -> Optional[SensorLogResponse]: """ Get an entry from the Special sensor log. :param index: Index for the sensor log entry to be obtained. :return: Response containing the sensor log entry, or None if not found. """ response = await self._protocol.async_execute( GetSensorLogCommand(index)) if isinstance(response, SensorLogResponse): return response return None
<ast.AsyncFunctionDef object at 0x7da1b15b1ab0>
keyword[async] keyword[def] identifier[async_get_sensor_log] ( identifier[self] , identifier[index] : identifier[int] )-> identifier[Optional] [ identifier[SensorLogResponse] ]: literal[string] identifier[response] = keyword[await] identifier[self] . identifier[_protocol] . identifier[async_execute] ( identifier[GetSensorLogCommand] ( identifier[index] )) keyword[if] identifier[isinstance] ( identifier[response] , identifier[SensorLogResponse] ): keyword[return] identifier[response] keyword[return] keyword[None]
async def async_get_sensor_log(self, index: int) -> Optional[SensorLogResponse]: """ Get an entry from the Special sensor log. :param index: Index for the sensor log entry to be obtained. :return: Response containing the sensor log entry, or None if not found. """ response = await self._protocol.async_execute(GetSensorLogCommand(index)) if isinstance(response, SensorLogResponse): return response # depends on [control=['if'], data=[]] return None
def moveCamera(self, camstart, camstop, fraction): """ Takes as input two ``vtkCamera`` objects and returns a new ``vtkCamera`` that is at an intermediate position: fraction=0 -> camstart, fraction=1 -> camstop. Press ``shift-C`` key in interactive mode to dump a python snipplet of parameters for the current camera view. """ if isinstance(fraction, int): colors.printc("~lightning Warning in moveCamera(): fraction should not be an integer", c=1) if fraction > 1: colors.printc("~lightning Warning in moveCamera(): fraction is > 1", c=1) cam = vtk.vtkCamera() cam.DeepCopy(camstart) p1 = numpy.array(camstart.GetPosition()) f1 = numpy.array(camstart.GetFocalPoint()) v1 = numpy.array(camstart.GetViewUp()) c1 = numpy.array(camstart.GetClippingRange()) s1 = camstart.GetDistance() p2 = numpy.array(camstop.GetPosition()) f2 = numpy.array(camstop.GetFocalPoint()) v2 = numpy.array(camstop.GetViewUp()) c2 = numpy.array(camstop.GetClippingRange()) s2 = camstop.GetDistance() cam.SetPosition(p2 * fraction + p1 * (1 - fraction)) cam.SetFocalPoint(f2 * fraction + f1 * (1 - fraction)) cam.SetViewUp(v2 * fraction + v1 * (1 - fraction)) cam.SetDistance(s2 * fraction + s1 * (1 - fraction)) cam.SetClippingRange(c2 * fraction + c1 * (1 - fraction)) self.camera = cam save_int = self.interactive self.show(resetcam=0, interactive=0) self.interactive = save_int
def function[moveCamera, parameter[self, camstart, camstop, fraction]]: constant[ Takes as input two ``vtkCamera`` objects and returns a new ``vtkCamera`` that is at an intermediate position: fraction=0 -> camstart, fraction=1 -> camstop. Press ``shift-C`` key in interactive mode to dump a python snipplet of parameters for the current camera view. ] if call[name[isinstance], parameter[name[fraction], name[int]]] begin[:] call[name[colors].printc, parameter[constant[~lightning Warning in moveCamera(): fraction should not be an integer]]] if compare[name[fraction] greater[>] constant[1]] begin[:] call[name[colors].printc, parameter[constant[~lightning Warning in moveCamera(): fraction is > 1]]] variable[cam] assign[=] call[name[vtk].vtkCamera, parameter[]] call[name[cam].DeepCopy, parameter[name[camstart]]] variable[p1] assign[=] call[name[numpy].array, parameter[call[name[camstart].GetPosition, parameter[]]]] variable[f1] assign[=] call[name[numpy].array, parameter[call[name[camstart].GetFocalPoint, parameter[]]]] variable[v1] assign[=] call[name[numpy].array, parameter[call[name[camstart].GetViewUp, parameter[]]]] variable[c1] assign[=] call[name[numpy].array, parameter[call[name[camstart].GetClippingRange, parameter[]]]] variable[s1] assign[=] call[name[camstart].GetDistance, parameter[]] variable[p2] assign[=] call[name[numpy].array, parameter[call[name[camstop].GetPosition, parameter[]]]] variable[f2] assign[=] call[name[numpy].array, parameter[call[name[camstop].GetFocalPoint, parameter[]]]] variable[v2] assign[=] call[name[numpy].array, parameter[call[name[camstop].GetViewUp, parameter[]]]] variable[c2] assign[=] call[name[numpy].array, parameter[call[name[camstop].GetClippingRange, parameter[]]]] variable[s2] assign[=] call[name[camstop].GetDistance, parameter[]] call[name[cam].SetPosition, parameter[binary_operation[binary_operation[name[p2] * name[fraction]] + binary_operation[name[p1] * binary_operation[constant[1] - name[fraction]]]]]] 
call[name[cam].SetFocalPoint, parameter[binary_operation[binary_operation[name[f2] * name[fraction]] + binary_operation[name[f1] * binary_operation[constant[1] - name[fraction]]]]]] call[name[cam].SetViewUp, parameter[binary_operation[binary_operation[name[v2] * name[fraction]] + binary_operation[name[v1] * binary_operation[constant[1] - name[fraction]]]]]] call[name[cam].SetDistance, parameter[binary_operation[binary_operation[name[s2] * name[fraction]] + binary_operation[name[s1] * binary_operation[constant[1] - name[fraction]]]]]] call[name[cam].SetClippingRange, parameter[binary_operation[binary_operation[name[c2] * name[fraction]] + binary_operation[name[c1] * binary_operation[constant[1] - name[fraction]]]]]] name[self].camera assign[=] name[cam] variable[save_int] assign[=] name[self].interactive call[name[self].show, parameter[]] name[self].interactive assign[=] name[save_int]
keyword[def] identifier[moveCamera] ( identifier[self] , identifier[camstart] , identifier[camstop] , identifier[fraction] ): literal[string] keyword[if] identifier[isinstance] ( identifier[fraction] , identifier[int] ): identifier[colors] . identifier[printc] ( literal[string] , identifier[c] = literal[int] ) keyword[if] identifier[fraction] > literal[int] : identifier[colors] . identifier[printc] ( literal[string] , identifier[c] = literal[int] ) identifier[cam] = identifier[vtk] . identifier[vtkCamera] () identifier[cam] . identifier[DeepCopy] ( identifier[camstart] ) identifier[p1] = identifier[numpy] . identifier[array] ( identifier[camstart] . identifier[GetPosition] ()) identifier[f1] = identifier[numpy] . identifier[array] ( identifier[camstart] . identifier[GetFocalPoint] ()) identifier[v1] = identifier[numpy] . identifier[array] ( identifier[camstart] . identifier[GetViewUp] ()) identifier[c1] = identifier[numpy] . identifier[array] ( identifier[camstart] . identifier[GetClippingRange] ()) identifier[s1] = identifier[camstart] . identifier[GetDistance] () identifier[p2] = identifier[numpy] . identifier[array] ( identifier[camstop] . identifier[GetPosition] ()) identifier[f2] = identifier[numpy] . identifier[array] ( identifier[camstop] . identifier[GetFocalPoint] ()) identifier[v2] = identifier[numpy] . identifier[array] ( identifier[camstop] . identifier[GetViewUp] ()) identifier[c2] = identifier[numpy] . identifier[array] ( identifier[camstop] . identifier[GetClippingRange] ()) identifier[s2] = identifier[camstop] . identifier[GetDistance] () identifier[cam] . identifier[SetPosition] ( identifier[p2] * identifier[fraction] + identifier[p1] *( literal[int] - identifier[fraction] )) identifier[cam] . identifier[SetFocalPoint] ( identifier[f2] * identifier[fraction] + identifier[f1] *( literal[int] - identifier[fraction] )) identifier[cam] . 
identifier[SetViewUp] ( identifier[v2] * identifier[fraction] + identifier[v1] *( literal[int] - identifier[fraction] )) identifier[cam] . identifier[SetDistance] ( identifier[s2] * identifier[fraction] + identifier[s1] *( literal[int] - identifier[fraction] )) identifier[cam] . identifier[SetClippingRange] ( identifier[c2] * identifier[fraction] + identifier[c1] *( literal[int] - identifier[fraction] )) identifier[self] . identifier[camera] = identifier[cam] identifier[save_int] = identifier[self] . identifier[interactive] identifier[self] . identifier[show] ( identifier[resetcam] = literal[int] , identifier[interactive] = literal[int] ) identifier[self] . identifier[interactive] = identifier[save_int]
def moveCamera(self, camstart, camstop, fraction): """ Takes as input two ``vtkCamera`` objects and returns a new ``vtkCamera`` that is at an intermediate position: fraction=0 -> camstart, fraction=1 -> camstop. Press ``shift-C`` key in interactive mode to dump a python snipplet of parameters for the current camera view. """ if isinstance(fraction, int): colors.printc('~lightning Warning in moveCamera(): fraction should not be an integer', c=1) # depends on [control=['if'], data=[]] if fraction > 1: colors.printc('~lightning Warning in moveCamera(): fraction is > 1', c=1) # depends on [control=['if'], data=[]] cam = vtk.vtkCamera() cam.DeepCopy(camstart) p1 = numpy.array(camstart.GetPosition()) f1 = numpy.array(camstart.GetFocalPoint()) v1 = numpy.array(camstart.GetViewUp()) c1 = numpy.array(camstart.GetClippingRange()) s1 = camstart.GetDistance() p2 = numpy.array(camstop.GetPosition()) f2 = numpy.array(camstop.GetFocalPoint()) v2 = numpy.array(camstop.GetViewUp()) c2 = numpy.array(camstop.GetClippingRange()) s2 = camstop.GetDistance() cam.SetPosition(p2 * fraction + p1 * (1 - fraction)) cam.SetFocalPoint(f2 * fraction + f1 * (1 - fraction)) cam.SetViewUp(v2 * fraction + v1 * (1 - fraction)) cam.SetDistance(s2 * fraction + s1 * (1 - fraction)) cam.SetClippingRange(c2 * fraction + c1 * (1 - fraction)) self.camera = cam save_int = self.interactive self.show(resetcam=0, interactive=0) self.interactive = save_int
def check_meta(pfeed, *, as_df=False, include_warnings=False): """ Analog of :func:`check_frequencies` for ``pfeed.meta`` """ table = 'meta' problems = [] # Preliminary checks if pfeed.meta is None: problems.append(['error', 'Missing table', table, []]) else: f = pfeed.meta.copy() problems = check_for_required_columns(problems, table, f) if problems: return gt.format_problems(problems, as_df=as_df) if include_warnings: problems = check_for_invalid_columns(problems, table, f) if f.shape[0] > 1: problems.append(['error', 'Meta must have only one row', table, list(range(1, f.shape[0]))]) # Check agency_name problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str) # Check agency_url problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url) # Check agency_timezone problems = gt.check_column(problems, table, f, 'agency_timezone', gt.valid_timezone) # Check start_date and end_date for col in ['start_date', 'end_date']: problems = gt.check_column(problems, table, f, col, gt.valid_date) # Check default_route_speed problems = gt.check_column(problems, table, f, 'default_route_speed', valid_speed) return gt.format_problems(problems, as_df=as_df)
def function[check_meta, parameter[pfeed]]: constant[ Analog of :func:`check_frequencies` for ``pfeed.meta`` ] variable[table] assign[=] constant[meta] variable[problems] assign[=] list[[]] if compare[name[pfeed].meta is constant[None]] begin[:] call[name[problems].append, parameter[list[[<ast.Constant object at 0x7da20e9b2d40>, <ast.Constant object at 0x7da20e9b1570>, <ast.Name object at 0x7da20e9b1e10>, <ast.List object at 0x7da20e9b2b90>]]]] if name[problems] begin[:] return[call[name[gt].format_problems, parameter[name[problems]]]] if name[include_warnings] begin[:] variable[problems] assign[=] call[name[check_for_invalid_columns], parameter[name[problems], name[table], name[f]]] if compare[call[name[f].shape][constant[0]] greater[>] constant[1]] begin[:] call[name[problems].append, parameter[list[[<ast.Constant object at 0x7da18f7219f0>, <ast.Constant object at 0x7da18f7210f0>, <ast.Name object at 0x7da18f721420>, <ast.Call object at 0x7da18f722860>]]]] variable[problems] assign[=] call[name[gt].check_column, parameter[name[problems], name[table], name[f], constant[agency_name], name[gt].valid_str]] variable[problems] assign[=] call[name[gt].check_column, parameter[name[problems], name[table], name[f], constant[agency_url], name[gt].valid_url]] variable[problems] assign[=] call[name[gt].check_column, parameter[name[problems], name[table], name[f], constant[agency_timezone], name[gt].valid_timezone]] for taget[name[col]] in starred[list[[<ast.Constant object at 0x7da18f722170>, <ast.Constant object at 0x7da18f7202e0>]]] begin[:] variable[problems] assign[=] call[name[gt].check_column, parameter[name[problems], name[table], name[f], name[col], name[gt].valid_date]] variable[problems] assign[=] call[name[gt].check_column, parameter[name[problems], name[table], name[f], constant[default_route_speed], name[valid_speed]]] return[call[name[gt].format_problems, parameter[name[problems]]]]
keyword[def] identifier[check_meta] ( identifier[pfeed] ,*, identifier[as_df] = keyword[False] , identifier[include_warnings] = keyword[False] ): literal[string] identifier[table] = literal[string] identifier[problems] =[] keyword[if] identifier[pfeed] . identifier[meta] keyword[is] keyword[None] : identifier[problems] . identifier[append] ([ literal[string] , literal[string] , identifier[table] ,[]]) keyword[else] : identifier[f] = identifier[pfeed] . identifier[meta] . identifier[copy] () identifier[problems] = identifier[check_for_required_columns] ( identifier[problems] , identifier[table] , identifier[f] ) keyword[if] identifier[problems] : keyword[return] identifier[gt] . identifier[format_problems] ( identifier[problems] , identifier[as_df] = identifier[as_df] ) keyword[if] identifier[include_warnings] : identifier[problems] = identifier[check_for_invalid_columns] ( identifier[problems] , identifier[table] , identifier[f] ) keyword[if] identifier[f] . identifier[shape] [ literal[int] ]> literal[int] : identifier[problems] . identifier[append] ([ literal[string] , literal[string] , identifier[table] , identifier[list] ( identifier[range] ( literal[int] , identifier[f] . identifier[shape] [ literal[int] ]))]) identifier[problems] = identifier[gt] . identifier[check_column] ( identifier[problems] , identifier[table] , identifier[f] , literal[string] , identifier[gt] . identifier[valid_str] ) identifier[problems] = identifier[gt] . identifier[check_column] ( identifier[problems] , identifier[table] , identifier[f] , literal[string] , identifier[gt] . identifier[valid_url] ) identifier[problems] = identifier[gt] . identifier[check_column] ( identifier[problems] , identifier[table] , identifier[f] , literal[string] , identifier[gt] . identifier[valid_timezone] ) keyword[for] identifier[col] keyword[in] [ literal[string] , literal[string] ]: identifier[problems] = identifier[gt] . 
identifier[check_column] ( identifier[problems] , identifier[table] , identifier[f] , identifier[col] , identifier[gt] . identifier[valid_date] ) identifier[problems] = identifier[gt] . identifier[check_column] ( identifier[problems] , identifier[table] , identifier[f] , literal[string] , identifier[valid_speed] ) keyword[return] identifier[gt] . identifier[format_problems] ( identifier[problems] , identifier[as_df] = identifier[as_df] )
def check_meta(pfeed, *, as_df=False, include_warnings=False): """ Analog of :func:`check_frequencies` for ``pfeed.meta`` """ table = 'meta' problems = [] # Preliminary checks if pfeed.meta is None: problems.append(['error', 'Missing table', table, []]) # depends on [control=['if'], data=[]] else: f = pfeed.meta.copy() problems = check_for_required_columns(problems, table, f) if problems: return gt.format_problems(problems, as_df=as_df) # depends on [control=['if'], data=[]] if include_warnings: problems = check_for_invalid_columns(problems, table, f) # depends on [control=['if'], data=[]] if f.shape[0] > 1: problems.append(['error', 'Meta must have only one row', table, list(range(1, f.shape[0]))]) # depends on [control=['if'], data=[]] # Check agency_name problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str) # Check agency_url problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url) # Check agency_timezone problems = gt.check_column(problems, table, f, 'agency_timezone', gt.valid_timezone) # Check start_date and end_date for col in ['start_date', 'end_date']: problems = gt.check_column(problems, table, f, col, gt.valid_date) # depends on [control=['for'], data=['col']] # Check default_route_speed problems = gt.check_column(problems, table, f, 'default_route_speed', valid_speed) return gt.format_problems(problems, as_df=as_df)
def get_handler_class(ext): """Get the IOHandler that can handle the extension *ext*.""" if ext in _extensions_map: format = _extensions_map[ext] else: raise ValueError("Unknown format for %s extension." % ext) if format in _handler_map: hc = _handler_map[format] return hc else: matches = difflib.get_close_matches(format, _handler_map.keys()) raise ValueError("Unknown Handler for format %s, close matches: %s" % (format, str(matches)))
def function[get_handler_class, parameter[ext]]: constant[Get the IOHandler that can handle the extension *ext*.] if compare[name[ext] in name[_extensions_map]] begin[:] variable[format] assign[=] call[name[_extensions_map]][name[ext]] if compare[name[format] in name[_handler_map]] begin[:] variable[hc] assign[=] call[name[_handler_map]][name[format]] return[name[hc]]
keyword[def] identifier[get_handler_class] ( identifier[ext] ): literal[string] keyword[if] identifier[ext] keyword[in] identifier[_extensions_map] : identifier[format] = identifier[_extensions_map] [ identifier[ext] ] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[ext] ) keyword[if] identifier[format] keyword[in] identifier[_handler_map] : identifier[hc] = identifier[_handler_map] [ identifier[format] ] keyword[return] identifier[hc] keyword[else] : identifier[matches] = identifier[difflib] . identifier[get_close_matches] ( identifier[format] , identifier[_handler_map] . identifier[keys] ()) keyword[raise] identifier[ValueError] ( literal[string] %( identifier[format] , identifier[str] ( identifier[matches] )))
def get_handler_class(ext): """Get the IOHandler that can handle the extension *ext*.""" if ext in _extensions_map: format = _extensions_map[ext] # depends on [control=['if'], data=['ext', '_extensions_map']] else: raise ValueError('Unknown format for %s extension.' % ext) if format in _handler_map: hc = _handler_map[format] return hc # depends on [control=['if'], data=['format', '_handler_map']] else: matches = difflib.get_close_matches(format, _handler_map.keys()) raise ValueError('Unknown Handler for format %s, close matches: %s' % (format, str(matches)))
def power(self, n): """The matrix power of the channel. Args: n (int): compute the matrix power of the superoperator matrix. Returns: Stinespring: the matrix power of the SuperOp converted to a Stinespring channel. Raises: QiskitError: if the input and output dimensions of the QuantumChannel are not equal, or the power is not an integer. """ if n > 0: return super().power(n) return Stinespring(SuperOp(self).power(n))
def function[power, parameter[self, n]]: constant[The matrix power of the channel. Args: n (int): compute the matrix power of the superoperator matrix. Returns: Stinespring: the matrix power of the SuperOp converted to a Stinespring channel. Raises: QiskitError: if the input and output dimensions of the QuantumChannel are not equal, or the power is not an integer. ] if compare[name[n] greater[>] constant[0]] begin[:] return[call[call[name[super], parameter[]].power, parameter[name[n]]]] return[call[name[Stinespring], parameter[call[call[name[SuperOp], parameter[name[self]]].power, parameter[name[n]]]]]]
keyword[def] identifier[power] ( identifier[self] , identifier[n] ): literal[string] keyword[if] identifier[n] > literal[int] : keyword[return] identifier[super] (). identifier[power] ( identifier[n] ) keyword[return] identifier[Stinespring] ( identifier[SuperOp] ( identifier[self] ). identifier[power] ( identifier[n] ))
def power(self, n): """The matrix power of the channel. Args: n (int): compute the matrix power of the superoperator matrix. Returns: Stinespring: the matrix power of the SuperOp converted to a Stinespring channel. Raises: QiskitError: if the input and output dimensions of the QuantumChannel are not equal, or the power is not an integer. """ if n > 0: return super().power(n) # depends on [control=['if'], data=['n']] return Stinespring(SuperOp(self).power(n))
def retrieveVals(self): """Retrieve values for graphs.""" apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = apacheInfo.getServerStats() if self.hasGraph('apache_access'): self.setGraphVal('apache_access', 'reqs', stats['Total Accesses']) if self.hasGraph('apache_bytes'): self.setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000) if self.hasGraph('apache_workers'): self.setGraphVal('apache_workers', 'busy', stats['BusyWorkers']) self.setGraphVal('apache_workers', 'idle', stats['IdleWorkers']) self.setGraphVal('apache_workers', 'max', stats['MaxWorkers'])
def function[retrieveVals, parameter[self]]: constant[Retrieve values for graphs.] variable[apacheInfo] assign[=] call[name[ApacheInfo], parameter[name[self]._host, name[self]._port, name[self]._user, name[self]._password, name[self]._statuspath, name[self]._ssl]] variable[stats] assign[=] call[name[apacheInfo].getServerStats, parameter[]] if call[name[self].hasGraph, parameter[constant[apache_access]]] begin[:] call[name[self].setGraphVal, parameter[constant[apache_access], constant[reqs], call[name[stats]][constant[Total Accesses]]]] if call[name[self].hasGraph, parameter[constant[apache_bytes]]] begin[:] call[name[self].setGraphVal, parameter[constant[apache_bytes], constant[bytes], binary_operation[call[name[stats]][constant[Total kBytes]] * constant[1000]]]] if call[name[self].hasGraph, parameter[constant[apache_workers]]] begin[:] call[name[self].setGraphVal, parameter[constant[apache_workers], constant[busy], call[name[stats]][constant[BusyWorkers]]]] call[name[self].setGraphVal, parameter[constant[apache_workers], constant[idle], call[name[stats]][constant[IdleWorkers]]]] call[name[self].setGraphVal, parameter[constant[apache_workers], constant[max], call[name[stats]][constant[MaxWorkers]]]]
keyword[def] identifier[retrieveVals] ( identifier[self] ): literal[string] identifier[apacheInfo] = identifier[ApacheInfo] ( identifier[self] . identifier[_host] , identifier[self] . identifier[_port] , identifier[self] . identifier[_user] , identifier[self] . identifier[_password] , identifier[self] . identifier[_statuspath] , identifier[self] . identifier[_ssl] ) identifier[stats] = identifier[apacheInfo] . identifier[getServerStats] () keyword[if] identifier[self] . identifier[hasGraph] ( literal[string] ): identifier[self] . identifier[setGraphVal] ( literal[string] , literal[string] , identifier[stats] [ literal[string] ]) keyword[if] identifier[self] . identifier[hasGraph] ( literal[string] ): identifier[self] . identifier[setGraphVal] ( literal[string] , literal[string] , identifier[stats] [ literal[string] ]* literal[int] ) keyword[if] identifier[self] . identifier[hasGraph] ( literal[string] ): identifier[self] . identifier[setGraphVal] ( literal[string] , literal[string] , identifier[stats] [ literal[string] ]) identifier[self] . identifier[setGraphVal] ( literal[string] , literal[string] , identifier[stats] [ literal[string] ]) identifier[self] . identifier[setGraphVal] ( literal[string] , literal[string] , identifier[stats] [ literal[string] ])
def retrieveVals(self): """Retrieve values for graphs.""" apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = apacheInfo.getServerStats() if self.hasGraph('apache_access'): self.setGraphVal('apache_access', 'reqs', stats['Total Accesses']) # depends on [control=['if'], data=[]] if self.hasGraph('apache_bytes'): self.setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000) # depends on [control=['if'], data=[]] if self.hasGraph('apache_workers'): self.setGraphVal('apache_workers', 'busy', stats['BusyWorkers']) self.setGraphVal('apache_workers', 'idle', stats['IdleWorkers']) self.setGraphVal('apache_workers', 'max', stats['MaxWorkers']) # depends on [control=['if'], data=[]]
def convert_pre(self, markup): """ Substitutes <pre> to Wikipedia markup by adding a space at the start of a line. """ for m in re.findall(self.re["preformatted"], markup): markup = markup.replace(m, m.replace("\n", "\n ")) markup = re.sub("<pre.*?>\n{0,}", "", markup) markup = re.sub("\W{0,}</pre>", "", markup) return markup
def function[convert_pre, parameter[self, markup]]: constant[ Substitutes <pre> to Wikipedia markup by adding a space at the start of a line. ] for taget[name[m]] in starred[call[name[re].findall, parameter[call[name[self].re][constant[preformatted]], name[markup]]]] begin[:] variable[markup] assign[=] call[name[markup].replace, parameter[name[m], call[name[m].replace, parameter[constant[ ], constant[ ]]]]] variable[markup] assign[=] call[name[re].sub, parameter[constant[<pre.*?> {0,}], constant[], name[markup]]] variable[markup] assign[=] call[name[re].sub, parameter[constant[\W{0,}</pre>], constant[], name[markup]]] return[name[markup]]
keyword[def] identifier[convert_pre] ( identifier[self] , identifier[markup] ): literal[string] keyword[for] identifier[m] keyword[in] identifier[re] . identifier[findall] ( identifier[self] . identifier[re] [ literal[string] ], identifier[markup] ): identifier[markup] = identifier[markup] . identifier[replace] ( identifier[m] , identifier[m] . identifier[replace] ( literal[string] , literal[string] )) identifier[markup] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[markup] ) identifier[markup] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[markup] ) keyword[return] identifier[markup]
def convert_pre(self, markup): """ Substitutes <pre> to Wikipedia markup by adding a space at the start of a line. """ for m in re.findall(self.re['preformatted'], markup): markup = markup.replace(m, m.replace('\n', '\n ')) markup = re.sub('<pre.*?>\n{0,}', '', markup) markup = re.sub('\\W{0,}</pre>', '', markup) # depends on [control=['for'], data=['m']] return markup
def calc_evb_v1(self):
    """Calculate the actual water release from the snow cover.

    Required control parameters:
      |NHRU|
      |Lnk|
      |NFk|
      |GrasRef_R|

    Required state sequence:
      |BoWa|

    Required flux sequences:
      |EvPo|
      |EvI|

    Calculated flux sequence:
      |EvB|

    Basic equations:
      :math:`temp = exp(-GrasRef_R \\cdot \\frac{BoWa}{NFk})`

      :math:`EvB = (EvPo - EvI) \\cdot
      \\frac{1 - temp}{1 + temp - 2 \\cdot exp(-GrasRef_R)}`

    Examples:

        Soil evaporation is calculated neither for water nor for sealed
        areas (see the first three HRUs of type |FLUSS|, |SEE|, and |VERS|).
        All other land use classes are handled in accordance with a
        recommendation of the set of codes described in ATV-DVWK-M 504
        (arable land |ACKER| has been selected for the last four HRUs
        arbitrarily):

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> nhru(7)
        >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER)
        >>> grasref_r(5.0)
        >>> nfk(100.0, 100.0, 100.0, 0.0, 100.0, 100.0, 100.0)
        >>> fluxes.evpo = 5.0
        >>> fluxes.evi = 3.0
        >>> states.bowa = 50.0, 50.0, 50.0, 0.0, 0.0, 50.0, 100.0
        >>> model.calc_evb_v1()
        >>> fluxes.evb
        evb(0.0, 0.0, 0.0, 0.0, 0.0, 1.717962, 2.0)

        In case usable field capacity (|NFk|) is zero, soil evaporation
        (|EvB|) is generally set to zero (see the forth HRU).  The last
        three HRUs demonstrate the rise in soil evaporation with
        increasing soil moisture, which is lessening in the high soil
        moisture range.
    """
    # Fast-access shortcuts to control parameters, flux sequences and
    # state sequences (hydpy/Cython convention; the ``d_`` local prefix
    # below marks a double-typed variable for the Cython translation).
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    for k in range(con.nhru):
        # No soil evaporation for sealed surfaces and water areas, nor
        # when the usable field capacity is zero (division guard).
        if (con.lnk[k] in (VERS, WASSER, FLUSS, SEE)) or (con.nfk[k] <= 0.):
            flu.evb[k] = 0.
        else:
            # Saturation-dependent term of the ATV-DVWK-M 504 curve.
            d_temp = modelutils.exp(-con.grasref_r * sta.bowa[k]/con.nfk[k])
            # Scale the remaining potential evaporation (EvPo - EvI)
            # by the normalised moisture-dependent factor.
            flu.evb[k] = ((flu.evpo[k]-flu.evi[k]) * (1.-d_temp) /
                          (1.+d_temp-2.*modelutils.exp(-con.grasref_r)))
def function[calc_evb_v1, parameter[self]]: constant[Calculate the actual water release from the snow cover. Required control parameters: |NHRU| |Lnk| |NFk| |GrasRef_R| Required state sequence: |BoWa| Required flux sequences: |EvPo| |EvI| Calculated flux sequence: |EvB| Basic equations: :math:`temp = exp(-GrasRef_R \cdot \frac{BoWa}{NFk})` :math:`EvB = (EvPo - EvI) \cdot \frac{1 - temp}{1 + temp -2 \cdot exp(-GrasRef_R)}` Examples: Soil evaporation is calculated neither for water nor for sealed areas (see the first three HRUs of type |FLUSS|, |SEE|, and |VERS|). All other land use classes are handled in accordance with a recommendation of the set of codes described in ATV-DVWK-M 504 (arable land |ACKER| has been selected for the last four HRUs arbitrarily): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(7) >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER) >>> grasref_r(5.0) >>> nfk(100.0, 100.0, 100.0, 0.0, 100.0, 100.0, 100.0) >>> fluxes.evpo = 5.0 >>> fluxes.evi = 3.0 >>> states.bowa = 50.0, 50.0, 50.0, 0.0, 0.0, 50.0, 100.0 >>> model.calc_evb_v1() >>> fluxes.evb evb(0.0, 0.0, 0.0, 0.0, 0.0, 1.717962, 2.0) In case usable field capacity (|NFk|) is zero, soil evaporation (|EvB|) is generally set to zero (see the forth HRU). The last three HRUs demonstrate the rise in soil evaporation with increasing soil moisture, which is lessening in the high soil moisture range. ] variable[con] assign[=] name[self].parameters.control.fastaccess variable[flu] assign[=] name[self].sequences.fluxes.fastaccess variable[sta] assign[=] name[self].sequences.states.fastaccess for taget[name[k]] in starred[call[name[range], parameter[name[con].nhru]]] begin[:] if <ast.BoolOp object at 0x7da18f09d7b0> begin[:] call[name[flu].evb][name[k]] assign[=] constant[0.0]
keyword[def] identifier[calc_evb_v1] ( identifier[self] ): literal[string] identifier[con] = identifier[self] . identifier[parameters] . identifier[control] . identifier[fastaccess] identifier[flu] = identifier[self] . identifier[sequences] . identifier[fluxes] . identifier[fastaccess] identifier[sta] = identifier[self] . identifier[sequences] . identifier[states] . identifier[fastaccess] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[con] . identifier[nhru] ): keyword[if] ( identifier[con] . identifier[lnk] [ identifier[k] ] keyword[in] ( identifier[VERS] , identifier[WASSER] , identifier[FLUSS] , identifier[SEE] )) keyword[or] ( identifier[con] . identifier[nfk] [ identifier[k] ]<= literal[int] ): identifier[flu] . identifier[evb] [ identifier[k] ]= literal[int] keyword[else] : identifier[d_temp] = identifier[modelutils] . identifier[exp] (- identifier[con] . identifier[grasref_r] * identifier[sta] . identifier[bowa] [ identifier[k] ]/ identifier[con] . identifier[nfk] [ identifier[k] ]) identifier[flu] . identifier[evb] [ identifier[k] ]=(( identifier[flu] . identifier[evpo] [ identifier[k] ]- identifier[flu] . identifier[evi] [ identifier[k] ])*( literal[int] - identifier[d_temp] )/ ( literal[int] + identifier[d_temp] - literal[int] * identifier[modelutils] . identifier[exp] (- identifier[con] . identifier[grasref_r] )))
def calc_evb_v1(self): """Calculate the actual water release from the snow cover. Required control parameters: |NHRU| |Lnk| |NFk| |GrasRef_R| Required state sequence: |BoWa| Required flux sequences: |EvPo| |EvI| Calculated flux sequence: |EvB| Basic equations: :math:`temp = exp(-GrasRef_R \\cdot \\frac{BoWa}{NFk})` :math:`EvB = (EvPo - EvI) \\cdot \\frac{1 - temp}{1 + temp -2 \\cdot exp(-GrasRef_R)}` Examples: Soil evaporation is calculated neither for water nor for sealed areas (see the first three HRUs of type |FLUSS|, |SEE|, and |VERS|). All other land use classes are handled in accordance with a recommendation of the set of codes described in ATV-DVWK-M 504 (arable land |ACKER| has been selected for the last four HRUs arbitrarily): >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(7) >>> lnk(FLUSS, SEE, VERS, ACKER, ACKER, ACKER, ACKER) >>> grasref_r(5.0) >>> nfk(100.0, 100.0, 100.0, 0.0, 100.0, 100.0, 100.0) >>> fluxes.evpo = 5.0 >>> fluxes.evi = 3.0 >>> states.bowa = 50.0, 50.0, 50.0, 0.0, 0.0, 50.0, 100.0 >>> model.calc_evb_v1() >>> fluxes.evb evb(0.0, 0.0, 0.0, 0.0, 0.0, 1.717962, 2.0) In case usable field capacity (|NFk|) is zero, soil evaporation (|EvB|) is generally set to zero (see the forth HRU). The last three HRUs demonstrate the rise in soil evaporation with increasing soil moisture, which is lessening in the high soil moisture range. """ con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess for k in range(con.nhru): if con.lnk[k] in (VERS, WASSER, FLUSS, SEE) or con.nfk[k] <= 0.0: flu.evb[k] = 0.0 # depends on [control=['if'], data=[]] else: d_temp = modelutils.exp(-con.grasref_r * sta.bowa[k] / con.nfk[k]) flu.evb[k] = (flu.evpo[k] - flu.evi[k]) * (1.0 - d_temp) / (1.0 + d_temp - 2.0 * modelutils.exp(-con.grasref_r)) # depends on [control=['for'], data=['k']]
def open(self, data_source, *args, **kwargs):
    """
    Open filename to get data for data_source.

    :param data_source: Data source for which the file contains data.
    :type data_source: str

    Positional and keyword arguments can contain either the data to use
    for the data source or the full path of the file which contains data
    for the data source.

    For file-backed readers the file location may be supplied either as
    keywords (``filename``, ``path``, ``rel_path``) or positionally in
    that order; positional values override the keyword values.
    """
    if self.sources[data_source]._meta.data_reader.is_file_reader:
        # File reader: resolve filename/path/rel_path, letting
        # positional arguments take precedence over keywords.
        filename = kwargs.get('filename')
        path = kwargs.get('path', '')
        rel_path = kwargs.get('rel_path', '')
        if len(args) > 0:
            filename = args[0]
        if len(args) > 1:
            path = args[1]
        if len(args) > 2:
            rel_path = args[2]
        # Collapse everything into a single ``filename`` keyword so the
        # data-source constructor receives one full path.
        # NOTE(review): if no filename was supplied at all this join
        # raises on ``None`` — presumably callers always provide one.
        args = ()
        kwargs = {'filename': os.path.join(rel_path, path, filename)}
        LOGGER.debug('filename: %s', kwargs['filename'])
    # call constructor of data source with filename argument
    self.objects[data_source] = self.sources[data_source](*args, **kwargs)
    # register data and uncertainty in registry
    data_src_obj = self.objects[data_source]
    meta = [getattr(data_src_obj, m) for m in self.reg.meta_names]
    self.reg.register(data_src_obj.data, *meta)
def function[open, parameter[self, data_source]]: constant[ Open filename to get data for data_source. :param data_source: Data source for which the file contains data. :type data_source: str Positional and keyword arguments can contain either the data to use for the data source or the full path of the file which contains data for the data source. ] if call[name[self].sources][name[data_source]]._meta.data_reader.is_file_reader begin[:] variable[filename] assign[=] call[name[kwargs].get, parameter[constant[filename]]] variable[path] assign[=] call[name[kwargs].get, parameter[constant[path], constant[]]] variable[rel_path] assign[=] call[name[kwargs].get, parameter[constant[rel_path], constant[]]] if compare[call[name[len], parameter[name[args]]] greater[>] constant[0]] begin[:] variable[filename] assign[=] call[name[args]][constant[0]] if compare[call[name[len], parameter[name[args]]] greater[>] constant[1]] begin[:] variable[path] assign[=] call[name[args]][constant[1]] if compare[call[name[len], parameter[name[args]]] greater[>] constant[2]] begin[:] variable[rel_path] assign[=] call[name[args]][constant[2]] variable[args] assign[=] tuple[[]] variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18eb55510>], [<ast.Call object at 0x7da18eb561d0>]] call[name[LOGGER].debug, parameter[constant[filename: %s], call[name[kwargs]][constant[filename]]]] call[name[self].objects][name[data_source]] assign[=] call[call[name[self].sources][name[data_source]], parameter[<ast.Starred object at 0x7da18eb568f0>]] variable[data_src_obj] assign[=] call[name[self].objects][name[data_source]] variable[meta] assign[=] <ast.ListComp object at 0x7da18eb552a0> call[name[self].reg.register, parameter[name[data_src_obj].data, <ast.Starred object at 0x7da20c6e4d60>]]
keyword[def] identifier[open] ( identifier[self] , identifier[data_source] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[self] . identifier[sources] [ identifier[data_source] ]. identifier[_meta] . identifier[data_reader] . identifier[is_file_reader] : identifier[filename] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[path] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ) identifier[rel_path] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[len] ( identifier[args] )> literal[int] : identifier[filename] = identifier[args] [ literal[int] ] keyword[if] identifier[len] ( identifier[args] )> literal[int] : identifier[path] = identifier[args] [ literal[int] ] keyword[if] identifier[len] ( identifier[args] )> literal[int] : identifier[rel_path] = identifier[args] [ literal[int] ] identifier[args] =() identifier[kwargs] ={ literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[rel_path] , identifier[path] , identifier[filename] )} identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[kwargs] [ literal[string] ]) identifier[self] . identifier[objects] [ identifier[data_source] ]= identifier[self] . identifier[sources] [ identifier[data_source] ](* identifier[args] ,** identifier[kwargs] ) identifier[data_src_obj] = identifier[self] . identifier[objects] [ identifier[data_source] ] identifier[meta] =[ identifier[getattr] ( identifier[data_src_obj] , identifier[m] ) keyword[for] identifier[m] keyword[in] identifier[self] . identifier[reg] . identifier[meta_names] ] identifier[self] . identifier[reg] . identifier[register] ( identifier[data_src_obj] . identifier[data] ,* identifier[meta] )
def open(self, data_source, *args, **kwargs): """ Open filename to get data for data_source. :param data_source: Data source for which the file contains data. :type data_source: str Positional and keyword arguments can contain either the data to use for the data source or the full path of the file which contains data for the data source. """ if self.sources[data_source]._meta.data_reader.is_file_reader: filename = kwargs.get('filename') path = kwargs.get('path', '') rel_path = kwargs.get('rel_path', '') if len(args) > 0: filename = args[0] # depends on [control=['if'], data=[]] if len(args) > 1: path = args[1] # depends on [control=['if'], data=[]] if len(args) > 2: rel_path = args[2] # depends on [control=['if'], data=[]] args = () kwargs = {'filename': os.path.join(rel_path, path, filename)} LOGGER.debug('filename: %s', kwargs['filename']) # depends on [control=['if'], data=[]] # call constructor of data source with filename argument self.objects[data_source] = self.sources[data_source](*args, **kwargs) # register data and uncertainty in registry data_src_obj = self.objects[data_source] meta = [getattr(data_src_obj, m) for m in self.reg.meta_names] self.reg.register(data_src_obj.data, *meta)
def long_to_hex(l, size):
    """Encode a long value as a zero-padded, lowercase hex string.

    ``size`` is the length of the resulting hex string, i.e. two hex
    characters per byte (so a 32-byte long needs ``size=64``).
    """
    padded = format(l, "0{0}x".format(size))
    return ensure_bytes(padded.lower())
def function[long_to_hex, parameter[l, size]]: constant[Encode a long value as a hex string, 0-padding to size. Note that size is the size of the resulting hex string. So, for a 32Byte long size should be 64 (two hex characters per byte".] variable[f_str] assign[=] binary_operation[constant[{0:0%sx}] <ast.Mod object at 0x7da2590d6920> name[size]] return[call[name[ensure_bytes], parameter[call[call[name[f_str].format, parameter[name[l]]].lower, parameter[]]]]]
keyword[def] identifier[long_to_hex] ( identifier[l] , identifier[size] ): literal[string] identifier[f_str] = literal[string] % identifier[size] keyword[return] identifier[ensure_bytes] ( identifier[f_str] . identifier[format] ( identifier[l] ). identifier[lower] ())
def long_to_hex(l, size): """Encode a long value as a hex string, 0-padding to size. Note that size is the size of the resulting hex string. So, for a 32Byte long size should be 64 (two hex characters per byte".""" f_str = '{0:0%sx}' % size return ensure_bytes(f_str.format(l).lower())
def list_all_states(cls, **kwargs):
    """List States

    Return a list of States.  The call is synchronous by default; pass
    ``async=True`` to perform it asynchronously and receive a request
    thread instead:

    >>> thread = api.list_all_states(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[State]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_states_with_http_info(**kwargs)
    return cls._list_all_states_with_http_info(**kwargs)
def function[list_all_states, parameter[cls]]: constant[List States Return a list of States This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_states(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[State] If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async]]] begin[:] return[call[name[cls]._list_all_states_with_http_info, parameter[]]]
keyword[def] identifier[list_all_states] ( identifier[cls] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[cls] . identifier[_list_all_states_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[cls] . identifier[_list_all_states_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def list_all_states(cls, **kwargs): """List States Return a list of States This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_states(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[State] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_states_with_http_info(**kwargs) # depends on [control=['if'], data=[]] else: data = cls._list_all_states_with_http_info(**kwargs) return data
def commit(self):
    """Commit any pending transaction."""
    self._transaction = False
    try:
        self._con.commit()
    except self._failures as exc:
        # The commit failed, which usually means the connection has
        # gone bad.  Try to swap in a freshly opened connection so the
        # object stays usable, then surface the original error.
        try:
            fresh = self._create()
        except Exception:
            # Could not reconnect either; keep the broken connection.
            pass
        else:
            self._close()
            self._store(fresh)
        raise exc
def function[commit, parameter[self]]: constant[Commit any pending transaction.] name[self]._transaction assign[=] constant[False] <ast.Try object at 0x7da20c990430>
keyword[def] identifier[commit] ( identifier[self] ): literal[string] identifier[self] . identifier[_transaction] = keyword[False] keyword[try] : identifier[self] . identifier[_con] . identifier[commit] () keyword[except] identifier[self] . identifier[_failures] keyword[as] identifier[error] : keyword[try] : identifier[con] = identifier[self] . identifier[_create] () keyword[except] identifier[Exception] : keyword[pass] keyword[else] : identifier[self] . identifier[_close] () identifier[self] . identifier[_store] ( identifier[con] ) keyword[raise] identifier[error]
def commit(self): """Commit any pending transaction.""" self._transaction = False try: self._con.commit() # depends on [control=['try'], data=[]] except self._failures as error: # cannot commit try: # try to reopen the connection con = self._create() # depends on [control=['try'], data=[]] except Exception: pass # depends on [control=['except'], data=[]] else: self._close() self._store(con) raise error # depends on [control=['except'], data=['error']]
def serial_udb_extra_f8_encode(self, sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH):
    '''
    Backwards compatible version of SERIAL_UDB_EXTRA F8: format.

    Thin pass-through constructor: packs the given altitude-hold
    parameters into a message object without sending it.

    sue_HEIGHT_TARGET_MAX     : Serial UDB Extra HEIGHT_TARGET_MAX (float)
    sue_HEIGHT_TARGET_MIN     : Serial UDB Extra HEIGHT_TARGET_MIN (float)
    sue_ALT_HOLD_THROTTLE_MIN : Serial UDB Extra ALT_HOLD_THROTTLE_MIN (float)
    sue_ALT_HOLD_THROTTLE_MAX : Serial UDB Extra ALT_HOLD_THROTTLE_MAX (float)
    sue_ALT_HOLD_PITCH_MIN    : Serial UDB Extra ALT_HOLD_PITCH_MIN (float)
    sue_ALT_HOLD_PITCH_MAX    : Serial UDB Extra ALT_HOLD_PITCH_MAX (float)
    sue_ALT_HOLD_PITCH_HIGH   : Serial UDB Extra ALT_HOLD_PITCH_HIGH (float)

    Returns a MAVLink_serial_udb_extra_f8_message instance.
    '''
    return MAVLink_serial_udb_extra_f8_message(sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH)
def function[serial_udb_extra_f8_encode, parameter[self, sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH]]: constant[ Backwards compatible version of SERIAL_UDB_EXTRA F8: format sue_HEIGHT_TARGET_MAX : Serial UDB Extra HEIGHT_TARGET_MAX (float) sue_HEIGHT_TARGET_MIN : Serial UDB Extra HEIGHT_TARGET_MIN (float) sue_ALT_HOLD_THROTTLE_MIN : Serial UDB Extra ALT_HOLD_THROTTLE_MIN (float) sue_ALT_HOLD_THROTTLE_MAX : Serial UDB Extra ALT_HOLD_THROTTLE_MAX (float) sue_ALT_HOLD_PITCH_MIN : Serial UDB Extra ALT_HOLD_PITCH_MIN (float) sue_ALT_HOLD_PITCH_MAX : Serial UDB Extra ALT_HOLD_PITCH_MAX (float) sue_ALT_HOLD_PITCH_HIGH : Serial UDB Extra ALT_HOLD_PITCH_HIGH (float) ] return[call[name[MAVLink_serial_udb_extra_f8_message], parameter[name[sue_HEIGHT_TARGET_MAX], name[sue_HEIGHT_TARGET_MIN], name[sue_ALT_HOLD_THROTTLE_MIN], name[sue_ALT_HOLD_THROTTLE_MAX], name[sue_ALT_HOLD_PITCH_MIN], name[sue_ALT_HOLD_PITCH_MAX], name[sue_ALT_HOLD_PITCH_HIGH]]]]
keyword[def] identifier[serial_udb_extra_f8_encode] ( identifier[self] , identifier[sue_HEIGHT_TARGET_MAX] , identifier[sue_HEIGHT_TARGET_MIN] , identifier[sue_ALT_HOLD_THROTTLE_MIN] , identifier[sue_ALT_HOLD_THROTTLE_MAX] , identifier[sue_ALT_HOLD_PITCH_MIN] , identifier[sue_ALT_HOLD_PITCH_MAX] , identifier[sue_ALT_HOLD_PITCH_HIGH] ): literal[string] keyword[return] identifier[MAVLink_serial_udb_extra_f8_message] ( identifier[sue_HEIGHT_TARGET_MAX] , identifier[sue_HEIGHT_TARGET_MIN] , identifier[sue_ALT_HOLD_THROTTLE_MIN] , identifier[sue_ALT_HOLD_THROTTLE_MAX] , identifier[sue_ALT_HOLD_PITCH_MIN] , identifier[sue_ALT_HOLD_PITCH_MAX] , identifier[sue_ALT_HOLD_PITCH_HIGH] )
def serial_udb_extra_f8_encode(self, sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH): """ Backwards compatible version of SERIAL_UDB_EXTRA F8: format sue_HEIGHT_TARGET_MAX : Serial UDB Extra HEIGHT_TARGET_MAX (float) sue_HEIGHT_TARGET_MIN : Serial UDB Extra HEIGHT_TARGET_MIN (float) sue_ALT_HOLD_THROTTLE_MIN : Serial UDB Extra ALT_HOLD_THROTTLE_MIN (float) sue_ALT_HOLD_THROTTLE_MAX : Serial UDB Extra ALT_HOLD_THROTTLE_MAX (float) sue_ALT_HOLD_PITCH_MIN : Serial UDB Extra ALT_HOLD_PITCH_MIN (float) sue_ALT_HOLD_PITCH_MAX : Serial UDB Extra ALT_HOLD_PITCH_MAX (float) sue_ALT_HOLD_PITCH_HIGH : Serial UDB Extra ALT_HOLD_PITCH_HIGH (float) """ return MAVLink_serial_udb_extra_f8_message(sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH)
def _copy_or_render_source(ext, f, output_dir, render_callback, skip_copy=False):
    """
    Render ``f`` through the first matching (pattern, target, subsd)
    tuple in ``ext.template_regexps``, or fall back to a plain copy.

    Returns the (possibly renamed) path of the produced file relative
    to ``output_dir``.
    """
    dirname = os.path.dirname(f)
    filename = os.path.basename(f)
    for pattern, target, subsd in ext.template_regexps:
        if not re.match(pattern, filename):
            continue
        # Template match: derive the target file name and render it,
        # reusing the previously stored substitution dict (if any) so
        # only_update can detect unchanged output.
        tgt = os.path.join(dirname, re.sub(pattern, target, filename))
        rw = MetaReaderWriter('.metadata_subsd')
        try:
            prev_subsd = rw.get_from_metadata_file(output_dir, f)
        except (FileNotFoundError, KeyError):
            prev_subsd = None
        render_callback(
            get_abspath(f),
            os.path.join(output_dir, tgt),
            subsd,
            only_update=ext.only_update,
            prev_subsd=prev_subsd,
            create_dest_dirs=True,
            logger=ext.logger)
        rw.save_to_metadata_file(output_dir, f, subsd)
        return tgt
    # No template matched: copy the file verbatim (unless suppressed).
    if not skip_copy:
        copy(f,
             os.path.join(output_dir, dirname),
             only_update=ext.only_update,
             dest_is_dir=True,
             create_dest_dirs=True,
             logger=ext.logger)
    return f
def function[_copy_or_render_source, parameter[ext, f, output_dir, render_callback, skip_copy]]: constant[ Tries to do regex match for each (pattern, target, subsd) tuple in ext.template_regexps for file f. ] variable[dirname] assign[=] call[name[os].path.dirname, parameter[name[f]]] variable[filename] assign[=] call[name[os].path.basename, parameter[name[f]]] for taget[tuple[[<ast.Name object at 0x7da2041da2c0>, <ast.Name object at 0x7da2041dadd0>, <ast.Name object at 0x7da2041d80a0>]]] in starred[name[ext].template_regexps] begin[:] if call[name[re].match, parameter[name[pattern], name[filename]]] begin[:] variable[tgt] assign[=] call[name[os].path.join, parameter[name[dirname], call[name[re].sub, parameter[name[pattern], name[target], name[filename]]]]] variable[rw] assign[=] call[name[MetaReaderWriter], parameter[constant[.metadata_subsd]]] <ast.Try object at 0x7da2041db0a0> call[name[render_callback], parameter[call[name[get_abspath], parameter[name[f]]], call[name[os].path.join, parameter[name[output_dir], name[tgt]]], name[subsd]]] call[name[rw].save_to_metadata_file, parameter[name[output_dir], name[f], name[subsd]]] return[name[tgt]]
keyword[def] identifier[_copy_or_render_source] ( identifier[ext] , identifier[f] , identifier[output_dir] , identifier[render_callback] , identifier[skip_copy] = keyword[False] ): literal[string] identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[f] ) identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[f] ) keyword[for] identifier[pattern] , identifier[target] , identifier[subsd] keyword[in] identifier[ext] . identifier[template_regexps] : keyword[if] identifier[re] . identifier[match] ( identifier[pattern] , identifier[filename] ): identifier[tgt] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , identifier[re] . identifier[sub] ( identifier[pattern] , identifier[target] , identifier[filename] )) identifier[rw] = identifier[MetaReaderWriter] ( literal[string] ) keyword[try] : identifier[prev_subsd] = identifier[rw] . identifier[get_from_metadata_file] ( identifier[output_dir] , identifier[f] ) keyword[except] ( identifier[FileNotFoundError] , identifier[KeyError] ): identifier[prev_subsd] = keyword[None] identifier[render_callback] ( identifier[get_abspath] ( identifier[f] ), identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , identifier[tgt] ), identifier[subsd] , identifier[only_update] = identifier[ext] . identifier[only_update] , identifier[prev_subsd] = identifier[prev_subsd] , identifier[create_dest_dirs] = keyword[True] , identifier[logger] = identifier[ext] . identifier[logger] ) identifier[rw] . identifier[save_to_metadata_file] ( identifier[output_dir] , identifier[f] , identifier[subsd] ) keyword[return] identifier[tgt] keyword[else] : keyword[if] keyword[not] identifier[skip_copy] : identifier[copy] ( identifier[f] , identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , identifier[os] . identifier[path] . identifier[dirname] ( identifier[f] )), identifier[only_update] = identifier[ext] . 
identifier[only_update] , identifier[dest_is_dir] = keyword[True] , identifier[create_dest_dirs] = keyword[True] , identifier[logger] = identifier[ext] . identifier[logger] ) keyword[return] identifier[f]
def _copy_or_render_source(ext, f, output_dir, render_callback, skip_copy=False): """ Tries to do regex match for each (pattern, target, subsd) tuple in ext.template_regexps for file f. """ # Either render a template or copy the source dirname = os.path.dirname(f) filename = os.path.basename(f) for (pattern, target, subsd) in ext.template_regexps: if re.match(pattern, filename): tgt = os.path.join(dirname, re.sub(pattern, target, filename)) rw = MetaReaderWriter('.metadata_subsd') try: prev_subsd = rw.get_from_metadata_file(output_dir, f) # depends on [control=['try'], data=[]] except (FileNotFoundError, KeyError): prev_subsd = None # depends on [control=['except'], data=[]] render_callback(get_abspath(f), os.path.join(output_dir, tgt), subsd, only_update=ext.only_update, prev_subsd=prev_subsd, create_dest_dirs=True, logger=ext.logger) rw.save_to_metadata_file(output_dir, f, subsd) return tgt # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] else: if not skip_copy: copy(f, os.path.join(output_dir, os.path.dirname(f)), only_update=ext.only_update, dest_is_dir=True, create_dest_dirs=True, logger=ext.logger) # depends on [control=['if'], data=[]] return f
def clone(self):
    """
    Get a shallow clone of this object.
    The clone only shares the WSDL. All other attributes are unique to
    the cloned object including options.
    @return: A shallow clone.
    @rtype: L{Client}
    """
    # A Client subclass whose __init__ does nothing, so the clone can
    # be populated attribute by attribute without re-reading the WSDL.
    class Uninitialized(Client):
        def __init__(self):
            pass
    clone = Uninitialized()
    # Give the clone its own Options object and deep-copy the current
    # option values into it, so later option changes stay independent.
    clone.options = Options()
    cp = Unskin(clone.options)
    mp = Unskin(self.options)
    cp.update(deepcopy(mp))
    # Shared with the original: the parsed WSDL and helpers derived
    # from it.
    clone.wsdl = self.wsdl
    clone.factory = self.factory
    clone.service = ServiceSelector(clone, self.wsdl.services)
    clone.sd = self.sd
    # Per-clone tx/rx message slots, reset to None.
    clone.messages = dict(tx=None, rx=None)
    return clone
def function[clone, parameter[self]]: constant[ Get a shallow clone of this object. The clone only shares the WSDL. All other attributes are unique to the cloned object including options. @return: A shallow clone. @rtype: L{Client} ] class class[Uninitialized, parameter[]] begin[:] def function[__init__, parameter[self]]: pass variable[clone] assign[=] call[name[Uninitialized], parameter[]] name[clone].options assign[=] call[name[Options], parameter[]] variable[cp] assign[=] call[name[Unskin], parameter[name[clone].options]] variable[mp] assign[=] call[name[Unskin], parameter[name[self].options]] call[name[cp].update, parameter[call[name[deepcopy], parameter[name[mp]]]]] name[clone].wsdl assign[=] name[self].wsdl name[clone].factory assign[=] name[self].factory name[clone].service assign[=] call[name[ServiceSelector], parameter[name[clone], name[self].wsdl.services]] name[clone].sd assign[=] name[self].sd name[clone].messages assign[=] call[name[dict], parameter[]] return[name[clone]]
keyword[def] identifier[clone] ( identifier[self] ): literal[string] keyword[class] identifier[Uninitialized] ( identifier[Client] ): keyword[def] identifier[__init__] ( identifier[self] ): keyword[pass] identifier[clone] = identifier[Uninitialized] () identifier[clone] . identifier[options] = identifier[Options] () identifier[cp] = identifier[Unskin] ( identifier[clone] . identifier[options] ) identifier[mp] = identifier[Unskin] ( identifier[self] . identifier[options] ) identifier[cp] . identifier[update] ( identifier[deepcopy] ( identifier[mp] )) identifier[clone] . identifier[wsdl] = identifier[self] . identifier[wsdl] identifier[clone] . identifier[factory] = identifier[self] . identifier[factory] identifier[clone] . identifier[service] = identifier[ServiceSelector] ( identifier[clone] , identifier[self] . identifier[wsdl] . identifier[services] ) identifier[clone] . identifier[sd] = identifier[self] . identifier[sd] identifier[clone] . identifier[messages] = identifier[dict] ( identifier[tx] = keyword[None] , identifier[rx] = keyword[None] ) keyword[return] identifier[clone]
def clone(self): """ Get a shallow clone of this object. The clone only shares the WSDL. All other attributes are unique to the cloned object including options. @return: A shallow clone. @rtype: L{Client} """ class Uninitialized(Client): def __init__(self): pass clone = Uninitialized() clone.options = Options() cp = Unskin(clone.options) mp = Unskin(self.options) cp.update(deepcopy(mp)) clone.wsdl = self.wsdl clone.factory = self.factory clone.service = ServiceSelector(clone, self.wsdl.services) clone.sd = self.sd clone.messages = dict(tx=None, rx=None) return clone
def create_or_update(model, *, defaults=None, save=True, **kwargs): """ Create or update a django model instance. :param model: :param defaults: :param kwargs: :return: object, created, updated """ obj, created = model._default_manager.get_or_create(defaults=defaults, **kwargs) updated = False if not created: if defaults: for k, v in defaults.items(): if getattr(obj, k) != v: setattr(obj, k, v) updated = True if updated and save: obj.save() return obj, created, updated
def function[create_or_update, parameter[model]]: constant[ Create or update a django model instance. :param model: :param defaults: :param kwargs: :return: object, created, updated ] <ast.Tuple object at 0x7da20e9b0e50> assign[=] call[name[model]._default_manager.get_or_create, parameter[]] variable[updated] assign[=] constant[False] if <ast.UnaryOp object at 0x7da20e9b2c50> begin[:] if name[defaults] begin[:] for taget[tuple[[<ast.Name object at 0x7da20e9b3a30>, <ast.Name object at 0x7da18f811300>]]] in starred[call[name[defaults].items, parameter[]]] begin[:] if compare[call[name[getattr], parameter[name[obj], name[k]]] not_equal[!=] name[v]] begin[:] call[name[setattr], parameter[name[obj], name[k], name[v]]] variable[updated] assign[=] constant[True] if <ast.BoolOp object at 0x7da18f813880> begin[:] call[name[obj].save, parameter[]] return[tuple[[<ast.Name object at 0x7da18f811900>, <ast.Name object at 0x7da18f813910>, <ast.Name object at 0x7da18f810e50>]]]
keyword[def] identifier[create_or_update] ( identifier[model] ,*, identifier[defaults] = keyword[None] , identifier[save] = keyword[True] ,** identifier[kwargs] ): literal[string] identifier[obj] , identifier[created] = identifier[model] . identifier[_default_manager] . identifier[get_or_create] ( identifier[defaults] = identifier[defaults] ,** identifier[kwargs] ) identifier[updated] = keyword[False] keyword[if] keyword[not] identifier[created] : keyword[if] identifier[defaults] : keyword[for] identifier[k] , identifier[v] keyword[in] identifier[defaults] . identifier[items] (): keyword[if] identifier[getattr] ( identifier[obj] , identifier[k] )!= identifier[v] : identifier[setattr] ( identifier[obj] , identifier[k] , identifier[v] ) identifier[updated] = keyword[True] keyword[if] identifier[updated] keyword[and] identifier[save] : identifier[obj] . identifier[save] () keyword[return] identifier[obj] , identifier[created] , identifier[updated]
def create_or_update(model, *, defaults=None, save=True, **kwargs): """ Create or update a django model instance. :param model: :param defaults: :param kwargs: :return: object, created, updated """ (obj, created) = model._default_manager.get_or_create(defaults=defaults, **kwargs) updated = False if not created: if defaults: for (k, v) in defaults.items(): if getattr(obj, k) != v: setattr(obj, k, v) updated = True # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] if updated and save: obj.save() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return (obj, created, updated)
def clearness_index(ghi, solar_zenith, extra_radiation, min_cos_zenith=0.065, max_clearness_index=2.0): """ Calculate the clearness index. The clearness index is the ratio of global to extraterrestrial irradiance on a horizontal plane. Parameters ---------- ghi : numeric Global horizontal irradiance in W/m^2. solar_zenith : numeric True (not refraction-corrected) solar zenith angle in decimal degrees. extra_radiation : numeric Irradiance incident at the top of the atmosphere min_cos_zenith : numeric, default 0.065 Minimum value of cos(zenith) to allow when calculating global clearness index `kt`. Equivalent to zenith = 86.273 degrees. max_clearness_index : numeric, default 2.0 Maximum value of the clearness index. The default, 2.0, allows for over-irradiance events typically seen in sub-hourly data. NREL's SRRL Fortran code used 0.82 for hourly data. Returns ------- kt : numeric Clearness index References ---------- .. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly Global Horizontal to Direct Normal Insolation", Technical Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research Institute, 1987. """ cos_zenith = tools.cosd(solar_zenith) I0h = extra_radiation * np.maximum(cos_zenith, min_cos_zenith) # consider adding # with np.errstate(invalid='ignore', divide='ignore'): # to kt calculation, but perhaps it's good to allow these # warnings to the users that override min_cos_zenith kt = ghi / I0h kt = np.maximum(kt, 0) kt = np.minimum(kt, max_clearness_index) return kt
def function[clearness_index, parameter[ghi, solar_zenith, extra_radiation, min_cos_zenith, max_clearness_index]]: constant[ Calculate the clearness index. The clearness index is the ratio of global to extraterrestrial irradiance on a horizontal plane. Parameters ---------- ghi : numeric Global horizontal irradiance in W/m^2. solar_zenith : numeric True (not refraction-corrected) solar zenith angle in decimal degrees. extra_radiation : numeric Irradiance incident at the top of the atmosphere min_cos_zenith : numeric, default 0.065 Minimum value of cos(zenith) to allow when calculating global clearness index `kt`. Equivalent to zenith = 86.273 degrees. max_clearness_index : numeric, default 2.0 Maximum value of the clearness index. The default, 2.0, allows for over-irradiance events typically seen in sub-hourly data. NREL's SRRL Fortran code used 0.82 for hourly data. Returns ------- kt : numeric Clearness index References ---------- .. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly Global Horizontal to Direct Normal Insolation", Technical Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research Institute, 1987. ] variable[cos_zenith] assign[=] call[name[tools].cosd, parameter[name[solar_zenith]]] variable[I0h] assign[=] binary_operation[name[extra_radiation] * call[name[np].maximum, parameter[name[cos_zenith], name[min_cos_zenith]]]] variable[kt] assign[=] binary_operation[name[ghi] / name[I0h]] variable[kt] assign[=] call[name[np].maximum, parameter[name[kt], constant[0]]] variable[kt] assign[=] call[name[np].minimum, parameter[name[kt], name[max_clearness_index]]] return[name[kt]]
keyword[def] identifier[clearness_index] ( identifier[ghi] , identifier[solar_zenith] , identifier[extra_radiation] , identifier[min_cos_zenith] = literal[int] , identifier[max_clearness_index] = literal[int] ): literal[string] identifier[cos_zenith] = identifier[tools] . identifier[cosd] ( identifier[solar_zenith] ) identifier[I0h] = identifier[extra_radiation] * identifier[np] . identifier[maximum] ( identifier[cos_zenith] , identifier[min_cos_zenith] ) identifier[kt] = identifier[ghi] / identifier[I0h] identifier[kt] = identifier[np] . identifier[maximum] ( identifier[kt] , literal[int] ) identifier[kt] = identifier[np] . identifier[minimum] ( identifier[kt] , identifier[max_clearness_index] ) keyword[return] identifier[kt]
def clearness_index(ghi, solar_zenith, extra_radiation, min_cos_zenith=0.065, max_clearness_index=2.0): """ Calculate the clearness index. The clearness index is the ratio of global to extraterrestrial irradiance on a horizontal plane. Parameters ---------- ghi : numeric Global horizontal irradiance in W/m^2. solar_zenith : numeric True (not refraction-corrected) solar zenith angle in decimal degrees. extra_radiation : numeric Irradiance incident at the top of the atmosphere min_cos_zenith : numeric, default 0.065 Minimum value of cos(zenith) to allow when calculating global clearness index `kt`. Equivalent to zenith = 86.273 degrees. max_clearness_index : numeric, default 2.0 Maximum value of the clearness index. The default, 2.0, allows for over-irradiance events typically seen in sub-hourly data. NREL's SRRL Fortran code used 0.82 for hourly data. Returns ------- kt : numeric Clearness index References ---------- .. [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly Global Horizontal to Direct Normal Insolation", Technical Report No. SERI/TR-215-3087, Golden, CO: Solar Energy Research Institute, 1987. """ cos_zenith = tools.cosd(solar_zenith) I0h = extra_radiation * np.maximum(cos_zenith, min_cos_zenith) # consider adding # with np.errstate(invalid='ignore', divide='ignore'): # to kt calculation, but perhaps it's good to allow these # warnings to the users that override min_cos_zenith kt = ghi / I0h kt = np.maximum(kt, 0) kt = np.minimum(kt, max_clearness_index) return kt
def get_html(self, url, params=None, cache_cb=None, decoder_encoding=None, decoder_errors=url_specified_decoder.ErrorsHandle.strict, **kwargs): """ Get html of an url. """ response = self.get( url=url, params=params, cache_cb=cache_cb, **kwargs ) return url_specified_decoder.decode( binary=response.content, url=response.url, encoding=decoder_encoding, errors=decoder_errors, )
def function[get_html, parameter[self, url, params, cache_cb, decoder_encoding, decoder_errors]]: constant[ Get html of an url. ] variable[response] assign[=] call[name[self].get, parameter[]] return[call[name[url_specified_decoder].decode, parameter[]]]
keyword[def] identifier[get_html] ( identifier[self] , identifier[url] , identifier[params] = keyword[None] , identifier[cache_cb] = keyword[None] , identifier[decoder_encoding] = keyword[None] , identifier[decoder_errors] = identifier[url_specified_decoder] . identifier[ErrorsHandle] . identifier[strict] , ** identifier[kwargs] ): literal[string] identifier[response] = identifier[self] . identifier[get] ( identifier[url] = identifier[url] , identifier[params] = identifier[params] , identifier[cache_cb] = identifier[cache_cb] , ** identifier[kwargs] ) keyword[return] identifier[url_specified_decoder] . identifier[decode] ( identifier[binary] = identifier[response] . identifier[content] , identifier[url] = identifier[response] . identifier[url] , identifier[encoding] = identifier[decoder_encoding] , identifier[errors] = identifier[decoder_errors] , )
def get_html(self, url, params=None, cache_cb=None, decoder_encoding=None, decoder_errors=url_specified_decoder.ErrorsHandle.strict, **kwargs): """ Get html of an url. """ response = self.get(url=url, params=params, cache_cb=cache_cb, **kwargs) return url_specified_decoder.decode(binary=response.content, url=response.url, encoding=decoder_encoding, errors=decoder_errors)
def evidence(ns_run, logw=None, simulate=False): r"""Bayesian evidence :math:`\log \mathcal{Z}`. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) return np.exp(scipy.special.logsumexp(logw))
def function[evidence, parameter[ns_run, logw, simulate]]: constant[Bayesian evidence :math:`\log \mathcal{Z}`. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float ] if compare[name[logw] is constant[None]] begin[:] variable[logw] assign[=] call[name[nestcheck].ns_run_utils.get_logw, parameter[name[ns_run]]] return[call[name[np].exp, parameter[call[name[scipy].special.logsumexp, parameter[name[logw]]]]]]
keyword[def] identifier[evidence] ( identifier[ns_run] , identifier[logw] = keyword[None] , identifier[simulate] = keyword[False] ): literal[string] keyword[if] identifier[logw] keyword[is] keyword[None] : identifier[logw] = identifier[nestcheck] . identifier[ns_run_utils] . identifier[get_logw] ( identifier[ns_run] , identifier[simulate] = identifier[simulate] ) keyword[return] identifier[np] . identifier[exp] ( identifier[scipy] . identifier[special] . identifier[logsumexp] ( identifier[logw] ))
def evidence(ns_run, logw=None, simulate=False): """Bayesian evidence :math:`\\log \\mathcal{Z}`. Parameters ---------- ns_run: dict Nested sampling run dict (see the data_processing module docstring for more details). logw: None or 1d numpy array, optional Log weights of samples. simulate: bool, optional Passed to ns_run_utils.get_logw if logw needs to be calculated. Returns ------- float """ if logw is None: logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate) # depends on [control=['if'], data=['logw']] return np.exp(scipy.special.logsumexp(logw))
def copy_to_clipboard(self, copy=True): """ Copies the selected items to the clipboard :param copy: True to copy, False to cut. """ urls = self.selected_urls() if not urls: return mime = self._UrlListMimeData(copy) mime.set_list(urls) clipboard = QtWidgets.QApplication.clipboard() clipboard.setMimeData(mime)
def function[copy_to_clipboard, parameter[self, copy]]: constant[ Copies the selected items to the clipboard :param copy: True to copy, False to cut. ] variable[urls] assign[=] call[name[self].selected_urls, parameter[]] if <ast.UnaryOp object at 0x7da20e9562c0> begin[:] return[None] variable[mime] assign[=] call[name[self]._UrlListMimeData, parameter[name[copy]]] call[name[mime].set_list, parameter[name[urls]]] variable[clipboard] assign[=] call[name[QtWidgets].QApplication.clipboard, parameter[]] call[name[clipboard].setMimeData, parameter[name[mime]]]
keyword[def] identifier[copy_to_clipboard] ( identifier[self] , identifier[copy] = keyword[True] ): literal[string] identifier[urls] = identifier[self] . identifier[selected_urls] () keyword[if] keyword[not] identifier[urls] : keyword[return] identifier[mime] = identifier[self] . identifier[_UrlListMimeData] ( identifier[copy] ) identifier[mime] . identifier[set_list] ( identifier[urls] ) identifier[clipboard] = identifier[QtWidgets] . identifier[QApplication] . identifier[clipboard] () identifier[clipboard] . identifier[setMimeData] ( identifier[mime] )
def copy_to_clipboard(self, copy=True): """ Copies the selected items to the clipboard :param copy: True to copy, False to cut. """ urls = self.selected_urls() if not urls: return # depends on [control=['if'], data=[]] mime = self._UrlListMimeData(copy) mime.set_list(urls) clipboard = QtWidgets.QApplication.clipboard() clipboard.setMimeData(mime)
async def send_tokens(payment_handle: int, tokens: int, address: str) -> str: """ Sends tokens to an address payment_handle is always 0 :param payment_handle: Integer :param tokens: Integer :param address: String Example: payment_handle = 0 amount = 1000 address = await Wallet.create_payment_address('00000000000000000000000001234567') await Wallet.send_tokens(payment_handle, amount, address) :return: """ logger = logging.getLogger(__name__) if not hasattr(Wallet.send_tokens, "cb"): logger.debug("vcx_wallet_send_tokens: Creating callback") Wallet.send_tokens.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p)) c_payment_handle = c_uint32(payment_handle) c_tokens = c_char_p(str(tokens).encode('utf-8')) c_address = c_char_p(address.encode('utf-8')) result = await do_call('vcx_wallet_send_tokens', c_payment_handle, c_tokens, c_address, Wallet.send_tokens.cb) logger.debug("vcx_wallet_send_tokens completed") return result
<ast.AsyncFunctionDef object at 0x7da18dc9ac50>
keyword[async] keyword[def] identifier[send_tokens] ( identifier[payment_handle] : identifier[int] , identifier[tokens] : identifier[int] , identifier[address] : identifier[str] )-> identifier[str] : literal[string] identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) keyword[if] keyword[not] identifier[hasattr] ( identifier[Wallet] . identifier[send_tokens] , literal[string] ): identifier[logger] . identifier[debug] ( literal[string] ) identifier[Wallet] . identifier[send_tokens] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_uint32] , identifier[c_uint32] , identifier[c_char_p] )) identifier[c_payment_handle] = identifier[c_uint32] ( identifier[payment_handle] ) identifier[c_tokens] = identifier[c_char_p] ( identifier[str] ( identifier[tokens] ). identifier[encode] ( literal[string] )) identifier[c_address] = identifier[c_char_p] ( identifier[address] . identifier[encode] ( literal[string] )) identifier[result] = keyword[await] identifier[do_call] ( literal[string] , identifier[c_payment_handle] , identifier[c_tokens] , identifier[c_address] , identifier[Wallet] . identifier[send_tokens] . identifier[cb] ) identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[result]
async def send_tokens(payment_handle: int, tokens: int, address: str) -> str: """ Sends tokens to an address payment_handle is always 0 :param payment_handle: Integer :param tokens: Integer :param address: String Example: payment_handle = 0 amount = 1000 address = await Wallet.create_payment_address('00000000000000000000000001234567') await Wallet.send_tokens(payment_handle, amount, address) :return: """ logger = logging.getLogger(__name__) if not hasattr(Wallet.send_tokens, 'cb'): logger.debug('vcx_wallet_send_tokens: Creating callback') Wallet.send_tokens.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p)) # depends on [control=['if'], data=[]] c_payment_handle = c_uint32(payment_handle) c_tokens = c_char_p(str(tokens).encode('utf-8')) c_address = c_char_p(address.encode('utf-8')) result = await do_call('vcx_wallet_send_tokens', c_payment_handle, c_tokens, c_address, Wallet.send_tokens.cb) logger.debug('vcx_wallet_send_tokens completed') return result
def caller_path(steps=1): """Return the path to the source file of the current frames' caller.""" frame = sys._getframe(steps + 1) try: path = os.path.dirname(frame.f_code.co_filename) finally: del frame if not path: path = os.getcwd() return os.path.realpath(path)
def function[caller_path, parameter[steps]]: constant[Return the path to the source file of the current frames' caller.] variable[frame] assign[=] call[name[sys]._getframe, parameter[binary_operation[name[steps] + constant[1]]]] <ast.Try object at 0x7da18f00ec80> if <ast.UnaryOp object at 0x7da18f00d900> begin[:] variable[path] assign[=] call[name[os].getcwd, parameter[]] return[call[name[os].path.realpath, parameter[name[path]]]]
keyword[def] identifier[caller_path] ( identifier[steps] = literal[int] ): literal[string] identifier[frame] = identifier[sys] . identifier[_getframe] ( identifier[steps] + literal[int] ) keyword[try] : identifier[path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[frame] . identifier[f_code] . identifier[co_filename] ) keyword[finally] : keyword[del] identifier[frame] keyword[if] keyword[not] identifier[path] : identifier[path] = identifier[os] . identifier[getcwd] () keyword[return] identifier[os] . identifier[path] . identifier[realpath] ( identifier[path] )
def caller_path(steps=1): """Return the path to the source file of the current frames' caller.""" frame = sys._getframe(steps + 1) try: path = os.path.dirname(frame.f_code.co_filename) # depends on [control=['try'], data=[]] finally: del frame if not path: path = os.getcwd() # depends on [control=['if'], data=[]] return os.path.realpath(path)
def directed_bipartition(seq, nontrivial=False): """Return a list of directed bipartitions for a sequence. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two parts. Example: >>> directed_bipartition((1, 2, 3)) # doctest: +NORMALIZE_WHITESPACE [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,)), ((3,), (1, 2)), ((1, 3), (2,)), ((2, 3), (1,)), ((1, 2, 3), ())] """ bipartitions = [ (tuple(seq[i] for i in part0_idx), tuple(seq[j] for j in part1_idx)) for part0_idx, part1_idx in directed_bipartition_indices(len(seq)) ] if nontrivial: # The first and last partitions have a part that is empty; skip them. # NOTE: This depends on the implementation of # `directed_partition_indices`. return bipartitions[1:-1] return bipartitions
def function[directed_bipartition, parameter[seq, nontrivial]]: constant[Return a list of directed bipartitions for a sequence. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two parts. Example: >>> directed_bipartition((1, 2, 3)) # doctest: +NORMALIZE_WHITESPACE [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,)), ((3,), (1, 2)), ((1, 3), (2,)), ((2, 3), (1,)), ((1, 2, 3), ())] ] variable[bipartitions] assign[=] <ast.ListComp object at 0x7da18eb54280> if name[nontrivial] begin[:] return[call[name[bipartitions]][<ast.Slice object at 0x7da18eb54a00>]] return[name[bipartitions]]
keyword[def] identifier[directed_bipartition] ( identifier[seq] , identifier[nontrivial] = keyword[False] ): literal[string] identifier[bipartitions] =[ ( identifier[tuple] ( identifier[seq] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[part0_idx] ), identifier[tuple] ( identifier[seq] [ identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[part1_idx] )) keyword[for] identifier[part0_idx] , identifier[part1_idx] keyword[in] identifier[directed_bipartition_indices] ( identifier[len] ( identifier[seq] )) ] keyword[if] identifier[nontrivial] : keyword[return] identifier[bipartitions] [ literal[int] :- literal[int] ] keyword[return] identifier[bipartitions]
def directed_bipartition(seq, nontrivial=False): """Return a list of directed bipartitions for a sequence. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two parts. Example: >>> directed_bipartition((1, 2, 3)) # doctest: +NORMALIZE_WHITESPACE [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,)), ((3,), (1, 2)), ((1, 3), (2,)), ((2, 3), (1,)), ((1, 2, 3), ())] """ bipartitions = [(tuple((seq[i] for i in part0_idx)), tuple((seq[j] for j in part1_idx))) for (part0_idx, part1_idx) in directed_bipartition_indices(len(seq))] if nontrivial: # The first and last partitions have a part that is empty; skip them. # NOTE: This depends on the implementation of # `directed_partition_indices`. return bipartitions[1:-1] # depends on [control=['if'], data=[]] return bipartitions
def ShiftRight(x, **unused_kwargs): """Layer to shift the tensor to the right by padding on axis 1.""" if not isinstance(x, (list, tuple)): # non-chunked inputs pad_widths = [(0, 0), (1, 0)] padded = np.pad(x, pad_widths, mode='constant') return padded[:, :-1] # Handling chunked inputs. Recall that the list of chunks represents a big # sequence (the concatenation of the chunks). We want to shift that sequence, # so we put a 0 in the beginning of the first chunk and the last element of # that chunk is used as the new first element of the next chunk, and so on. padded = [] last_value = np.zeros_like(x[0][:, -1]) for chunk in x: padded_chunk = np.concatenate([last_value[:, np.newaxis], chunk], axis=1) last_value = chunk[:, -1] padded.append(padded_chunk[:, :-1]) return padded
def function[ShiftRight, parameter[x]]: constant[Layer to shift the tensor to the right by padding on axis 1.] if <ast.UnaryOp object at 0x7da18fe90910> begin[:] variable[pad_widths] assign[=] list[[<ast.Tuple object at 0x7da18fe93a30>, <ast.Tuple object at 0x7da18fe90e20>]] variable[padded] assign[=] call[name[np].pad, parameter[name[x], name[pad_widths]]] return[call[name[padded]][tuple[[<ast.Slice object at 0x7da18fe906a0>, <ast.Slice object at 0x7da18fe90220>]]]] variable[padded] assign[=] list[[]] variable[last_value] assign[=] call[name[np].zeros_like, parameter[call[call[name[x]][constant[0]]][tuple[[<ast.Slice object at 0x7da20c7cb670>, <ast.UnaryOp object at 0x7da20c7c92a0>]]]]] for taget[name[chunk]] in starred[name[x]] begin[:] variable[padded_chunk] assign[=] call[name[np].concatenate, parameter[list[[<ast.Subscript object at 0x7da20c7cb9a0>, <ast.Name object at 0x7da20c7cb970>]]]] variable[last_value] assign[=] call[name[chunk]][tuple[[<ast.Slice object at 0x7da20c7ca050>, <ast.UnaryOp object at 0x7da20c7cb610>]]] call[name[padded].append, parameter[call[name[padded_chunk]][tuple[[<ast.Slice object at 0x7da20c7c8c40>, <ast.Slice object at 0x7da20c7c8d30>]]]]] return[name[padded]]
keyword[def] identifier[ShiftRight] ( identifier[x] ,** identifier[unused_kwargs] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[x] ,( identifier[list] , identifier[tuple] )): identifier[pad_widths] =[( literal[int] , literal[int] ),( literal[int] , literal[int] )] identifier[padded] = identifier[np] . identifier[pad] ( identifier[x] , identifier[pad_widths] , identifier[mode] = literal[string] ) keyword[return] identifier[padded] [:,:- literal[int] ] identifier[padded] =[] identifier[last_value] = identifier[np] . identifier[zeros_like] ( identifier[x] [ literal[int] ][:,- literal[int] ]) keyword[for] identifier[chunk] keyword[in] identifier[x] : identifier[padded_chunk] = identifier[np] . identifier[concatenate] ([ identifier[last_value] [:, identifier[np] . identifier[newaxis] ], identifier[chunk] ], identifier[axis] = literal[int] ) identifier[last_value] = identifier[chunk] [:,- literal[int] ] identifier[padded] . identifier[append] ( identifier[padded_chunk] [:,:- literal[int] ]) keyword[return] identifier[padded]
def ShiftRight(x, **unused_kwargs): """Layer to shift the tensor to the right by padding on axis 1.""" if not isinstance(x, (list, tuple)): # non-chunked inputs pad_widths = [(0, 0), (1, 0)] padded = np.pad(x, pad_widths, mode='constant') return padded[:, :-1] # depends on [control=['if'], data=[]] # Handling chunked inputs. Recall that the list of chunks represents a big # sequence (the concatenation of the chunks). We want to shift that sequence, # so we put a 0 in the beginning of the first chunk and the last element of # that chunk is used as the new first element of the next chunk, and so on. padded = [] last_value = np.zeros_like(x[0][:, -1]) for chunk in x: padded_chunk = np.concatenate([last_value[:, np.newaxis], chunk], axis=1) last_value = chunk[:, -1] padded.append(padded_chunk[:, :-1]) # depends on [control=['for'], data=['chunk']] return padded
def _bin_update_items(self, items, replace_at_most_one, replacements, leftovers): """ <replacements and <leftovers> are modified directly, ala pass by reference. """ for key, value in items: # If there are existing items with key <key> that have yet to be # marked for replacement, mark that item's value to be replaced by # <value> by appending it to <replacements>. if key in self and key not in replacements: replacements[key] = [value] elif (key in self and not replace_at_most_one and len(replacements[key]) < len(self.values(key))): replacements[key].append(value) else: if replace_at_most_one: replacements[key] = [value] else: leftovers.append((key, value))
def function[_bin_update_items, parameter[self, items, replace_at_most_one, replacements, leftovers]]: constant[ <replacements and <leftovers> are modified directly, ala pass by reference. ] for taget[tuple[[<ast.Name object at 0x7da1b1ea3370>, <ast.Name object at 0x7da1b1ea19c0>]]] in starred[name[items]] begin[:] if <ast.BoolOp object at 0x7da1b1ea0280> begin[:] call[name[replacements]][name[key]] assign[=] list[[<ast.Name object at 0x7da1b1ea1b40>]]
keyword[def] identifier[_bin_update_items] ( identifier[self] , identifier[items] , identifier[replace_at_most_one] , identifier[replacements] , identifier[leftovers] ): literal[string] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[items] : keyword[if] identifier[key] keyword[in] identifier[self] keyword[and] identifier[key] keyword[not] keyword[in] identifier[replacements] : identifier[replacements] [ identifier[key] ]=[ identifier[value] ] keyword[elif] ( identifier[key] keyword[in] identifier[self] keyword[and] keyword[not] identifier[replace_at_most_one] keyword[and] identifier[len] ( identifier[replacements] [ identifier[key] ])< identifier[len] ( identifier[self] . identifier[values] ( identifier[key] ))): identifier[replacements] [ identifier[key] ]. identifier[append] ( identifier[value] ) keyword[else] : keyword[if] identifier[replace_at_most_one] : identifier[replacements] [ identifier[key] ]=[ identifier[value] ] keyword[else] : identifier[leftovers] . identifier[append] (( identifier[key] , identifier[value] ))
def _bin_update_items(self, items, replace_at_most_one, replacements, leftovers): """ <replacements and <leftovers> are modified directly, ala pass by reference. """ for (key, value) in items: # If there are existing items with key <key> that have yet to be # marked for replacement, mark that item's value to be replaced by # <value> by appending it to <replacements>. if key in self and key not in replacements: replacements[key] = [value] # depends on [control=['if'], data=[]] elif key in self and (not replace_at_most_one) and (len(replacements[key]) < len(self.values(key))): replacements[key].append(value) # depends on [control=['if'], data=[]] elif replace_at_most_one: replacements[key] = [value] # depends on [control=['if'], data=[]] else: leftovers.append((key, value)) # depends on [control=['for'], data=[]]
def MOVS(self, params): """ MOVS Ra, Rb MOVS Ra, #imm8 Move the value of Rb or imm8 into Ra Ra and Rb must be low registers """ Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params) if self.is_immediate(Rb): self.check_arguments(low_registers=[Ra], imm8=[Rb]) def MOVS_func(): self.register[Ra] = self.convert_to_integer(Rb[1:]) # Set N and Z status flags self.set_NZ_flags(self.register[Ra]) return MOVS_func elif self.is_register(Rb): self.check_arguments(low_registers=(Ra, Rb)) def MOVS_func(): self.register[Ra] = self.register[Rb] self.set_NZ_flags(self.register[Ra]) return MOVS_func else: raise iarm.exceptions.ParsingError("Unknown parameter: {}".format(Rb))
def function[MOVS, parameter[self, params]]: constant[ MOVS Ra, Rb MOVS Ra, #imm8 Move the value of Rb or imm8 into Ra Ra and Rb must be low registers ] <ast.Tuple object at 0x7da20c9919c0> assign[=] call[name[self].get_two_parameters, parameter[name[self].TWO_PARAMETER_COMMA_SEPARATED, name[params]]] if call[name[self].is_immediate, parameter[name[Rb]]] begin[:] call[name[self].check_arguments, parameter[]] def function[MOVS_func, parameter[]]: call[name[self].register][name[Ra]] assign[=] call[name[self].convert_to_integer, parameter[call[name[Rb]][<ast.Slice object at 0x7da20c9903a0>]]] call[name[self].set_NZ_flags, parameter[call[name[self].register][name[Ra]]]] return[name[MOVS_func]]
keyword[def] identifier[MOVS] ( identifier[self] , identifier[params] ): literal[string] identifier[Ra] , identifier[Rb] = identifier[self] . identifier[get_two_parameters] ( identifier[self] . identifier[TWO_PARAMETER_COMMA_SEPARATED] , identifier[params] ) keyword[if] identifier[self] . identifier[is_immediate] ( identifier[Rb] ): identifier[self] . identifier[check_arguments] ( identifier[low_registers] =[ identifier[Ra] ], identifier[imm8] =[ identifier[Rb] ]) keyword[def] identifier[MOVS_func] (): identifier[self] . identifier[register] [ identifier[Ra] ]= identifier[self] . identifier[convert_to_integer] ( identifier[Rb] [ literal[int] :]) identifier[self] . identifier[set_NZ_flags] ( identifier[self] . identifier[register] [ identifier[Ra] ]) keyword[return] identifier[MOVS_func] keyword[elif] identifier[self] . identifier[is_register] ( identifier[Rb] ): identifier[self] . identifier[check_arguments] ( identifier[low_registers] =( identifier[Ra] , identifier[Rb] )) keyword[def] identifier[MOVS_func] (): identifier[self] . identifier[register] [ identifier[Ra] ]= identifier[self] . identifier[register] [ identifier[Rb] ] identifier[self] . identifier[set_NZ_flags] ( identifier[self] . identifier[register] [ identifier[Ra] ]) keyword[return] identifier[MOVS_func] keyword[else] : keyword[raise] identifier[iarm] . identifier[exceptions] . identifier[ParsingError] ( literal[string] . identifier[format] ( identifier[Rb] ))
def MOVS(self, params): """ MOVS Ra, Rb MOVS Ra, #imm8 Move the value of Rb or imm8 into Ra Ra and Rb must be low registers """ (Ra, Rb) = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params) if self.is_immediate(Rb): self.check_arguments(low_registers=[Ra], imm8=[Rb]) def MOVS_func(): self.register[Ra] = self.convert_to_integer(Rb[1:]) # Set N and Z status flags self.set_NZ_flags(self.register[Ra]) return MOVS_func # depends on [control=['if'], data=[]] elif self.is_register(Rb): self.check_arguments(low_registers=(Ra, Rb)) def MOVS_func(): self.register[Ra] = self.register[Rb] self.set_NZ_flags(self.register[Ra]) return MOVS_func # depends on [control=['if'], data=[]] else: raise iarm.exceptions.ParsingError('Unknown parameter: {}'.format(Rb))
def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result
def function[disk_partition_usage, parameter[all]]: constant[ Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ] variable[result] assign[=] call[name[disk_partitions], parameter[name[all]]] for taget[name[partition]] in starred[name[result]] begin[:] call[name[partition].update, parameter[call[name[disk_usage], parameter[call[name[partition]][constant[mountpoint]]]]]] return[name[result]]
keyword[def] identifier[disk_partition_usage] ( identifier[all] = keyword[False] ): literal[string] identifier[result] = identifier[disk_partitions] ( identifier[all] ) keyword[for] identifier[partition] keyword[in] identifier[result] : identifier[partition] . identifier[update] ( identifier[disk_usage] ( identifier[partition] [ literal[string] ])) keyword[return] identifier[result]
def disk_partition_usage(all=False): """ Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage """ result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) # depends on [control=['for'], data=['partition']] return result
def get_pokemon(pid=None,name=None,pokemons=None): '''get_pokemon will return a pokemon with a specific ID, or if none is given, will select randomly. First the pid will be used, then the name, then any filters. :param pid: the pokemon ID to return :param pokemons: the pokemons data structure ''' if pokemons == None: pokemons = catch_em_all() # First see if we want to find a pokemon by name if name is not None: catches = lookup_pokemon(field="name", value=name, pokemons=pokemons) if catches is not None: return catches print("We don't have a pokemon called %s" %name) sys.exit(1) # Next see if they want a random pokemon if pid is None: choices = list(pokemons.keys()) pid = int(choice(choices)) # Retrieve the random, or user selected pokemon if pid is not None and str(pid) in pokemons.keys(): return {pid:pokemons[str(pid)]} else: print("Cannot find pokemon with this criteria!")
def function[get_pokemon, parameter[pid, name, pokemons]]: constant[get_pokemon will return a pokemon with a specific ID, or if none is given, will select randomly. First the pid will be used, then the name, then any filters. :param pid: the pokemon ID to return :param pokemons: the pokemons data structure ] if compare[name[pokemons] equal[==] constant[None]] begin[:] variable[pokemons] assign[=] call[name[catch_em_all], parameter[]] if compare[name[name] is_not constant[None]] begin[:] variable[catches] assign[=] call[name[lookup_pokemon], parameter[]] if compare[name[catches] is_not constant[None]] begin[:] return[name[catches]] call[name[print], parameter[binary_operation[constant[We don't have a pokemon called %s] <ast.Mod object at 0x7da2590d6920> name[name]]]] call[name[sys].exit, parameter[constant[1]]] if compare[name[pid] is constant[None]] begin[:] variable[choices] assign[=] call[name[list], parameter[call[name[pokemons].keys, parameter[]]]] variable[pid] assign[=] call[name[int], parameter[call[name[choice], parameter[name[choices]]]]] if <ast.BoolOp object at 0x7da1b0d33250> begin[:] return[dictionary[[<ast.Name object at 0x7da20c6c7820>], [<ast.Subscript object at 0x7da20c6c6290>]]]
keyword[def] identifier[get_pokemon] ( identifier[pid] = keyword[None] , identifier[name] = keyword[None] , identifier[pokemons] = keyword[None] ): literal[string] keyword[if] identifier[pokemons] == keyword[None] : identifier[pokemons] = identifier[catch_em_all] () keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] : identifier[catches] = identifier[lookup_pokemon] ( identifier[field] = literal[string] , identifier[value] = identifier[name] , identifier[pokemons] = identifier[pokemons] ) keyword[if] identifier[catches] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[catches] identifier[print] ( literal[string] % identifier[name] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[if] identifier[pid] keyword[is] keyword[None] : identifier[choices] = identifier[list] ( identifier[pokemons] . identifier[keys] ()) identifier[pid] = identifier[int] ( identifier[choice] ( identifier[choices] )) keyword[if] identifier[pid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[str] ( identifier[pid] ) keyword[in] identifier[pokemons] . identifier[keys] (): keyword[return] { identifier[pid] : identifier[pokemons] [ identifier[str] ( identifier[pid] )]} keyword[else] : identifier[print] ( literal[string] )
def get_pokemon(pid=None, name=None, pokemons=None): """get_pokemon will return a pokemon with a specific ID, or if none is given, will select randomly. First the pid will be used, then the name, then any filters. :param pid: the pokemon ID to return :param pokemons: the pokemons data structure """ if pokemons == None: pokemons = catch_em_all() # depends on [control=['if'], data=['pokemons']] # First see if we want to find a pokemon by name if name is not None: catches = lookup_pokemon(field='name', value=name, pokemons=pokemons) if catches is not None: return catches # depends on [control=['if'], data=['catches']] print("We don't have a pokemon called %s" % name) sys.exit(1) # depends on [control=['if'], data=['name']] # Next see if they want a random pokemon if pid is None: choices = list(pokemons.keys()) pid = int(choice(choices)) # depends on [control=['if'], data=['pid']] # Retrieve the random, or user selected pokemon if pid is not None and str(pid) in pokemons.keys(): return {pid: pokemons[str(pid)]} # depends on [control=['if'], data=[]] else: print('Cannot find pokemon with this criteria!')
def get_pin_codes(self, refresh=False): """Get the list of PIN codes Codes can also be found with self.get_complex_value('PinCodes') """ if refresh: self.refresh() val = self.get_value("pincodes") # val syntax string: <VERSION=3>next_available_user_code_id\tuser_code_id,active,date_added,date_used,PIN_code,name;\t... # See (outdated) http://wiki.micasaverde.com/index.php/Luup_UPnP_Variables_and_Actions#DoorLock1 # Remove the trailing tab # ignore the version and next available at the start # and split out each set of code attributes raw_code_list = [] try: raw_code_list = val.rstrip().split('\t')[1:] except Exception as ex: logger.error('Got unsupported string {}: {}'.format(val, ex)) # Loop to create a list of codes codes = [] for code in raw_code_list: try: # Strip off trailing semicolon # Create a list from csv code_addrs = code.split(';')[0].split(',') # Get the code ID (slot) and see if it should have values slot, active = code_addrs[:2] if active != '0': # Since it has additional attributes, get the remaining ones _, _, pin, name = code_addrs[2:] # And add them as a tuple to the list codes.append((slot, name, pin)) except Exception as ex: logger.error('Problem parsing pin code string {}: {}'.format(code, ex)) return codes
def function[get_pin_codes, parameter[self, refresh]]: constant[Get the list of PIN codes Codes can also be found with self.get_complex_value('PinCodes') ] if name[refresh] begin[:] call[name[self].refresh, parameter[]] variable[val] assign[=] call[name[self].get_value, parameter[constant[pincodes]]] variable[raw_code_list] assign[=] list[[]] <ast.Try object at 0x7da1b118c5b0> variable[codes] assign[=] list[[]] for taget[name[code]] in starred[name[raw_code_list]] begin[:] <ast.Try object at 0x7da18fe91de0> return[name[codes]]
keyword[def] identifier[get_pin_codes] ( identifier[self] , identifier[refresh] = keyword[False] ): literal[string] keyword[if] identifier[refresh] : identifier[self] . identifier[refresh] () identifier[val] = identifier[self] . identifier[get_value] ( literal[string] ) identifier[raw_code_list] =[] keyword[try] : identifier[raw_code_list] = identifier[val] . identifier[rstrip] (). identifier[split] ( literal[string] )[ literal[int] :] keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[val] , identifier[ex] )) identifier[codes] =[] keyword[for] identifier[code] keyword[in] identifier[raw_code_list] : keyword[try] : identifier[code_addrs] = identifier[code] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] ) identifier[slot] , identifier[active] = identifier[code_addrs] [: literal[int] ] keyword[if] identifier[active] != literal[string] : identifier[_] , identifier[_] , identifier[pin] , identifier[name] = identifier[code_addrs] [ literal[int] :] identifier[codes] . identifier[append] (( identifier[slot] , identifier[name] , identifier[pin] )) keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[code] , identifier[ex] )) keyword[return] identifier[codes]
def get_pin_codes(self, refresh=False): """Get the list of PIN codes Codes can also be found with self.get_complex_value('PinCodes') """ if refresh: self.refresh() # depends on [control=['if'], data=[]] val = self.get_value('pincodes') # val syntax string: <VERSION=3>next_available_user_code_id\tuser_code_id,active,date_added,date_used,PIN_code,name;\t... # See (outdated) http://wiki.micasaverde.com/index.php/Luup_UPnP_Variables_and_Actions#DoorLock1 # Remove the trailing tab # ignore the version and next available at the start # and split out each set of code attributes raw_code_list = [] try: raw_code_list = val.rstrip().split('\t')[1:] # depends on [control=['try'], data=[]] except Exception as ex: logger.error('Got unsupported string {}: {}'.format(val, ex)) # depends on [control=['except'], data=['ex']] # Loop to create a list of codes codes = [] for code in raw_code_list: try: # Strip off trailing semicolon # Create a list from csv code_addrs = code.split(';')[0].split(',') # Get the code ID (slot) and see if it should have values (slot, active) = code_addrs[:2] if active != '0': # Since it has additional attributes, get the remaining ones (_, _, pin, name) = code_addrs[2:] # And add them as a tuple to the list codes.append((slot, name, pin)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception as ex: logger.error('Problem parsing pin code string {}: {}'.format(code, ex)) # depends on [control=['except'], data=['ex']] # depends on [control=['for'], data=['code']] return codes
def set_model_version(model, version): """ Sets the version of the ONNX model. :param model: instance of an ONNX model :param version: integer containing the version of the model Example: :: from onnxmltools.utils import set_model_version onnx_model = load_model("SqueezeNet.onnx") set_model_version(onnx_model, 1) """ if model is None or not isinstance(model, onnx_proto.ModelProto): raise ValueError("Model is not a valid ONNX model.") if not convert_utils.is_numeric_type(version): raise ValueError("Version must be a numeric type.") model.model_version = version
def function[set_model_version, parameter[model, version]]: constant[ Sets the version of the ONNX model. :param model: instance of an ONNX model :param version: integer containing the version of the model Example: :: from onnxmltools.utils import set_model_version onnx_model = load_model("SqueezeNet.onnx") set_model_version(onnx_model, 1) ] if <ast.BoolOp object at 0x7da1b1d985e0> begin[:] <ast.Raise object at 0x7da1b1d99ea0> if <ast.UnaryOp object at 0x7da1b1d992d0> begin[:] <ast.Raise object at 0x7da1b1d9b430> name[model].model_version assign[=] name[version]
keyword[def] identifier[set_model_version] ( identifier[model] , identifier[version] ): literal[string] keyword[if] identifier[model] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isinstance] ( identifier[model] , identifier[onnx_proto] . identifier[ModelProto] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] keyword[not] identifier[convert_utils] . identifier[is_numeric_type] ( identifier[version] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[model] . identifier[model_version] = identifier[version]
def set_model_version(model, version): """ Sets the version of the ONNX model. :param model: instance of an ONNX model :param version: integer containing the version of the model Example: :: from onnxmltools.utils import set_model_version onnx_model = load_model("SqueezeNet.onnx") set_model_version(onnx_model, 1) """ if model is None or not isinstance(model, onnx_proto.ModelProto): raise ValueError('Model is not a valid ONNX model.') # depends on [control=['if'], data=[]] if not convert_utils.is_numeric_type(version): raise ValueError('Version must be a numeric type.') # depends on [control=['if'], data=[]] model.model_version = version
def create(self, path, metadata, filter_=filter_hidden, object_class=None): """ Create objects in CDSTAR and register them in the catalog. Note that we guess the mimetype based on the filename extension, using `mimetypes.guess_type`. Thus, it is the caller's responsibility to add custom or otherwise uncommon types to the list of known types using `mimetypes.add_type`. :param path: :param metadata: :param filter_: :return: """ path = Path(path) if path.is_file(): fnames = [path] elif path.is_dir(): fnames = list(walk(path, mode='files')) else: raise ValueError('path must be a file or directory') # pragma: no cover for fname in fnames: if not filter_ or filter_(fname): created, obj = self._create(fname, metadata, object_class=object_class) yield fname, created, obj
def function[create, parameter[self, path, metadata, filter_, object_class]]: constant[ Create objects in CDSTAR and register them in the catalog. Note that we guess the mimetype based on the filename extension, using `mimetypes.guess_type`. Thus, it is the caller's responsibility to add custom or otherwise uncommon types to the list of known types using `mimetypes.add_type`. :param path: :param metadata: :param filter_: :return: ] variable[path] assign[=] call[name[Path], parameter[name[path]]] if call[name[path].is_file, parameter[]] begin[:] variable[fnames] assign[=] list[[<ast.Name object at 0x7da1b1589c30>]] for taget[name[fname]] in starred[name[fnames]] begin[:] if <ast.BoolOp object at 0x7da1b1588b80> begin[:] <ast.Tuple object at 0x7da1b158b490> assign[=] call[name[self]._create, parameter[name[fname], name[metadata]]] <ast.Yield object at 0x7da204622d10>
keyword[def] identifier[create] ( identifier[self] , identifier[path] , identifier[metadata] , identifier[filter_] = identifier[filter_hidden] , identifier[object_class] = keyword[None] ): literal[string] identifier[path] = identifier[Path] ( identifier[path] ) keyword[if] identifier[path] . identifier[is_file] (): identifier[fnames] =[ identifier[path] ] keyword[elif] identifier[path] . identifier[is_dir] (): identifier[fnames] = identifier[list] ( identifier[walk] ( identifier[path] , identifier[mode] = literal[string] )) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[for] identifier[fname] keyword[in] identifier[fnames] : keyword[if] keyword[not] identifier[filter_] keyword[or] identifier[filter_] ( identifier[fname] ): identifier[created] , identifier[obj] = identifier[self] . identifier[_create] ( identifier[fname] , identifier[metadata] , identifier[object_class] = identifier[object_class] ) keyword[yield] identifier[fname] , identifier[created] , identifier[obj]
def create(self, path, metadata, filter_=filter_hidden, object_class=None): """ Create objects in CDSTAR and register them in the catalog. Note that we guess the mimetype based on the filename extension, using `mimetypes.guess_type`. Thus, it is the caller's responsibility to add custom or otherwise uncommon types to the list of known types using `mimetypes.add_type`. :param path: :param metadata: :param filter_: :return: """ path = Path(path) if path.is_file(): fnames = [path] # depends on [control=['if'], data=[]] elif path.is_dir(): fnames = list(walk(path, mode='files')) # depends on [control=['if'], data=[]] else: raise ValueError('path must be a file or directory') # pragma: no cover for fname in fnames: if not filter_ or filter_(fname): (created, obj) = self._create(fname, metadata, object_class=object_class) yield (fname, created, obj) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']]
def help_center_section_articles(self, id, locale=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles" api_path = "/api/v2/help_center/sections/{id}/articles.json" api_path = api_path.format(id=id) if locale: api_opt_path = "/api/v2/help_center/{locale}/sections/{id}/articles.json" api_path = api_opt_path.format(id=id, locale=locale) return self.call(api_path, **kwargs)
def function[help_center_section_articles, parameter[self, id, locale]]: constant[https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles] variable[api_path] assign[=] constant[/api/v2/help_center/sections/{id}/articles.json] variable[api_path] assign[=] call[name[api_path].format, parameter[]] if name[locale] begin[:] variable[api_opt_path] assign[=] constant[/api/v2/help_center/{locale}/sections/{id}/articles.json] variable[api_path] assign[=] call[name[api_opt_path].format, parameter[]] return[call[name[self].call, parameter[name[api_path]]]]
keyword[def] identifier[help_center_section_articles] ( identifier[self] , identifier[id] , identifier[locale] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[api_path] = literal[string] identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[id] = identifier[id] ) keyword[if] identifier[locale] : identifier[api_opt_path] = literal[string] identifier[api_path] = identifier[api_opt_path] . identifier[format] ( identifier[id] = identifier[id] , identifier[locale] = identifier[locale] ) keyword[return] identifier[self] . identifier[call] ( identifier[api_path] ,** identifier[kwargs] )
def help_center_section_articles(self, id, locale=None, **kwargs): """https://developer.zendesk.com/rest_api/docs/help_center/articles#list-articles""" api_path = '/api/v2/help_center/sections/{id}/articles.json' api_path = api_path.format(id=id) if locale: api_opt_path = '/api/v2/help_center/{locale}/sections/{id}/articles.json' api_path = api_opt_path.format(id=id, locale=locale) # depends on [control=['if'], data=[]] return self.call(api_path, **kwargs)
def get_setup_version(location, reponame, pkgname=None, archive_commit=None): """Helper for use in setup.py to get the current version from either git describe or the .version file (if available). Set pkgname to the package name if it is different from the repository name. To ensure git information is included in a git archive, add setup.py to .gitattributes (in addition to __init__): ``` __init__.py export-subst setup.py export-subst ``` Then supply "$Format:%h$" for archive_commit. """ import warnings pkgname = reponame if pkgname is None else pkgname if archive_commit is None: warnings.warn("No archive commit available; git archives will not contain version information") return Version.setup_version(os.path.dirname(os.path.abspath(location)),reponame,pkgname=pkgname,archive_commit=archive_commit)
def function[get_setup_version, parameter[location, reponame, pkgname, archive_commit]]: constant[Helper for use in setup.py to get the current version from either git describe or the .version file (if available). Set pkgname to the package name if it is different from the repository name. To ensure git information is included in a git archive, add setup.py to .gitattributes (in addition to __init__): ``` __init__.py export-subst setup.py export-subst ``` Then supply "$Format:%h$" for archive_commit. ] import module[warnings] variable[pkgname] assign[=] <ast.IfExp object at 0x7da18fe930a0> if compare[name[archive_commit] is constant[None]] begin[:] call[name[warnings].warn, parameter[constant[No archive commit available; git archives will not contain version information]]] return[call[name[Version].setup_version, parameter[call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[location]]]]], name[reponame]]]]
keyword[def] identifier[get_setup_version] ( identifier[location] , identifier[reponame] , identifier[pkgname] = keyword[None] , identifier[archive_commit] = keyword[None] ): literal[string] keyword[import] identifier[warnings] identifier[pkgname] = identifier[reponame] keyword[if] identifier[pkgname] keyword[is] keyword[None] keyword[else] identifier[pkgname] keyword[if] identifier[archive_commit] keyword[is] keyword[None] : identifier[warnings] . identifier[warn] ( literal[string] ) keyword[return] identifier[Version] . identifier[setup_version] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[location] )), identifier[reponame] , identifier[pkgname] = identifier[pkgname] , identifier[archive_commit] = identifier[archive_commit] )
def get_setup_version(location, reponame, pkgname=None, archive_commit=None): """Helper for use in setup.py to get the current version from either git describe or the .version file (if available). Set pkgname to the package name if it is different from the repository name. To ensure git information is included in a git archive, add setup.py to .gitattributes (in addition to __init__): ``` __init__.py export-subst setup.py export-subst ``` Then supply "$Format:%h$" for archive_commit. """ import warnings pkgname = reponame if pkgname is None else pkgname if archive_commit is None: warnings.warn('No archive commit available; git archives will not contain version information') # depends on [control=['if'], data=[]] return Version.setup_version(os.path.dirname(os.path.abspath(location)), reponame, pkgname=pkgname, archive_commit=archive_commit)
def readFILTERLIST(self): """ Read a length-prefixed list of FILTERs """ number = self.readUI8() return [self.readFILTER() for _ in range(number)]
def function[readFILTERLIST, parameter[self]]: constant[ Read a length-prefixed list of FILTERs ] variable[number] assign[=] call[name[self].readUI8, parameter[]] return[<ast.ListComp object at 0x7da1b0d12b60>]
keyword[def] identifier[readFILTERLIST] ( identifier[self] ): literal[string] identifier[number] = identifier[self] . identifier[readUI8] () keyword[return] [ identifier[self] . identifier[readFILTER] () keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[number] )]
def readFILTERLIST(self): """ Read a length-prefixed list of FILTERs """ number = self.readUI8() return [self.readFILTER() for _ in range(number)]
def _find_best_root(self, covariation=True, force_positive=True, slope=0, **kwarks): ''' Determine the node that, when the tree is rooted on this node, results in the best regression of temporal constraints and root to tip distances. Parameters ---------- infer_gtr : bool If True, infer new GTR model after re-root covariation : bool account for covariation structure when rerooting the tree force_positive : bool only accept positive evolutionary rate estimates when rerooting the tree ''' for n in self.tree.find_clades(): n.branch_length=n.mutation_length self.logger("TreeTime._find_best_root: searching for the best root position...",2) Treg = self.setup_TreeRegression(covariation=covariation) return Treg.optimal_reroot(force_positive=force_positive, slope=slope)['node']
def function[_find_best_root, parameter[self, covariation, force_positive, slope]]: constant[ Determine the node that, when the tree is rooted on this node, results in the best regression of temporal constraints and root to tip distances. Parameters ---------- infer_gtr : bool If True, infer new GTR model after re-root covariation : bool account for covariation structure when rerooting the tree force_positive : bool only accept positive evolutionary rate estimates when rerooting the tree ] for taget[name[n]] in starred[call[name[self].tree.find_clades, parameter[]]] begin[:] name[n].branch_length assign[=] name[n].mutation_length call[name[self].logger, parameter[constant[TreeTime._find_best_root: searching for the best root position...], constant[2]]] variable[Treg] assign[=] call[name[self].setup_TreeRegression, parameter[]] return[call[call[name[Treg].optimal_reroot, parameter[]]][constant[node]]]
keyword[def] identifier[_find_best_root] ( identifier[self] , identifier[covariation] = keyword[True] , identifier[force_positive] = keyword[True] , identifier[slope] = literal[int] ,** identifier[kwarks] ): literal[string] keyword[for] identifier[n] keyword[in] identifier[self] . identifier[tree] . identifier[find_clades] (): identifier[n] . identifier[branch_length] = identifier[n] . identifier[mutation_length] identifier[self] . identifier[logger] ( literal[string] , literal[int] ) identifier[Treg] = identifier[self] . identifier[setup_TreeRegression] ( identifier[covariation] = identifier[covariation] ) keyword[return] identifier[Treg] . identifier[optimal_reroot] ( identifier[force_positive] = identifier[force_positive] , identifier[slope] = identifier[slope] )[ literal[string] ]
def _find_best_root(self, covariation=True, force_positive=True, slope=0, **kwarks): """ Determine the node that, when the tree is rooted on this node, results in the best regression of temporal constraints and root to tip distances. Parameters ---------- infer_gtr : bool If True, infer new GTR model after re-root covariation : bool account for covariation structure when rerooting the tree force_positive : bool only accept positive evolutionary rate estimates when rerooting the tree """ for n in self.tree.find_clades(): n.branch_length = n.mutation_length # depends on [control=['for'], data=['n']] self.logger('TreeTime._find_best_root: searching for the best root position...', 2) Treg = self.setup_TreeRegression(covariation=covariation) return Treg.optimal_reroot(force_positive=force_positive, slope=slope)['node']
def handleFailure(self, test, err): """ Baseclass override. Called when a test fails. If the test isn't going to be rerun again, then report the failure to the nose test result. :param test: The test that has raised an error :type test: :class:`nose.case.Test` :param err: Information about the test failure (from sys.exc_info()) :type err: `tuple` of `class`, :class:`Exception`, `traceback` :return: True, if the test will be rerun; False, if nose should handle it. :rtype: `bool` """ # pylint:disable=invalid-name want_failure = self._handle_test_error_or_failure(test, err) if not want_failure and id(test) in self._tests_that_reran: self._nose_result.addFailure(test, err) return want_failure or None
def function[handleFailure, parameter[self, test, err]]: constant[ Baseclass override. Called when a test fails. If the test isn't going to be rerun again, then report the failure to the nose test result. :param test: The test that has raised an error :type test: :class:`nose.case.Test` :param err: Information about the test failure (from sys.exc_info()) :type err: `tuple` of `class`, :class:`Exception`, `traceback` :return: True, if the test will be rerun; False, if nose should handle it. :rtype: `bool` ] variable[want_failure] assign[=] call[name[self]._handle_test_error_or_failure, parameter[name[test], name[err]]] if <ast.BoolOp object at 0x7da1b07ad2d0> begin[:] call[name[self]._nose_result.addFailure, parameter[name[test], name[err]]] return[<ast.BoolOp object at 0x7da1b063d930>]
keyword[def] identifier[handleFailure] ( identifier[self] , identifier[test] , identifier[err] ): literal[string] identifier[want_failure] = identifier[self] . identifier[_handle_test_error_or_failure] ( identifier[test] , identifier[err] ) keyword[if] keyword[not] identifier[want_failure] keyword[and] identifier[id] ( identifier[test] ) keyword[in] identifier[self] . identifier[_tests_that_reran] : identifier[self] . identifier[_nose_result] . identifier[addFailure] ( identifier[test] , identifier[err] ) keyword[return] identifier[want_failure] keyword[or] keyword[None]
def handleFailure(self, test, err): """ Baseclass override. Called when a test fails. If the test isn't going to be rerun again, then report the failure to the nose test result. :param test: The test that has raised an error :type test: :class:`nose.case.Test` :param err: Information about the test failure (from sys.exc_info()) :type err: `tuple` of `class`, :class:`Exception`, `traceback` :return: True, if the test will be rerun; False, if nose should handle it. :rtype: `bool` """ # pylint:disable=invalid-name want_failure = self._handle_test_error_or_failure(test, err) if not want_failure and id(test) in self._tests_that_reran: self._nose_result.addFailure(test, err) # depends on [control=['if'], data=[]] return want_failure or None
def _parse_pool_transaction_file( ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators, ledger_size=None): """ helper function for parseLedgerForHaAndKeys """ for _, txn in ledger.getAllTxn(to=ledger_size): if get_type(txn) == NODE: txn_data = get_payload_data(txn) nodeName = txn_data[DATA][ALIAS] clientStackName = nodeName + CLIENT_STACK_SUFFIX nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \ if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \ else None cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \ if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \ else None if nHa: nodeReg[nodeName] = HA(*nHa) if cHa: cliNodeReg[clientStackName] = HA(*cHa) try: # TODO: Need to handle abbreviated verkey key_type = 'verkey' verkey = cryptonymToHex(str(txn_data[TARGET_NYM])) key_type = 'identifier' cryptonymToHex(get_from(txn)) except ValueError: logger.exception( 'Invalid {}. Rebuild pool transactions.'.format(key_type)) exit('Invalid {}. Rebuild pool transactions.'.format(key_type)) nodeKeys[nodeName] = verkey services = txn_data[DATA].get(SERVICES) if isinstance(services, list): if VALIDATOR in services: activeValidators.add(nodeName) else: activeValidators.discard(nodeName)
def function[_parse_pool_transaction_file, parameter[ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators, ledger_size]]: constant[ helper function for parseLedgerForHaAndKeys ] for taget[tuple[[<ast.Name object at 0x7da2044c2bf0>, <ast.Name object at 0x7da2044c3160>]]] in starred[call[name[ledger].getAllTxn, parameter[]]] begin[:] if compare[call[name[get_type], parameter[name[txn]]] equal[==] name[NODE]] begin[:] variable[txn_data] assign[=] call[name[get_payload_data], parameter[name[txn]]] variable[nodeName] assign[=] call[call[name[txn_data]][name[DATA]]][name[ALIAS]] variable[clientStackName] assign[=] binary_operation[name[nodeName] + name[CLIENT_STACK_SUFFIX]] variable[nHa] assign[=] <ast.IfExp object at 0x7da2044c0940> variable[cHa] assign[=] <ast.IfExp object at 0x7da2044c2350> if name[nHa] begin[:] call[name[nodeReg]][name[nodeName]] assign[=] call[name[HA], parameter[<ast.Starred object at 0x7da2044c1c90>]] if name[cHa] begin[:] call[name[cliNodeReg]][name[clientStackName]] assign[=] call[name[HA], parameter[<ast.Starred object at 0x7da2044c2f50>]] <ast.Try object at 0x7da2044c2a10> call[name[nodeKeys]][name[nodeName]] assign[=] name[verkey] variable[services] assign[=] call[call[name[txn_data]][name[DATA]].get, parameter[name[SERVICES]]] if call[name[isinstance], parameter[name[services], name[list]]] begin[:] if compare[name[VALIDATOR] in name[services]] begin[:] call[name[activeValidators].add, parameter[name[nodeName]]]
keyword[def] identifier[_parse_pool_transaction_file] ( identifier[ledger] , identifier[nodeReg] , identifier[cliNodeReg] , identifier[nodeKeys] , identifier[activeValidators] , identifier[ledger_size] = keyword[None] ): literal[string] keyword[for] identifier[_] , identifier[txn] keyword[in] identifier[ledger] . identifier[getAllTxn] ( identifier[to] = identifier[ledger_size] ): keyword[if] identifier[get_type] ( identifier[txn] )== identifier[NODE] : identifier[txn_data] = identifier[get_payload_data] ( identifier[txn] ) identifier[nodeName] = identifier[txn_data] [ identifier[DATA] ][ identifier[ALIAS] ] identifier[clientStackName] = identifier[nodeName] + identifier[CLIENT_STACK_SUFFIX] identifier[nHa] =( identifier[txn_data] [ identifier[DATA] ][ identifier[NODE_IP] ], identifier[txn_data] [ identifier[DATA] ][ identifier[NODE_PORT] ]) keyword[if] ( identifier[NODE_IP] keyword[in] identifier[txn_data] [ identifier[DATA] ] keyword[and] identifier[NODE_PORT] keyword[in] identifier[txn_data] [ identifier[DATA] ]) keyword[else] keyword[None] identifier[cHa] =( identifier[txn_data] [ identifier[DATA] ][ identifier[CLIENT_IP] ], identifier[txn_data] [ identifier[DATA] ][ identifier[CLIENT_PORT] ]) keyword[if] ( identifier[CLIENT_IP] keyword[in] identifier[txn_data] [ identifier[DATA] ] keyword[and] identifier[CLIENT_PORT] keyword[in] identifier[txn_data] [ identifier[DATA] ]) keyword[else] keyword[None] keyword[if] identifier[nHa] : identifier[nodeReg] [ identifier[nodeName] ]= identifier[HA] (* identifier[nHa] ) keyword[if] identifier[cHa] : identifier[cliNodeReg] [ identifier[clientStackName] ]= identifier[HA] (* identifier[cHa] ) keyword[try] : identifier[key_type] = literal[string] identifier[verkey] = identifier[cryptonymToHex] ( identifier[str] ( identifier[txn_data] [ identifier[TARGET_NYM] ])) identifier[key_type] = literal[string] identifier[cryptonymToHex] ( identifier[get_from] ( identifier[txn] )) keyword[except] identifier[ValueError] : 
identifier[logger] . identifier[exception] ( literal[string] . identifier[format] ( identifier[key_type] )) identifier[exit] ( literal[string] . identifier[format] ( identifier[key_type] )) identifier[nodeKeys] [ identifier[nodeName] ]= identifier[verkey] identifier[services] = identifier[txn_data] [ identifier[DATA] ]. identifier[get] ( identifier[SERVICES] ) keyword[if] identifier[isinstance] ( identifier[services] , identifier[list] ): keyword[if] identifier[VALIDATOR] keyword[in] identifier[services] : identifier[activeValidators] . identifier[add] ( identifier[nodeName] ) keyword[else] : identifier[activeValidators] . identifier[discard] ( identifier[nodeName] )
def _parse_pool_transaction_file(ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators, ledger_size=None): """ helper function for parseLedgerForHaAndKeys """ for (_, txn) in ledger.getAllTxn(to=ledger_size): if get_type(txn) == NODE: txn_data = get_payload_data(txn) nodeName = txn_data[DATA][ALIAS] clientStackName = nodeName + CLIENT_STACK_SUFFIX nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) if NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA] else None cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) if CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA] else None if nHa: nodeReg[nodeName] = HA(*nHa) # depends on [control=['if'], data=[]] if cHa: cliNodeReg[clientStackName] = HA(*cHa) # depends on [control=['if'], data=[]] try: # TODO: Need to handle abbreviated verkey key_type = 'verkey' verkey = cryptonymToHex(str(txn_data[TARGET_NYM])) key_type = 'identifier' cryptonymToHex(get_from(txn)) # depends on [control=['try'], data=[]] except ValueError: logger.exception('Invalid {}. Rebuild pool transactions.'.format(key_type)) exit('Invalid {}. Rebuild pool transactions.'.format(key_type)) # depends on [control=['except'], data=[]] nodeKeys[nodeName] = verkey services = txn_data[DATA].get(SERVICES) if isinstance(services, list): if VALIDATOR in services: activeValidators.add(nodeName) # depends on [control=['if'], data=[]] else: activeValidators.discard(nodeName) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def highlight_matches(self): """Highlight found results""" if self.is_code_editor and self.highlight_button.isChecked(): text = self.search_text.currentText() words = self.words_button.isChecked() regexp = self.re_button.isChecked() self.editor.highlight_found_results(text, words=words, regexp=regexp)
def function[highlight_matches, parameter[self]]: constant[Highlight found results] if <ast.BoolOp object at 0x7da18f00c880> begin[:] variable[text] assign[=] call[name[self].search_text.currentText, parameter[]] variable[words] assign[=] call[name[self].words_button.isChecked, parameter[]] variable[regexp] assign[=] call[name[self].re_button.isChecked, parameter[]] call[name[self].editor.highlight_found_results, parameter[name[text]]]
keyword[def] identifier[highlight_matches] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[is_code_editor] keyword[and] identifier[self] . identifier[highlight_button] . identifier[isChecked] (): identifier[text] = identifier[self] . identifier[search_text] . identifier[currentText] () identifier[words] = identifier[self] . identifier[words_button] . identifier[isChecked] () identifier[regexp] = identifier[self] . identifier[re_button] . identifier[isChecked] () identifier[self] . identifier[editor] . identifier[highlight_found_results] ( identifier[text] , identifier[words] = identifier[words] , identifier[regexp] = identifier[regexp] )
def highlight_matches(self): """Highlight found results""" if self.is_code_editor and self.highlight_button.isChecked(): text = self.search_text.currentText() words = self.words_button.isChecked() regexp = self.re_button.isChecked() self.editor.highlight_found_results(text, words=words, regexp=regexp) # depends on [control=['if'], data=[]]
def retrieve_all_pages(api_endpoint, **kwargs): """ Some MTP apis are paginated using Django Rest Framework's LimitOffsetPagination paginator, this method loads all pages into a single results list :param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get` :param kwargs: additional arguments to pass into api callable """ page_size = getattr(settings, 'REQUEST_PAGE_SIZE', 20) loaded_results = [] offset = 0 while True: response = api_endpoint(limit=page_size, offset=offset, **kwargs) count = response.get('count', 0) loaded_results += response.get('results', []) if len(loaded_results) >= count: break offset += page_size return loaded_results
def function[retrieve_all_pages, parameter[api_endpoint]]: constant[ Some MTP apis are paginated using Django Rest Framework's LimitOffsetPagination paginator, this method loads all pages into a single results list :param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get` :param kwargs: additional arguments to pass into api callable ] variable[page_size] assign[=] call[name[getattr], parameter[name[settings], constant[REQUEST_PAGE_SIZE], constant[20]]] variable[loaded_results] assign[=] list[[]] variable[offset] assign[=] constant[0] while constant[True] begin[:] variable[response] assign[=] call[name[api_endpoint], parameter[]] variable[count] assign[=] call[name[response].get, parameter[constant[count], constant[0]]] <ast.AugAssign object at 0x7da2044c2860> if compare[call[name[len], parameter[name[loaded_results]]] greater_or_equal[>=] name[count]] begin[:] break <ast.AugAssign object at 0x7da2044c2e30> return[name[loaded_results]]
keyword[def] identifier[retrieve_all_pages] ( identifier[api_endpoint] ,** identifier[kwargs] ): literal[string] identifier[page_size] = identifier[getattr] ( identifier[settings] , literal[string] , literal[int] ) identifier[loaded_results] =[] identifier[offset] = literal[int] keyword[while] keyword[True] : identifier[response] = identifier[api_endpoint] ( identifier[limit] = identifier[page_size] , identifier[offset] = identifier[offset] , ** identifier[kwargs] ) identifier[count] = identifier[response] . identifier[get] ( literal[string] , literal[int] ) identifier[loaded_results] += identifier[response] . identifier[get] ( literal[string] ,[]) keyword[if] identifier[len] ( identifier[loaded_results] )>= identifier[count] : keyword[break] identifier[offset] += identifier[page_size] keyword[return] identifier[loaded_results]
def retrieve_all_pages(api_endpoint, **kwargs): """ Some MTP apis are paginated using Django Rest Framework's LimitOffsetPagination paginator, this method loads all pages into a single results list :param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get` :param kwargs: additional arguments to pass into api callable """ page_size = getattr(settings, 'REQUEST_PAGE_SIZE', 20) loaded_results = [] offset = 0 while True: response = api_endpoint(limit=page_size, offset=offset, **kwargs) count = response.get('count', 0) loaded_results += response.get('results', []) if len(loaded_results) >= count: break # depends on [control=['if'], data=[]] offset += page_size # depends on [control=['while'], data=[]] return loaded_results
def c_mvar(self): """ return tau exponent "c" for noise type. MVAR = prefactor * h_a * tau^c """ if self.b == -4: return 1.0 elif self.b == -3: return 0.0 elif self.b == -2: return -1.0 elif self.b == -1: return -2.0 elif self.b == 0: return -3.0
def function[c_mvar, parameter[self]]: constant[ return tau exponent "c" for noise type. MVAR = prefactor * h_a * tau^c ] if compare[name[self].b equal[==] <ast.UnaryOp object at 0x7da204622110>] begin[:] return[constant[1.0]]
keyword[def] identifier[c_mvar] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[b] ==- literal[int] : keyword[return] literal[int] keyword[elif] identifier[self] . identifier[b] ==- literal[int] : keyword[return] literal[int] keyword[elif] identifier[self] . identifier[b] ==- literal[int] : keyword[return] - literal[int] keyword[elif] identifier[self] . identifier[b] ==- literal[int] : keyword[return] - literal[int] keyword[elif] identifier[self] . identifier[b] == literal[int] : keyword[return] - literal[int]
def c_mvar(self): """ return tau exponent "c" for noise type. MVAR = prefactor * h_a * tau^c """ if self.b == -4: return 1.0 # depends on [control=['if'], data=[]] elif self.b == -3: return 0.0 # depends on [control=['if'], data=[]] elif self.b == -2: return -1.0 # depends on [control=['if'], data=[]] elif self.b == -1: return -2.0 # depends on [control=['if'], data=[]] elif self.b == 0: return -3.0 # depends on [control=['if'], data=[]]
def set_feature_generator(self): """Generates proteins with quant from the lookup table""" self.features = preparation.build_peptidetable(self.lookup, self.headerfields, self.isobaric, self.precursor, self.fdr, self.pep, self.genecentric)
def function[set_feature_generator, parameter[self]]: constant[Generates proteins with quant from the lookup table] name[self].features assign[=] call[name[preparation].build_peptidetable, parameter[name[self].lookup, name[self].headerfields, name[self].isobaric, name[self].precursor, name[self].fdr, name[self].pep, name[self].genecentric]]
keyword[def] identifier[set_feature_generator] ( identifier[self] ): literal[string] identifier[self] . identifier[features] = identifier[preparation] . identifier[build_peptidetable] ( identifier[self] . identifier[lookup] , identifier[self] . identifier[headerfields] , identifier[self] . identifier[isobaric] , identifier[self] . identifier[precursor] , identifier[self] . identifier[fdr] , identifier[self] . identifier[pep] , identifier[self] . identifier[genecentric] )
def set_feature_generator(self): """Generates proteins with quant from the lookup table""" self.features = preparation.build_peptidetable(self.lookup, self.headerfields, self.isobaric, self.precursor, self.fdr, self.pep, self.genecentric)
def get_data(self): """ Returns the data of the X509 extension, encoded as ASN.1. :return: The ASN.1 encoded data of this X509 extension. :rtype: :py:data:`bytes` .. versionadded:: 0.12 """ octet_result = _lib.X509_EXTENSION_get_data(self._extension) string_result = _ffi.cast('ASN1_STRING*', octet_result) char_result = _lib.ASN1_STRING_data(string_result) result_length = _lib.ASN1_STRING_length(string_result) return _ffi.buffer(char_result, result_length)[:]
def function[get_data, parameter[self]]: constant[ Returns the data of the X509 extension, encoded as ASN.1. :return: The ASN.1 encoded data of this X509 extension. :rtype: :py:data:`bytes` .. versionadded:: 0.12 ] variable[octet_result] assign[=] call[name[_lib].X509_EXTENSION_get_data, parameter[name[self]._extension]] variable[string_result] assign[=] call[name[_ffi].cast, parameter[constant[ASN1_STRING*], name[octet_result]]] variable[char_result] assign[=] call[name[_lib].ASN1_STRING_data, parameter[name[string_result]]] variable[result_length] assign[=] call[name[_lib].ASN1_STRING_length, parameter[name[string_result]]] return[call[call[name[_ffi].buffer, parameter[name[char_result], name[result_length]]]][<ast.Slice object at 0x7da1b0259e70>]]
keyword[def] identifier[get_data] ( identifier[self] ): literal[string] identifier[octet_result] = identifier[_lib] . identifier[X509_EXTENSION_get_data] ( identifier[self] . identifier[_extension] ) identifier[string_result] = identifier[_ffi] . identifier[cast] ( literal[string] , identifier[octet_result] ) identifier[char_result] = identifier[_lib] . identifier[ASN1_STRING_data] ( identifier[string_result] ) identifier[result_length] = identifier[_lib] . identifier[ASN1_STRING_length] ( identifier[string_result] ) keyword[return] identifier[_ffi] . identifier[buffer] ( identifier[char_result] , identifier[result_length] )[:]
def get_data(self): """ Returns the data of the X509 extension, encoded as ASN.1. :return: The ASN.1 encoded data of this X509 extension. :rtype: :py:data:`bytes` .. versionadded:: 0.12 """ octet_result = _lib.X509_EXTENSION_get_data(self._extension) string_result = _ffi.cast('ASN1_STRING*', octet_result) char_result = _lib.ASN1_STRING_data(string_result) result_length = _lib.ASN1_STRING_length(string_result) return _ffi.buffer(char_result, result_length)[:]
def size(self): """Return the number of elements of the cursor with skip and limit""" initial_count = self.count() count_with_skip = max(0, initial_count - self._skip) size = min(count_with_skip, self._limit) return size
def function[size, parameter[self]]: constant[Return the number of elements of the cursor with skip and limit] variable[initial_count] assign[=] call[name[self].count, parameter[]] variable[count_with_skip] assign[=] call[name[max], parameter[constant[0], binary_operation[name[initial_count] - name[self]._skip]]] variable[size] assign[=] call[name[min], parameter[name[count_with_skip], name[self]._limit]] return[name[size]]
keyword[def] identifier[size] ( identifier[self] ): literal[string] identifier[initial_count] = identifier[self] . identifier[count] () identifier[count_with_skip] = identifier[max] ( literal[int] , identifier[initial_count] - identifier[self] . identifier[_skip] ) identifier[size] = identifier[min] ( identifier[count_with_skip] , identifier[self] . identifier[_limit] ) keyword[return] identifier[size]
def size(self): """Return the number of elements of the cursor with skip and limit""" initial_count = self.count() count_with_skip = max(0, initial_count - self._skip) size = min(count_with_skip, self._limit) return size
def get_reversed_statuses(context): """Return a mapping of exit codes to status strings. Args: context (scriptworker.context.Context): the scriptworker context Returns: dict: the mapping of exit codes to status strings. """ _rev = {v: k for k, v in STATUSES.items()} _rev.update(dict(context.config['reversed_statuses'])) return _rev
def function[get_reversed_statuses, parameter[context]]: constant[Return a mapping of exit codes to status strings. Args: context (scriptworker.context.Context): the scriptworker context Returns: dict: the mapping of exit codes to status strings. ] variable[_rev] assign[=] <ast.DictComp object at 0x7da1b0b0ad10> call[name[_rev].update, parameter[call[name[dict], parameter[call[name[context].config][constant[reversed_statuses]]]]]] return[name[_rev]]
keyword[def] identifier[get_reversed_statuses] ( identifier[context] ): literal[string] identifier[_rev] ={ identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[STATUSES] . identifier[items] ()} identifier[_rev] . identifier[update] ( identifier[dict] ( identifier[context] . identifier[config] [ literal[string] ])) keyword[return] identifier[_rev]
def get_reversed_statuses(context): """Return a mapping of exit codes to status strings. Args: context (scriptworker.context.Context): the scriptworker context Returns: dict: the mapping of exit codes to status strings. """ _rev = {v: k for (k, v) in STATUSES.items()} _rev.update(dict(context.config['reversed_statuses'])) return _rev
def is_zipstream(data): """ just like zipfile.is_zipfile, but works upon buffers and streams rather than filenames. If data supports the read method, it will be treated as a stream and read from to test whether it is a valid ZipFile. If data also supports the tell and seek methods, it will be rewound after being tested. """ if isinstance(data, (str, buffer)): data = BytesIO(data) if hasattr(data, "read"): tell = 0 if hasattr(data, "tell"): tell = data.tell() try: result = bool(_EndRecData(data)) except IOError: result = False if hasattr(data, "seek"): data.seek(tell) else: raise TypeError("requies str, buffer, or stream-like object") return result
def function[is_zipstream, parameter[data]]: constant[ just like zipfile.is_zipfile, but works upon buffers and streams rather than filenames. If data supports the read method, it will be treated as a stream and read from to test whether it is a valid ZipFile. If data also supports the tell and seek methods, it will be rewound after being tested. ] if call[name[isinstance], parameter[name[data], tuple[[<ast.Name object at 0x7da1b0c33070>, <ast.Name object at 0x7da1b0c33940>]]]] begin[:] variable[data] assign[=] call[name[BytesIO], parameter[name[data]]] if call[name[hasattr], parameter[name[data], constant[read]]] begin[:] variable[tell] assign[=] constant[0] if call[name[hasattr], parameter[name[data], constant[tell]]] begin[:] variable[tell] assign[=] call[name[data].tell, parameter[]] <ast.Try object at 0x7da1b0c32050> if call[name[hasattr], parameter[name[data], constant[seek]]] begin[:] call[name[data].seek, parameter[name[tell]]] return[name[result]]
keyword[def] identifier[is_zipstream] ( identifier[data] ): literal[string] keyword[if] identifier[isinstance] ( identifier[data] ,( identifier[str] , identifier[buffer] )): identifier[data] = identifier[BytesIO] ( identifier[data] ) keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ): identifier[tell] = literal[int] keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ): identifier[tell] = identifier[data] . identifier[tell] () keyword[try] : identifier[result] = identifier[bool] ( identifier[_EndRecData] ( identifier[data] )) keyword[except] identifier[IOError] : identifier[result] = keyword[False] keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ): identifier[data] . identifier[seek] ( identifier[tell] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] ) keyword[return] identifier[result]
def is_zipstream(data): """ just like zipfile.is_zipfile, but works upon buffers and streams rather than filenames. If data supports the read method, it will be treated as a stream and read from to test whether it is a valid ZipFile. If data also supports the tell and seek methods, it will be rewound after being tested. """ if isinstance(data, (str, buffer)): data = BytesIO(data) # depends on [control=['if'], data=[]] if hasattr(data, 'read'): tell = 0 if hasattr(data, 'tell'): tell = data.tell() # depends on [control=['if'], data=[]] try: result = bool(_EndRecData(data)) # depends on [control=['try'], data=[]] except IOError: result = False # depends on [control=['except'], data=[]] if hasattr(data, 'seek'): data.seek(tell) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: raise TypeError('requies str, buffer, or stream-like object') return result
def short_full_symbol(self): """Gets the full symbol excluding the character under the cursor.""" if self._short_full_symbol is None: self._short_full_symbol = self._symbol_extract(cache.RE_FULL_CURSOR, False, True) return self._short_full_symbol
def function[short_full_symbol, parameter[self]]: constant[Gets the full symbol excluding the character under the cursor.] if compare[name[self]._short_full_symbol is constant[None]] begin[:] name[self]._short_full_symbol assign[=] call[name[self]._symbol_extract, parameter[name[cache].RE_FULL_CURSOR, constant[False], constant[True]]] return[name[self]._short_full_symbol]
keyword[def] identifier[short_full_symbol] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_short_full_symbol] keyword[is] keyword[None] : identifier[self] . identifier[_short_full_symbol] = identifier[self] . identifier[_symbol_extract] ( identifier[cache] . identifier[RE_FULL_CURSOR] , keyword[False] , keyword[True] ) keyword[return] identifier[self] . identifier[_short_full_symbol]
def short_full_symbol(self): """Gets the full symbol excluding the character under the cursor.""" if self._short_full_symbol is None: self._short_full_symbol = self._symbol_extract(cache.RE_FULL_CURSOR, False, True) # depends on [control=['if'], data=[]] return self._short_full_symbol
def _get_lr_tensor(self): """Get lr minimizing the surrogate. Returns: The lr_t. """ lr = tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min return lr
def function[_get_lr_tensor, parameter[self]]: constant[Get lr minimizing the surrogate. Returns: The lr_t. ] variable[lr] assign[=] binary_operation[call[name[tf].squared_difference, parameter[constant[1.0], call[name[tf].sqrt, parameter[name[self]._mu]]]] / name[self]._h_min] return[name[lr]]
keyword[def] identifier[_get_lr_tensor] ( identifier[self] ): literal[string] identifier[lr] = identifier[tf] . identifier[squared_difference] ( literal[int] , identifier[tf] . identifier[sqrt] ( identifier[self] . identifier[_mu] ))/ identifier[self] . identifier[_h_min] keyword[return] identifier[lr]
def _get_lr_tensor(self): """Get lr minimizing the surrogate. Returns: The lr_t. """ lr = tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min return lr
def save_file(self, obj): """Save a file""" try: import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute except ImportError: import io as pystringIO if not hasattr(obj, 'name') or not hasattr(obj, 'mode'): raise pickle.PicklingError("Cannot pickle files that do not map to an actual file") if obj is sys.stdout: return self.save_reduce(getattr, (sys,'stdout'), obj=obj) if obj is sys.stderr: return self.save_reduce(getattr, (sys,'stderr'), obj=obj) if obj is sys.stdin: raise pickle.PicklingError("Cannot pickle standard input") if obj.closed: raise pickle.PicklingError("Cannot pickle closed files") if hasattr(obj, 'isatty') and obj.isatty(): raise pickle.PicklingError("Cannot pickle files that map to tty objects") if 'r' not in obj.mode and '+' not in obj.mode: raise pickle.PicklingError("Cannot pickle files that are not opened for reading: %s" % obj.mode) name = obj.name retval = pystringIO.StringIO() try: # Read the whole file curloc = obj.tell() obj.seek(0) contents = obj.read() obj.seek(curloc) except IOError: raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name) retval.write(contents) retval.seek(curloc) retval.name = name self.save(retval) self.memoize(obj)
def function[save_file, parameter[self, obj]]: constant[Save a file] <ast.Try object at 0x7da1b1ec4f40> if <ast.BoolOp object at 0x7da1b2065990> begin[:] <ast.Raise object at 0x7da1b2067550> if compare[name[obj] is name[sys].stdout] begin[:] return[call[name[self].save_reduce, parameter[name[getattr], tuple[[<ast.Name object at 0x7da1b2064b20>, <ast.Constant object at 0x7da1b2067f70>]]]]] if compare[name[obj] is name[sys].stderr] begin[:] return[call[name[self].save_reduce, parameter[name[getattr], tuple[[<ast.Name object at 0x7da1b2065ff0>, <ast.Constant object at 0x7da1b2065d80>]]]]] if compare[name[obj] is name[sys].stdin] begin[:] <ast.Raise object at 0x7da1b2064070> if name[obj].closed begin[:] <ast.Raise object at 0x7da1b2067910> if <ast.BoolOp object at 0x7da1b2064400> begin[:] <ast.Raise object at 0x7da1b2067c70> if <ast.BoolOp object at 0x7da1b20665f0> begin[:] <ast.Raise object at 0x7da1b2064520> variable[name] assign[=] name[obj].name variable[retval] assign[=] call[name[pystringIO].StringIO, parameter[]] <ast.Try object at 0x7da1b20668c0> call[name[retval].write, parameter[name[contents]]] call[name[retval].seek, parameter[name[curloc]]] name[retval].name assign[=] name[name] call[name[self].save, parameter[name[retval]]] call[name[self].memoize, parameter[name[obj]]]
keyword[def] identifier[save_file] ( identifier[self] , identifier[obj] ): literal[string] keyword[try] : keyword[import] identifier[StringIO] keyword[as] identifier[pystringIO] keyword[except] identifier[ImportError] : keyword[import] identifier[io] keyword[as] identifier[pystringIO] keyword[if] keyword[not] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[or] keyword[not] identifier[hasattr] ( identifier[obj] , literal[string] ): keyword[raise] identifier[pickle] . identifier[PicklingError] ( literal[string] ) keyword[if] identifier[obj] keyword[is] identifier[sys] . identifier[stdout] : keyword[return] identifier[self] . identifier[save_reduce] ( identifier[getattr] ,( identifier[sys] , literal[string] ), identifier[obj] = identifier[obj] ) keyword[if] identifier[obj] keyword[is] identifier[sys] . identifier[stderr] : keyword[return] identifier[self] . identifier[save_reduce] ( identifier[getattr] ,( identifier[sys] , literal[string] ), identifier[obj] = identifier[obj] ) keyword[if] identifier[obj] keyword[is] identifier[sys] . identifier[stdin] : keyword[raise] identifier[pickle] . identifier[PicklingError] ( literal[string] ) keyword[if] identifier[obj] . identifier[closed] : keyword[raise] identifier[pickle] . identifier[PicklingError] ( literal[string] ) keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[obj] . identifier[isatty] (): keyword[raise] identifier[pickle] . identifier[PicklingError] ( literal[string] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[obj] . identifier[mode] keyword[and] literal[string] keyword[not] keyword[in] identifier[obj] . identifier[mode] : keyword[raise] identifier[pickle] . identifier[PicklingError] ( literal[string] % identifier[obj] . identifier[mode] ) identifier[name] = identifier[obj] . identifier[name] identifier[retval] = identifier[pystringIO] . identifier[StringIO] () keyword[try] : identifier[curloc] = identifier[obj] . 
identifier[tell] () identifier[obj] . identifier[seek] ( literal[int] ) identifier[contents] = identifier[obj] . identifier[read] () identifier[obj] . identifier[seek] ( identifier[curloc] ) keyword[except] identifier[IOError] : keyword[raise] identifier[pickle] . identifier[PicklingError] ( literal[string] % identifier[name] ) identifier[retval] . identifier[write] ( identifier[contents] ) identifier[retval] . identifier[seek] ( identifier[curloc] ) identifier[retval] . identifier[name] = identifier[name] identifier[self] . identifier[save] ( identifier[retval] ) identifier[self] . identifier[memoize] ( identifier[obj] )
def save_file(self, obj): """Save a file""" try: import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute # depends on [control=['try'], data=[]] except ImportError: import io as pystringIO # depends on [control=['except'], data=[]] if not hasattr(obj, 'name') or not hasattr(obj, 'mode'): raise pickle.PicklingError('Cannot pickle files that do not map to an actual file') # depends on [control=['if'], data=[]] if obj is sys.stdout: return self.save_reduce(getattr, (sys, 'stdout'), obj=obj) # depends on [control=['if'], data=['obj']] if obj is sys.stderr: return self.save_reduce(getattr, (sys, 'stderr'), obj=obj) # depends on [control=['if'], data=['obj']] if obj is sys.stdin: raise pickle.PicklingError('Cannot pickle standard input') # depends on [control=['if'], data=[]] if obj.closed: raise pickle.PicklingError('Cannot pickle closed files') # depends on [control=['if'], data=[]] if hasattr(obj, 'isatty') and obj.isatty(): raise pickle.PicklingError('Cannot pickle files that map to tty objects') # depends on [control=['if'], data=[]] if 'r' not in obj.mode and '+' not in obj.mode: raise pickle.PicklingError('Cannot pickle files that are not opened for reading: %s' % obj.mode) # depends on [control=['if'], data=[]] name = obj.name retval = pystringIO.StringIO() try: # Read the whole file curloc = obj.tell() obj.seek(0) contents = obj.read() obj.seek(curloc) # depends on [control=['try'], data=[]] except IOError: raise pickle.PicklingError('Cannot pickle file %s as it cannot be read' % name) # depends on [control=['except'], data=[]] retval.write(contents) retval.seek(curloc) retval.name = name self.save(retval) self.memoize(obj)
def from_etree(tree): """Constructs an executable form a given ElementTree structure. :param tree: :type tree: xml.etree.ElementTree.ElementTree :rtype: Executable """ exe = Executable(tree) exe.category = tree.findtext('category') exe.version = tree.findtext('version') exe.title = tree.findtext('title') or exe.name exe.description = tree.findtext('description') exe.license = tree.findtext('license') or "unknown" exe.contributor = tree.findtext('contributor') for ps in tree.iterfind("parameters"): assert isinstance(ps, ET.Element) paras = ParameterGroup( ps.findtext("label"), ps.findtext("description"), ps.attrib.get('advanced', "false") == "true", filter(lambda x: x is not None, map(Parameter.from_xml_node, list(ps)))) exe.parameter_groups.append(paras) return exe
def function[from_etree, parameter[tree]]: constant[Constructs an executable form a given ElementTree structure. :param tree: :type tree: xml.etree.ElementTree.ElementTree :rtype: Executable ] variable[exe] assign[=] call[name[Executable], parameter[name[tree]]] name[exe].category assign[=] call[name[tree].findtext, parameter[constant[category]]] name[exe].version assign[=] call[name[tree].findtext, parameter[constant[version]]] name[exe].title assign[=] <ast.BoolOp object at 0x7da1b1616350> name[exe].description assign[=] call[name[tree].findtext, parameter[constant[description]]] name[exe].license assign[=] <ast.BoolOp object at 0x7da1b1614a90> name[exe].contributor assign[=] call[name[tree].findtext, parameter[constant[contributor]]] for taget[name[ps]] in starred[call[name[tree].iterfind, parameter[constant[parameters]]]] begin[:] assert[call[name[isinstance], parameter[name[ps], name[ET].Element]]] variable[paras] assign[=] call[name[ParameterGroup], parameter[call[name[ps].findtext, parameter[constant[label]]], call[name[ps].findtext, parameter[constant[description]]], compare[call[name[ps].attrib.get, parameter[constant[advanced], constant[false]]] equal[==] constant[true]], call[name[filter], parameter[<ast.Lambda object at 0x7da1b16163e0>, call[name[map], parameter[name[Parameter].from_xml_node, call[name[list], parameter[name[ps]]]]]]]]] call[name[exe].parameter_groups.append, parameter[name[paras]]] return[name[exe]]
keyword[def] identifier[from_etree] ( identifier[tree] ): literal[string] identifier[exe] = identifier[Executable] ( identifier[tree] ) identifier[exe] . identifier[category] = identifier[tree] . identifier[findtext] ( literal[string] ) identifier[exe] . identifier[version] = identifier[tree] . identifier[findtext] ( literal[string] ) identifier[exe] . identifier[title] = identifier[tree] . identifier[findtext] ( literal[string] ) keyword[or] identifier[exe] . identifier[name] identifier[exe] . identifier[description] = identifier[tree] . identifier[findtext] ( literal[string] ) identifier[exe] . identifier[license] = identifier[tree] . identifier[findtext] ( literal[string] ) keyword[or] literal[string] identifier[exe] . identifier[contributor] = identifier[tree] . identifier[findtext] ( literal[string] ) keyword[for] identifier[ps] keyword[in] identifier[tree] . identifier[iterfind] ( literal[string] ): keyword[assert] identifier[isinstance] ( identifier[ps] , identifier[ET] . identifier[Element] ) identifier[paras] = identifier[ParameterGroup] ( identifier[ps] . identifier[findtext] ( literal[string] ), identifier[ps] . identifier[findtext] ( literal[string] ), identifier[ps] . identifier[attrib] . identifier[get] ( literal[string] , literal[string] )== literal[string] , identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] keyword[is] keyword[not] keyword[None] , identifier[map] ( identifier[Parameter] . identifier[from_xml_node] , identifier[list] ( identifier[ps] )))) identifier[exe] . identifier[parameter_groups] . identifier[append] ( identifier[paras] ) keyword[return] identifier[exe]
def from_etree(tree): """Constructs an executable form a given ElementTree structure. :param tree: :type tree: xml.etree.ElementTree.ElementTree :rtype: Executable """ exe = Executable(tree) exe.category = tree.findtext('category') exe.version = tree.findtext('version') exe.title = tree.findtext('title') or exe.name exe.description = tree.findtext('description') exe.license = tree.findtext('license') or 'unknown' exe.contributor = tree.findtext('contributor') for ps in tree.iterfind('parameters'): assert isinstance(ps, ET.Element) paras = ParameterGroup(ps.findtext('label'), ps.findtext('description'), ps.attrib.get('advanced', 'false') == 'true', filter(lambda x: x is not None, map(Parameter.from_xml_node, list(ps)))) exe.parameter_groups.append(paras) # depends on [control=['for'], data=['ps']] return exe
def merge_all_cells(cells):
    """
    Loop through list of cells and piece them together one by one

    Repeatedly scans the list for a pair of cells that can be joined
    (as reported by ``get_merge_direction``) and merges them in place,
    until a single cell containing the whole grid table remains.

    Parameters
    ----------
    cells : list of dashtable.data2rst.Cell

    Returns
    -------
    grid_table : str
        The final grid table
    """
    current = 0
    while len(cells) > 1:
        count = 0
        while count < len(cells):
            cell1 = cells[current]
            cell2 = cells[count]

            merge_direction = get_merge_direction(cell1, cell2)
            if merge_direction != "NONE":
                merge_cells(cell1, cell2, merge_direction)

                # Popping an element before `current` shifts the current
                # cell one slot to the left; keep pointing at it.
                if current > count:
                    current -= 1
                cells.pop(count)
            else:
                count += 1

        current += 1
        # Wrap around and keep rescanning until everything has merged.
        if current >= len(cells):
            current = 0

    return cells[0].text
def function[merge_all_cells, parameter[cells]]: constant[ Loop through list of cells and piece them together one by one Parameters ---------- cells : list of dashtable.data2rst.Cell Returns ------- grid_table : str The final grid table ] variable[current] assign[=] constant[0] while compare[call[name[len], parameter[name[cells]]] greater[>] constant[1]] begin[:] variable[count] assign[=] constant[0] while compare[name[count] less[<] call[name[len], parameter[name[cells]]]] begin[:] variable[cell1] assign[=] call[name[cells]][name[current]] variable[cell2] assign[=] call[name[cells]][name[count]] variable[merge_direction] assign[=] call[name[get_merge_direction], parameter[name[cell1], name[cell2]]] if <ast.UnaryOp object at 0x7da18c4ce1a0> begin[:] call[name[merge_cells], parameter[name[cell1], name[cell2], name[merge_direction]]] if compare[name[current] greater[>] name[count]] begin[:] <ast.AugAssign object at 0x7da1b2345e10> call[name[cells].pop, parameter[name[count]]] <ast.AugAssign object at 0x7da1b23459f0> if compare[name[current] greater_or_equal[>=] call[name[len], parameter[name[cells]]]] begin[:] variable[current] assign[=] constant[0] return[call[name[cells]][constant[0]].text]
keyword[def] identifier[merge_all_cells] ( identifier[cells] ): literal[string] identifier[current] = literal[int] keyword[while] identifier[len] ( identifier[cells] )> literal[int] : identifier[count] = literal[int] keyword[while] identifier[count] < identifier[len] ( identifier[cells] ): identifier[cell1] = identifier[cells] [ identifier[current] ] identifier[cell2] = identifier[cells] [ identifier[count] ] identifier[merge_direction] = identifier[get_merge_direction] ( identifier[cell1] , identifier[cell2] ) keyword[if] keyword[not] identifier[merge_direction] == literal[string] : identifier[merge_cells] ( identifier[cell1] , identifier[cell2] , identifier[merge_direction] ) keyword[if] identifier[current] > identifier[count] : identifier[current] -= literal[int] identifier[cells] . identifier[pop] ( identifier[count] ) keyword[else] : identifier[count] += literal[int] identifier[current] += literal[int] keyword[if] identifier[current] >= identifier[len] ( identifier[cells] ): identifier[current] = literal[int] keyword[return] identifier[cells] [ literal[int] ]. identifier[text]
def merge_all_cells(cells): """ Loop through list of cells and piece them together one by one Parameters ---------- cells : list of dashtable.data2rst.Cell Returns ------- grid_table : str The final grid table """ current = 0 while len(cells) > 1: count = 0 while count < len(cells): cell1 = cells[current] cell2 = cells[count] merge_direction = get_merge_direction(cell1, cell2) if not merge_direction == 'NONE': merge_cells(cell1, cell2, merge_direction) if current > count: current -= 1 # depends on [control=['if'], data=['current']] cells.pop(count) # depends on [control=['if'], data=[]] else: count += 1 # depends on [control=['while'], data=['count']] current += 1 if current >= len(cells): current = 0 # depends on [control=['if'], data=['current']] # depends on [control=['while'], data=[]] return cells[0].text
def get_block_count(self, is_full: bool = False) -> "int | dict":
    """
    This interface is used to get the decimal block number in current network.

    :param is_full: if True, return the full JSON-RPC response dict;
                    otherwise return only its ``result`` field.
    :return: the decimal total number of blocks in current network,
             or the full response dict when ``is_full`` is True.
    """
    # NOTE: the annotation was previously ``int or dict``, which Python
    # evaluates to plain ``int`` at definition time; the quoted form
    # preserves both alternatives for readers and type checkers.
    payload = self.generate_json_rpc_payload(RpcMethod.GET_BLOCK_COUNT)
    response = self.__post(self.__url, payload)
    if is_full:
        return response
    return response['result']
def function[get_block_count, parameter[self, is_full]]: constant[ This interface is used to get the decimal block number in current network. Return: the decimal total number of blocks in current network. ] variable[payload] assign[=] call[name[self].generate_json_rpc_payload, parameter[name[RpcMethod].GET_BLOCK_COUNT]] variable[response] assign[=] call[name[self].__post, parameter[name[self].__url, name[payload]]] if name[is_full] begin[:] return[name[response]] return[call[name[response]][constant[result]]]
keyword[def] identifier[get_block_count] ( identifier[self] , identifier[is_full] : identifier[bool] = keyword[False] )-> identifier[int] keyword[or] identifier[dict] : literal[string] identifier[payload] = identifier[self] . identifier[generate_json_rpc_payload] ( identifier[RpcMethod] . identifier[GET_BLOCK_COUNT] ) identifier[response] = identifier[self] . identifier[__post] ( identifier[self] . identifier[__url] , identifier[payload] ) keyword[if] identifier[is_full] : keyword[return] identifier[response] keyword[return] identifier[response] [ literal[string] ]
def get_block_count(self, is_full: bool=False) -> int or dict: """ This interface is used to get the decimal block number in current network. Return: the decimal total number of blocks in current network. """ payload = self.generate_json_rpc_payload(RpcMethod.GET_BLOCK_COUNT) response = self.__post(self.__url, payload) if is_full: return response # depends on [control=['if'], data=[]] return response['result']
def has_context_loop(state, incorrect_msg, exact_names):
    """Loop-specialised dispatch of ``has_context``.

    When dispatched on loops, the target vars live in the ``_target_vars``
    attribute; this wrapper lets callers run ``has_context`` directly on a
    loop node (e.g. ``for_loop``) rather than on one of its attributes
    (e.g. ``body``). Purely for convenience.
    """
    message = incorrect_msg if incorrect_msg else MSG_INCORRECT_LOOP
    return _test(state, message, exact_names,
                 tv_name="_target_vars", highlight_name="target")
def function[has_context_loop, parameter[state, incorrect_msg, exact_names]]: constant[When dispatched on loops, has_context the target vars are the attribute _target_vars. Note: This is to allow people to call has_context on a node (e.g. for_loop) rather than one of its attributes (e.g. body). Purely for convenience. ] return[call[name[_test], parameter[name[state], <ast.BoolOp object at 0x7da1b02b99c0>, name[exact_names]]]]
keyword[def] identifier[has_context_loop] ( identifier[state] , identifier[incorrect_msg] , identifier[exact_names] ): literal[string] keyword[return] identifier[_test] ( identifier[state] , identifier[incorrect_msg] keyword[or] identifier[MSG_INCORRECT_LOOP] , identifier[exact_names] , identifier[tv_name] = literal[string] , identifier[highlight_name] = literal[string] , )
def has_context_loop(state, incorrect_msg, exact_names): """When dispatched on loops, has_context the target vars are the attribute _target_vars. Note: This is to allow people to call has_context on a node (e.g. for_loop) rather than one of its attributes (e.g. body). Purely for convenience. """ return _test(state, incorrect_msg or MSG_INCORRECT_LOOP, exact_names, tv_name='_target_vars', highlight_name='target')
def associate_by_distance(labels_a, labels_b, distance):
    '''Find the objects that are within a given distance of each other

    Given two labels matrices and a distance, find pairs of objects that
    are within the given distance of each other where the distance is
    the minimum distance between any point in the convex hull of the
    two objects.

    labels_a - first labels matrix
    labels_b - second labels matrix
    distance - distance to measure

    returns a n x 2 matrix where m[x,0] is the label number in labels1 and
    m[x,1] is the label number in labels2

    Algorithm for computing distance between convex polygons taken from
    Chin, "Optimal Algorithms for the Intersection and the Minimum Distance
    Problems Between Planar Polygons", IEEE Transactions on Computers,
    vol. C-32, # 12, December 1983
    '''
    if np.max(labels_a) == 0 or np.max(labels_b) == 0:
        # At least one image has no labeled objects: nothing to pair.
        return np.zeros((0, 2), int)
    hull_a, point_counts_a = convex_hull(labels_a)
    hull_b, point_counts_b = convex_hull(labels_b)
    centers_a, radii_a = minimum_enclosing_circle(
        labels_a, hull_and_point_count=(hull_a, point_counts_a))
    centers_b, radii_b = minimum_enclosing_circle(
        labels_b, hull_and_point_count=(hull_b, point_counts_b))
    #
    # Make an indexer into the hull tables: indexer_x[k] is the first row
    # of hull_x belonging to object k.
    #
    indexer_a = np.cumsum(point_counts_a)
    indexer_a[1:] = indexer_a[:-1]
    indexer_a[0] = 0
    indexer_b = np.cumsum(point_counts_b)
    indexer_b[1:] = indexer_b[:-1]
    indexer_b[0] = 0
    #
    # Compute the distances between minimum enclosing circles =
    # distance - radius_a - radius_b
    #
    i, j = np.mgrid[0:len(radii_a), 0:len(radii_b)]
    ab_distance = np.sqrt((centers_a[i, 0] - centers_b[j, 0]) ** 2 +
                          (centers_a[i, 1] - centers_b[j, 1]) ** 2)
    ab_distance_minus_radii = ab_distance - radii_a[i] - radii_b[j]
    # Account for roundoff error
    ab_distance_minus_radii -= np.sqrt(np.finfo(float).eps)
    #
    # Exclude from consideration ab_distance > distance and automatically
    # choose those whose centers are within the distance
    #
    ab_easy_wins = ab_distance <= distance
    ij_wins = np.dstack((hull_a[indexer_a[i[ab_easy_wins]], 0],
                         hull_b[indexer_b[j[ab_easy_wins]], 0]))
    ij_wins.shape = ij_wins.shape[1:]
    ab_consider = (ab_distance_minus_radii <= distance) & (~ab_easy_wins)
    ij_consider = np.dstack((i[ab_consider], j[ab_consider]))
    ij_consider.shape = ij_consider.shape[1:]
    # np.product was removed in NumPy 2.0; np.prod is the supported name.
    if np.prod(ij_consider.shape) == 0:
        return ij_wins
    #
    # Exact hull-to-hull distance check for the remaining candidate pairs.
    # (A vectorized overlap test used to live in a dead `else:` branch
    # behind `if True:`; it was unreachable and has been removed.)
    #
    wins = []
    distance2 = distance ** 2
    for ii, jj in ij_consider:
        a = hull_a[indexer_a[ii]:indexer_a[ii] + point_counts_a[ii], 1:]
        b = hull_b[indexer_b[jj]:indexer_b[jj] + point_counts_b[jj], 1:]
        d = minimum_distance2(a, centers_a[ii, :],
                              b, centers_b[jj, :])
        if d <= distance2:
            wins.append((hull_a[indexer_a[ii], 0],
                         hull_b[indexer_b[jj], 0]))
    if wins:
        # Only stack when the exact check produced pairs: vstacking an
        # empty (0,)-shaped array onto an (n, 2) array raises.
        ij_wins = np.vstack((ij_wins, np.array(wins)))
    return ij_wins
def function[associate_by_distance, parameter[labels_a, labels_b, distance]]: constant[Find the objects that are within a given distance of each other Given two labels matrices and a distance, find pairs of objects that are within the given distance of each other where the distance is the minimum distance between any point in the convex hull of the two objects. labels_a - first labels matrix labels_b - second labels matrix distance - distance to measure returns a n x 2 matrix where m[x,0] is the label number in labels1 and m[x,1] is the label number in labels2 Algorithm for computing distance between convex polygons taken from Chin, "Optimal Algorithms for the Intersection and the Minimum Distance Problems Between Planar Polygons", IEEE Transactions on Computers, vol. C-32, # 12, December 1983 ] if <ast.BoolOp object at 0x7da1b26aec20> begin[:] return[call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da1b26ac310>, <ast.Constant object at 0x7da1b26ad5a0>]], name[int]]]] <ast.Tuple object at 0x7da1b26ac220> assign[=] call[name[convex_hull], parameter[name[labels_a]]] <ast.Tuple object at 0x7da1b26ac520> assign[=] call[name[convex_hull], parameter[name[labels_b]]] <ast.Tuple object at 0x7da1b26ad000> assign[=] call[name[minimum_enclosing_circle], parameter[name[labels_a]]] <ast.Tuple object at 0x7da1b26afa00> assign[=] call[name[minimum_enclosing_circle], parameter[name[labels_b]]] variable[indexer_a] assign[=] call[name[np].cumsum, parameter[name[point_counts_a]]] call[name[indexer_a]][<ast.Slice object at 0x7da1b26ac7c0>] assign[=] call[name[indexer_a]][<ast.Slice object at 0x7da1b26afca0>] call[name[indexer_a]][constant[0]] assign[=] constant[0] variable[indexer_b] assign[=] call[name[np].cumsum, parameter[name[point_counts_b]]] call[name[indexer_b]][<ast.Slice object at 0x7da1b26af220>] assign[=] call[name[indexer_b]][<ast.Slice object at 0x7da1b26af790>] call[name[indexer_b]][constant[0]] assign[=] constant[0] <ast.Tuple object at 0x7da1b26ae7a0> 
assign[=] call[name[np].mgrid][tuple[[<ast.Slice object at 0x7da1b26ae470>, <ast.Slice object at 0x7da1b26aecb0>]]] variable[ab_distance] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[call[name[centers_a]][tuple[[<ast.Name object at 0x7da1b26af820>, <ast.Constant object at 0x7da1b26adf30>]]] - call[name[centers_b]][tuple[[<ast.Name object at 0x7da1b26ac8b0>, <ast.Constant object at 0x7da1b26ac6d0>]]]] ** constant[2]] + binary_operation[binary_operation[call[name[centers_a]][tuple[[<ast.Name object at 0x7da1b26af6d0>, <ast.Constant object at 0x7da1b26afdc0>]]] - call[name[centers_b]][tuple[[<ast.Name object at 0x7da1b26ac1f0>, <ast.Constant object at 0x7da1b26aed10>]]]] ** constant[2]]]]] variable[ab_distance_minus_radii] assign[=] binary_operation[binary_operation[name[ab_distance] - call[name[radii_a]][name[i]]] - call[name[radii_b]][name[j]]] <ast.AugAssign object at 0x7da1b26adf60> variable[ab_easy_wins] assign[=] compare[name[ab_distance] less_or_equal[<=] name[distance]] variable[ij_wins] assign[=] call[name[np].dstack, parameter[tuple[[<ast.Subscript object at 0x7da1b26afe80>, <ast.Subscript object at 0x7da1b26af5e0>]]]] name[ij_wins].shape assign[=] call[name[ij_wins].shape][<ast.Slice object at 0x7da1b26ae9e0>] variable[ab_consider] assign[=] binary_operation[compare[name[ab_distance_minus_radii] less_or_equal[<=] name[distance]] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da1b26af700>] variable[ij_consider] assign[=] call[name[np].dstack, parameter[tuple[[<ast.Subscript object at 0x7da1b26aeec0>, <ast.Subscript object at 0x7da1b26af070>]]]] name[ij_consider].shape assign[=] call[name[ij_consider].shape][<ast.Slice object at 0x7da1b26ac3d0>] if compare[call[name[np].product, parameter[name[ij_consider].shape]] equal[==] constant[0]] begin[:] return[name[ij_wins]] if constant[True] begin[:] variable[wins] assign[=] list[[]] variable[distance2] assign[=] binary_operation[name[distance] ** 
constant[2]] for taget[tuple[[<ast.Name object at 0x7da1b26ae980>, <ast.Name object at 0x7da1b26acaf0>]]] in starred[name[ij_consider]] begin[:] variable[a] assign[=] call[name[hull_a]][tuple[[<ast.Slice object at 0x7da1b26ac190>, <ast.Slice object at 0x7da1b26acc70>]]] variable[b] assign[=] call[name[hull_b]][tuple[[<ast.Slice object at 0x7da1b26ac4c0>, <ast.Slice object at 0x7da1b26aed70>]]] variable[d] assign[=] call[name[minimum_distance2], parameter[name[a], call[name[centers_a]][tuple[[<ast.Name object at 0x7da207f01a80>, <ast.Slice object at 0x7da207f02800>]]], name[b], call[name[centers_b]][tuple[[<ast.Name object at 0x7da207f03a60>, <ast.Slice object at 0x7da207f01990>]]]]] if compare[name[d] less_or_equal[<=] name[distance2]] begin[:] call[name[wins].append, parameter[tuple[[<ast.Subscript object at 0x7da207f00ca0>, <ast.Subscript object at 0x7da207f03220>]]]] variable[ij_wins] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Name object at 0x7da207f02e60>, <ast.Call object at 0x7da207f02680>]]]] return[name[ij_wins]]
keyword[def] identifier[associate_by_distance] ( identifier[labels_a] , identifier[labels_b] , identifier[distance] ): literal[string] keyword[if] identifier[np] . identifier[max] ( identifier[labels_a] )== literal[int] keyword[or] identifier[np] . identifier[max] ( identifier[labels_b] )== literal[int] : keyword[return] identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[int] ) identifier[hull_a] , identifier[point_counts_a] = identifier[convex_hull] ( identifier[labels_a] ) identifier[hull_b] , identifier[point_counts_b] = identifier[convex_hull] ( identifier[labels_b] ) identifier[centers_a] , identifier[radii_a] = identifier[minimum_enclosing_circle] ( identifier[labels_a] , identifier[hull_and_point_count] =( identifier[hull_a] , identifier[point_counts_a] )) identifier[centers_b] , identifier[radii_b] = identifier[minimum_enclosing_circle] ( identifier[labels_b] , identifier[hull_and_point_count] =( identifier[hull_b] , identifier[point_counts_b] )) identifier[indexer_a] = identifier[np] . identifier[cumsum] ( identifier[point_counts_a] ) identifier[indexer_a] [ literal[int] :]= identifier[indexer_a] [:- literal[int] ] identifier[indexer_a] [ literal[int] ]= literal[int] identifier[indexer_b] = identifier[np] . identifier[cumsum] ( identifier[point_counts_b] ) identifier[indexer_b] [ literal[int] :]= identifier[indexer_b] [:- literal[int] ] identifier[indexer_b] [ literal[int] ]= literal[int] identifier[i] , identifier[j] = identifier[np] . identifier[mgrid] [ literal[int] : identifier[len] ( identifier[radii_a] ), literal[int] : identifier[len] ( identifier[radii_b] )] identifier[ab_distance] = identifier[np] . 
identifier[sqrt] (( identifier[centers_a] [ identifier[i] , literal[int] ]- identifier[centers_b] [ identifier[j] , literal[int] ])** literal[int] + ( identifier[centers_a] [ identifier[i] , literal[int] ]- identifier[centers_b] [ identifier[j] , literal[int] ])** literal[int] ) identifier[ab_distance_minus_radii] = identifier[ab_distance] - identifier[radii_a] [ identifier[i] ]- identifier[radii_b] [ identifier[j] ] identifier[ab_distance_minus_radii] -= identifier[np] . identifier[sqrt] ( identifier[np] . identifier[finfo] ( identifier[float] ). identifier[eps] ) identifier[ab_easy_wins] = identifier[ab_distance] <= identifier[distance] identifier[ij_wins] = identifier[np] . identifier[dstack] (( identifier[hull_a] [ identifier[indexer_a] [ identifier[i] [ identifier[ab_easy_wins] ]], literal[int] ], identifier[hull_b] [ identifier[indexer_b] [ identifier[j] [ identifier[ab_easy_wins] ]], literal[int] ])) identifier[ij_wins] . identifier[shape] = identifier[ij_wins] . identifier[shape] [ literal[int] :] identifier[ab_consider] =( identifier[ab_distance_minus_radii] <= identifier[distance] )&(~ identifier[ab_easy_wins] ) identifier[ij_consider] = identifier[np] . identifier[dstack] (( identifier[i] [ identifier[ab_consider] ], identifier[j] [ identifier[ab_consider] ])) identifier[ij_consider] . identifier[shape] = identifier[ij_consider] . identifier[shape] [ literal[int] :] keyword[if] identifier[np] . identifier[product] ( identifier[ij_consider] . 
identifier[shape] )== literal[int] : keyword[return] identifier[ij_wins] keyword[if] keyword[True] : identifier[wins] =[] identifier[distance2] = identifier[distance] ** literal[int] keyword[for] identifier[ii] , identifier[jj] keyword[in] identifier[ij_consider] : identifier[a] = identifier[hull_a] [ identifier[indexer_a] [ identifier[ii] ]: identifier[indexer_a] [ identifier[ii] ]+ identifier[point_counts_a] [ identifier[ii] ], literal[int] :] identifier[b] = identifier[hull_b] [ identifier[indexer_b] [ identifier[jj] ]: identifier[indexer_b] [ identifier[jj] ]+ identifier[point_counts_b] [ identifier[jj] ], literal[int] :] identifier[d] = identifier[minimum_distance2] ( identifier[a] , identifier[centers_a] [ identifier[ii] ,:], identifier[b] , identifier[centers_b] [ identifier[jj] ,:]) keyword[if] identifier[d] <= identifier[distance2] : identifier[wins] . identifier[append] (( identifier[hull_a] [ identifier[indexer_a] [ identifier[ii] ], literal[int] ], identifier[hull_b] [ identifier[indexer_b] [ identifier[jj] ], literal[int] ])) identifier[ij_wins] = identifier[np] . identifier[vstack] (( identifier[ij_wins] , identifier[np] . identifier[array] ( identifier[wins] ))) keyword[return] identifier[ij_wins] keyword[else] : identifier[hull_next_a] = identifier[np] . identifier[arange] ( identifier[hull_a] . identifier[shape] [ literal[int] ])+ literal[int] identifier[hull_next_a] [ identifier[indexer_a] + identifier[point_counts_a] - literal[int] ]= identifier[indexer_a] identifier[hull_next_b] = identifier[np] . identifier[arange] ( identifier[hull_b] . 
identifier[shape] [ literal[int] ])+ literal[int] identifier[hull_next_b] [ identifier[indexer_b] + identifier[point_counts_b] - literal[int] ]= identifier[indexer_b] identifier[next_b] = identifier[hull_b] [ identifier[hull_next_b] , literal[int] :] identifier[vector_b] = identifier[hull_b] [:, literal[int] :]- identifier[next_b] identifier[b_len] = identifier[point_counts_b] [ identifier[ij_consider] [:, literal[int] ]] identifier[b_index] = identifier[np] . identifier[cumsum] ( identifier[point_counts_b] ) identifier[b_elems] = identifier[b_index] [- literal[int] ] identifier[b_index] [ literal[int] :]= identifier[b_index] [:- literal[int] ] identifier[b_index] [ literal[int] ]= literal[int] identifier[b_indexer] = identifier[np] . identifier[zeros] ( identifier[b_elems] , identifier[int] ) identifier[b_indexer] [ identifier[b_index] [ literal[int] :]]= literal[int] identifier[b_indexer] = identifier[np] . identifier[cumsum] ( identifier[b_indexer] ) identifier[b_sub_index] = identifier[np] . 
identifier[arange] ( identifier[b_elems] )- identifier[b_index] [ identifier[b_indexer] ] identifier[b_i] = identifier[ij_consider] [ identifier[b_indexer] , literal[int] ] identifier[b_j] = identifier[ij_consider] [ identifier[b_indexer] , literal[int] ] identifier[b_vector_b] = identifier[vector_b] [ identifier[indexer_b] [ identifier[b_j] ]+ identifier[b_sub_index] ,:] identifier[b_center_vector] =( identifier[next_b] [ identifier[indexer_b] [ identifier[b_j] ]+ identifier[b_sub_index] ,:]- identifier[centers_a] [ identifier[indexer_a] [ identifier[b_i] ]]) identifier[cross] =( identifier[b_vector_b] [:, literal[int] ]* identifier[b_center_vector] [:, literal[int] ]- identifier[b_vector_b] [:, literal[int] ]* identifier[b_center_vector] [:, literal[int] ]) identifier[hits] =( identifier[all_true] ( identifier[cross] > literal[int] , identifier[b_index] )| identifier[all_true] ( identifier[cross] < literal[int] , identifier[b_index] )) identifier[ij_wins] = identifier[np] . identifier[vstack] (( identifier[ij_wins] , identifier[ij_consider] [ identifier[hits] ,:])) identifier[ij_consider] = identifier[ij_consider] [~ identifier[hits] ,:] keyword[if] identifier[ij_consider] . identifier[shape] [ literal[int] ]== literal[int] : keyword[return] identifier[ij_wins]
def associate_by_distance(labels_a, labels_b, distance): """Find the objects that are within a given distance of each other Given two labels matrices and a distance, find pairs of objects that are within the given distance of each other where the distance is the minimum distance between any point in the convex hull of the two objects. labels_a - first labels matrix labels_b - second labels matrix distance - distance to measure returns a n x 2 matrix where m[x,0] is the label number in labels1 and m[x,1] is the label number in labels2 Algorithm for computing distance between convex polygons taken from Chin, "Optimal Algorithms for the Intersection and the Minimum Distance Problems Between Planar Polygons", IEEE Transactions on Computers, vol. C-32, # 12, December 1983 """ if np.max(labels_a) == 0 or np.max(labels_b) == 0: return np.zeros((0, 2), int) # depends on [control=['if'], data=[]] (hull_a, point_counts_a) = convex_hull(labels_a) (hull_b, point_counts_b) = convex_hull(labels_b) (centers_a, radii_a) = minimum_enclosing_circle(labels_a, hull_and_point_count=(hull_a, point_counts_a)) (centers_b, radii_b) = minimum_enclosing_circle(labels_b, hull_and_point_count=(hull_b, point_counts_b)) # # Make an indexer into the hull tables # indexer_a = np.cumsum(point_counts_a) indexer_a[1:] = indexer_a[:-1] indexer_a[0] = 0 indexer_b = np.cumsum(point_counts_b) indexer_b[1:] = indexer_b[:-1] indexer_b[0] = 0 # # Compute the distances between minimum enclosing circles = # distance - radius_a - radius_b # (i, j) = np.mgrid[0:len(radii_a), 0:len(radii_b)] ab_distance = np.sqrt((centers_a[i, 0] - centers_b[j, 0]) ** 2 + (centers_a[i, 1] - centers_b[j, 1]) ** 2) ab_distance_minus_radii = ab_distance - radii_a[i] - radii_b[j] # Account for roundoff error ab_distance_minus_radii -= np.sqrt(np.finfo(float).eps) # # Exclude from consideration ab_distance > distance and automatically # choose those whose centers are within the distance # ab_easy_wins = ab_distance <= distance 
ij_wins = np.dstack((hull_a[indexer_a[i[ab_easy_wins]], 0], hull_b[indexer_b[j[ab_easy_wins]], 0])) ij_wins.shape = ij_wins.shape[1:] ab_consider = (ab_distance_minus_radii <= distance) & ~ab_easy_wins ij_consider = np.dstack((i[ab_consider], j[ab_consider])) ij_consider.shape = ij_consider.shape[1:] if np.product(ij_consider.shape) == 0: return ij_wins # depends on [control=['if'], data=[]] if True: wins = [] distance2 = distance ** 2 for (ii, jj) in ij_consider: a = hull_a[indexer_a[ii]:indexer_a[ii] + point_counts_a[ii], 1:] b = hull_b[indexer_b[jj]:indexer_b[jj] + point_counts_b[jj], 1:] d = minimum_distance2(a, centers_a[ii, :], b, centers_b[jj, :]) if d <= distance2: wins.append((hull_a[indexer_a[ii], 0], hull_b[indexer_b[jj], 0])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] ij_wins = np.vstack((ij_wins, np.array(wins))) return ij_wins # depends on [control=['if'], data=[]] else: # # For each point in the hull, get the next point mod # of points in hull # hull_next_a = np.arange(hull_a.shape[0]) + 1 hull_next_a[indexer_a + point_counts_a - 1] = indexer_a hull_next_b = np.arange(hull_b.shape[0]) + 1 hull_next_b[indexer_b + point_counts_b - 1] = indexer_b # # Parallelize the algorithm for overlap # # For each pair of points i, i+1 mod n in the hull, and the test point t # the cross product of the vector from i to i+1 and the vector from i+1 # to t should have the same sign. # next_b = hull_b[hull_next_b, 1:] vector_b = hull_b[:, 1:] - next_b # # For each i,j, we have to compare the centers_a against point_counts_b[j] # crosses. # b_len = point_counts_b[ij_consider[:, 1]] b_index = np.cumsum(point_counts_b) b_elems = b_index[-1] b_index[1:] = b_index[:-1] b_index[0] = 0 # # First create a vector that's b_elems long and every element contains an # index into the ij_consider vector. 
How we do this: # 1) mark the first element at a particular index by 1, all others = 0 # 2) Erase the first 1 # 3) Take the cumulative sum which will increment to 1 when it hits the # first 1, again when it hits the second...etc. # b_indexer = np.zeros(b_elems, int) b_indexer[b_index[1:]] = 1 b_indexer = np.cumsum(b_indexer) # # The sub-index is the index from 1 to n for each of the vertices # per b convex hull # b_sub_index = np.arange(b_elems) - b_index[b_indexer] # # For each element of b_indexer, get the i and j at that index # b_i = ij_consider[b_indexer, 0] b_j = ij_consider[b_indexer, 1] # # Compute the cross-products now # b_vector_b = vector_b[indexer_b[b_j] + b_sub_index, :] b_center_vector = next_b[indexer_b[b_j] + b_sub_index, :] - centers_a[indexer_a[b_i]] cross = b_vector_b[:, 0] * b_center_vector[:, 1] - b_vector_b[:, 1] * b_center_vector[:, 0] hits = all_true(cross > 0, b_index) | all_true(cross < 0, b_index) ij_wins = np.vstack((ij_wins, ij_consider[hits, :])) ij_consider = ij_consider[~hits, :] if ij_consider.shape[0] == 0: return ij_wins # depends on [control=['if'], data=[]]
def is_list_of_dict_like(obj, attr=('keys', 'items')): """test if object is a list only containing dict like items """ try: if len(obj) == 0: return False return all([is_dict_like(i, attr) for i in obj]) except Exception: return False
def function[is_list_of_dict_like, parameter[obj, attr]]: constant[test if object is a list only containing dict like items ] <ast.Try object at 0x7da2054a4ac0>
keyword[def] identifier[is_list_of_dict_like] ( identifier[obj] , identifier[attr] =( literal[string] , literal[string] )): literal[string] keyword[try] : keyword[if] identifier[len] ( identifier[obj] )== literal[int] : keyword[return] keyword[False] keyword[return] identifier[all] ([ identifier[is_dict_like] ( identifier[i] , identifier[attr] ) keyword[for] identifier[i] keyword[in] identifier[obj] ]) keyword[except] identifier[Exception] : keyword[return] keyword[False]
def is_list_of_dict_like(obj, attr=('keys', 'items')): """test if object is a list only containing dict like items """ try: if len(obj) == 0: return False # depends on [control=['if'], data=[]] return all([is_dict_like(i, attr) for i in obj]) # depends on [control=['try'], data=[]] except Exception: return False # depends on [control=['except'], data=[]]
def get_collection(source, name, collection_format, default): """get collection named `name` from the given `source` that formatted accordingly to `collection_format`. """ if collection_format in COLLECTION_SEP: separator = COLLECTION_SEP[collection_format] value = source.get(name, None) if value is None: return default return value.split(separator) if collection_format == 'brackets': return source.getall(name + '[]', default) else: # format: multi return source.getall(name, default)
def function[get_collection, parameter[source, name, collection_format, default]]: constant[get collection named `name` from the given `source` that formatted accordingly to `collection_format`. ] if compare[name[collection_format] in name[COLLECTION_SEP]] begin[:] variable[separator] assign[=] call[name[COLLECTION_SEP]][name[collection_format]] variable[value] assign[=] call[name[source].get, parameter[name[name], constant[None]]] if compare[name[value] is constant[None]] begin[:] return[name[default]] return[call[name[value].split, parameter[name[separator]]]] if compare[name[collection_format] equal[==] constant[brackets]] begin[:] return[call[name[source].getall, parameter[binary_operation[name[name] + constant[[]]], name[default]]]]
keyword[def] identifier[get_collection] ( identifier[source] , identifier[name] , identifier[collection_format] , identifier[default] ): literal[string] keyword[if] identifier[collection_format] keyword[in] identifier[COLLECTION_SEP] : identifier[separator] = identifier[COLLECTION_SEP] [ identifier[collection_format] ] identifier[value] = identifier[source] . identifier[get] ( identifier[name] , keyword[None] ) keyword[if] identifier[value] keyword[is] keyword[None] : keyword[return] identifier[default] keyword[return] identifier[value] . identifier[split] ( identifier[separator] ) keyword[if] identifier[collection_format] == literal[string] : keyword[return] identifier[source] . identifier[getall] ( identifier[name] + literal[string] , identifier[default] ) keyword[else] : keyword[return] identifier[source] . identifier[getall] ( identifier[name] , identifier[default] )
def get_collection(source, name, collection_format, default): """get collection named `name` from the given `source` that formatted accordingly to `collection_format`. """ if collection_format in COLLECTION_SEP: separator = COLLECTION_SEP[collection_format] value = source.get(name, None) if value is None: return default # depends on [control=['if'], data=[]] return value.split(separator) # depends on [control=['if'], data=['collection_format', 'COLLECTION_SEP']] if collection_format == 'brackets': return source.getall(name + '[]', default) # depends on [control=['if'], data=[]] else: # format: multi return source.getall(name, default)
def patched_web3_eth_estimate_gas(self, transaction, block_identifier=None): """ Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311 """ if 'from' not in transaction and is_checksum_address(self.defaultAccount): transaction = assoc(transaction, 'from', self.defaultAccount) if block_identifier is None: params = [transaction] else: params = [transaction, block_identifier] try: result = self.web3.manager.request_blocking( 'eth_estimateGas', params, ) except ValueError as e: if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS): result = None else: # else the error is not denoting estimate gas failure and is something else raise e return result
def function[patched_web3_eth_estimate_gas, parameter[self, transaction, block_identifier]]: constant[ Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311 ] if <ast.BoolOp object at 0x7da1b19145b0> begin[:] variable[transaction] assign[=] call[name[assoc], parameter[name[transaction], constant[from], name[self].defaultAccount]] if compare[name[block_identifier] is constant[None]] begin[:] variable[params] assign[=] list[[<ast.Name object at 0x7da1b1917700>]] <ast.Try object at 0x7da1b19504f0> return[name[result]]
keyword[def] identifier[patched_web3_eth_estimate_gas] ( identifier[self] , identifier[transaction] , identifier[block_identifier] = keyword[None] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[transaction] keyword[and] identifier[is_checksum_address] ( identifier[self] . identifier[defaultAccount] ): identifier[transaction] = identifier[assoc] ( identifier[transaction] , literal[string] , identifier[self] . identifier[defaultAccount] ) keyword[if] identifier[block_identifier] keyword[is] keyword[None] : identifier[params] =[ identifier[transaction] ] keyword[else] : identifier[params] =[ identifier[transaction] , identifier[block_identifier] ] keyword[try] : identifier[result] = identifier[self] . identifier[web3] . identifier[manager] . identifier[request_blocking] ( literal[string] , identifier[params] , ) keyword[except] identifier[ValueError] keyword[as] identifier[e] : keyword[if] identifier[check_value_error_for_parity] ( identifier[e] , identifier[ParityCallType] . identifier[ESTIMATE_GAS] ): identifier[result] = keyword[None] keyword[else] : keyword[raise] identifier[e] keyword[return] identifier[result]
def patched_web3_eth_estimate_gas(self, transaction, block_identifier=None): """ Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311 """ if 'from' not in transaction and is_checksum_address(self.defaultAccount): transaction = assoc(transaction, 'from', self.defaultAccount) # depends on [control=['if'], data=[]] if block_identifier is None: params = [transaction] # depends on [control=['if'], data=[]] else: params = [transaction, block_identifier] try: result = self.web3.manager.request_blocking('eth_estimateGas', params) # depends on [control=['try'], data=[]] except ValueError as e: if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS): result = None # depends on [control=['if'], data=[]] else: # else the error is not denoting estimate gas failure and is something else raise e # depends on [control=['except'], data=['e']] return result
def scan(): """ Scan for Crazyflie and return its URI. """ # Initiate the low level drivers cflib.crtp.init_drivers(enable_debug_driver=False) # Scan for Crazyflies print('Scanning interfaces for Crazyflies...') available = cflib.crtp.scan_interfaces() interfaces = [uri for uri, _ in available] if not interfaces: return None return choose(interfaces, 'Crazyflies found:', 'Select interface: ')
def function[scan, parameter[]]: constant[ Scan for Crazyflie and return its URI. ] call[name[cflib].crtp.init_drivers, parameter[]] call[name[print], parameter[constant[Scanning interfaces for Crazyflies...]]] variable[available] assign[=] call[name[cflib].crtp.scan_interfaces, parameter[]] variable[interfaces] assign[=] <ast.ListComp object at 0x7da1b1645f00> if <ast.UnaryOp object at 0x7da1b1646080> begin[:] return[constant[None]] return[call[name[choose], parameter[name[interfaces], constant[Crazyflies found:], constant[Select interface: ]]]]
keyword[def] identifier[scan] (): literal[string] identifier[cflib] . identifier[crtp] . identifier[init_drivers] ( identifier[enable_debug_driver] = keyword[False] ) identifier[print] ( literal[string] ) identifier[available] = identifier[cflib] . identifier[crtp] . identifier[scan_interfaces] () identifier[interfaces] =[ identifier[uri] keyword[for] identifier[uri] , identifier[_] keyword[in] identifier[available] ] keyword[if] keyword[not] identifier[interfaces] : keyword[return] keyword[None] keyword[return] identifier[choose] ( identifier[interfaces] , literal[string] , literal[string] )
def scan(): """ Scan for Crazyflie and return its URI. """ # Initiate the low level drivers cflib.crtp.init_drivers(enable_debug_driver=False) # Scan for Crazyflies print('Scanning interfaces for Crazyflies...') available = cflib.crtp.scan_interfaces() interfaces = [uri for (uri, _) in available] if not interfaces: return None # depends on [control=['if'], data=[]] return choose(interfaces, 'Crazyflies found:', 'Select interface: ')
def new_bundle(self, name: str, created_at: dt.datetime=None) -> models.Bundle: """Create a new file bundle.""" new_bundle = self.Bundle(name=name, created_at=created_at) return new_bundle
def function[new_bundle, parameter[self, name, created_at]]: constant[Create a new file bundle.] variable[new_bundle] assign[=] call[name[self].Bundle, parameter[]] return[name[new_bundle]]
keyword[def] identifier[new_bundle] ( identifier[self] , identifier[name] : identifier[str] , identifier[created_at] : identifier[dt] . identifier[datetime] = keyword[None] )-> identifier[models] . identifier[Bundle] : literal[string] identifier[new_bundle] = identifier[self] . identifier[Bundle] ( identifier[name] = identifier[name] , identifier[created_at] = identifier[created_at] ) keyword[return] identifier[new_bundle]
def new_bundle(self, name: str, created_at: dt.datetime=None) -> models.Bundle: """Create a new file bundle.""" new_bundle = self.Bundle(name=name, created_at=created_at) return new_bundle
def timestr2time(time_str): ''' Turns a string into a datetime.time object. This will only work if the format can be "guessed", so the string must have one of the formats from VALID_TIME_FORMATS_TEXT. Args: time_str (str) a string that represents a date Returns: datetime.time object Raises: ValueError if the input string does not have a valid format. ''' if any(c not in '0123456789:' for c in time_str): raise ValueError('Illegal character in time string') if time_str.count(':') == 2: h, m, s = time_str.split(':') elif time_str.count(':') == 1: h, m = time_str.split(':') s = '00' elif len(time_str) == 6: h = time_str[:2] m = time_str[2:4] s = time_str[4:] else: raise ValueError('Time format not recognised. {}'.format( VALID_TIME_FORMATS_TEXT)) if len(m) == 2 and len(s) == 2: mins = int(m) sec = int(s) else: raise ValueError('m and s must be 2 digits') try: return datetime.time(int(h), mins, sec) except ValueError: raise ValueError('Invalid time {}. {}'.format(time_str, VALID_TIME_FORMATS_TEXT))
def function[timestr2time, parameter[time_str]]: constant[ Turns a string into a datetime.time object. This will only work if the format can be "guessed", so the string must have one of the formats from VALID_TIME_FORMATS_TEXT. Args: time_str (str) a string that represents a date Returns: datetime.time object Raises: ValueError if the input string does not have a valid format. ] if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1349870>]] begin[:] <ast.Raise object at 0x7da1b134bac0> if compare[call[name[time_str].count, parameter[constant[:]]] equal[==] constant[2]] begin[:] <ast.Tuple object at 0x7da1b1348220> assign[=] call[name[time_str].split, parameter[constant[:]]] if <ast.BoolOp object at 0x7da1b13480a0> begin[:] variable[mins] assign[=] call[name[int], parameter[name[m]]] variable[sec] assign[=] call[name[int], parameter[name[s]]] <ast.Try object at 0x7da1b1349150>
keyword[def] identifier[timestr2time] ( identifier[time_str] ): literal[string] keyword[if] identifier[any] ( identifier[c] keyword[not] keyword[in] literal[string] keyword[for] identifier[c] keyword[in] identifier[time_str] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[time_str] . identifier[count] ( literal[string] )== literal[int] : identifier[h] , identifier[m] , identifier[s] = identifier[time_str] . identifier[split] ( literal[string] ) keyword[elif] identifier[time_str] . identifier[count] ( literal[string] )== literal[int] : identifier[h] , identifier[m] = identifier[time_str] . identifier[split] ( literal[string] ) identifier[s] = literal[string] keyword[elif] identifier[len] ( identifier[time_str] )== literal[int] : identifier[h] = identifier[time_str] [: literal[int] ] identifier[m] = identifier[time_str] [ literal[int] : literal[int] ] identifier[s] = identifier[time_str] [ literal[int] :] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[VALID_TIME_FORMATS_TEXT] )) keyword[if] identifier[len] ( identifier[m] )== literal[int] keyword[and] identifier[len] ( identifier[s] )== literal[int] : identifier[mins] = identifier[int] ( identifier[m] ) identifier[sec] = identifier[int] ( identifier[s] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[try] : keyword[return] identifier[datetime] . identifier[time] ( identifier[int] ( identifier[h] ), identifier[mins] , identifier[sec] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[time_str] , identifier[VALID_TIME_FORMATS_TEXT] ))
def timestr2time(time_str): """ Turns a string into a datetime.time object. This will only work if the format can be "guessed", so the string must have one of the formats from VALID_TIME_FORMATS_TEXT. Args: time_str (str) a string that represents a date Returns: datetime.time object Raises: ValueError if the input string does not have a valid format. """ if any((c not in '0123456789:' for c in time_str)): raise ValueError('Illegal character in time string') # depends on [control=['if'], data=[]] if time_str.count(':') == 2: (h, m, s) = time_str.split(':') # depends on [control=['if'], data=[]] elif time_str.count(':') == 1: (h, m) = time_str.split(':') s = '00' # depends on [control=['if'], data=[]] elif len(time_str) == 6: h = time_str[:2] m = time_str[2:4] s = time_str[4:] # depends on [control=['if'], data=[]] else: raise ValueError('Time format not recognised. {}'.format(VALID_TIME_FORMATS_TEXT)) if len(m) == 2 and len(s) == 2: mins = int(m) sec = int(s) # depends on [control=['if'], data=[]] else: raise ValueError('m and s must be 2 digits') try: return datetime.time(int(h), mins, sec) # depends on [control=['try'], data=[]] except ValueError: raise ValueError('Invalid time {}. {}'.format(time_str, VALID_TIME_FORMATS_TEXT)) # depends on [control=['except'], data=[]]
def _add_cytomine_cli_args(argparse): """ Add cytomine CLI args to the ArgumentParser object: cytomine_host, cytomine_public_key, cytomine_private_key and cytomine_verbose. Parameters ---------- argparse: ArgumentParser The argument parser Return ------ argparse: ArgumentParser The argument parser (same object as parameter) """ argparse.add_argument(*_cytomine_parameter_name_synonyms("host"), dest="host", help="The Cytomine host (without protocol).", required=True) argparse.add_argument(*_cytomine_parameter_name_synonyms("public_key"), dest="public_key", help="The Cytomine public key.", required=True) argparse.add_argument(*_cytomine_parameter_name_synonyms("private_key"), dest="private_key", help="The Cytomine private key.", required=True) argparse.add_argument("--verbose", "--cytomine_verbose", dest="verbose", type=int, default=logging.INFO, help="The verbosity level of the client (as an integer value).") argparse.add_argument("-l", "--log_level", "--cytomine_log_level", dest="log_level", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help="The logging level of the client (as a string value)") return argparse
def function[_add_cytomine_cli_args, parameter[argparse]]: constant[ Add cytomine CLI args to the ArgumentParser object: cytomine_host, cytomine_public_key, cytomine_private_key and cytomine_verbose. Parameters ---------- argparse: ArgumentParser The argument parser Return ------ argparse: ArgumentParser The argument parser (same object as parameter) ] call[name[argparse].add_argument, parameter[<ast.Starred object at 0x7da20e9639a0>]] call[name[argparse].add_argument, parameter[<ast.Starred object at 0x7da20e9610f0>]] call[name[argparse].add_argument, parameter[<ast.Starred object at 0x7da20e961330>]] call[name[argparse].add_argument, parameter[constant[--verbose], constant[--cytomine_verbose]]] call[name[argparse].add_argument, parameter[constant[-l], constant[--log_level], constant[--cytomine_log_level]]] return[name[argparse]]
keyword[def] identifier[_add_cytomine_cli_args] ( identifier[argparse] ): literal[string] identifier[argparse] . identifier[add_argument] (* identifier[_cytomine_parameter_name_synonyms] ( literal[string] ), identifier[dest] = literal[string] , identifier[help] = literal[string] , identifier[required] = keyword[True] ) identifier[argparse] . identifier[add_argument] (* identifier[_cytomine_parameter_name_synonyms] ( literal[string] ), identifier[dest] = literal[string] , identifier[help] = literal[string] , identifier[required] = keyword[True] ) identifier[argparse] . identifier[add_argument] (* identifier[_cytomine_parameter_name_synonyms] ( literal[string] ), identifier[dest] = literal[string] , identifier[help] = literal[string] , identifier[required] = keyword[True] ) identifier[argparse] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[type] = identifier[int] , identifier[default] = identifier[logging] . identifier[INFO] , identifier[help] = literal[string] ) identifier[argparse] . identifier[add_argument] ( literal[string] , literal[string] , literal[string] , identifier[dest] = literal[string] , identifier[choices] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[help] = literal[string] ) keyword[return] identifier[argparse]
def _add_cytomine_cli_args(argparse): """ Add cytomine CLI args to the ArgumentParser object: cytomine_host, cytomine_public_key, cytomine_private_key and cytomine_verbose. Parameters ---------- argparse: ArgumentParser The argument parser Return ------ argparse: ArgumentParser The argument parser (same object as parameter) """ argparse.add_argument(*_cytomine_parameter_name_synonyms('host'), dest='host', help='The Cytomine host (without protocol).', required=True) argparse.add_argument(*_cytomine_parameter_name_synonyms('public_key'), dest='public_key', help='The Cytomine public key.', required=True) argparse.add_argument(*_cytomine_parameter_name_synonyms('private_key'), dest='private_key', help='The Cytomine private key.', required=True) argparse.add_argument('--verbose', '--cytomine_verbose', dest='verbose', type=int, default=logging.INFO, help='The verbosity level of the client (as an integer value).') argparse.add_argument('-l', '--log_level', '--cytomine_log_level', dest='log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='The logging level of the client (as a string value)') return argparse
def probes(self, fromtime, totime=None): """Get a list of probes that performed tests for a specified check during a specified period.""" args = {'from': fromtime} if totime: args['to'] = totime response = self.pingdom.request('GET', 'summary.probes/%s' % self.id, args) return response.json()['probes']
def function[probes, parameter[self, fromtime, totime]]: constant[Get a list of probes that performed tests for a specified check during a specified period.] variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ff0310>], [<ast.Name object at 0x7da1b0ff0850>]] if name[totime] begin[:] call[name[args]][constant[to]] assign[=] name[totime] variable[response] assign[=] call[name[self].pingdom.request, parameter[constant[GET], binary_operation[constant[summary.probes/%s] <ast.Mod object at 0x7da2590d6920> name[self].id], name[args]]] return[call[call[name[response].json, parameter[]]][constant[probes]]]
keyword[def] identifier[probes] ( identifier[self] , identifier[fromtime] , identifier[totime] = keyword[None] ): literal[string] identifier[args] ={ literal[string] : identifier[fromtime] } keyword[if] identifier[totime] : identifier[args] [ literal[string] ]= identifier[totime] identifier[response] = identifier[self] . identifier[pingdom] . identifier[request] ( literal[string] , literal[string] % identifier[self] . identifier[id] , identifier[args] ) keyword[return] identifier[response] . identifier[json] ()[ literal[string] ]
def probes(self, fromtime, totime=None): """Get a list of probes that performed tests for a specified check during a specified period.""" args = {'from': fromtime} if totime: args['to'] = totime # depends on [control=['if'], data=[]] response = self.pingdom.request('GET', 'summary.probes/%s' % self.id, args) return response.json()['probes']
def estimate_cp_pval(ts, method="mean"): """ Estimate changepoints in a time series by using R. """ """ ts: time series method: look for a single changepoint in 'mean' , 'var', 'mean and var' Returns: returns index of the changepoint, and pvalue. Here pvalue = 1 means statistically significant """ robjects.r("library(changepoint)") method_map = { "mean": "cpt.mean({}, class=FALSE)", "var": "cpt.var({}, class=FALSE)", "meanvar": "cpt.meanvar({}, class=FALSE)", } mt = robjects.FloatVector(ts) robjects.globalenv["mt"] = mt cmd = method_map[method].format("mt") robjects.globalenv["mycpt"] = robjects.r(cmd) ecp_pval = robjects.r("mycpt") ecp = ecp_pval[0] pval = ecp_pval[1] return ecp, pval
def function[estimate_cp_pval, parameter[ts, method]]: constant[ Estimate changepoints in a time series by using R. ] constant[ ts: time series method: look for a single changepoint in 'mean' , 'var', 'mean and var' Returns: returns index of the changepoint, and pvalue. Here pvalue = 1 means statistically significant ] call[name[robjects].r, parameter[constant[library(changepoint)]]] variable[method_map] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c82b0>, <ast.Constant object at 0x7da20c7c8340>, <ast.Constant object at 0x7da20c7c83a0>], [<ast.Constant object at 0x7da20c7ca080>, <ast.Constant object at 0x7da20c7c8940>, <ast.Constant object at 0x7da20c7ca1a0>]] variable[mt] assign[=] call[name[robjects].FloatVector, parameter[name[ts]]] call[name[robjects].globalenv][constant[mt]] assign[=] name[mt] variable[cmd] assign[=] call[call[name[method_map]][name[method]].format, parameter[constant[mt]]] call[name[robjects].globalenv][constant[mycpt]] assign[=] call[name[robjects].r, parameter[name[cmd]]] variable[ecp_pval] assign[=] call[name[robjects].r, parameter[constant[mycpt]]] variable[ecp] assign[=] call[name[ecp_pval]][constant[0]] variable[pval] assign[=] call[name[ecp_pval]][constant[1]] return[tuple[[<ast.Name object at 0x7da204961c60>, <ast.Name object at 0x7da20c76eb60>]]]
keyword[def] identifier[estimate_cp_pval] ( identifier[ts] , identifier[method] = literal[string] ): literal[string] literal[string] identifier[robjects] . identifier[r] ( literal[string] ) identifier[method_map] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , } identifier[mt] = identifier[robjects] . identifier[FloatVector] ( identifier[ts] ) identifier[robjects] . identifier[globalenv] [ literal[string] ]= identifier[mt] identifier[cmd] = identifier[method_map] [ identifier[method] ]. identifier[format] ( literal[string] ) identifier[robjects] . identifier[globalenv] [ literal[string] ]= identifier[robjects] . identifier[r] ( identifier[cmd] ) identifier[ecp_pval] = identifier[robjects] . identifier[r] ( literal[string] ) identifier[ecp] = identifier[ecp_pval] [ literal[int] ] identifier[pval] = identifier[ecp_pval] [ literal[int] ] keyword[return] identifier[ecp] , identifier[pval]
def estimate_cp_pval(ts, method='mean'): """ Estimate changepoints in a time series by using R. """ " \n ts: time series\n method: look for a single changepoint in 'mean' , 'var', 'mean and var'\n Returns: returns index of the changepoint, and pvalue. Here pvalue = 1\n means statistically significant\n " robjects.r('library(changepoint)') method_map = {'mean': 'cpt.mean({}, class=FALSE)', 'var': 'cpt.var({}, class=FALSE)', 'meanvar': 'cpt.meanvar({}, class=FALSE)'} mt = robjects.FloatVector(ts) robjects.globalenv['mt'] = mt cmd = method_map[method].format('mt') robjects.globalenv['mycpt'] = robjects.r(cmd) ecp_pval = robjects.r('mycpt') ecp = ecp_pval[0] pval = ecp_pval[1] return (ecp, pval)
def submit_batch_completion(self, user, course_key, blocks): """ Performs a batch insertion of completion objects. Parameters: * user (django.contrib.auth.models.User): The user for whom the completions are being submitted. * course_key (opaque_keys.edx.keys.CourseKey): The course in which the submitted blocks are found. * blocks: A list of tuples of UsageKey to float completion values. (float in range [0.0, 1.0]): The fractional completion value of the block (0.0 = incomplete, 1.0 = complete). Return Value: Dict of (BlockCompletion, bool): A dictionary with a BlockCompletion object key and a value of bool. The boolean value indicates whether the object was newly created by this call. Raises: ValueError: If the wrong type is passed for one of the parameters. django.core.exceptions.ValidationError: If a float is passed that is not between 0.0 and 1.0. django.db.DatabaseError: If there was a problem getting, creating, or updating the BlockCompletion record in the database. """ block_completions = {} for block, completion in blocks: (block_completion, is_new) = self.submit_completion(user, course_key, block, completion) block_completions[block_completion] = is_new return block_completions
def function[submit_batch_completion, parameter[self, user, course_key, blocks]]: constant[ Performs a batch insertion of completion objects. Parameters: * user (django.contrib.auth.models.User): The user for whom the completions are being submitted. * course_key (opaque_keys.edx.keys.CourseKey): The course in which the submitted blocks are found. * blocks: A list of tuples of UsageKey to float completion values. (float in range [0.0, 1.0]): The fractional completion value of the block (0.0 = incomplete, 1.0 = complete). Return Value: Dict of (BlockCompletion, bool): A dictionary with a BlockCompletion object key and a value of bool. The boolean value indicates whether the object was newly created by this call. Raises: ValueError: If the wrong type is passed for one of the parameters. django.core.exceptions.ValidationError: If a float is passed that is not between 0.0 and 1.0. django.db.DatabaseError: If there was a problem getting, creating, or updating the BlockCompletion record in the database. ] variable[block_completions] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b04a57b0>, <ast.Name object at 0x7da1b04a7190>]]] in starred[name[blocks]] begin[:] <ast.Tuple object at 0x7da1b04a50f0> assign[=] call[name[self].submit_completion, parameter[name[user], name[course_key], name[block], name[completion]]] call[name[block_completions]][name[block_completion]] assign[=] name[is_new] return[name[block_completions]]
keyword[def] identifier[submit_batch_completion] ( identifier[self] , identifier[user] , identifier[course_key] , identifier[blocks] ): literal[string] identifier[block_completions] ={} keyword[for] identifier[block] , identifier[completion] keyword[in] identifier[blocks] : ( identifier[block_completion] , identifier[is_new] )= identifier[self] . identifier[submit_completion] ( identifier[user] , identifier[course_key] , identifier[block] , identifier[completion] ) identifier[block_completions] [ identifier[block_completion] ]= identifier[is_new] keyword[return] identifier[block_completions]
def submit_batch_completion(self, user, course_key, blocks): """ Performs a batch insertion of completion objects. Parameters: * user (django.contrib.auth.models.User): The user for whom the completions are being submitted. * course_key (opaque_keys.edx.keys.CourseKey): The course in which the submitted blocks are found. * blocks: A list of tuples of UsageKey to float completion values. (float in range [0.0, 1.0]): The fractional completion value of the block (0.0 = incomplete, 1.0 = complete). Return Value: Dict of (BlockCompletion, bool): A dictionary with a BlockCompletion object key and a value of bool. The boolean value indicates whether the object was newly created by this call. Raises: ValueError: If the wrong type is passed for one of the parameters. django.core.exceptions.ValidationError: If a float is passed that is not between 0.0 and 1.0. django.db.DatabaseError: If there was a problem getting, creating, or updating the BlockCompletion record in the database. """ block_completions = {} for (block, completion) in blocks: (block_completion, is_new) = self.submit_completion(user, course_key, block, completion) block_completions[block_completion] = is_new # depends on [control=['for'], data=[]] return block_completions
def chop_into_sequences(episode_ids, unroll_ids, agent_indices, feature_columns, state_columns, max_seq_len, dynamic_max=True, _extra_padding=0): """Truncate and pad experiences into fixed-length sequences. Arguments: episode_ids (list): List of episode ids for each step. unroll_ids (list): List of identifiers for the sample batch. This is used to make sure sequences are cut between sample batches. agent_indices (list): List of agent ids for each step. Note that this has to be combined with episode_ids for uniqueness. feature_columns (list): List of arrays containing features. state_columns (list): List of arrays containing LSTM state values. max_seq_len (int): Max length of sequences before truncation. dynamic_max (bool): Whether to dynamically shrink the max seq len. For example, if max len is 20 and the actual max seq len in the data is 7, it will be shrunk to 7. _extra_padding (int): Add extra padding to the end of sequences. Returns: f_pad (list): Padded feature columns. These will be of shape [NUM_SEQUENCES * MAX_SEQ_LEN, ...]. s_init (list): Initial states for each sequence, of shape [NUM_SEQUENCES, ...]. seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES]. 
Examples: >>> f_pad, s_init, seq_lens = chop_into_sequences( episode_ids=[1, 1, 5, 5, 5, 5], unroll_ids=[4, 4, 4, 4, 4, 4], agent_indices=[0, 0, 0, 0, 0, 0], feature_columns=[[4, 4, 8, 8, 8, 8], [1, 1, 0, 1, 1, 0]], state_columns=[[4, 5, 4, 5, 5, 5]], max_seq_len=3) >>> print(f_pad) [[4, 4, 0, 8, 8, 8, 8, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0, 0]] >>> print(s_init) [[4, 4, 5]] >>> print(seq_lens) [2, 3, 1] """ prev_id = None seq_lens = [] seq_len = 0 unique_ids = np.add( np.add(episode_ids, agent_indices), np.array(unroll_ids) << 32) for uid in unique_ids: if (prev_id is not None and uid != prev_id) or \ seq_len >= max_seq_len: seq_lens.append(seq_len) seq_len = 0 seq_len += 1 prev_id = uid if seq_len: seq_lens.append(seq_len) assert sum(seq_lens) == len(unique_ids) # Dynamically shrink max len as needed to optimize memory usage if dynamic_max: max_seq_len = max(seq_lens) + _extra_padding feature_sequences = [] for f in feature_columns: f = np.array(f) f_pad = np.zeros((len(seq_lens) * max_seq_len, ) + np.shape(f)[1:]) seq_base = 0 i = 0 for l in seq_lens: for seq_offset in range(l): f_pad[seq_base + seq_offset] = f[i] i += 1 seq_base += max_seq_len assert i == len(unique_ids), f feature_sequences.append(f_pad) initial_states = [] for s in state_columns: s = np.array(s) s_init = [] i = 0 for l in seq_lens: s_init.append(s[i]) i += l initial_states.append(np.array(s_init)) return feature_sequences, initial_states, np.array(seq_lens)
def function[chop_into_sequences, parameter[episode_ids, unroll_ids, agent_indices, feature_columns, state_columns, max_seq_len, dynamic_max, _extra_padding]]: constant[Truncate and pad experiences into fixed-length sequences. Arguments: episode_ids (list): List of episode ids for each step. unroll_ids (list): List of identifiers for the sample batch. This is used to make sure sequences are cut between sample batches. agent_indices (list): List of agent ids for each step. Note that this has to be combined with episode_ids for uniqueness. feature_columns (list): List of arrays containing features. state_columns (list): List of arrays containing LSTM state values. max_seq_len (int): Max length of sequences before truncation. dynamic_max (bool): Whether to dynamically shrink the max seq len. For example, if max len is 20 and the actual max seq len in the data is 7, it will be shrunk to 7. _extra_padding (int): Add extra padding to the end of sequences. Returns: f_pad (list): Padded feature columns. These will be of shape [NUM_SEQUENCES * MAX_SEQ_LEN, ...]. s_init (list): Initial states for each sequence, of shape [NUM_SEQUENCES, ...]. seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES]. 
Examples: >>> f_pad, s_init, seq_lens = chop_into_sequences( episode_ids=[1, 1, 5, 5, 5, 5], unroll_ids=[4, 4, 4, 4, 4, 4], agent_indices=[0, 0, 0, 0, 0, 0], feature_columns=[[4, 4, 8, 8, 8, 8], [1, 1, 0, 1, 1, 0]], state_columns=[[4, 5, 4, 5, 5, 5]], max_seq_len=3) >>> print(f_pad) [[4, 4, 0, 8, 8, 8, 8, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0, 0]] >>> print(s_init) [[4, 4, 5]] >>> print(seq_lens) [2, 3, 1] ] variable[prev_id] assign[=] constant[None] variable[seq_lens] assign[=] list[[]] variable[seq_len] assign[=] constant[0] variable[unique_ids] assign[=] call[name[np].add, parameter[call[name[np].add, parameter[name[episode_ids], name[agent_indices]]], binary_operation[call[name[np].array, parameter[name[unroll_ids]]] <ast.LShift object at 0x7da2590d69e0> constant[32]]]] for taget[name[uid]] in starred[name[unique_ids]] begin[:] if <ast.BoolOp object at 0x7da207f9bc10> begin[:] call[name[seq_lens].append, parameter[name[seq_len]]] variable[seq_len] assign[=] constant[0] <ast.AugAssign object at 0x7da207f9bf10> variable[prev_id] assign[=] name[uid] if name[seq_len] begin[:] call[name[seq_lens].append, parameter[name[seq_len]]] assert[compare[call[name[sum], parameter[name[seq_lens]]] equal[==] call[name[len], parameter[name[unique_ids]]]]] if name[dynamic_max] begin[:] variable[max_seq_len] assign[=] binary_operation[call[name[max], parameter[name[seq_lens]]] + name[_extra_padding]] variable[feature_sequences] assign[=] list[[]] for taget[name[f]] in starred[name[feature_columns]] begin[:] variable[f] assign[=] call[name[np].array, parameter[name[f]]] variable[f_pad] assign[=] call[name[np].zeros, parameter[binary_operation[tuple[[<ast.BinOp object at 0x7da207f981f0>]] + call[call[name[np].shape, parameter[name[f]]]][<ast.Slice object at 0x7da207f99150>]]]] variable[seq_base] assign[=] constant[0] variable[i] assign[=] constant[0] for taget[name[l]] in starred[name[seq_lens]] begin[:] for taget[name[seq_offset]] in starred[call[name[range], parameter[name[l]]]] begin[:] 
call[name[f_pad]][binary_operation[name[seq_base] + name[seq_offset]]] assign[=] call[name[f]][name[i]] <ast.AugAssign object at 0x7da207f9a920> <ast.AugAssign object at 0x7da207f98280> assert[compare[name[i] equal[==] call[name[len], parameter[name[unique_ids]]]]] call[name[feature_sequences].append, parameter[name[f_pad]]] variable[initial_states] assign[=] list[[]] for taget[name[s]] in starred[name[state_columns]] begin[:] variable[s] assign[=] call[name[np].array, parameter[name[s]]] variable[s_init] assign[=] list[[]] variable[i] assign[=] constant[0] for taget[name[l]] in starred[name[seq_lens]] begin[:] call[name[s_init].append, parameter[call[name[s]][name[i]]]] <ast.AugAssign object at 0x7da18fe93550> call[name[initial_states].append, parameter[call[name[np].array, parameter[name[s_init]]]]] return[tuple[[<ast.Name object at 0x7da18fe918d0>, <ast.Name object at 0x7da18fe90a90>, <ast.Call object at 0x7da18fe915a0>]]]
keyword[def] identifier[chop_into_sequences] ( identifier[episode_ids] , identifier[unroll_ids] , identifier[agent_indices] , identifier[feature_columns] , identifier[state_columns] , identifier[max_seq_len] , identifier[dynamic_max] = keyword[True] , identifier[_extra_padding] = literal[int] ): literal[string] identifier[prev_id] = keyword[None] identifier[seq_lens] =[] identifier[seq_len] = literal[int] identifier[unique_ids] = identifier[np] . identifier[add] ( identifier[np] . identifier[add] ( identifier[episode_ids] , identifier[agent_indices] ), identifier[np] . identifier[array] ( identifier[unroll_ids] )<< literal[int] ) keyword[for] identifier[uid] keyword[in] identifier[unique_ids] : keyword[if] ( identifier[prev_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[uid] != identifier[prev_id] ) keyword[or] identifier[seq_len] >= identifier[max_seq_len] : identifier[seq_lens] . identifier[append] ( identifier[seq_len] ) identifier[seq_len] = literal[int] identifier[seq_len] += literal[int] identifier[prev_id] = identifier[uid] keyword[if] identifier[seq_len] : identifier[seq_lens] . identifier[append] ( identifier[seq_len] ) keyword[assert] identifier[sum] ( identifier[seq_lens] )== identifier[len] ( identifier[unique_ids] ) keyword[if] identifier[dynamic_max] : identifier[max_seq_len] = identifier[max] ( identifier[seq_lens] )+ identifier[_extra_padding] identifier[feature_sequences] =[] keyword[for] identifier[f] keyword[in] identifier[feature_columns] : identifier[f] = identifier[np] . identifier[array] ( identifier[f] ) identifier[f_pad] = identifier[np] . identifier[zeros] (( identifier[len] ( identifier[seq_lens] )* identifier[max_seq_len] ,)+ identifier[np] . 
identifier[shape] ( identifier[f] )[ literal[int] :]) identifier[seq_base] = literal[int] identifier[i] = literal[int] keyword[for] identifier[l] keyword[in] identifier[seq_lens] : keyword[for] identifier[seq_offset] keyword[in] identifier[range] ( identifier[l] ): identifier[f_pad] [ identifier[seq_base] + identifier[seq_offset] ]= identifier[f] [ identifier[i] ] identifier[i] += literal[int] identifier[seq_base] += identifier[max_seq_len] keyword[assert] identifier[i] == identifier[len] ( identifier[unique_ids] ), identifier[f] identifier[feature_sequences] . identifier[append] ( identifier[f_pad] ) identifier[initial_states] =[] keyword[for] identifier[s] keyword[in] identifier[state_columns] : identifier[s] = identifier[np] . identifier[array] ( identifier[s] ) identifier[s_init] =[] identifier[i] = literal[int] keyword[for] identifier[l] keyword[in] identifier[seq_lens] : identifier[s_init] . identifier[append] ( identifier[s] [ identifier[i] ]) identifier[i] += identifier[l] identifier[initial_states] . identifier[append] ( identifier[np] . identifier[array] ( identifier[s_init] )) keyword[return] identifier[feature_sequences] , identifier[initial_states] , identifier[np] . identifier[array] ( identifier[seq_lens] )
def chop_into_sequences(episode_ids, unroll_ids, agent_indices, feature_columns, state_columns, max_seq_len, dynamic_max=True, _extra_padding=0): """Truncate and pad experiences into fixed-length sequences. Arguments: episode_ids (list): List of episode ids for each step. unroll_ids (list): List of identifiers for the sample batch. This is used to make sure sequences are cut between sample batches. agent_indices (list): List of agent ids for each step. Note that this has to be combined with episode_ids for uniqueness. feature_columns (list): List of arrays containing features. state_columns (list): List of arrays containing LSTM state values. max_seq_len (int): Max length of sequences before truncation. dynamic_max (bool): Whether to dynamically shrink the max seq len. For example, if max len is 20 and the actual max seq len in the data is 7, it will be shrunk to 7. _extra_padding (int): Add extra padding to the end of sequences. Returns: f_pad (list): Padded feature columns. These will be of shape [NUM_SEQUENCES * MAX_SEQ_LEN, ...]. s_init (list): Initial states for each sequence, of shape [NUM_SEQUENCES, ...]. seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES]. 
Examples: >>> f_pad, s_init, seq_lens = chop_into_sequences( episode_ids=[1, 1, 5, 5, 5, 5], unroll_ids=[4, 4, 4, 4, 4, 4], agent_indices=[0, 0, 0, 0, 0, 0], feature_columns=[[4, 4, 8, 8, 8, 8], [1, 1, 0, 1, 1, 0]], state_columns=[[4, 5, 4, 5, 5, 5]], max_seq_len=3) >>> print(f_pad) [[4, 4, 0, 8, 8, 8, 8, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0, 0]] >>> print(s_init) [[4, 4, 5]] >>> print(seq_lens) [2, 3, 1] """ prev_id = None seq_lens = [] seq_len = 0 unique_ids = np.add(np.add(episode_ids, agent_indices), np.array(unroll_ids) << 32) for uid in unique_ids: if prev_id is not None and uid != prev_id or seq_len >= max_seq_len: seq_lens.append(seq_len) seq_len = 0 # depends on [control=['if'], data=[]] seq_len += 1 prev_id = uid # depends on [control=['for'], data=['uid']] if seq_len: seq_lens.append(seq_len) # depends on [control=['if'], data=[]] assert sum(seq_lens) == len(unique_ids) # Dynamically shrink max len as needed to optimize memory usage if dynamic_max: max_seq_len = max(seq_lens) + _extra_padding # depends on [control=['if'], data=[]] feature_sequences = [] for f in feature_columns: f = np.array(f) f_pad = np.zeros((len(seq_lens) * max_seq_len,) + np.shape(f)[1:]) seq_base = 0 i = 0 for l in seq_lens: for seq_offset in range(l): f_pad[seq_base + seq_offset] = f[i] i += 1 # depends on [control=['for'], data=['seq_offset']] seq_base += max_seq_len # depends on [control=['for'], data=['l']] assert i == len(unique_ids), f feature_sequences.append(f_pad) # depends on [control=['for'], data=['f']] initial_states = [] for s in state_columns: s = np.array(s) s_init = [] i = 0 for l in seq_lens: s_init.append(s[i]) i += l # depends on [control=['for'], data=['l']] initial_states.append(np.array(s_init)) # depends on [control=['for'], data=['s']] return (feature_sequences, initial_states, np.array(seq_lens))
def allocate(self, size, max_time_to_block_ms): """ Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool is configured with blocking mode. Arguments: size (int): The buffer size to allocate in bytes [ignored] max_time_to_block_ms (int): The maximum time in milliseconds to block for buffer memory to be available Returns: io.BytesIO """ with self._lock: # check if we have a free buffer of the right size pooled if self._free: return self._free.popleft() elif self._poolable_size == 0: return io.BytesIO() else: # we are out of buffers and will have to block buf = None more_memory = threading.Condition(self._lock) self._waiters.append(more_memory) # loop over and over until we have a buffer or have reserved # enough memory to allocate one while buf is None: start_wait = time.time() more_memory.wait(max_time_to_block_ms / 1000.0) end_wait = time.time() if self.wait_time: self.wait_time.record(end_wait - start_wait) if self._free: buf = self._free.popleft() else: self._waiters.remove(more_memory) raise Errors.KafkaTimeoutError( "Failed to allocate memory within the configured" " max blocking time") # remove the condition for this thread to let the next thread # in line start getting memory removed = self._waiters.popleft() assert removed is more_memory, 'Wrong condition' # signal any additional waiters if there is more memory left # over for them if self._free and self._waiters: self._waiters[0].notify() # unlock and return the buffer return buf
def function[allocate, parameter[self, size, max_time_to_block_ms]]: constant[ Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool is configured with blocking mode. Arguments: size (int): The buffer size to allocate in bytes [ignored] max_time_to_block_ms (int): The maximum time in milliseconds to block for buffer memory to be available Returns: io.BytesIO ] with name[self]._lock begin[:] if name[self]._free begin[:] return[call[name[self]._free.popleft, parameter[]]]
keyword[def] identifier[allocate] ( identifier[self] , identifier[size] , identifier[max_time_to_block_ms] ): literal[string] keyword[with] identifier[self] . identifier[_lock] : keyword[if] identifier[self] . identifier[_free] : keyword[return] identifier[self] . identifier[_free] . identifier[popleft] () keyword[elif] identifier[self] . identifier[_poolable_size] == literal[int] : keyword[return] identifier[io] . identifier[BytesIO] () keyword[else] : identifier[buf] = keyword[None] identifier[more_memory] = identifier[threading] . identifier[Condition] ( identifier[self] . identifier[_lock] ) identifier[self] . identifier[_waiters] . identifier[append] ( identifier[more_memory] ) keyword[while] identifier[buf] keyword[is] keyword[None] : identifier[start_wait] = identifier[time] . identifier[time] () identifier[more_memory] . identifier[wait] ( identifier[max_time_to_block_ms] / literal[int] ) identifier[end_wait] = identifier[time] . identifier[time] () keyword[if] identifier[self] . identifier[wait_time] : identifier[self] . identifier[wait_time] . identifier[record] ( identifier[end_wait] - identifier[start_wait] ) keyword[if] identifier[self] . identifier[_free] : identifier[buf] = identifier[self] . identifier[_free] . identifier[popleft] () keyword[else] : identifier[self] . identifier[_waiters] . identifier[remove] ( identifier[more_memory] ) keyword[raise] identifier[Errors] . identifier[KafkaTimeoutError] ( literal[string] literal[string] ) identifier[removed] = identifier[self] . identifier[_waiters] . identifier[popleft] () keyword[assert] identifier[removed] keyword[is] identifier[more_memory] , literal[string] keyword[if] identifier[self] . identifier[_free] keyword[and] identifier[self] . identifier[_waiters] : identifier[self] . identifier[_waiters] [ literal[int] ]. identifier[notify] () keyword[return] identifier[buf]
def allocate(self, size, max_time_to_block_ms): """ Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool is configured with blocking mode. Arguments: size (int): The buffer size to allocate in bytes [ignored] max_time_to_block_ms (int): The maximum time in milliseconds to block for buffer memory to be available Returns: io.BytesIO """ with self._lock: # check if we have a free buffer of the right size pooled if self._free: return self._free.popleft() # depends on [control=['if'], data=[]] elif self._poolable_size == 0: return io.BytesIO() # depends on [control=['if'], data=[]] else: # we are out of buffers and will have to block buf = None more_memory = threading.Condition(self._lock) self._waiters.append(more_memory) # loop over and over until we have a buffer or have reserved # enough memory to allocate one while buf is None: start_wait = time.time() more_memory.wait(max_time_to_block_ms / 1000.0) end_wait = time.time() if self.wait_time: self.wait_time.record(end_wait - start_wait) # depends on [control=['if'], data=[]] if self._free: buf = self._free.popleft() # depends on [control=['if'], data=[]] else: self._waiters.remove(more_memory) raise Errors.KafkaTimeoutError('Failed to allocate memory within the configured max blocking time') # depends on [control=['while'], data=['buf']] # remove the condition for this thread to let the next thread # in line start getting memory removed = self._waiters.popleft() assert removed is more_memory, 'Wrong condition' # signal any additional waiters if there is more memory left # over for them if self._free and self._waiters: self._waiters[0].notify() # depends on [control=['if'], data=[]] # unlock and return the buffer return buf # depends on [control=['with'], data=[]]
def golden_images(self): """ Gets the Golden Images API client. Returns: GoldenImages: """ if not self.__golden_images: self.__golden_images = GoldenImages(self.__connection) return self.__golden_images
def function[golden_images, parameter[self]]: constant[ Gets the Golden Images API client. Returns: GoldenImages: ] if <ast.UnaryOp object at 0x7da18f00dcc0> begin[:] name[self].__golden_images assign[=] call[name[GoldenImages], parameter[name[self].__connection]] return[name[self].__golden_images]
keyword[def] identifier[golden_images] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[__golden_images] : identifier[self] . identifier[__golden_images] = identifier[GoldenImages] ( identifier[self] . identifier[__connection] ) keyword[return] identifier[self] . identifier[__golden_images]
def golden_images(self): """ Gets the Golden Images API client. Returns: GoldenImages: """ if not self.__golden_images: self.__golden_images = GoldenImages(self.__connection) # depends on [control=['if'], data=[]] return self.__golden_images
def delocate_tree_libs(lib_dict, lib_path, root_path): """ Move needed libraries in `lib_dict` into `lib_path` `lib_dict` has keys naming libraries required by the files in the corresponding value. Call the keys, "required libs". Call the values "requiring objects". Copy all the required libs to `lib_path`. Fix up the rpaths and install names in the requiring objects to point to these new copies. Exception: required libs within the directory tree pointed to by `root_path` stay where they are, but we modify requiring objects to use relative paths to these libraries. Parameters ---------- lib_dict : dict Dictionary with (key, value) pairs of (``depended_lib_path``, ``dependings_dict``) (see :func:`libsana.tree_libs`) lib_path : str Path in which to store copies of libs referred to in keys of `lib_dict`. Assumed to exist root_path : str, optional Root directory of tree analyzed in `lib_dict`. Any required library within the subtrees of `root_path` does not get copied, but libraries linking to it have links adjusted to use relative path to this library. Returns ------- copied_libs : dict Filtered `lib_dict` dict containing only the (key, value) pairs from `lib_dict` where the keys are the libraries copied to `lib_path``. 
""" copied_libs = {} delocated_libs = set() copied_basenames = set() rp_root_path = realpath(root_path) rp_lib_path = realpath(lib_path) # Test for errors first to avoid getting half-way through changing the tree for required, requirings in lib_dict.items(): if required.startswith('@'): # assume @rpath etc are correct # But warn, because likely they are not warnings.warn('Not processing required path {0} because it ' 'begins with @'.format(required)) continue r_ed_base = basename(required) if relpath(required, rp_root_path).startswith('..'): # Not local, plan to copy if r_ed_base in copied_basenames: raise DelocationError('Already planning to copy library with ' 'same basename as: ' + r_ed_base) if not exists(required): raise DelocationError('library "{0}" does not exist'.format( required)) copied_libs[required] = requirings copied_basenames.add(r_ed_base) else: # Is local, plan to set relative loader_path delocated_libs.add(required) # Modify in place now that we've checked for errors for required in copied_libs: shutil.copy(required, lib_path) # Set rpath and install names for this copied library for requiring, orig_install_name in lib_dict[required].items(): req_rel = relpath(rp_lib_path, dirname(requiring)) set_install_name(requiring, orig_install_name, '@loader_path/{0}/{1}'.format( req_rel, basename(required))) for required in delocated_libs: # Set relative path for local library for requiring, orig_install_name in lib_dict[required].items(): req_rel = relpath(required, dirname(requiring)) set_install_name(requiring, orig_install_name, '@loader_path/' + req_rel) return copied_libs
def function[delocate_tree_libs, parameter[lib_dict, lib_path, root_path]]: constant[ Move needed libraries in `lib_dict` into `lib_path` `lib_dict` has keys naming libraries required by the files in the corresponding value. Call the keys, "required libs". Call the values "requiring objects". Copy all the required libs to `lib_path`. Fix up the rpaths and install names in the requiring objects to point to these new copies. Exception: required libs within the directory tree pointed to by `root_path` stay where they are, but we modify requiring objects to use relative paths to these libraries. Parameters ---------- lib_dict : dict Dictionary with (key, value) pairs of (``depended_lib_path``, ``dependings_dict``) (see :func:`libsana.tree_libs`) lib_path : str Path in which to store copies of libs referred to in keys of `lib_dict`. Assumed to exist root_path : str, optional Root directory of tree analyzed in `lib_dict`. Any required library within the subtrees of `root_path` does not get copied, but libraries linking to it have links adjusted to use relative path to this library. Returns ------- copied_libs : dict Filtered `lib_dict` dict containing only the (key, value) pairs from `lib_dict` where the keys are the libraries copied to `lib_path``. 
] variable[copied_libs] assign[=] dictionary[[], []] variable[delocated_libs] assign[=] call[name[set], parameter[]] variable[copied_basenames] assign[=] call[name[set], parameter[]] variable[rp_root_path] assign[=] call[name[realpath], parameter[name[root_path]]] variable[rp_lib_path] assign[=] call[name[realpath], parameter[name[lib_path]]] for taget[tuple[[<ast.Name object at 0x7da204567a00>, <ast.Name object at 0x7da2045669b0>]]] in starred[call[name[lib_dict].items, parameter[]]] begin[:] if call[name[required].startswith, parameter[constant[@]]] begin[:] call[name[warnings].warn, parameter[call[constant[Not processing required path {0} because it begins with @].format, parameter[name[required]]]]] continue variable[r_ed_base] assign[=] call[name[basename], parameter[name[required]]] if call[call[name[relpath], parameter[name[required], name[rp_root_path]]].startswith, parameter[constant[..]]] begin[:] if compare[name[r_ed_base] in name[copied_basenames]] begin[:] <ast.Raise object at 0x7da204566080> if <ast.UnaryOp object at 0x7da2045665c0> begin[:] <ast.Raise object at 0x7da204566380> call[name[copied_libs]][name[required]] assign[=] name[requirings] call[name[copied_basenames].add, parameter[name[r_ed_base]]] for taget[name[required]] in starred[name[copied_libs]] begin[:] call[name[shutil].copy, parameter[name[required], name[lib_path]]] for taget[tuple[[<ast.Name object at 0x7da204565b40>, <ast.Name object at 0x7da204564e80>]]] in starred[call[call[name[lib_dict]][name[required]].items, parameter[]]] begin[:] variable[req_rel] assign[=] call[name[relpath], parameter[name[rp_lib_path], call[name[dirname], parameter[name[requiring]]]]] call[name[set_install_name], parameter[name[requiring], name[orig_install_name], call[constant[@loader_path/{0}/{1}].format, parameter[name[req_rel], call[name[basename], parameter[name[required]]]]]]] for taget[name[required]] in starred[name[delocated_libs]] begin[:] for taget[tuple[[<ast.Name object at 0x7da204567700>, 
<ast.Name object at 0x7da2045672b0>]]] in starred[call[call[name[lib_dict]][name[required]].items, parameter[]]] begin[:] variable[req_rel] assign[=] call[name[relpath], parameter[name[required], call[name[dirname], parameter[name[requiring]]]]] call[name[set_install_name], parameter[name[requiring], name[orig_install_name], binary_operation[constant[@loader_path/] + name[req_rel]]]] return[name[copied_libs]]
keyword[def] identifier[delocate_tree_libs] ( identifier[lib_dict] , identifier[lib_path] , identifier[root_path] ): literal[string] identifier[copied_libs] ={} identifier[delocated_libs] = identifier[set] () identifier[copied_basenames] = identifier[set] () identifier[rp_root_path] = identifier[realpath] ( identifier[root_path] ) identifier[rp_lib_path] = identifier[realpath] ( identifier[lib_path] ) keyword[for] identifier[required] , identifier[requirings] keyword[in] identifier[lib_dict] . identifier[items] (): keyword[if] identifier[required] . identifier[startswith] ( literal[string] ): identifier[warnings] . identifier[warn] ( literal[string] literal[string] . identifier[format] ( identifier[required] )) keyword[continue] identifier[r_ed_base] = identifier[basename] ( identifier[required] ) keyword[if] identifier[relpath] ( identifier[required] , identifier[rp_root_path] ). identifier[startswith] ( literal[string] ): keyword[if] identifier[r_ed_base] keyword[in] identifier[copied_basenames] : keyword[raise] identifier[DelocationError] ( literal[string] literal[string] + identifier[r_ed_base] ) keyword[if] keyword[not] identifier[exists] ( identifier[required] ): keyword[raise] identifier[DelocationError] ( literal[string] . identifier[format] ( identifier[required] )) identifier[copied_libs] [ identifier[required] ]= identifier[requirings] identifier[copied_basenames] . identifier[add] ( identifier[r_ed_base] ) keyword[else] : identifier[delocated_libs] . identifier[add] ( identifier[required] ) keyword[for] identifier[required] keyword[in] identifier[copied_libs] : identifier[shutil] . identifier[copy] ( identifier[required] , identifier[lib_path] ) keyword[for] identifier[requiring] , identifier[orig_install_name] keyword[in] identifier[lib_dict] [ identifier[required] ]. 
identifier[items] (): identifier[req_rel] = identifier[relpath] ( identifier[rp_lib_path] , identifier[dirname] ( identifier[requiring] )) identifier[set_install_name] ( identifier[requiring] , identifier[orig_install_name] , literal[string] . identifier[format] ( identifier[req_rel] , identifier[basename] ( identifier[required] ))) keyword[for] identifier[required] keyword[in] identifier[delocated_libs] : keyword[for] identifier[requiring] , identifier[orig_install_name] keyword[in] identifier[lib_dict] [ identifier[required] ]. identifier[items] (): identifier[req_rel] = identifier[relpath] ( identifier[required] , identifier[dirname] ( identifier[requiring] )) identifier[set_install_name] ( identifier[requiring] , identifier[orig_install_name] , literal[string] + identifier[req_rel] ) keyword[return] identifier[copied_libs]
def delocate_tree_libs(lib_dict, lib_path, root_path): """ Move needed libraries in `lib_dict` into `lib_path` `lib_dict` has keys naming libraries required by the files in the corresponding value. Call the keys, "required libs". Call the values "requiring objects". Copy all the required libs to `lib_path`. Fix up the rpaths and install names in the requiring objects to point to these new copies. Exception: required libs within the directory tree pointed to by `root_path` stay where they are, but we modify requiring objects to use relative paths to these libraries. Parameters ---------- lib_dict : dict Dictionary with (key, value) pairs of (``depended_lib_path``, ``dependings_dict``) (see :func:`libsana.tree_libs`) lib_path : str Path in which to store copies of libs referred to in keys of `lib_dict`. Assumed to exist root_path : str, optional Root directory of tree analyzed in `lib_dict`. Any required library within the subtrees of `root_path` does not get copied, but libraries linking to it have links adjusted to use relative path to this library. Returns ------- copied_libs : dict Filtered `lib_dict` dict containing only the (key, value) pairs from `lib_dict` where the keys are the libraries copied to `lib_path``. 
""" copied_libs = {} delocated_libs = set() copied_basenames = set() rp_root_path = realpath(root_path) rp_lib_path = realpath(lib_path) # Test for errors first to avoid getting half-way through changing the tree for (required, requirings) in lib_dict.items(): if required.startswith('@'): # assume @rpath etc are correct # But warn, because likely they are not warnings.warn('Not processing required path {0} because it begins with @'.format(required)) continue # depends on [control=['if'], data=[]] r_ed_base = basename(required) if relpath(required, rp_root_path).startswith('..'): # Not local, plan to copy if r_ed_base in copied_basenames: raise DelocationError('Already planning to copy library with same basename as: ' + r_ed_base) # depends on [control=['if'], data=['r_ed_base']] if not exists(required): raise DelocationError('library "{0}" does not exist'.format(required)) # depends on [control=['if'], data=[]] copied_libs[required] = requirings copied_basenames.add(r_ed_base) # depends on [control=['if'], data=[]] else: # Is local, plan to set relative loader_path delocated_libs.add(required) # depends on [control=['for'], data=[]] # Modify in place now that we've checked for errors for required in copied_libs: shutil.copy(required, lib_path) # Set rpath and install names for this copied library for (requiring, orig_install_name) in lib_dict[required].items(): req_rel = relpath(rp_lib_path, dirname(requiring)) set_install_name(requiring, orig_install_name, '@loader_path/{0}/{1}'.format(req_rel, basename(required))) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['required']] for required in delocated_libs: # Set relative path for local library for (requiring, orig_install_name) in lib_dict[required].items(): req_rel = relpath(required, dirname(requiring)) set_install_name(requiring, orig_install_name, '@loader_path/' + req_rel) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['required']] return copied_libs
def _run_validators(self, value): """Perform validation on ``value``. Raise a :exc:`ValidationError` if validation does not succeed. """ if value in self.empty_values: return errors = [] for validator in self.validators: try: validator(value) except exceptions.ValidationError as err: if isinstance(err.messages, dict): errors.append(err.messages) else: errors.extend(err.messages) if errors: raise exceptions.ValidationError(errors)
def function[_run_validators, parameter[self, value]]: constant[Perform validation on ``value``. Raise a :exc:`ValidationError` if validation does not succeed. ] if compare[name[value] in name[self].empty_values] begin[:] return[None] variable[errors] assign[=] list[[]] for taget[name[validator]] in starred[name[self].validators] begin[:] <ast.Try object at 0x7da18ede75e0> if name[errors] begin[:] <ast.Raise object at 0x7da18ede6530>
keyword[def] identifier[_run_validators] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[value] keyword[in] identifier[self] . identifier[empty_values] : keyword[return] identifier[errors] =[] keyword[for] identifier[validator] keyword[in] identifier[self] . identifier[validators] : keyword[try] : identifier[validator] ( identifier[value] ) keyword[except] identifier[exceptions] . identifier[ValidationError] keyword[as] identifier[err] : keyword[if] identifier[isinstance] ( identifier[err] . identifier[messages] , identifier[dict] ): identifier[errors] . identifier[append] ( identifier[err] . identifier[messages] ) keyword[else] : identifier[errors] . identifier[extend] ( identifier[err] . identifier[messages] ) keyword[if] identifier[errors] : keyword[raise] identifier[exceptions] . identifier[ValidationError] ( identifier[errors] )
def _run_validators(self, value): """Perform validation on ``value``. Raise a :exc:`ValidationError` if validation does not succeed. """ if value in self.empty_values: return # depends on [control=['if'], data=[]] errors = [] for validator in self.validators: try: validator(value) # depends on [control=['try'], data=[]] except exceptions.ValidationError as err: if isinstance(err.messages, dict): errors.append(err.messages) # depends on [control=['if'], data=[]] else: errors.extend(err.messages) # depends on [control=['except'], data=['err']] # depends on [control=['for'], data=['validator']] if errors: raise exceptions.ValidationError(errors) # depends on [control=['if'], data=[]]
def runcp_consumer_loop( in_queue_url, workdir, lclist_pkl_s3url, lc_altexts=('',), wait_time_seconds=5, cache_clean_timer_seconds=3600.0, shutdown_check_timer_seconds=60.0, sqs_client=None, s3_client=None ): """This runs checkplot pickle making in a loop until interrupted. Consumes work task items from an input queue set up by `runcp_producer_loop` above. For the moment, we don't generate neighbor light curves since this would require a lot more S3 calls. Parameters ---------- in_queue_url : str The SQS URL of the input queue to listen to for work assignment messages. The task orders will include the input and output S3 bucket names, as well as the URL of the output queue to where this function will report its work-complete or work-failed status. workdir : str The directory on the local machine where this worker loop will download the input light curves and associated period-finder results (if any), process them, and produce its output checkplot pickles. These will then be uploaded to the specified S3 output bucket and then deleted from the workdir when the upload is confirmed to make it safely to S3. lclist_pkl : str S3 URL of a catalog pickle generated by `lcproc.catalogs.make_lclist` that contains objectids and coordinates, as well as a kdtree for all of the objects in the current light curve collection being processed. This is used to look up neighbors for each object being processed. lc_altexts : sequence of str If not None, this is a sequence of alternate extensions to try for the input light curve file other than the one provided in the input task order. For example, to get anything that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to strip the .gz. wait_time_seconds : int The amount of time to wait in the input SQS queue for an input task order. If this timeout expires and no task has been received, this function goes back to the top of the work loop. 
cache_clean_timer_seconds : float The amount of time in seconds to wait before periodically removing old files (such as finder chart FITS, external service result pickles) from the astrobase cache directory. These accumulate as the work items are processed, and take up significant space, so must be removed periodically. shutdown_check_timer_seconds : float The amount of time to wait before checking for a pending EC2 shutdown message for the instance this worker loop is operating on. If a shutdown is noticed, the worker loop is cancelled in preparation for instance shutdown. sqs_client : boto3.Client or None If None, this function will instantiate a new `boto3.Client` object to use in its SQS operations. Alternatively, pass in an existing `boto3.Client` instance to re-use it here. s3_client : boto3.Client or None If None, this function will instantiate a new `boto3.Client` object to use in its S3 operations. Alternatively, pass in an existing `boto3.Client` instance to re-use it here. Returns ------- Nothing. 
""" if not sqs_client: sqs_client = boto3.client('sqs') if not s3_client: s3_client = boto3.client('s3') lclist_pklf = lclist_pkl_s3url.split('/')[-1] if not os.path.exists(lclist_pklf): # get the lclist pickle from S3 to help with neighbor queries lclist_pklf = awsutils.s3_get_url( lclist_pkl_s3url, client=s3_client ) with open(lclist_pklf,'rb') as infd: lclistpkl = pickle.load(infd) # listen to the kill and term signals and raise KeyboardInterrupt when # called signal.signal(signal.SIGINT, kill_handler) signal.signal(signal.SIGTERM, kill_handler) shutdown_last_time = time.monotonic() diskspace_last_time = time.monotonic() while True: curr_time = time.monotonic() if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds: shutdown_check = shutdown_check_handler() if shutdown_check: LOGWARNING('instance will die soon, breaking loop') break shutdown_last_time = time.monotonic() if (curr_time - diskspace_last_time) > cache_clean_timer_seconds: cache_clean_handler() diskspace_last_time = time.monotonic() try: # receive a single message from the inqueue work = awsutils.sqs_get_item(in_queue_url, client=sqs_client, raiseonfail=True) # JSON deserialize the work item if work is not None and len(work) > 0: recv = work[0] # skip any messages that don't tell us to runcp # FIXME: use the MessageAttributes for setting topics instead action = recv['item']['action'] if action != 'runcp': continue target = recv['item']['target'] args = recv['item']['args'] kwargs = recv['item']['kwargs'] outbucket = recv['item']['outbucket'] if 'outqueue' in recv['item']: out_queue_url = recv['item']['outqueue'] else: out_queue_url = None receipt = recv['receipt_handle'] # download the target from S3 to a file in the work directory try: lc_filename = awsutils.s3_get_url( target, altexts=lc_altexts, client=s3_client, ) # get the period-finder pickle if present in args if len(args) > 0 and args[0] is not None: pf_pickle = awsutils.s3_get_url( args[0], client=s3_client ) else: pf_pickle = 
None # now runcp cpfs = runcp( pf_pickle, workdir, workdir, lcfname=lc_filename, lclistpkl=lclistpkl, makeneighborlcs=False, **kwargs ) if cpfs and all(os.path.exists(x) for x in cpfs): LOGINFO('runcp OK for LC: %s, PF: %s -> %s' % (lc_filename, pf_pickle, cpfs)) # check if the file exists already because it's been # processed somewhere else resp = s3_client.list_objects_v2( Bucket=outbucket, MaxKeys=1, Prefix=cpfs[0] ) outbucket_list = resp.get('Contents',[]) if outbucket_list and len(outbucket_list) > 0: LOGWARNING( 'not uploading runcp results for %s because ' 'they exist in the output bucket already' % target ) awsutils.sqs_delete_item(in_queue_url, receipt) continue for cpf in cpfs: put_url = awsutils.s3_put_file(cpf, outbucket, client=s3_client) if put_url is not None: LOGINFO('result uploaded to %s' % put_url) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item( out_queue_url, {'cpf':put_url, 'target': target, 'lc_filename':lc_filename, 'lclistpkl':lclist_pklf, 'kwargs':kwargs}, raiseonfail=True ) # delete the result from the local directory os.remove(cpf) # if the upload fails, don't acknowledge the # message. might be a temporary S3 failure, so # another worker might succeed later. else: LOGERROR('failed to upload %s to S3' % cpf) # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done and successful awsutils.sqs_delete_item(in_queue_url, receipt) # delete the light curve file when we're done with it if ( (lc_filename is not None) and (os.path.exists(lc_filename)) ): os.remove(lc_filename) # if runcp failed outright, don't requeue. instead, write a # ('failed-checkplot-%s.pkl' % lc_filename) file to the # output S3 bucket. 
else: LOGWARNING('runcp failed for LC: %s, PF: %s' % (lc_filename, pf_pickle)) with open('failed-checkplot-%s.pkl' % lc_filename, 'wb') as outfd: pickle.dump( {'in_queue_url':in_queue_url, 'target':target, 'lc_filename':lc_filename, 'lclistpkl':lclist_pklf, 'kwargs':kwargs, 'outbucket':outbucket, 'out_queue_url':out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL ) put_url = awsutils.s3_put_file( 'failed-checkplot-%s.pkl' % lc_filename, outbucket, client=s3_client ) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item( out_queue_url, {'cpf':put_url, 'lc_filename':lc_filename, 'lclistpkl':lclist_pklf, 'kwargs':kwargs}, raiseonfail=True ) # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) # delete the light curve file when we're done with it if ( (lc_filename is not None) and (os.path.exists(lc_filename)) ): os.remove(lc_filename) except ClientError as e: LOGWARNING('queues have disappeared. 
stopping worker loop') break # if there's any other exception, put a failed response into the # output bucket and queue except Exception as e: LOGEXCEPTION('could not process input from queue') if 'lc_filename' in locals(): with open('failed-checkplot-%s.pkl' % lc_filename,'wb') as outfd: pickle.dump( {'in_queue_url':in_queue_url, 'target':target, 'lc_filename':lc_filename, 'lclistpkl':lclist_pklf, 'kwargs':kwargs, 'outbucket':outbucket, 'out_queue_url':out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL ) put_url = awsutils.s3_put_file( 'failed-checkplot-%s.pkl' % lc_filename, outbucket, client=s3_client ) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item( out_queue_url, {'cpf':put_url, 'lc_filename':lc_filename, 'lclistpkl':lclist_pklf, 'kwargs':kwargs}, raiseonfail=True ) if ( (lc_filename is not None) and (os.path.exists(lc_filename)) ): os.remove(lc_filename) # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) # a keyboard interrupt kills the loop except KeyboardInterrupt: LOGWARNING('breaking out of the processing loop.') break # if the queues disappear, then the producer loop is done and we should # exit except ClientError as e: LOGWARNING('queues have disappeared. stopping worker loop') break # any other exception continues the loop we'll write the output file to # the output S3 bucket (and any optional output queue), but add a # failed-* prefix to it to indicate that processing failed. 
FIXME: could # use a dead-letter queue for this instead except Exception as e: LOGEXCEPTION('could not process input from queue') if 'lc_filename' in locals(): with open('failed-checkplot-%s.pkl' % lc_filename,'wb') as outfd: pickle.dump( {'in_queue_url':in_queue_url, 'target':target, 'lclistpkl':lclist_pklf, 'kwargs':kwargs, 'outbucket':outbucket, 'out_queue_url':out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL ) put_url = awsutils.s3_put_file( 'failed-checkplot-%s.pkl' % lc_filename, outbucket, client=s3_client ) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item( out_queue_url, {'cpf':put_url, 'lclistpkl':lclist_pklf, 'kwargs':kwargs}, raiseonfail=True ) if ( (lc_filename is not None) and (os.path.exists(lc_filename)) ): os.remove(lc_filename) # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)
def function[runcp_consumer_loop, parameter[in_queue_url, workdir, lclist_pkl_s3url, lc_altexts, wait_time_seconds, cache_clean_timer_seconds, shutdown_check_timer_seconds, sqs_client, s3_client]]: constant[This runs checkplot pickle making in a loop until interrupted. Consumes work task items from an input queue set up by `runcp_producer_loop` above. For the moment, we don't generate neighbor light curves since this would require a lot more S3 calls. Parameters ---------- in_queue_url : str The SQS URL of the input queue to listen to for work assignment messages. The task orders will include the input and output S3 bucket names, as well as the URL of the output queue to where this function will report its work-complete or work-failed status. workdir : str The directory on the local machine where this worker loop will download the input light curves and associated period-finder results (if any), process them, and produce its output checkplot pickles. These will then be uploaded to the specified S3 output bucket and then deleted from the workdir when the upload is confirmed to make it safely to S3. lclist_pkl : str S3 URL of a catalog pickle generated by `lcproc.catalogs.make_lclist` that contains objectids and coordinates, as well as a kdtree for all of the objects in the current light curve collection being processed. This is used to look up neighbors for each object being processed. lc_altexts : sequence of str If not None, this is a sequence of alternate extensions to try for the input light curve file other than the one provided in the input task order. For example, to get anything that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to strip the .gz. wait_time_seconds : int The amount of time to wait in the input SQS queue for an input task order. If this timeout expires and no task has been received, this function goes back to the top of the work loop. 
cache_clean_timer_seconds : float The amount of time in seconds to wait before periodically removing old files (such as finder chart FITS, external service result pickles) from the astrobase cache directory. These accumulate as the work items are processed, and take up significant space, so must be removed periodically. shutdown_check_timer_seconds : float The amount of time to wait before checking for a pending EC2 shutdown message for the instance this worker loop is operating on. If a shutdown is noticed, the worker loop is cancelled in preparation for instance shutdown. sqs_client : boto3.Client or None If None, this function will instantiate a new `boto3.Client` object to use in its SQS operations. Alternatively, pass in an existing `boto3.Client` instance to re-use it here. s3_client : boto3.Client or None If None, this function will instantiate a new `boto3.Client` object to use in its S3 operations. Alternatively, pass in an existing `boto3.Client` instance to re-use it here. Returns ------- Nothing. 
] if <ast.UnaryOp object at 0x7da1b0061e70> begin[:] variable[sqs_client] assign[=] call[name[boto3].client, parameter[constant[sqs]]] if <ast.UnaryOp object at 0x7da1b00639d0> begin[:] variable[s3_client] assign[=] call[name[boto3].client, parameter[constant[s3]]] variable[lclist_pklf] assign[=] call[call[name[lclist_pkl_s3url].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b0061540>] if <ast.UnaryOp object at 0x7da1b0060b20> begin[:] variable[lclist_pklf] assign[=] call[name[awsutils].s3_get_url, parameter[name[lclist_pkl_s3url]]] with call[name[open], parameter[name[lclist_pklf], constant[rb]]] begin[:] variable[lclistpkl] assign[=] call[name[pickle].load, parameter[name[infd]]] call[name[signal].signal, parameter[name[signal].SIGINT, name[kill_handler]]] call[name[signal].signal, parameter[name[signal].SIGTERM, name[kill_handler]]] variable[shutdown_last_time] assign[=] call[name[time].monotonic, parameter[]] variable[diskspace_last_time] assign[=] call[name[time].monotonic, parameter[]] while constant[True] begin[:] variable[curr_time] assign[=] call[name[time].monotonic, parameter[]] if compare[binary_operation[name[curr_time] - name[shutdown_last_time]] greater[>] name[shutdown_check_timer_seconds]] begin[:] variable[shutdown_check] assign[=] call[name[shutdown_check_handler], parameter[]] if name[shutdown_check] begin[:] call[name[LOGWARNING], parameter[constant[instance will die soon, breaking loop]]] break variable[shutdown_last_time] assign[=] call[name[time].monotonic, parameter[]] if compare[binary_operation[name[curr_time] - name[diskspace_last_time]] greater[>] name[cache_clean_timer_seconds]] begin[:] call[name[cache_clean_handler], parameter[]] variable[diskspace_last_time] assign[=] call[name[time].monotonic, parameter[]] <ast.Try object at 0x7da1b0060280>
keyword[def] identifier[runcp_consumer_loop] ( identifier[in_queue_url] , identifier[workdir] , identifier[lclist_pkl_s3url] , identifier[lc_altexts] =( literal[string] ,), identifier[wait_time_seconds] = literal[int] , identifier[cache_clean_timer_seconds] = literal[int] , identifier[shutdown_check_timer_seconds] = literal[int] , identifier[sqs_client] = keyword[None] , identifier[s3_client] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[sqs_client] : identifier[sqs_client] = identifier[boto3] . identifier[client] ( literal[string] ) keyword[if] keyword[not] identifier[s3_client] : identifier[s3_client] = identifier[boto3] . identifier[client] ( literal[string] ) identifier[lclist_pklf] = identifier[lclist_pkl_s3url] . identifier[split] ( literal[string] )[- literal[int] ] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[lclist_pklf] ): identifier[lclist_pklf] = identifier[awsutils] . identifier[s3_get_url] ( identifier[lclist_pkl_s3url] , identifier[client] = identifier[s3_client] ) keyword[with] identifier[open] ( identifier[lclist_pklf] , literal[string] ) keyword[as] identifier[infd] : identifier[lclistpkl] = identifier[pickle] . identifier[load] ( identifier[infd] ) identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGINT] , identifier[kill_handler] ) identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGTERM] , identifier[kill_handler] ) identifier[shutdown_last_time] = identifier[time] . identifier[monotonic] () identifier[diskspace_last_time] = identifier[time] . identifier[monotonic] () keyword[while] keyword[True] : identifier[curr_time] = identifier[time] . 
identifier[monotonic] () keyword[if] ( identifier[curr_time] - identifier[shutdown_last_time] )> identifier[shutdown_check_timer_seconds] : identifier[shutdown_check] = identifier[shutdown_check_handler] () keyword[if] identifier[shutdown_check] : identifier[LOGWARNING] ( literal[string] ) keyword[break] identifier[shutdown_last_time] = identifier[time] . identifier[monotonic] () keyword[if] ( identifier[curr_time] - identifier[diskspace_last_time] )> identifier[cache_clean_timer_seconds] : identifier[cache_clean_handler] () identifier[diskspace_last_time] = identifier[time] . identifier[monotonic] () keyword[try] : identifier[work] = identifier[awsutils] . identifier[sqs_get_item] ( identifier[in_queue_url] , identifier[client] = identifier[sqs_client] , identifier[raiseonfail] = keyword[True] ) keyword[if] identifier[work] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[work] )> literal[int] : identifier[recv] = identifier[work] [ literal[int] ] identifier[action] = identifier[recv] [ literal[string] ][ literal[string] ] keyword[if] identifier[action] != literal[string] : keyword[continue] identifier[target] = identifier[recv] [ literal[string] ][ literal[string] ] identifier[args] = identifier[recv] [ literal[string] ][ literal[string] ] identifier[kwargs] = identifier[recv] [ literal[string] ][ literal[string] ] identifier[outbucket] = identifier[recv] [ literal[string] ][ literal[string] ] keyword[if] literal[string] keyword[in] identifier[recv] [ literal[string] ]: identifier[out_queue_url] = identifier[recv] [ literal[string] ][ literal[string] ] keyword[else] : identifier[out_queue_url] = keyword[None] identifier[receipt] = identifier[recv] [ literal[string] ] keyword[try] : identifier[lc_filename] = identifier[awsutils] . 
identifier[s3_get_url] ( identifier[target] , identifier[altexts] = identifier[lc_altexts] , identifier[client] = identifier[s3_client] , ) keyword[if] identifier[len] ( identifier[args] )> literal[int] keyword[and] identifier[args] [ literal[int] ] keyword[is] keyword[not] keyword[None] : identifier[pf_pickle] = identifier[awsutils] . identifier[s3_get_url] ( identifier[args] [ literal[int] ], identifier[client] = identifier[s3_client] ) keyword[else] : identifier[pf_pickle] = keyword[None] identifier[cpfs] = identifier[runcp] ( identifier[pf_pickle] , identifier[workdir] , identifier[workdir] , identifier[lcfname] = identifier[lc_filename] , identifier[lclistpkl] = identifier[lclistpkl] , identifier[makeneighborlcs] = keyword[False] , ** identifier[kwargs] ) keyword[if] identifier[cpfs] keyword[and] identifier[all] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[cpfs] ): identifier[LOGINFO] ( literal[string] % ( identifier[lc_filename] , identifier[pf_pickle] , identifier[cpfs] )) identifier[resp] = identifier[s3_client] . identifier[list_objects_v2] ( identifier[Bucket] = identifier[outbucket] , identifier[MaxKeys] = literal[int] , identifier[Prefix] = identifier[cpfs] [ literal[int] ] ) identifier[outbucket_list] = identifier[resp] . identifier[get] ( literal[string] ,[]) keyword[if] identifier[outbucket_list] keyword[and] identifier[len] ( identifier[outbucket_list] )> literal[int] : identifier[LOGWARNING] ( literal[string] literal[string] % identifier[target] ) identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] ) keyword[continue] keyword[for] identifier[cpf] keyword[in] identifier[cpfs] : identifier[put_url] = identifier[awsutils] . 
identifier[s3_put_file] ( identifier[cpf] , identifier[outbucket] , identifier[client] = identifier[s3_client] ) keyword[if] identifier[put_url] keyword[is] keyword[not] keyword[None] : identifier[LOGINFO] ( literal[string] % identifier[put_url] ) keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] : identifier[awsutils] . identifier[sqs_put_item] ( identifier[out_queue_url] , { literal[string] : identifier[put_url] , literal[string] : identifier[target] , literal[string] : identifier[lc_filename] , literal[string] : identifier[lclist_pklf] , literal[string] : identifier[kwargs] }, identifier[raiseonfail] = keyword[True] ) identifier[os] . identifier[remove] ( identifier[cpf] ) keyword[else] : identifier[LOGERROR] ( literal[string] % identifier[cpf] ) identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] ) keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))): identifier[os] . identifier[remove] ( identifier[lc_filename] ) keyword[else] : identifier[LOGWARNING] ( literal[string] % ( identifier[lc_filename] , identifier[pf_pickle] )) keyword[with] identifier[open] ( literal[string] % identifier[lc_filename] , literal[string] ) keyword[as] identifier[outfd] : identifier[pickle] . identifier[dump] ( { literal[string] : identifier[in_queue_url] , literal[string] : identifier[target] , literal[string] : identifier[lc_filename] , literal[string] : identifier[lclist_pklf] , literal[string] : identifier[kwargs] , literal[string] : identifier[outbucket] , literal[string] : identifier[out_queue_url] }, identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] ) identifier[put_url] = identifier[awsutils] . 
identifier[s3_put_file] ( literal[string] % identifier[lc_filename] , identifier[outbucket] , identifier[client] = identifier[s3_client] ) keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] : identifier[awsutils] . identifier[sqs_put_item] ( identifier[out_queue_url] , { literal[string] : identifier[put_url] , literal[string] : identifier[lc_filename] , literal[string] : identifier[lclist_pklf] , literal[string] : identifier[kwargs] }, identifier[raiseonfail] = keyword[True] ) identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] , identifier[raiseonfail] = keyword[True] ) keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))): identifier[os] . identifier[remove] ( identifier[lc_filename] ) keyword[except] identifier[ClientError] keyword[as] identifier[e] : identifier[LOGWARNING] ( literal[string] ) keyword[break] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[LOGEXCEPTION] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[locals] (): keyword[with] identifier[open] ( literal[string] % identifier[lc_filename] , literal[string] ) keyword[as] identifier[outfd] : identifier[pickle] . identifier[dump] ( { literal[string] : identifier[in_queue_url] , literal[string] : identifier[target] , literal[string] : identifier[lc_filename] , literal[string] : identifier[lclist_pklf] , literal[string] : identifier[kwargs] , literal[string] : identifier[outbucket] , literal[string] : identifier[out_queue_url] }, identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] ) identifier[put_url] = identifier[awsutils] . 
identifier[s3_put_file] ( literal[string] % identifier[lc_filename] , identifier[outbucket] , identifier[client] = identifier[s3_client] ) keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] : identifier[awsutils] . identifier[sqs_put_item] ( identifier[out_queue_url] , { literal[string] : identifier[put_url] , literal[string] : identifier[lc_filename] , literal[string] : identifier[lclist_pklf] , literal[string] : identifier[kwargs] }, identifier[raiseonfail] = keyword[True] ) keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))): identifier[os] . identifier[remove] ( identifier[lc_filename] ) identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] , identifier[raiseonfail] = keyword[True] ) keyword[except] identifier[KeyboardInterrupt] : identifier[LOGWARNING] ( literal[string] ) keyword[break] keyword[except] identifier[ClientError] keyword[as] identifier[e] : identifier[LOGWARNING] ( literal[string] ) keyword[break] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[LOGEXCEPTION] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[locals] (): keyword[with] identifier[open] ( literal[string] % identifier[lc_filename] , literal[string] ) keyword[as] identifier[outfd] : identifier[pickle] . identifier[dump] ( { literal[string] : identifier[in_queue_url] , literal[string] : identifier[target] , literal[string] : identifier[lclist_pklf] , literal[string] : identifier[kwargs] , literal[string] : identifier[outbucket] , literal[string] : identifier[out_queue_url] }, identifier[outfd] , identifier[pickle] . identifier[HIGHEST_PROTOCOL] ) identifier[put_url] = identifier[awsutils] . 
identifier[s3_put_file] ( literal[string] % identifier[lc_filename] , identifier[outbucket] , identifier[client] = identifier[s3_client] ) keyword[if] identifier[out_queue_url] keyword[is] keyword[not] keyword[None] : identifier[awsutils] . identifier[sqs_put_item] ( identifier[out_queue_url] , { literal[string] : identifier[put_url] , literal[string] : identifier[lclist_pklf] , literal[string] : identifier[kwargs] }, identifier[raiseonfail] = keyword[True] ) keyword[if] (( identifier[lc_filename] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[lc_filename] ))): identifier[os] . identifier[remove] ( identifier[lc_filename] ) identifier[awsutils] . identifier[sqs_delete_item] ( identifier[in_queue_url] , identifier[receipt] , identifier[raiseonfail] = keyword[True] )
def runcp_consumer_loop(in_queue_url, workdir, lclist_pkl_s3url, lc_altexts=('',), wait_time_seconds=5, cache_clean_timer_seconds=3600.0, shutdown_check_timer_seconds=60.0, sqs_client=None, s3_client=None): """This runs checkplot pickle making in a loop until interrupted. Consumes work task items from an input queue set up by `runcp_producer_loop` above. For the moment, we don't generate neighbor light curves since this would require a lot more S3 calls. Parameters ---------- in_queue_url : str The SQS URL of the input queue to listen to for work assignment messages. The task orders will include the input and output S3 bucket names, as well as the URL of the output queue to where this function will report its work-complete or work-failed status. workdir : str The directory on the local machine where this worker loop will download the input light curves and associated period-finder results (if any), process them, and produce its output checkplot pickles. These will then be uploaded to the specified S3 output bucket and then deleted from the workdir when the upload is confirmed to make it safely to S3. lclist_pkl : str S3 URL of a catalog pickle generated by `lcproc.catalogs.make_lclist` that contains objectids and coordinates, as well as a kdtree for all of the objects in the current light curve collection being processed. This is used to look up neighbors for each object being processed. lc_altexts : sequence of str If not None, this is a sequence of alternate extensions to try for the input light curve file other than the one provided in the input task order. For example, to get anything that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to strip the .gz. wait_time_seconds : int The amount of time to wait in the input SQS queue for an input task order. If this timeout expires and no task has been received, this function goes back to the top of the work loop. 
cache_clean_timer_seconds : float The amount of time in seconds to wait before periodically removing old files (such as finder chart FITS, external service result pickles) from the astrobase cache directory. These accumulate as the work items are processed, and take up significant space, so must be removed periodically. shutdown_check_timer_seconds : float The amount of time to wait before checking for a pending EC2 shutdown message for the instance this worker loop is operating on. If a shutdown is noticed, the worker loop is cancelled in preparation for instance shutdown. sqs_client : boto3.Client or None If None, this function will instantiate a new `boto3.Client` object to use in its SQS operations. Alternatively, pass in an existing `boto3.Client` instance to re-use it here. s3_client : boto3.Client or None If None, this function will instantiate a new `boto3.Client` object to use in its S3 operations. Alternatively, pass in an existing `boto3.Client` instance to re-use it here. Returns ------- Nothing. 
""" if not sqs_client: sqs_client = boto3.client('sqs') # depends on [control=['if'], data=[]] if not s3_client: s3_client = boto3.client('s3') # depends on [control=['if'], data=[]] lclist_pklf = lclist_pkl_s3url.split('/')[-1] if not os.path.exists(lclist_pklf): # get the lclist pickle from S3 to help with neighbor queries lclist_pklf = awsutils.s3_get_url(lclist_pkl_s3url, client=s3_client) # depends on [control=['if'], data=[]] with open(lclist_pklf, 'rb') as infd: lclistpkl = pickle.load(infd) # depends on [control=['with'], data=['infd']] # listen to the kill and term signals and raise KeyboardInterrupt when # called signal.signal(signal.SIGINT, kill_handler) signal.signal(signal.SIGTERM, kill_handler) shutdown_last_time = time.monotonic() diskspace_last_time = time.monotonic() while True: curr_time = time.monotonic() if curr_time - shutdown_last_time > shutdown_check_timer_seconds: shutdown_check = shutdown_check_handler() if shutdown_check: LOGWARNING('instance will die soon, breaking loop') break # depends on [control=['if'], data=[]] shutdown_last_time = time.monotonic() # depends on [control=['if'], data=[]] if curr_time - diskspace_last_time > cache_clean_timer_seconds: cache_clean_handler() diskspace_last_time = time.monotonic() # depends on [control=['if'], data=[]] try: # receive a single message from the inqueue work = awsutils.sqs_get_item(in_queue_url, client=sqs_client, raiseonfail=True) # JSON deserialize the work item if work is not None and len(work) > 0: recv = work[0] # skip any messages that don't tell us to runcp # FIXME: use the MessageAttributes for setting topics instead action = recv['item']['action'] if action != 'runcp': continue # depends on [control=['if'], data=[]] target = recv['item']['target'] args = recv['item']['args'] kwargs = recv['item']['kwargs'] outbucket = recv['item']['outbucket'] if 'outqueue' in recv['item']: out_queue_url = recv['item']['outqueue'] # depends on [control=['if'], data=[]] else: out_queue_url = None 
receipt = recv['receipt_handle'] # download the target from S3 to a file in the work directory try: lc_filename = awsutils.s3_get_url(target, altexts=lc_altexts, client=s3_client) # get the period-finder pickle if present in args if len(args) > 0 and args[0] is not None: pf_pickle = awsutils.s3_get_url(args[0], client=s3_client) # depends on [control=['if'], data=[]] else: pf_pickle = None # now runcp cpfs = runcp(pf_pickle, workdir, workdir, lcfname=lc_filename, lclistpkl=lclistpkl, makeneighborlcs=False, **kwargs) if cpfs and all((os.path.exists(x) for x in cpfs)): LOGINFO('runcp OK for LC: %s, PF: %s -> %s' % (lc_filename, pf_pickle, cpfs)) # check if the file exists already because it's been # processed somewhere else resp = s3_client.list_objects_v2(Bucket=outbucket, MaxKeys=1, Prefix=cpfs[0]) outbucket_list = resp.get('Contents', []) if outbucket_list and len(outbucket_list) > 0: LOGWARNING('not uploading runcp results for %s because they exist in the output bucket already' % target) awsutils.sqs_delete_item(in_queue_url, receipt) continue # depends on [control=['if'], data=[]] for cpf in cpfs: put_url = awsutils.s3_put_file(cpf, outbucket, client=s3_client) if put_url is not None: LOGINFO('result uploaded to %s' % put_url) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item(out_queue_url, {'cpf': put_url, 'target': target, 'lc_filename': lc_filename, 'lclistpkl': lclist_pklf, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']] # delete the result from the local directory os.remove(cpf) # depends on [control=['if'], data=['put_url']] else: # if the upload fails, don't acknowledge the # message. might be a temporary S3 failure, so # another worker might succeed later. 
LOGERROR('failed to upload %s to S3' % cpf) # depends on [control=['for'], data=['cpf']] # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done and successful awsutils.sqs_delete_item(in_queue_url, receipt) # delete the light curve file when we're done with it if lc_filename is not None and os.path.exists(lc_filename): os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # if runcp failed outright, don't requeue. instead, write a # ('failed-checkplot-%s.pkl' % lc_filename) file to the # output S3 bucket. LOGWARNING('runcp failed for LC: %s, PF: %s' % (lc_filename, pf_pickle)) with open('failed-checkplot-%s.pkl' % lc_filename, 'wb') as outfd: pickle.dump({'in_queue_url': in_queue_url, 'target': target, 'lc_filename': lc_filename, 'lclistpkl': lclist_pklf, 'kwargs': kwargs, 'outbucket': outbucket, 'out_queue_url': out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']] put_url = awsutils.s3_put_file('failed-checkplot-%s.pkl' % lc_filename, outbucket, client=s3_client) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item(out_queue_url, {'cpf': put_url, 'lc_filename': lc_filename, 'lclistpkl': lclist_pklf, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']] # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) # delete the light curve file when we're done with it if lc_filename is not None and os.path.exists(lc_filename): os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except ClientError as e: LOGWARNING('queues have disappeared. 
stopping worker loop') break # depends on [control=['except'], data=[]] # if there's any other exception, put a failed response into the # output bucket and queue except Exception as e: LOGEXCEPTION('could not process input from queue') if 'lc_filename' in locals(): with open('failed-checkplot-%s.pkl' % lc_filename, 'wb') as outfd: pickle.dump({'in_queue_url': in_queue_url, 'target': target, 'lc_filename': lc_filename, 'lclistpkl': lclist_pklf, 'kwargs': kwargs, 'outbucket': outbucket, 'out_queue_url': out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']] put_url = awsutils.s3_put_file('failed-checkplot-%s.pkl' % lc_filename, outbucket, client=s3_client) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item(out_queue_url, {'cpf': put_url, 'lc_filename': lc_filename, 'lclistpkl': lclist_pklf, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']] if lc_filename is not None and os.path.exists(lc_filename): os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] # a keyboard interrupt kills the loop except KeyboardInterrupt: LOGWARNING('breaking out of the processing loop.') break # depends on [control=['except'], data=[]] # if the queues disappear, then the producer loop is done and we should # exit except ClientError as e: LOGWARNING('queues have disappeared. 
stopping worker loop') break # depends on [control=['except'], data=[]] # any other exception continues the loop we'll write the output file to # the output S3 bucket (and any optional output queue), but add a # failed-* prefix to it to indicate that processing failed. FIXME: could # use a dead-letter queue for this instead except Exception as e: LOGEXCEPTION('could not process input from queue') if 'lc_filename' in locals(): with open('failed-checkplot-%s.pkl' % lc_filename, 'wb') as outfd: pickle.dump({'in_queue_url': in_queue_url, 'target': target, 'lclistpkl': lclist_pklf, 'kwargs': kwargs, 'outbucket': outbucket, 'out_queue_url': out_queue_url}, outfd, pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']] put_url = awsutils.s3_put_file('failed-checkplot-%s.pkl' % lc_filename, outbucket, client=s3_client) # put the S3 URL of the output into the output # queue if requested if out_queue_url is not None: awsutils.sqs_put_item(out_queue_url, {'cpf': put_url, 'lclistpkl': lclist_pklf, 'kwargs': kwargs}, raiseonfail=True) # depends on [control=['if'], data=['out_queue_url']] if lc_filename is not None and os.path.exists(lc_filename): os.remove(lc_filename) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # delete the input item from the input queue to # acknowledge its receipt and indicate that # processing is done awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
def qteCloseQtmacs(self):
    """
    Close Qtmacs.

    First kill all applets, then shut down Qtmacs.

    |Args|

    * **None**

    |Returns|

    * **None**

    |Raises|

    * **None**
    """
    # Broadcast the shutdown signal so interested parties can react.
    shutdownMsg = QtmacsMessage()
    shutdownMsg.setSignalName('qtesigCloseQtmacs')
    self.qtesigCloseQtmacs.emit(shutdownMsg)

    # Terminate every applet, then refresh the focus state.
    for appletID in self.qteGetAllAppletIDs():
        self.qteKillApplet(appletID)
    self._qteFocusManager()

    # Close every top-level window, then refresh the focus state.
    for win in self._qteWindowList:
        win.close()
    self._qteFocusManager()

    # Ask Qt to dispose of the QtmacsMain object on the next event-loop pass.
    self.deleteLater()
def function[qteCloseQtmacs, parameter[self]]: constant[ Close Qtmacs. First kill all applets, then shut down Qtmacs. |Args| * **None** |Returns| * **None** |Raises| * **None** ] variable[msgObj] assign[=] call[name[QtmacsMessage], parameter[]] call[name[msgObj].setSignalName, parameter[constant[qtesigCloseQtmacs]]] call[name[self].qtesigCloseQtmacs.emit, parameter[name[msgObj]]] for taget[name[appName]] in starred[call[name[self].qteGetAllAppletIDs, parameter[]]] begin[:] call[name[self].qteKillApplet, parameter[name[appName]]] call[name[self]._qteFocusManager, parameter[]] for taget[name[window]] in starred[name[self]._qteWindowList] begin[:] call[name[window].close, parameter[]] call[name[self]._qteFocusManager, parameter[]] call[name[self].deleteLater, parameter[]]
keyword[def] identifier[qteCloseQtmacs] ( identifier[self] ): literal[string] identifier[msgObj] = identifier[QtmacsMessage] () identifier[msgObj] . identifier[setSignalName] ( literal[string] ) identifier[self] . identifier[qtesigCloseQtmacs] . identifier[emit] ( identifier[msgObj] ) keyword[for] identifier[appName] keyword[in] identifier[self] . identifier[qteGetAllAppletIDs] (): identifier[self] . identifier[qteKillApplet] ( identifier[appName] ) identifier[self] . identifier[_qteFocusManager] () keyword[for] identifier[window] keyword[in] identifier[self] . identifier[_qteWindowList] : identifier[window] . identifier[close] () identifier[self] . identifier[_qteFocusManager] () identifier[self] . identifier[deleteLater] ()
def qteCloseQtmacs(self): """ Close Qtmacs. First kill all applets, then shut down Qtmacs. |Args| * **None** |Returns| * **None** |Raises| * **None** """ # Announce the shutdown. msgObj = QtmacsMessage() msgObj.setSignalName('qtesigCloseQtmacs') self.qtesigCloseQtmacs.emit(msgObj) # Kill all applets and update the GUI. for appName in self.qteGetAllAppletIDs(): self.qteKillApplet(appName) # depends on [control=['for'], data=['appName']] self._qteFocusManager() # Kill all windows and update the GUI. for window in self._qteWindowList: window.close() # depends on [control=['for'], data=['window']] self._qteFocusManager() # Schedule QtmacsMain for deletion. self.deleteLater()
def get_assessment_part_item_design_session(self, *args, **kwargs):
    """Gets the ``OsidSession`` associated with the assessment part item
    design service.

    return: (osid.assessment.authoring.AssessmentPartItemDesignSession)
            - an ``AssessmentPartItemDesignSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
            ``supports_assessment_part_item_design()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_part_lookup()`` is ``true``.*

    """
    # Guard on the lookup capability rather than the design capability.
    # This is kludgy, but only until Tom fixes spec
    lookup_supported = self.supports_assessment_part_lookup()
    if not lookup_supported:
        raise errors.Unimplemented()
    # This manager variant does not accept proxies.
    if self._proxy_in_args(*args, **kwargs):
        raise errors.InvalidArgument('A Proxy object was received but not expected.')
    # pylint: disable=no-member
    return sessions.AssessmentPartItemDesignSession(runtime=self._runtime)
def function[get_assessment_part_item_design_session, parameter[self]]: constant[Gets the ``OsidSession`` associated with the assessment part item design service. return: (osid.assessment.authoring.AssessmentPartItemDesignSession) - an ``AssessmentPartItemDesignSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_item_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_lookup()`` is ``true``.* ] if <ast.UnaryOp object at 0x7da20c795210> begin[:] <ast.Raise object at 0x7da20c794dc0> if call[name[self]._proxy_in_args, parameter[<ast.Starred object at 0x7da20c794b80>]] begin[:] <ast.Raise object at 0x7da20c7968c0> return[call[name[sessions].AssessmentPartItemDesignSession, parameter[]]]
keyword[def] identifier[get_assessment_part_item_design_session] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[supports_assessment_part_lookup] (): keyword[raise] identifier[errors] . identifier[Unimplemented] () keyword[if] identifier[self] . identifier[_proxy_in_args] (* identifier[args] ,** identifier[kwargs] ): keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] ) keyword[return] identifier[sessions] . identifier[AssessmentPartItemDesignSession] ( identifier[runtime] = identifier[self] . identifier[_runtime] )
def get_assessment_part_item_design_session(self, *args, **kwargs): """Gets the ``OsidSession`` associated with the assessment part item design service. return: (osid.assessment.authoring.AssessmentPartItemDesignSession) - an ``AssessmentPartItemDesignSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_item_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_lookup()`` is ``true``.* """ if not self.supports_assessment_part_lookup(): # This is kludgy, but only until Tom fixes spec raise errors.Unimplemented() # depends on [control=['if'], data=[]] if self._proxy_in_args(*args, **kwargs): raise errors.InvalidArgument('A Proxy object was received but not expected.') # depends on [control=['if'], data=[]] # pylint: disable=no-member return sessions.AssessmentPartItemDesignSession(runtime=self._runtime)
def app_upload(path, name, manifest, package, docker_address, registry, manifest_only, **kwargs):
    """
    Upload application with its environment (directory) into the storage.

    Application directory or its subdirectories must contain valid manifest file
    named `manifest.json` or `manifest` otherwise you must specify it explicitly
    by setting `--manifest` option.

    You can specify application name. By default, leaf directory name is treated
    as application name.

    If you have already prepared application archive (*.tar.gz), you can
    explicitly specify path to it by setting `--package` option.

    Additional output can be turned on by passing `-vvvv` option.
    """
    # Uploads can be slow; never run with a timeout below this floor (seconds).
    lower_limit = 120.0

    ctx = Context(**kwargs)

    if ctx.timeout < lower_limit:
        ctx.timeout = lower_limit
        log.info('shifted timeout to the %.2fs', ctx.timeout)

    # Pairs of options that must not be supplied together.
    mutex_record = collections.namedtuple('mutex_record', 'value, name')
    mutex = [
        (mutex_record(path, 'PATH'), mutex_record(package, '--package')),
        (mutex_record(package, '--package'), mutex_record(docker_address, '--docker')),
        (mutex_record(package, '--package'), mutex_record(registry, '--registry')),
    ]
    for (f, s) in mutex:
        if f.value and s.value:
            click.echo('Wrong usage: option {} and {} are mutual exclusive, you can only use one'.
                       format(f.name, s.name))
            # FIX: `exit()` is a convenience name injected by the `site`
            # module and is not guaranteed to exist (e.g. under `python -S`).
            # Raising SystemExit directly is exactly what exit(code) does.
            raise SystemExit(os.EX_USAGE)

    # Every branch below talks to the same secure storage service.
    storage = ctx.repo.create_secure_service('storage')
    if manifest_only:
        # Store only the manifest; no package is uploaded.
        ctx.execute_action('app:upload-manual', **{
            'storage': storage,
            'name': name,
            'manifest': manifest,
            'package': None,
            'manifest_only': manifest_only,
        })
    elif package:
        # Pre-built *.tar.gz archive supplied by the user.
        ctx.execute_action('app:upload-manual', **{
            'storage': storage,
            'name': name,
            'manifest': manifest,
            'package': package
        })
    elif docker_address:
        # Application lives in a Docker registry.
        ctx.execute_action('app:upload-docker', **{
            'storage': storage,
            'path': path,
            'name': name,
            'manifest': manifest,
            'address': docker_address,
            'registry': registry
        })
    else:
        # Default: package up the directory at `path` and upload it.
        ctx.execute_action('app:upload', **{
            'storage': storage,
            'path': path,
            'name': name,
            'manifest': manifest
        })
def function[app_upload, parameter[path, name, manifest, package, docker_address, registry, manifest_only]]: constant[ Upload application with its environment (directory) into the storage. Application directory or its subdirectories must contain valid manifest file named `manifest.json` or `manifest` otherwise you must specify it explicitly by setting `--manifest` option. You can specify application name. By default, leaf directory name is treated as application name. If you have already prepared application archive (*.tar.gz), you can explicitly specify path to it by setting `--package` option. Additional output can be turned on by passing `-vvvv` option. ] variable[lower_limit] assign[=] constant[120.0] variable[ctx] assign[=] call[name[Context], parameter[]] if compare[name[ctx].timeout less[<] name[lower_limit]] begin[:] name[ctx].timeout assign[=] name[lower_limit] call[name[log].info, parameter[constant[shifted timeout to the %.2fs], name[ctx].timeout]] variable[mutex_record] assign[=] call[name[collections].namedtuple, parameter[constant[mutex_record], constant[value, name]]] variable[mutex] assign[=] list[[<ast.Tuple object at 0x7da1b2506350>, <ast.Tuple object at 0x7da1b2507400>, <ast.Tuple object at 0x7da1b2506140>]] for taget[tuple[[<ast.Name object at 0x7da1b25062c0>, <ast.Name object at 0x7da1b2506020>]]] in starred[name[mutex]] begin[:] if <ast.BoolOp object at 0x7da1b2507b20> begin[:] call[name[click].echo, parameter[call[constant[Wrong usage: option {} and {} are mutual exclusive, you can only use one].format, parameter[name[f].name, name[s].name]]]] call[name[exit], parameter[name[os].EX_USAGE]] if name[manifest_only] begin[:] call[name[ctx].execute_action, parameter[constant[app:upload-manual]]]
keyword[def] identifier[app_upload] ( identifier[path] , identifier[name] , identifier[manifest] , identifier[package] , identifier[docker_address] , identifier[registry] , identifier[manifest_only] ,** identifier[kwargs] ): literal[string] identifier[lower_limit] = literal[int] identifier[ctx] = identifier[Context] (** identifier[kwargs] ) keyword[if] identifier[ctx] . identifier[timeout] < identifier[lower_limit] : identifier[ctx] . identifier[timeout] = identifier[lower_limit] identifier[log] . identifier[info] ( literal[string] , identifier[ctx] . identifier[timeout] ) identifier[mutex_record] = identifier[collections] . identifier[namedtuple] ( literal[string] , literal[string] ) identifier[mutex] =[ ( identifier[mutex_record] ( identifier[path] , literal[string] ), identifier[mutex_record] ( identifier[package] , literal[string] )), ( identifier[mutex_record] ( identifier[package] , literal[string] ), identifier[mutex_record] ( identifier[docker_address] , literal[string] )), ( identifier[mutex_record] ( identifier[package] , literal[string] ), identifier[mutex_record] ( identifier[registry] , literal[string] )), ] keyword[for] ( identifier[f] , identifier[s] ) keyword[in] identifier[mutex] : keyword[if] identifier[f] . identifier[value] keyword[and] identifier[s] . identifier[value] : identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[f] . identifier[name] , identifier[s] . identifier[name] )) identifier[exit] ( identifier[os] . identifier[EX_USAGE] ) keyword[if] identifier[manifest_only] : identifier[ctx] . identifier[execute_action] ( literal[string] ,**{ literal[string] : identifier[ctx] . identifier[repo] . identifier[create_secure_service] ( literal[string] ), literal[string] : identifier[name] , literal[string] : identifier[manifest] , literal[string] : keyword[None] , literal[string] : identifier[manifest_only] , }) keyword[elif] identifier[package] : identifier[ctx] . 
identifier[execute_action] ( literal[string] ,**{ literal[string] : identifier[ctx] . identifier[repo] . identifier[create_secure_service] ( literal[string] ), literal[string] : identifier[name] , literal[string] : identifier[manifest] , literal[string] : identifier[package] }) keyword[elif] identifier[docker_address] : identifier[ctx] . identifier[execute_action] ( literal[string] ,**{ literal[string] : identifier[ctx] . identifier[repo] . identifier[create_secure_service] ( literal[string] ), literal[string] : identifier[path] , literal[string] : identifier[name] , literal[string] : identifier[manifest] , literal[string] : identifier[docker_address] , literal[string] : identifier[registry] }) keyword[else] : identifier[ctx] . identifier[execute_action] ( literal[string] ,**{ literal[string] : identifier[ctx] . identifier[repo] . identifier[create_secure_service] ( literal[string] ), literal[string] : identifier[path] , literal[string] : identifier[name] , literal[string] : identifier[manifest] })
def app_upload(path, name, manifest, package, docker_address, registry, manifest_only, **kwargs): """ Upload application with its environment (directory) into the storage. Application directory or its subdirectories must contain valid manifest file named `manifest.json` or `manifest` otherwise you must specify it explicitly by setting `--manifest` option. You can specify application name. By default, leaf directory name is treated as application name. If you have already prepared application archive (*.tar.gz), you can explicitly specify path to it by setting `--package` option. Additional output can be turned on by passing `-vvvv` option. """ lower_limit = 120.0 ctx = Context(**kwargs) if ctx.timeout < lower_limit: ctx.timeout = lower_limit log.info('shifted timeout to the %.2fs', ctx.timeout) # depends on [control=['if'], data=['lower_limit']] mutex_record = collections.namedtuple('mutex_record', 'value, name') mutex = [(mutex_record(path, 'PATH'), mutex_record(package, '--package')), (mutex_record(package, '--package'), mutex_record(docker_address, '--docker')), (mutex_record(package, '--package'), mutex_record(registry, '--registry'))] for (f, s) in mutex: if f.value and s.value: click.echo('Wrong usage: option {} and {} are mutual exclusive, you can only use one'.format(f.name, s.name)) exit(os.EX_USAGE) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if manifest_only: ctx.execute_action('app:upload-manual', **{'storage': ctx.repo.create_secure_service('storage'), 'name': name, 'manifest': manifest, 'package': None, 'manifest_only': manifest_only}) # depends on [control=['if'], data=[]] elif package: ctx.execute_action('app:upload-manual', **{'storage': ctx.repo.create_secure_service('storage'), 'name': name, 'manifest': manifest, 'package': package}) # depends on [control=['if'], data=[]] elif docker_address: ctx.execute_action('app:upload-docker', **{'storage': ctx.repo.create_secure_service('storage'), 'path': path, 'name': 
name, 'manifest': manifest, 'address': docker_address, 'registry': registry}) # depends on [control=['if'], data=[]] else: ctx.execute_action('app:upload', **{'storage': ctx.repo.create_secure_service('storage'), 'path': path, 'name': name, 'manifest': manifest})
def _disable(self): """ The configuration containing this function has been disabled by host. Endpoint do not work anymore, so cancel AIO operation blocks. """ if self._enabled: self._real_onCannotSend() has_cancelled = 0 for block in self._aio_recv_block_list + self._aio_send_block_list: try: self._aio_context.cancel(block) except OSError as exc: trace( 'cancelling %r raised: %s' % (block, exc), ) else: has_cancelled += 1 if has_cancelled: noIntr(functools.partial(self._aio_context.getEvents, min_nr=None)) self._enabled = False
def function[_disable, parameter[self]]: constant[ The configuration containing this function has been disabled by host. Endpoint do not work anymore, so cancel AIO operation blocks. ] if name[self]._enabled begin[:] call[name[self]._real_onCannotSend, parameter[]] variable[has_cancelled] assign[=] constant[0] for taget[name[block]] in starred[binary_operation[name[self]._aio_recv_block_list + name[self]._aio_send_block_list]] begin[:] <ast.Try object at 0x7da1b10c64d0> if name[has_cancelled] begin[:] call[name[noIntr], parameter[call[name[functools].partial, parameter[name[self]._aio_context.getEvents]]]] name[self]._enabled assign[=] constant[False]
keyword[def] identifier[_disable] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_enabled] : identifier[self] . identifier[_real_onCannotSend] () identifier[has_cancelled] = literal[int] keyword[for] identifier[block] keyword[in] identifier[self] . identifier[_aio_recv_block_list] + identifier[self] . identifier[_aio_send_block_list] : keyword[try] : identifier[self] . identifier[_aio_context] . identifier[cancel] ( identifier[block] ) keyword[except] identifier[OSError] keyword[as] identifier[exc] : identifier[trace] ( literal[string] %( identifier[block] , identifier[exc] ), ) keyword[else] : identifier[has_cancelled] += literal[int] keyword[if] identifier[has_cancelled] : identifier[noIntr] ( identifier[functools] . identifier[partial] ( identifier[self] . identifier[_aio_context] . identifier[getEvents] , identifier[min_nr] = keyword[None] )) identifier[self] . identifier[_enabled] = keyword[False]
def _disable(self): """ The configuration containing this function has been disabled by host. Endpoint do not work anymore, so cancel AIO operation blocks. """ if self._enabled: self._real_onCannotSend() has_cancelled = 0 for block in self._aio_recv_block_list + self._aio_send_block_list: try: self._aio_context.cancel(block) # depends on [control=['try'], data=[]] except OSError as exc: trace('cancelling %r raised: %s' % (block, exc)) # depends on [control=['except'], data=['exc']] else: has_cancelled += 1 # depends on [control=['for'], data=['block']] if has_cancelled: noIntr(functools.partial(self._aio_context.getEvents, min_nr=None)) # depends on [control=['if'], data=[]] self._enabled = False # depends on [control=['if'], data=[]]
def do_not_track():
    """
    Decorator to skip the default metrics collection for the method.

    *Note*: explicit metrics decorators will still collect the data
    """
    def decorator(view_func):
        @functools.wraps(view_func)
        def wrapper(*args, **kwargs):
            # Flag the current request so the default before/after
            # metric hooks skip it.
            request.prom_do_not_track = True
            return view_func(*args, **kwargs)
        return wrapper
    return decorator
def function[do_not_track, parameter[]]: constant[ Decorator to skip the default metrics collection for the method. *Note*: explicit metrics decorators will still collect the data ] def function[decorator, parameter[f]]: def function[func, parameter[]]: name[request].prom_do_not_track assign[=] constant[True] return[call[name[f], parameter[<ast.Starred object at 0x7da18f00d6c0>]]] return[name[func]] return[name[decorator]]
keyword[def] identifier[do_not_track] (): literal[string] keyword[def] identifier[decorator] ( identifier[f] ): @ identifier[functools] . identifier[wraps] ( identifier[f] ) keyword[def] identifier[func] (* identifier[args] ,** identifier[kwargs] ): identifier[request] . identifier[prom_do_not_track] = keyword[True] keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[func] keyword[return] identifier[decorator]
def do_not_track(): """ Decorator to skip the default metrics collection for the method. *Note*: explicit metrics decorators will still collect the data """ def decorator(f): @functools.wraps(f) def func(*args, **kwargs): request.prom_do_not_track = True return f(*args, **kwargs) return func return decorator
def add_filter(self, table, cols, condition):
    """
    Add a filter. When reading *table*, rows in *table* will be
    filtered by filter_rows().

    Args:
        table: The table the filter applies to.
        cols: The columns in *table* to filter on.
        condition: The filter function.
    """
    # A table of None means "applies to all tables"; a named table must
    # exist in the relations file.
    known_table = table is None or table in self.relations
    if not known_table:
        raise ItsdbError('Cannot add filter; table "{}" is not defined '
                         'by the relations file.'
                         .format(table))
    # this is a hack, though perhaps well-motivated
    filter_cols = [None] if cols is None else cols
    self.filters[table].append((filter_cols, condition))
def function[add_filter, parameter[self, table, cols, condition]]: constant[ Add a filter. When reading *table*, rows in *table* will be filtered by filter_rows(). Args: table: The table the filter applies to. cols: The columns in *table* to filter on. condition: The filter function. ] if <ast.BoolOp object at 0x7da1b06ca0e0> begin[:] <ast.Raise object at 0x7da1b06cb700> if compare[name[cols] is constant[None]] begin[:] variable[cols] assign[=] list[[<ast.Constant object at 0x7da1b06ca500>]] call[call[name[self].filters][name[table]].append, parameter[tuple[[<ast.Name object at 0x7da1b06cb760>, <ast.Name object at 0x7da1b06c8b50>]]]]
keyword[def] identifier[add_filter] ( identifier[self] , identifier[table] , identifier[cols] , identifier[condition] ): literal[string] keyword[if] identifier[table] keyword[is] keyword[not] keyword[None] keyword[and] identifier[table] keyword[not] keyword[in] identifier[self] . identifier[relations] : keyword[raise] identifier[ItsdbError] ( literal[string] literal[string] . identifier[format] ( identifier[table] )) keyword[if] identifier[cols] keyword[is] keyword[None] : identifier[cols] =[ keyword[None] ] identifier[self] . identifier[filters] [ identifier[table] ]. identifier[append] (( identifier[cols] , identifier[condition] ))
def add_filter(self, table, cols, condition): """ Add a filter. When reading *table*, rows in *table* will be filtered by filter_rows(). Args: table: The table the filter applies to. cols: The columns in *table* to filter on. condition: The filter function. """ if table is not None and table not in self.relations: raise ItsdbError('Cannot add filter; table "{}" is not defined by the relations file.'.format(table)) # depends on [control=['if'], data=[]] # this is a hack, though perhaps well-motivated if cols is None: cols = [None] # depends on [control=['if'], data=['cols']] self.filters[table].append((cols, condition))
def _redundancy_routers_for_floatingip( self, context, router_id, redundancy_router_ids=None, ha_settings_db=None): """To be called in update_floatingip() to get the redundant router ids. """ if ha_settings_db is None: ha_settings_db = self._get_ha_settings_by_router_id(context, router_id) if ha_settings_db is None: return e_context = context.elevated() router_ids = [] for r_id in (redundancy_router_ids or self._get_redundancy_router_ids(e_context, router_id)): router_ids.append(r_id) return router_ids
def function[_redundancy_routers_for_floatingip, parameter[self, context, router_id, redundancy_router_ids, ha_settings_db]]: constant[To be called in update_floatingip() to get the redundant router ids. ] if compare[name[ha_settings_db] is constant[None]] begin[:] variable[ha_settings_db] assign[=] call[name[self]._get_ha_settings_by_router_id, parameter[name[context], name[router_id]]] if compare[name[ha_settings_db] is constant[None]] begin[:] return[None] variable[e_context] assign[=] call[name[context].elevated, parameter[]] variable[router_ids] assign[=] list[[]] for taget[name[r_id]] in starred[<ast.BoolOp object at 0x7da18bc70730>] begin[:] call[name[router_ids].append, parameter[name[r_id]]] return[name[router_ids]]
keyword[def] identifier[_redundancy_routers_for_floatingip] ( identifier[self] , identifier[context] , identifier[router_id] , identifier[redundancy_router_ids] = keyword[None] , identifier[ha_settings_db] = keyword[None] ): literal[string] keyword[if] identifier[ha_settings_db] keyword[is] keyword[None] : identifier[ha_settings_db] = identifier[self] . identifier[_get_ha_settings_by_router_id] ( identifier[context] , identifier[router_id] ) keyword[if] identifier[ha_settings_db] keyword[is] keyword[None] : keyword[return] identifier[e_context] = identifier[context] . identifier[elevated] () identifier[router_ids] =[] keyword[for] identifier[r_id] keyword[in] ( identifier[redundancy_router_ids] keyword[or] identifier[self] . identifier[_get_redundancy_router_ids] ( identifier[e_context] , identifier[router_id] )): identifier[router_ids] . identifier[append] ( identifier[r_id] ) keyword[return] identifier[router_ids]
def _redundancy_routers_for_floatingip(self, context, router_id, redundancy_router_ids=None, ha_settings_db=None): """To be called in update_floatingip() to get the redundant router ids. """ if ha_settings_db is None: ha_settings_db = self._get_ha_settings_by_router_id(context, router_id) # depends on [control=['if'], data=['ha_settings_db']] if ha_settings_db is None: return # depends on [control=['if'], data=[]] e_context = context.elevated() router_ids = [] for r_id in redundancy_router_ids or self._get_redundancy_router_ids(e_context, router_id): router_ids.append(r_id) # depends on [control=['for'], data=['r_id']] return router_ids
def uncork(self):
    """
    #305: during startup :class:`LogHandler` may be installed before it
    is possible to route messages, therefore messages are buffered until
    :meth:`uncork` is called by :class:`ExternalContext`.
    """
    # From now on, deliver directly through the context.
    send = self.context.send
    self._send = send
    # Flush everything that queued up while we were corked.
    for buffered_msg in self._buffer:
        send(buffered_msg)
    # Drop the buffer so future messages go straight through.
    self._buffer = None
def function[uncork, parameter[self]]: constant[ #305: during startup :class:`LogHandler` may be installed before it is possible to route messages, therefore messages are buffered until :meth:`uncork` is called by :class:`ExternalContext`. ] name[self]._send assign[=] name[self].context.send for taget[name[msg]] in starred[name[self]._buffer] begin[:] call[name[self]._send, parameter[name[msg]]] name[self]._buffer assign[=] constant[None]
keyword[def] identifier[uncork] ( identifier[self] ): literal[string] identifier[self] . identifier[_send] = identifier[self] . identifier[context] . identifier[send] keyword[for] identifier[msg] keyword[in] identifier[self] . identifier[_buffer] : identifier[self] . identifier[_send] ( identifier[msg] ) identifier[self] . identifier[_buffer] = keyword[None]
def uncork(self): """ #305: during startup :class:`LogHandler` may be installed before it is possible to route messages, therefore messages are buffered until :meth:`uncork` is called by :class:`ExternalContext`. """ self._send = self.context.send for msg in self._buffer: self._send(msg) # depends on [control=['for'], data=['msg']] self._buffer = None
def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series or Index.

    Parameters
    ----------
    start : int, optional
        Start position for slice operation.
    stop : int, optional
        Stop position for slice operation.
    step : int, optional
        Step size for slice operation.

    Returns
    -------
    Series or Index of object
        Series or Index from sliced substring from original string object.

    See Also
    --------
    Series.str.slice_replace : Replace a slice with a string.
    Series.str.get : Return element at position.
        Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
        being the position.

    Examples
    --------
    >>> s = pd.Series(["koala", "fox", "chameleon"])
    >>> s
    0        koala
    1          fox
    2    chameleon
    dtype: object

    >>> s.str.slice(start=1)
    0        oala
    1          ox
    2    hameleon
    dtype: object

    >>> s.str.slice(stop=2)
    0    ko
    1    fo
    2    ch
    dtype: object

    >>> s.str.slice(step=2)
    0      kaa
    1       fx
    2    caeen
    dtype: object

    >>> s.str.slice(start=0, stop=5, step=3)
    0    kl
    1     f
    2    cm
    dtype: object

    Equivalent behaviour to:

    >>> s.str[0:5:3]
    0    kl
    1     f
    2    cm
    dtype: object
    """
    # Build the slice object once, then apply it element-wise with
    # NA-propagation handled by _na_map.
    slicer = slice(start, stop, step)

    def take_slice(element):
        return element[slicer]

    return _na_map(take_slice, arr)
def function[str_slice, parameter[arr, start, stop, step]]: constant[ Slice substrings from each element in the Series or Index. Parameters ---------- start : int, optional Start position for slice operation. stop : int, optional Stop position for slice operation. step : int, optional Step size for slice operation. Returns ------- Series or Index of object Series or Index from sliced substring from original string object. See Also -------- Series.str.slice_replace : Replace a slice with a string. Series.str.get : Return element at position. Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` being the position. Examples -------- >>> s = pd.Series(["koala", "fox", "chameleon"]) >>> s 0 koala 1 fox 2 chameleon dtype: object >>> s.str.slice(start=1) 0 oala 1 ox 2 hameleon dtype: object >>> s.str.slice(stop=2) 0 ko 1 fo 2 ch dtype: object >>> s.str.slice(step=2) 0 kaa 1 fx 2 caeen dtype: object >>> s.str.slice(start=0, stop=5, step=3) 0 kl 1 f 2 cm dtype: object Equivalent behaviour to: >>> s.str[0:5:3] 0 kl 1 f 2 cm dtype: object ] variable[obj] assign[=] call[name[slice], parameter[name[start], name[stop], name[step]]] variable[f] assign[=] <ast.Lambda object at 0x7da20cabfd00> return[call[name[_na_map], parameter[name[f], name[arr]]]]
keyword[def] identifier[str_slice] ( identifier[arr] , identifier[start] = keyword[None] , identifier[stop] = keyword[None] , identifier[step] = keyword[None] ): literal[string] identifier[obj] = identifier[slice] ( identifier[start] , identifier[stop] , identifier[step] ) identifier[f] = keyword[lambda] identifier[x] : identifier[x] [ identifier[obj] ] keyword[return] identifier[_na_map] ( identifier[f] , identifier[arr] )
def str_slice(arr, start=None, stop=None, step=None): """ Slice substrings from each element in the Series or Index. Parameters ---------- start : int, optional Start position for slice operation. stop : int, optional Stop position for slice operation. step : int, optional Step size for slice operation. Returns ------- Series or Index of object Series or Index from sliced substring from original string object. See Also -------- Series.str.slice_replace : Replace a slice with a string. Series.str.get : Return element at position. Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` being the position. Examples -------- >>> s = pd.Series(["koala", "fox", "chameleon"]) >>> s 0 koala 1 fox 2 chameleon dtype: object >>> s.str.slice(start=1) 0 oala 1 ox 2 hameleon dtype: object >>> s.str.slice(stop=2) 0 ko 1 fo 2 ch dtype: object >>> s.str.slice(step=2) 0 kaa 1 fx 2 caeen dtype: object >>> s.str.slice(start=0, stop=5, step=3) 0 kl 1 f 2 cm dtype: object Equivalent behaviour to: >>> s.str[0:5:3] 0 kl 1 f 2 cm dtype: object """ obj = slice(start, stop, step) f = lambda x: x[obj] return _na_map(f, arr)
def for_branch(self, branch): """ Return a UsageLocator for the same block in a different branch of the library. """ return self.replace(library_key=self.library_key.for_branch(branch))
def function[for_branch, parameter[self, branch]]: constant[ Return a UsageLocator for the same block in a different branch of the library. ] return[call[name[self].replace, parameter[]]]
keyword[def] identifier[for_branch] ( identifier[self] , identifier[branch] ): literal[string] keyword[return] identifier[self] . identifier[replace] ( identifier[library_key] = identifier[self] . identifier[library_key] . identifier[for_branch] ( identifier[branch] ))
def for_branch(self, branch): """ Return a UsageLocator for the same block in a different branch of the library. """ return self.replace(library_key=self.library_key.for_branch(branch))
def _rx_timer_handler(self): """Method called every time the rx_timer times out, due to the peer not sending a consecutive frame within the expected time window""" with self.rx_mutex: if self.rx_state == ISOTP_WAIT_DATA: # we did not get new data frames in time. # reset rx state self.rx_state = ISOTP_IDLE warning("RX state was reset due to timeout")
def function[_rx_timer_handler, parameter[self]]: constant[Method called every time the rx_timer times out, due to the peer not sending a consecutive frame within the expected time window] with name[self].rx_mutex begin[:] if compare[name[self].rx_state equal[==] name[ISOTP_WAIT_DATA]] begin[:] name[self].rx_state assign[=] name[ISOTP_IDLE] call[name[warning], parameter[constant[RX state was reset due to timeout]]]
keyword[def] identifier[_rx_timer_handler] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[rx_mutex] : keyword[if] identifier[self] . identifier[rx_state] == identifier[ISOTP_WAIT_DATA] : identifier[self] . identifier[rx_state] = identifier[ISOTP_IDLE] identifier[warning] ( literal[string] )
def _rx_timer_handler(self): """Method called every time the rx_timer times out, due to the peer not sending a consecutive frame within the expected time window""" with self.rx_mutex: if self.rx_state == ISOTP_WAIT_DATA: # we did not get new data frames in time. # reset rx state self.rx_state = ISOTP_IDLE warning('RX state was reset due to timeout') # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]