code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def check_adjust(builder, file_o):  # type: (Builder, Dict[Text, Any]) -> Dict[Text, Any]
    """
    Map files to assigned path inside a container.

    We need to also explicitly walk over input, as implicit reassignment
    doesn't reach everything in builder.bindings
    """
    if not builder.pathmapper:
        raise ValueError("Do not call check_adjust using a builder that doesn't have a pathmapper.")
    # Resolve the in-container target path for this object from the pathmapper.
    mapped_path = builder.pathmapper.mapper(file_o["location"])[1]
    file_o["path"] = docker_windows_path_adjust(mapped_path)

    def _refresh(key, value):
        # Update a derived field only when it is missing or stale.
        if file_o.get(key) != value:
            file_o[key] = Text(value)

    directory, base = os.path.split(file_o["path"])
    _refresh("dirname", directory)
    _refresh("basename", base)
    if file_o["class"] == "File":
        # Files additionally carry nameroot/nameext derived from the basename.
        root, ext = os.path.splitext(file_o["basename"])
        _refresh("nameroot", root)
        _refresh("nameext", ext)
    if not ACCEPTLIST_RE.match(file_o["basename"]):
        raise WorkflowException(
            "Invalid filename: '{}' contains illegal characters".format(
                file_o["basename"]))
    return file_o
def function[check_adjust, parameter[builder, file_o]]: constant[ Map files to assigned path inside a container. We need to also explicitly walk over input, as implicit reassignment doesn't reach everything in builder.bindings ] if <ast.UnaryOp object at 0x7da20c990580> begin[:] <ast.Raise object at 0x7da20c991570> call[name[file_o]][constant[path]] assign[=] call[name[docker_windows_path_adjust], parameter[call[call[name[builder].pathmapper.mapper, parameter[call[name[file_o]][constant[location]]]]][constant[1]]]] <ast.Tuple object at 0x7da20c9912a0> assign[=] call[name[os].path.split, parameter[call[name[file_o]][constant[path]]]] if compare[call[name[file_o].get, parameter[constant[dirname]]] not_equal[!=] name[dn]] begin[:] call[name[file_o]][constant[dirname]] assign[=] call[name[Text], parameter[name[dn]]] if compare[call[name[file_o].get, parameter[constant[basename]]] not_equal[!=] name[bn]] begin[:] call[name[file_o]][constant[basename]] assign[=] call[name[Text], parameter[name[bn]]] if compare[call[name[file_o]][constant[class]] equal[==] constant[File]] begin[:] <ast.Tuple object at 0x7da20c992f20> assign[=] call[name[os].path.splitext, parameter[call[name[file_o]][constant[basename]]]] if compare[call[name[file_o].get, parameter[constant[nameroot]]] not_equal[!=] name[nr]] begin[:] call[name[file_o]][constant[nameroot]] assign[=] call[name[Text], parameter[name[nr]]] if compare[call[name[file_o].get, parameter[constant[nameext]]] not_equal[!=] name[ne]] begin[:] call[name[file_o]][constant[nameext]] assign[=] call[name[Text], parameter[name[ne]]] if <ast.UnaryOp object at 0x7da20cabf490> begin[:] <ast.Raise object at 0x7da20cabeb00> return[name[file_o]]
keyword[def] identifier[check_adjust] ( identifier[builder] , identifier[file_o] ): literal[string] keyword[if] keyword[not] identifier[builder] . identifier[pathmapper] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[file_o] [ literal[string] ]= identifier[docker_windows_path_adjust] ( identifier[builder] . identifier[pathmapper] . identifier[mapper] ( identifier[file_o] [ literal[string] ])[ literal[int] ]) identifier[dn] , identifier[bn] = identifier[os] . identifier[path] . identifier[split] ( identifier[file_o] [ literal[string] ]) keyword[if] identifier[file_o] . identifier[get] ( literal[string] )!= identifier[dn] : identifier[file_o] [ literal[string] ]= identifier[Text] ( identifier[dn] ) keyword[if] identifier[file_o] . identifier[get] ( literal[string] )!= identifier[bn] : identifier[file_o] [ literal[string] ]= identifier[Text] ( identifier[bn] ) keyword[if] identifier[file_o] [ literal[string] ]== literal[string] : identifier[nr] , identifier[ne] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_o] [ literal[string] ]) keyword[if] identifier[file_o] . identifier[get] ( literal[string] )!= identifier[nr] : identifier[file_o] [ literal[string] ]= identifier[Text] ( identifier[nr] ) keyword[if] identifier[file_o] . identifier[get] ( literal[string] )!= identifier[ne] : identifier[file_o] [ literal[string] ]= identifier[Text] ( identifier[ne] ) keyword[if] keyword[not] identifier[ACCEPTLIST_RE] . identifier[match] ( identifier[file_o] [ literal[string] ]): keyword[raise] identifier[WorkflowException] ( literal[string] . identifier[format] ( identifier[file_o] [ literal[string] ])) keyword[return] identifier[file_o]
def check_adjust(builder, file_o): # type: (Builder, Dict[Text, Any]) -> Dict[Text, Any] "\n Map files to assigned path inside a container.\n\n We need to also explicitly walk over input, as implicit reassignment\n doesn't reach everything in builder.bindings\n " if not builder.pathmapper: raise ValueError("Do not call check_adjust using a builder that doesn't have a pathmapper.") # depends on [control=['if'], data=[]] file_o['path'] = docker_windows_path_adjust(builder.pathmapper.mapper(file_o['location'])[1]) (dn, bn) = os.path.split(file_o['path']) if file_o.get('dirname') != dn: file_o['dirname'] = Text(dn) # depends on [control=['if'], data=['dn']] if file_o.get('basename') != bn: file_o['basename'] = Text(bn) # depends on [control=['if'], data=['bn']] if file_o['class'] == 'File': (nr, ne) = os.path.splitext(file_o['basename']) if file_o.get('nameroot') != nr: file_o['nameroot'] = Text(nr) # depends on [control=['if'], data=['nr']] if file_o.get('nameext') != ne: file_o['nameext'] = Text(ne) # depends on [control=['if'], data=['ne']] # depends on [control=['if'], data=[]] if not ACCEPTLIST_RE.match(file_o['basename']): raise WorkflowException("Invalid filename: '{}' contains illegal characters".format(file_o['basename'])) # depends on [control=['if'], data=[]] return file_o
def _set_network(self, v, load=False):
    """
    Setter method for network, mapped from YANG variable
    /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_network is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_network()
    directly.

    YANG Description: To configure the OSPF network type. The default setting
    of the parameter depends on the network type.
    """
    # Some wrapped YANG types carry their raw value in _utype; unwrap first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the incoming value into the generated YANG enumeration type
        # (only 'broadcast' and 'point-to-point' are legal values).
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected
        # YANG type, as produced by the pyangbind code generator.
        raise ValueError({
            'error-string': """network must be of a type compatible with enumeration""",
            'defined-type': "brocade-ospfv3:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)""",
        })
    # Store the validated value on the (name-mangled) backing attribute.
    self.__network = t
    # Notify the containing object of the change, if it supports it.
    if hasattr(self, '_set'):
        self._set()
def function[_set_network, parameter[self, v, load]]: constant[ Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly. YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da2046208b0> name[self].__network assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_network] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }},), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__network] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_network(self, v, load=False): """ Setter method for network, mapped from YANG variable /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly. YANG Description: To configure the OSPF network type.The default setting of the parameter depends on the network type. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}}), is_leaf=True, yang_name='network', rest_name='network', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'network must be of a type compatible with enumeration', 'defined-type': 'brocade-ospfv3:enumeration', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'broadcast\': {\'value\': 1}, u\'point-to-point\': {\'value\': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-full-command\': None, u\'info\': u\'Interface type\'}}, namespace=\'urn:brocade.com:mgmt:brocade-ospfv3\', defining_module=\'brocade-ospfv3\', yang_type=\'enumeration\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__network = t if 
hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def CopyFiles(source_dir, target_dir, create_target_dir=False, md5_check=False):
    '''
    Copy files from the given source to the target.

    :param unicode source_dir:
        A filename, URL or a file mask.
        Ex.
            x:\coilib50
            x:\coilib50\*
            http://server/directory/file
            ftp://server/directory/file

    :param unicode target_dir:
        A directory or an URL
        Ex.
            d:\Temp
            ftp://server/directory

    :param bool create_target_dir:
        If True, creates the target path if it doesn't exists.

    :param bool md5_check:
        .. seealso:: CopyFile

    :raises DirectoryNotFoundError:
        If target_dir does not exist, and create_target_dir is False

    .. seealso:: CopyFile for documentation on accepted protocols

    .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
    '''
    import fnmatch

    # A plain directory means "copy everything in it"; otherwise the last
    # path component is treated as a filename mask.
    if IsDir(source_dir):
        source_mask = '*'
    else:
        source_dir, source_mask = os.path.split(source_dir)

    # Ensure the target directory exists (or fail, depending on the flag).
    if not IsDir(target_dir):
        if not create_target_dir:
            from ._exceptions import DirectoryNotFoundError
            raise DirectoryNotFoundError(target_dir)
        CreateDirectory(target_dir)

    # Nothing to do when the source directory could not be listed.
    filenames = ListFiles(source_dir)
    if filenames is None:
        return

    for name in filenames:
        # .md5 companion files are copied by CopyFile together with their
        # associated files, so skip them here.
        if md5_check and name.endswith('.md5'):
            continue
        if not fnmatch.fnmatch(name, source_mask):
            continue
        src = source_dir + '/' + name
        dst = target_dir + '/' + name
        if IsDir(src):
            # Directories are copied recursively.
            CopyFiles(src, dst, create_target_dir=True, md5_check=md5_check)
        else:
            CopyFile(src, dst, md5_check=md5_check)
def function[CopyFiles, parameter[source_dir, target_dir, create_target_dir, md5_check]]: constant[ Copy files from the given source to the target. :param unicode source_dir: A filename, URL or a file mask. Ex. x:\coilib50 x:\coilib50\* http://server/directory/file ftp://server/directory/file :param unicode target_dir: A directory or an URL Ex. d:\Temp ftp://server/directory :param bool create_target_dir: If True, creates the target path if it doesn't exists. :param bool md5_check: .. seealso:: CopyFile :raises DirectoryNotFoundError: If target_dir does not exist, and create_target_dir is False .. seealso:: CopyFile for documentation on accepted protocols .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information ] import module[fnmatch] if call[name[IsDir], parameter[name[source_dir]]] begin[:] variable[source_mask] assign[=] constant[*] if <ast.UnaryOp object at 0x7da1affee230> begin[:] if name[create_target_dir] begin[:] call[name[CreateDirectory], parameter[name[target_dir]]] variable[filenames] assign[=] call[name[ListFiles], parameter[name[source_dir]]] if compare[name[filenames] is constant[None]] begin[:] return[None] for taget[name[i_filename]] in starred[name[filenames]] begin[:] if <ast.BoolOp object at 0x7da1affed4b0> begin[:] continue if call[name[fnmatch].fnmatch, parameter[name[i_filename], name[source_mask]]] begin[:] variable[source_path] assign[=] binary_operation[binary_operation[name[source_dir] + constant[/]] + name[i_filename]] variable[target_path] assign[=] binary_operation[binary_operation[name[target_dir] + constant[/]] + name[i_filename]] if call[name[IsDir], parameter[name[source_path]]] begin[:] call[name[CopyFiles], parameter[name[source_path], name[target_path]]]
keyword[def] identifier[CopyFiles] ( identifier[source_dir] , identifier[target_dir] , identifier[create_target_dir] = keyword[False] , identifier[md5_check] = keyword[False] ): literal[string] keyword[import] identifier[fnmatch] keyword[if] identifier[IsDir] ( identifier[source_dir] ): identifier[source_mask] = literal[string] keyword[else] : identifier[source_dir] , identifier[source_mask] = identifier[os] . identifier[path] . identifier[split] ( identifier[source_dir] ) keyword[if] keyword[not] identifier[IsDir] ( identifier[target_dir] ): keyword[if] identifier[create_target_dir] : identifier[CreateDirectory] ( identifier[target_dir] ) keyword[else] : keyword[from] . identifier[_exceptions] keyword[import] identifier[DirectoryNotFoundError] keyword[raise] identifier[DirectoryNotFoundError] ( identifier[target_dir] ) identifier[filenames] = identifier[ListFiles] ( identifier[source_dir] ) keyword[if] identifier[filenames] keyword[is] keyword[None] : keyword[return] keyword[for] identifier[i_filename] keyword[in] identifier[filenames] : keyword[if] identifier[md5_check] keyword[and] identifier[i_filename] . identifier[endswith] ( literal[string] ): keyword[continue] keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[i_filename] , identifier[source_mask] ): identifier[source_path] = identifier[source_dir] + literal[string] + identifier[i_filename] identifier[target_path] = identifier[target_dir] + literal[string] + identifier[i_filename] keyword[if] identifier[IsDir] ( identifier[source_path] ): identifier[CopyFiles] ( identifier[source_path] , identifier[target_path] , identifier[create_target_dir] = keyword[True] , identifier[md5_check] = identifier[md5_check] ) keyword[else] : identifier[CopyFile] ( identifier[source_path] , identifier[target_path] , identifier[md5_check] = identifier[md5_check] )
def CopyFiles(source_dir, target_dir, create_target_dir=False, md5_check=False): """ Copy files from the given source to the target. :param unicode source_dir: A filename, URL or a file mask. Ex. x:\\coilib50 x:\\coilib50\\* http://server/directory/file ftp://server/directory/file :param unicode target_dir: A directory or an URL Ex. d:\\Temp ftp://server/directory :param bool create_target_dir: If True, creates the target path if it doesn't exists. :param bool md5_check: .. seealso:: CopyFile :raises DirectoryNotFoundError: If target_dir does not exist, and create_target_dir is False .. seealso:: CopyFile for documentation on accepted protocols .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information """ import fnmatch # Check if we were given a directory or a directory with mask if IsDir(source_dir): # Yes, it's a directory, copy everything from it source_mask = '*' # depends on [control=['if'], data=[]] else: # Split directory and mask (source_dir, source_mask) = os.path.split(source_dir) # Create directory if necessary if not IsDir(target_dir): if create_target_dir: CreateDirectory(target_dir) # depends on [control=['if'], data=[]] else: from ._exceptions import DirectoryNotFoundError raise DirectoryNotFoundError(target_dir) # depends on [control=['if'], data=[]] # List and match files filenames = ListFiles(source_dir) # Check if we have a source directory if filenames is None: return # depends on [control=['if'], data=[]] # Copy files for i_filename in filenames: if md5_check and i_filename.endswith('.md5'): continue # md5 files will be copied by CopyFile when copying their associated files # depends on [control=['if'], data=[]] if fnmatch.fnmatch(i_filename, source_mask): source_path = source_dir + '/' + i_filename target_path = target_dir + '/' + i_filename if IsDir(source_path): # If we found a directory, copy it recursively CopyFiles(source_path, target_path, create_target_dir=True, md5_check=md5_check) # depends on 
[control=['if'], data=[]] else: CopyFile(source_path, target_path, md5_check=md5_check) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i_filename']]
def facets(self, request):
    """
    Sets up a list route for ``faceted`` results.

    This will add ie ^search/facets/$ to your existing ^search pattern.
    """
    queryset = self.filter_facet_queryset(self.get_queryset())
    # Narrow the queryset by every "field:value" facet in the query string;
    # malformed entries (no colon, empty value) are ignored.
    for token in request.query_params.getlist(self.facet_query_params_text):
        if ":" not in token:
            continue
        field, _, value = token.partition(":")
        if not value:
            continue
        queryset = queryset.narrow('%s:"%s"' % (field, queryset.query.clean(value)))
    serializer = self.get_facet_serializer(
        queryset.facet_counts(), objects=queryset, many=False)
    return Response(serializer.data)
def function[facets, parameter[self, request]]: constant[ Sets up a list route for ``faceted`` results. This will add ie ^search/facets/$ to your existing ^search pattern. ] variable[queryset] assign[=] call[name[self].filter_facet_queryset, parameter[call[name[self].get_queryset, parameter[]]]] for taget[name[facet]] in starred[call[name[request].query_params.getlist, parameter[name[self].facet_query_params_text]]] begin[:] if compare[constant[:] <ast.NotIn object at 0x7da2590d7190> name[facet]] begin[:] continue <ast.Tuple object at 0x7da1b110e1d0> assign[=] call[name[facet].split, parameter[constant[:], constant[1]]] if name[value] begin[:] variable[queryset] assign[=] call[name[queryset].narrow, parameter[binary_operation[constant[%s:"%s"] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b110c880>, <ast.Call object at 0x7da1b110f880>]]]]] variable[serializer] assign[=] call[name[self].get_facet_serializer, parameter[call[name[queryset].facet_counts, parameter[]]]] return[call[name[Response], parameter[name[serializer].data]]]
keyword[def] identifier[facets] ( identifier[self] , identifier[request] ): literal[string] identifier[queryset] = identifier[self] . identifier[filter_facet_queryset] ( identifier[self] . identifier[get_queryset] ()) keyword[for] identifier[facet] keyword[in] identifier[request] . identifier[query_params] . identifier[getlist] ( identifier[self] . identifier[facet_query_params_text] ): keyword[if] literal[string] keyword[not] keyword[in] identifier[facet] : keyword[continue] identifier[field] , identifier[value] = identifier[facet] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[value] : identifier[queryset] = identifier[queryset] . identifier[narrow] ( literal[string] %( identifier[field] , identifier[queryset] . identifier[query] . identifier[clean] ( identifier[value] ))) identifier[serializer] = identifier[self] . identifier[get_facet_serializer] ( identifier[queryset] . identifier[facet_counts] (), identifier[objects] = identifier[queryset] , identifier[many] = keyword[False] ) keyword[return] identifier[Response] ( identifier[serializer] . identifier[data] )
def facets(self, request): """ Sets up a list route for ``faceted`` results. This will add ie ^search/facets/$ to your existing ^search pattern. """ queryset = self.filter_facet_queryset(self.get_queryset()) for facet in request.query_params.getlist(self.facet_query_params_text): if ':' not in facet: continue # depends on [control=['if'], data=[]] (field, value) = facet.split(':', 1) if value: queryset = queryset.narrow('%s:"%s"' % (field, queryset.query.clean(value))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['facet']] serializer = self.get_facet_serializer(queryset.facet_counts(), objects=queryset, many=False) return Response(serializer.data)
def doi_query(pmid, search_limit=10):
    """Get the DOI for a PMID by matching CrossRef and Pubmed metadata.

    Searches CrossRef using the article title and then accepts search hits
    only if they have a matching journal ISSN and page number with what is
    obtained from the Pubmed database.

    Parameters
    ----------
    pmid : str
        The PubMed ID of the article to map to a DOI.
    search_limit : int
        Maximum number of CrossRef search results to examine (default 10).

    Returns
    -------
    str or None
        The matched DOI, or None if no result met the matching criteria.
    """
    # Get article metadata from PubMed
    pubmed_meta_dict = pubmed_client.get_metadata_for_ids([pmid],
                                                          get_issns_from_nlm=True)
    if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None:
        logger.warning('No metadata found in Pubmed for PMID%s' % pmid)
        return None
    # The test above ensures we've got this now
    pubmed_meta = pubmed_meta_dict[pmid]
    # Check if we already got a DOI from Pubmed itself!
    if pubmed_meta.get('doi'):
        return pubmed_meta.get('doi')
    # Check for the title, which we'll need for the CrossRef search
    pm_article_title = pubmed_meta.get('title')
    if pm_article_title is None:
        logger.warning('No article title found in Pubmed for PMID%s' % pmid)
        return None
    # Get the ISSN list
    pm_issn_list = pubmed_meta.get('issn_list')
    if not pm_issn_list:
        logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid)
        return None
    # Get the page number
    pm_page = pubmed_meta.get('page')
    if not pm_page:
        logger.debug('No page number found in Pubmed for PMID%s' % pmid)
        return None
    # Now query CrossRef using the title we've got.
    # (BUGFIX: removed a dead `url = crossref_search_url` local that was
    # assigned but never used.)
    params = {'q': pm_article_title, 'sort': 'score'}
    try:
        res = requests.get(crossref_search_url, params)
    except requests.exceptions.ConnectionError as e:
        logger.error('CrossRef service could not be reached.')
        logger.error(e)
        return None
    except Exception as e:
        logger.error('Error accessing CrossRef service: %s' % str(e))
        return None
    if res.status_code != 200:
        logger.info('PMID%s: no search results from CrossRef, code %d' %
                    (pmid, res.status_code))
        return None
    raw_message = res.json()
    mapped_doi = None
    # Iterate over the search results, looking up XREF metadata
    for result_ix, result in enumerate(raw_message):
        # BUGFIX: use >= so exactly `search_limit` results are examined;
        # the previous `>` comparison inspected one result past the limit,
        # contradicting the "within first %s results" message below.
        if result_ix >= search_limit:
            logger.info('PMID%s: No match found within first %s results, '
                        'giving up!' % (pmid, search_limit))
            break
        xref_doi_url = result['doi']
        # Strip the URL prefix off of the DOI
        # NOTE(review): pattern assumes http://dx.doi.org/ prefixes; confirm
        # against current CrossRef responses (https://doi.org/ is also common).
        m = re.match('^http://dx.doi.org/(.*)$', xref_doi_url)
        if m is None:
            # BUGFIX: an unexpected DOI URL format previously raised an
            # AttributeError on m.groups(); skip such results instead.
            logger.debug('Could not parse DOI URL %s, skipping' % xref_doi_url)
            continue
        xref_doi = m.groups()[0]
        # Get the XREF metadata using the DOI
        xref_meta = get_metadata(xref_doi)
        if xref_meta is None:
            continue
        xref_issn_list = xref_meta.get('ISSN')
        xref_page = xref_meta.get('page')
        # If there's no ISSN info for this article, skip to the next result
        if not xref_issn_list:
            logger.debug('No ISSN found for DOI %s, skipping' % xref_doi_url)
            continue
        # If there's no page info for this article, skip to the next result
        if not xref_page:
            logger.debug('No page number found for DOI %s, skipping' %
                         xref_doi_url)
            continue
        # Now check for an ISSN match by looking for the set intersection
        # between the Pubmed ISSN list and the CrossRef ISSN list.
        matching_issns = set(pm_issn_list).intersection(set(xref_issn_list))
        # Before comparing page numbers, regularize the page numbers a bit.
        # Note that we only compare the first page number, since frequently
        # the final page number will simply be missing in one of the data
        # sources. We also canonicalize page numbers of the form '14E' to
        # 'E14' (which is the format used by Pubmed).
        pm_start_page = pm_page.split('-')[0].upper()
        xr_start_page = xref_page.split('-')[0].upper()
        if xr_start_page.endswith('E'):
            xr_start_page = 'E' + xr_start_page[:-1]
        # Now compare the ISSN list and page numbers
        if matching_issns and pm_start_page == xr_start_page:
            # We found a match!
            mapped_doi = xref_doi
            break
        # Otherwise, keep looking through the results...
    # Return a DOI, or None if we didn't find one that met our matching
    # criteria
    return mapped_doi
def function[doi_query, parameter[pmid, search_limit]]: constant[Get the DOI for a PMID by matching CrossRef and Pubmed metadata. Searches CrossRef using the article title and then accepts search hits only if they have a matching journal ISSN and page number with what is obtained from the Pubmed database. ] variable[pubmed_meta_dict] assign[=] call[name[pubmed_client].get_metadata_for_ids, parameter[list[[<ast.Name object at 0x7da2041dbca0>]]]] if <ast.BoolOp object at 0x7da2041db8e0> begin[:] call[name[logger].warning, parameter[binary_operation[constant[No metadata found in Pubmed for PMID%s] <ast.Mod object at 0x7da2590d6920> name[pmid]]]] return[constant[None]] variable[pubmed_meta] assign[=] call[name[pubmed_meta_dict]][name[pmid]] if call[name[pubmed_meta].get, parameter[constant[doi]]] begin[:] return[call[name[pubmed_meta].get, parameter[constant[doi]]]] variable[pm_article_title] assign[=] call[name[pubmed_meta].get, parameter[constant[title]]] if compare[name[pm_article_title] is constant[None]] begin[:] call[name[logger].warning, parameter[binary_operation[constant[No article title found in Pubmed for PMID%s] <ast.Mod object at 0x7da2590d6920> name[pmid]]]] return[constant[None]] variable[pm_issn_list] assign[=] call[name[pubmed_meta].get, parameter[constant[issn_list]]] if <ast.UnaryOp object at 0x7da2041db640> begin[:] call[name[logger].warning, parameter[binary_operation[constant[No ISSNs found in Pubmed for PMID%s] <ast.Mod object at 0x7da2590d6920> name[pmid]]]] return[constant[None]] variable[pm_page] assign[=] call[name[pubmed_meta].get, parameter[constant[page]]] if <ast.UnaryOp object at 0x7da2041d8370> begin[:] call[name[logger].debug, parameter[binary_operation[constant[No page number found in Pubmed for PMID%s] <ast.Mod object at 0x7da2590d6920> name[pmid]]]] return[constant[None]] variable[url] assign[=] name[crossref_search_url] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2041d92a0>, <ast.Constant object at 
0x7da20c7c86d0>], [<ast.Name object at 0x7da20c7cb520>, <ast.Constant object at 0x7da20c7c8be0>]] <ast.Try object at 0x7da20c7c9150> if compare[name[res].status_code not_equal[!=] constant[200]] begin[:] call[name[logger].info, parameter[binary_operation[constant[PMID%s: no search results from CrossRef, code %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7c9450>, <ast.Attribute object at 0x7da20c7ca920>]]]]] return[constant[None]] variable[raw_message] assign[=] call[name[res].json, parameter[]] variable[mapped_doi] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da20c7ca290>, <ast.Name object at 0x7da20c7ca830>]]] in starred[call[name[enumerate], parameter[name[raw_message]]]] begin[:] if compare[name[result_ix] greater[>] name[search_limit]] begin[:] call[name[logger].info, parameter[binary_operation[constant[PMID%s: No match found within first %s results, giving up!] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c992050>, <ast.Name object at 0x7da20c990a60>]]]]] break variable[xref_doi_url] assign[=] call[name[result]][constant[doi]] variable[m] assign[=] call[name[re].match, parameter[constant[^http://dx.doi.org/(.*)$], name[xref_doi_url]]] variable[xref_doi] assign[=] call[call[name[m].groups, parameter[]]][constant[0]] variable[xref_meta] assign[=] call[name[get_metadata], parameter[name[xref_doi]]] if compare[name[xref_meta] is constant[None]] begin[:] continue variable[xref_issn_list] assign[=] call[name[xref_meta].get, parameter[constant[ISSN]]] variable[xref_page] assign[=] call[name[xref_meta].get, parameter[constant[page]]] if <ast.UnaryOp object at 0x7da20c9929e0> begin[:] call[name[logger].debug, parameter[binary_operation[constant[No ISSN found for DOI %s, skipping] <ast.Mod object at 0x7da2590d6920> name[xref_doi_url]]]] continue if <ast.UnaryOp object at 0x7da20c990cd0> begin[:] call[name[logger].debug, parameter[binary_operation[constant[No page number found for DOI %s, skipping] 
<ast.Mod object at 0x7da2590d6920> name[xref_doi_url]]]] continue variable[matching_issns] assign[=] call[call[name[set], parameter[name[pm_issn_list]]].intersection, parameter[call[name[set], parameter[name[xref_issn_list]]]]] variable[pm_start_page] assign[=] call[call[call[name[pm_page].split, parameter[constant[-]]]][constant[0]].upper, parameter[]] variable[xr_start_page] assign[=] call[call[call[name[xref_page].split, parameter[constant[-]]]][constant[0]].upper, parameter[]] if call[name[xr_start_page].endswith, parameter[constant[E]]] begin[:] variable[xr_start_page] assign[=] binary_operation[constant[E] + call[name[xr_start_page]][<ast.Slice object at 0x7da20c993160>]] if <ast.BoolOp object at 0x7da20c9927d0> begin[:] variable[mapped_doi] assign[=] name[xref_doi] break return[name[mapped_doi]]
keyword[def] identifier[doi_query] ( identifier[pmid] , identifier[search_limit] = literal[int] ): literal[string] identifier[pubmed_meta_dict] = identifier[pubmed_client] . identifier[get_metadata_for_ids] ([ identifier[pmid] ], identifier[get_issns_from_nlm] = keyword[True] ) keyword[if] identifier[pubmed_meta_dict] keyword[is] keyword[None] keyword[or] identifier[pubmed_meta_dict] . identifier[get] ( identifier[pmid] ) keyword[is] keyword[None] : identifier[logger] . identifier[warning] ( literal[string] % identifier[pmid] ) keyword[return] keyword[None] identifier[pubmed_meta] = identifier[pubmed_meta_dict] [ identifier[pmid] ] keyword[if] identifier[pubmed_meta] . identifier[get] ( literal[string] ): keyword[return] identifier[pubmed_meta] . identifier[get] ( literal[string] ) identifier[pm_article_title] = identifier[pubmed_meta] . identifier[get] ( literal[string] ) keyword[if] identifier[pm_article_title] keyword[is] keyword[None] : identifier[logger] . identifier[warning] ( literal[string] % identifier[pmid] ) keyword[return] keyword[None] identifier[pm_issn_list] = identifier[pubmed_meta] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[pm_issn_list] : identifier[logger] . identifier[warning] ( literal[string] % identifier[pmid] ) keyword[return] keyword[None] identifier[pm_page] = identifier[pubmed_meta] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[pm_page] : identifier[logger] . identifier[debug] ( literal[string] % identifier[pmid] ) keyword[return] keyword[None] identifier[url] = identifier[crossref_search_url] identifier[params] ={ literal[string] : identifier[pm_article_title] , literal[string] : literal[string] } keyword[try] : identifier[res] = identifier[requests] . identifier[get] ( identifier[crossref_search_url] , identifier[params] ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] keyword[as] identifier[e] : identifier[logger] . 
identifier[error] ( literal[string] ) identifier[logger] . identifier[error] ( identifier[e] ) keyword[return] keyword[None] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[error] ( literal[string] % identifier[str] ( identifier[e] )) keyword[return] keyword[None] keyword[if] identifier[res] . identifier[status_code] != literal[int] : identifier[logger] . identifier[info] ( literal[string] % ( identifier[pmid] , identifier[res] . identifier[status_code] )) keyword[return] keyword[None] identifier[raw_message] = identifier[res] . identifier[json] () identifier[mapped_doi] = keyword[None] keyword[for] identifier[result_ix] , identifier[result] keyword[in] identifier[enumerate] ( identifier[raw_message] ): keyword[if] identifier[result_ix] > identifier[search_limit] : identifier[logger] . identifier[info] ( literal[string] literal[string] %( identifier[pmid] , identifier[search_limit] )) keyword[break] identifier[xref_doi_url] = identifier[result] [ literal[string] ] identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[xref_doi_url] ) identifier[xref_doi] = identifier[m] . identifier[groups] ()[ literal[int] ] identifier[xref_meta] = identifier[get_metadata] ( identifier[xref_doi] ) keyword[if] identifier[xref_meta] keyword[is] keyword[None] : keyword[continue] identifier[xref_issn_list] = identifier[xref_meta] . identifier[get] ( literal[string] ) identifier[xref_page] = identifier[xref_meta] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[xref_issn_list] : identifier[logger] . identifier[debug] ( literal[string] % identifier[xref_doi_url] ) keyword[continue] keyword[if] keyword[not] identifier[xref_page] : identifier[logger] . identifier[debug] ( literal[string] % identifier[xref_doi_url] ) keyword[continue] identifier[matching_issns] = identifier[set] ( identifier[pm_issn_list] ). 
identifier[intersection] ( identifier[set] ( identifier[xref_issn_list] )) identifier[pm_start_page] = identifier[pm_page] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[upper] () identifier[xr_start_page] = identifier[xref_page] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[upper] () keyword[if] identifier[xr_start_page] . identifier[endswith] ( literal[string] ): identifier[xr_start_page] = literal[string] + identifier[xr_start_page] [:- literal[int] ] keyword[if] identifier[matching_issns] keyword[and] identifier[pm_start_page] == identifier[xr_start_page] : identifier[mapped_doi] = identifier[xref_doi] keyword[break] keyword[return] identifier[mapped_doi]
def doi_query(pmid, search_limit=10):
    """Get the DOI for a PMID by matching CrossRef and Pubmed metadata.

    Searches CrossRef using the article title and then accepts search hits
    only if they have a matching journal ISSN and page number with what is
    obtained from the Pubmed database.

    Parameters
    ----------
    pmid : str
        The PubMed ID to map to a DOI.
    search_limit : int
        Maximum number of CrossRef search hits to examine before giving up.

    Returns
    -------
    str or None
        The matched DOI, or None if no confident match was found.
    """
    # Get article metadata from PubMed
    pubmed_meta_dict = pubmed_client.get_metadata_for_ids(
        [pmid], get_issns_from_nlm=True)
    if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None:
        logger.warning('No metadata found in Pubmed for PMID%s' % pmid)
        return None
    # The test above ensures we've got this now
    pubmed_meta = pubmed_meta_dict[pmid]
    # Check if we already got a DOI from Pubmed itself!
    if pubmed_meta.get('doi'):
        return pubmed_meta.get('doi')
    # Check for the title, which we'll need for the CrossRef search
    pm_article_title = pubmed_meta.get('title')
    if pm_article_title is None:
        logger.warning('No article title found in Pubmed for PMID%s' % pmid)
        return None
    # Get the ISSN list
    pm_issn_list = pubmed_meta.get('issn_list')
    if not pm_issn_list:
        logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid)
        return None
    # Get the page number
    pm_page = pubmed_meta.get('page')
    if not pm_page:
        logger.debug('No page number found in Pubmed for PMID%s' % pmid)
        return None
    # Now query CrossRef using the title we've got.
    # (Removed dead local `url`; the global crossref_search_url is used
    # directly.)
    params = {'q': pm_article_title, 'sort': 'score'}
    try:
        res = requests.get(crossref_search_url, params)
    except requests.exceptions.ConnectionError as e:
        logger.error('CrossRef service could not be reached.')
        logger.error(e)
        return None
    except Exception as e:
        logger.error('Error accessing CrossRef service: %s' % str(e))
        return None
    if res.status_code != 200:
        logger.info('PMID%s: no search results from CrossRef, code %d' %
                    (pmid, res.status_code))
        return None
    raw_message = res.json()
    mapped_doi = None
    # Iterate over the search results, looking up XREF metadata
    for result_ix, result in enumerate(raw_message):
        # BUGFIX: was `result_ix > search_limit`, which examined
        # search_limit + 1 results before giving up.
        if result_ix >= search_limit:
            logger.info('PMID%s: No match found within first %s results, '
                        'giving up!' % (pmid, search_limit))
            break
        xref_doi_url = result['doi']
        # Strip the URL prefix off of the DOI
        m = re.match('^http://dx.doi.org/(.*)$', xref_doi_url)
        if m is None:
            # BUGFIX: guard against URLs without the expected prefix, which
            # previously raised AttributeError on m.groups().
            logger.debug('Could not parse DOI from URL %s, skipping'
                         % xref_doi_url)
            continue
        xref_doi = m.groups()[0]
        # Get the XREF metadata using the DOI
        xref_meta = get_metadata(xref_doi)
        if xref_meta is None:
            continue
        xref_issn_list = xref_meta.get('ISSN')
        xref_page = xref_meta.get('page')
        # If there's no ISSN info for this article, skip to the next result
        if not xref_issn_list:
            logger.debug('No ISSN found for DOI %s, skipping' % xref_doi_url)
            continue
        # If there's no page info for this article, skip to the next result
        if not xref_page:
            logger.debug('No page number found for DOI %s, skipping'
                         % xref_doi_url)
            continue
        # Now check for an ISSN match by looking for the set intersection
        # between the Pubmed ISSN list and the CrossRef ISSN list.
        matching_issns = set(pm_issn_list).intersection(set(xref_issn_list))
        # Before comparing page numbers, regularize the page numbers a bit.
        # Only the first page number is compared, since frequently the final
        # page number is missing in one of the data sources. Page numbers of
        # the form '14E' are canonicalized to 'E14' (the format used by
        # Pubmed).
        pm_start_page = pm_page.split('-')[0].upper()
        xr_start_page = xref_page.split('-')[0].upper()
        if xr_start_page.endswith('E'):
            xr_start_page = 'E' + xr_start_page[:-1]
        # Accept the hit only if both the ISSN and the start page agree.
        if matching_issns and pm_start_page == xr_start_page:
            # We found a match!
            mapped_doi = xref_doi
            break
    # Return a DOI, or None if we didn't find one that met our matching
    # criteria
    return mapped_doi
def immerkaer(input, mode="reflect", cval=0.0):
    r"""
    Estimate the global noise.

    The input image is assumed to carry additive zero-mean Gaussian noise.
    A convolution with a Laplacian operator followed by averaging yields an
    estimate of the standard deviation sigma of this noise. The estimate is
    global, i.e. the noise is assumed to be homogeneous over the whole
    image. Implementation based on [1]_.

    Immerkaer suggested the separable Laplacian-based 2D kernel::

        [[ 1, -2,  1],
         [-2,  4, -1],
         [ 1, -2,  1]]

    which can be applied by consecutive convolutions with the 1D kernel
    ``[1, -2, 1]``. This is generalized to N dimensions by applying N
    consecutive convolutions with that 1D kernel along all N axes, which is
    equivalent to convolving with the ND kernel built by repeated outer
    products of the 1D kernel with itself.

    Parameters
    ----------
    input : array_like
        Array of which to estimate the noise.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        How the array borders are handled; `cval` is the fill value when
        mode is 'constant'. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'.
        Default is 0.0.

    Returns
    -------
    sigma : float
        The estimated standard deviation of the image's Gaussian noise.

    Notes
    -----
    Does not take the voxel spacing into account. Works well with medium to
    strong noise; tends to underestimate for low noise levels.

    See also
    --------
    immerkaer_local

    References
    ----------
    .. [1] John Immerkaer, "Fast Noise Variance Estimation", Computer
       Vision and Image Understanding, Volume 64, Issue 2, September 1996,
       Pages 300-302, ISSN 1077-3142
    """
    kernel1d = [1, -2, 1]
    # Build the ND kernel via repeated outer products of the 1D kernel, then
    # take the sum of its squared elements (36 for 1D, 216 for 2D, etc.).
    kernel = kernel1d
    for _ in range(input.ndim - 1):
        kernel = numpy.tensordot(kernel, kernel1d, 0)
    divider = numpy.square(numpy.abs(kernel)).sum()

    # Convolve with the Laplacian and derive the noise sigma from the mean
    # absolute response.
    laplace = separable_convolution(input, kernel1d, None, mode, cval)
    factor = numpy.sqrt(numpy.pi / 2.) * 1. / (
        numpy.sqrt(divider) * numpy.prod(laplace.shape))
    return factor * numpy.abs(laplace).sum()
def function[immerkaer, parameter[input, mode, cval]]: constant[ Estimate the global noise. The input image is assumed to have additive zero mean Gaussian noise. Using a convolution with a Laplacian operator and a subsequent averaging the standard deviation sigma of this noise is estimated. This estimation is global i.e. the noise is assumed to be globally homogeneous over the image. Implementation based on [1]_. Immerkaer suggested a Laplacian-based 2D kernel:: [[ 1, -2, 1], [-2, 4, -1], [ 1, -2, 1]] , which is separable and can therefore be applied by consecutive convolutions with the one dimensional kernel [1, -2, 1]. We generalize from this 1D-kernel to an ND-kernel by applying N consecutive convolutions with the 1D-kernel along all N dimensions. This is equivalent with convolving the image with an ND-kernel constructed by calling >>> kernel1d = numpy.asarray([1, -2, 1]) >>> kernel = kernel1d.copy() >>> for _ in range(input.ndim): >>> kernel = numpy.tensordot(kernel, kernel1d, 0) Parameters ---------- input : array_like Array of which to estimate the noise. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 Returns ------- sigma : float The estimated standard deviation of the images Gaussian noise. Notes ----- Does not take the voxel spacing into account. Works good with medium to strong noise. Tends to underestimate for low noise levels. See also -------- immerkaer_local References ---------- .. 
[1] John Immerkaer, "Fast Noise Variance Estimation", Computer Vision and Image Understanding, Volume 64, Issue 2, September 1996, Pages 300-302, ISSN 1077-3142 ] variable[kernel] assign[=] list[[<ast.Constant object at 0x7da2054a6920>, <ast.UnaryOp object at 0x7da2054a4580>, <ast.Constant object at 0x7da2054a4910>]] for taget[name[_]] in starred[call[name[range], parameter[binary_operation[name[input].ndim - constant[1]]]]] begin[:] variable[kernel] assign[=] call[name[numpy].tensordot, parameter[name[kernel], list[[<ast.Constant object at 0x7da2054a6410>, <ast.UnaryOp object at 0x7da2054a5840>, <ast.Constant object at 0x7da2054a6ec0>]], constant[0]]] variable[divider] assign[=] call[call[name[numpy].square, parameter[call[name[numpy].abs, parameter[name[kernel]]]]].sum, parameter[]] variable[laplace] assign[=] call[name[separable_convolution], parameter[name[input], list[[<ast.Constant object at 0x7da2054a5930>, <ast.UnaryOp object at 0x7da2054a56f0>, <ast.Constant object at 0x7da2054a4520>]], constant[None], name[mode], name[cval]]] variable[factor] assign[=] binary_operation[binary_operation[call[name[numpy].sqrt, parameter[binary_operation[name[numpy].pi / constant[2.0]]]] * constant[1.0]] / binary_operation[call[name[numpy].sqrt, parameter[name[divider]]] * call[name[numpy].prod, parameter[name[laplace].shape]]]] variable[sigma] assign[=] binary_operation[name[factor] * call[call[name[numpy].abs, parameter[name[laplace]]].sum, parameter[]]] return[name[sigma]]
keyword[def] identifier[immerkaer] ( identifier[input] , identifier[mode] = literal[string] , identifier[cval] = literal[int] ): literal[string] identifier[kernel] =[ literal[int] ,- literal[int] , literal[int] ] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[input] . identifier[ndim] - literal[int] ): identifier[kernel] = identifier[numpy] . identifier[tensordot] ( identifier[kernel] ,[ literal[int] ,- literal[int] , literal[int] ], literal[int] ) identifier[divider] = identifier[numpy] . identifier[square] ( identifier[numpy] . identifier[abs] ( identifier[kernel] )). identifier[sum] () identifier[laplace] = identifier[separable_convolution] ( identifier[input] ,[ literal[int] ,- literal[int] , literal[int] ], keyword[None] , identifier[mode] , identifier[cval] ) identifier[factor] = identifier[numpy] . identifier[sqrt] ( identifier[numpy] . identifier[pi] / literal[int] )* literal[int] /( identifier[numpy] . identifier[sqrt] ( identifier[divider] )* identifier[numpy] . identifier[prod] ( identifier[laplace] . identifier[shape] )) identifier[sigma] = identifier[factor] * identifier[numpy] . identifier[abs] ( identifier[laplace] ). identifier[sum] () keyword[return] identifier[sigma]
def immerkaer(input, mode='reflect', cval=0.0): """ Estimate the global noise. The input image is assumed to have additive zero mean Gaussian noise. Using a convolution with a Laplacian operator and a subsequent averaging the standard deviation sigma of this noise is estimated. This estimation is global i.e. the noise is assumed to be globally homogeneous over the image. Implementation based on [1]_. Immerkaer suggested a Laplacian-based 2D kernel:: [[ 1, -2, 1], [-2, 4, -1], [ 1, -2, 1]] , which is separable and can therefore be applied by consecutive convolutions with the one dimensional kernel [1, -2, 1]. We generalize from this 1D-kernel to an ND-kernel by applying N consecutive convolutions with the 1D-kernel along all N dimensions. This is equivalent with convolving the image with an ND-kernel constructed by calling >>> kernel1d = numpy.asarray([1, -2, 1]) >>> kernel = kernel1d.copy() >>> for _ in range(input.ndim): >>> kernel = numpy.tensordot(kernel, kernel1d, 0) Parameters ---------- input : array_like Array of which to estimate the noise. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 Returns ------- sigma : float The estimated standard deviation of the images Gaussian noise. Notes ----- Does not take the voxel spacing into account. Works good with medium to strong noise. Tends to underestimate for low noise levels. See also -------- immerkaer_local References ---------- .. 
[1] John Immerkaer, "Fast Noise Variance Estimation", Computer Vision and Image Understanding, Volume 64, Issue 2, September 1996, Pages 300-302, ISSN 1077-3142 """ # build nd-kernel to acquire square root of sum of squared elements kernel = [1, -2, 1] for _ in range(input.ndim - 1): kernel = numpy.tensordot(kernel, [1, -2, 1], 0) # depends on [control=['for'], data=[]] divider = numpy.square(numpy.abs(kernel)).sum() # 36 for 1d, 216 for 3D, etc. # compute laplace of input and derive noise sigma laplace = separable_convolution(input, [1, -2, 1], None, mode, cval) factor = numpy.sqrt(numpy.pi / 2.0) * 1.0 / (numpy.sqrt(divider) * numpy.prod(laplace.shape)) sigma = factor * numpy.abs(laplace).sum() return sigma
def download_dxfile(dxid, filename, chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False, project=None, describe_output=None, **kwargs): ''' :param dxid: DNAnexus file ID or DXFile (file handler) object :type dxid: string or DXFile :param filename: Local filename :type filename: string :param append: If True, appends to the local file (default is to truncate local file if it exists) :type append: boolean :param project: project to use as context for this download (may affect which billing account is billed for this download). If None or DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server. :type project: str or None :param describe_output: (experimental) output of the file-xxxx/describe API call, if available. It will make it possible to skip another describe API call. It should contain the default fields of the describe API call output and the "parts" field, not included in the output by default. :type describe_output: dict or None Downloads the remote file referenced by *dxid* and saves it to *filename*. Example:: download_dxfile("file-xxxx", "localfilename.fastq") ''' # retry the inner loop while there are retriable errors part_retry_counter = defaultdict(lambda: 3) success = False while not success: success = _download_dxfile(dxid, filename, part_retry_counter, chunksize=chunksize, append=append, show_progress=show_progress, project=project, describe_output=describe_output, **kwargs)
def function[download_dxfile, parameter[dxid, filename, chunksize, append, show_progress, project, describe_output]]: constant[ :param dxid: DNAnexus file ID or DXFile (file handler) object :type dxid: string or DXFile :param filename: Local filename :type filename: string :param append: If True, appends to the local file (default is to truncate local file if it exists) :type append: boolean :param project: project to use as context for this download (may affect which billing account is billed for this download). If None or DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server. :type project: str or None :param describe_output: (experimental) output of the file-xxxx/describe API call, if available. It will make it possible to skip another describe API call. It should contain the default fields of the describe API call output and the "parts" field, not included in the output by default. :type describe_output: dict or None Downloads the remote file referenced by *dxid* and saves it to *filename*. Example:: download_dxfile("file-xxxx", "localfilename.fastq") ] variable[part_retry_counter] assign[=] call[name[defaultdict], parameter[<ast.Lambda object at 0x7da1b0592860>]] variable[success] assign[=] constant[False] while <ast.UnaryOp object at 0x7da1b05931f0> begin[:] variable[success] assign[=] call[name[_download_dxfile], parameter[name[dxid], name[filename], name[part_retry_counter]]]
keyword[def] identifier[download_dxfile] ( identifier[dxid] , identifier[filename] , identifier[chunksize] = identifier[dxfile] . identifier[DEFAULT_BUFFER_SIZE] , identifier[append] = keyword[False] , identifier[show_progress] = keyword[False] , identifier[project] = keyword[None] , identifier[describe_output] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[part_retry_counter] = identifier[defaultdict] ( keyword[lambda] : literal[int] ) identifier[success] = keyword[False] keyword[while] keyword[not] identifier[success] : identifier[success] = identifier[_download_dxfile] ( identifier[dxid] , identifier[filename] , identifier[part_retry_counter] , identifier[chunksize] = identifier[chunksize] , identifier[append] = identifier[append] , identifier[show_progress] = identifier[show_progress] , identifier[project] = identifier[project] , identifier[describe_output] = identifier[describe_output] , ** identifier[kwargs] )
def download_dxfile(dxid, filename, chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False, project=None, describe_output=None, **kwargs): """ :param dxid: DNAnexus file ID or DXFile (file handler) object :type dxid: string or DXFile :param filename: Local filename :type filename: string :param append: If True, appends to the local file (default is to truncate local file if it exists) :type append: boolean :param project: project to use as context for this download (may affect which billing account is billed for this download). If None or DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server. :type project: str or None :param describe_output: (experimental) output of the file-xxxx/describe API call, if available. It will make it possible to skip another describe API call. It should contain the default fields of the describe API call output and the "parts" field, not included in the output by default. :type describe_output: dict or None Downloads the remote file referenced by *dxid* and saves it to *filename*. Example:: download_dxfile("file-xxxx", "localfilename.fastq") """ # retry the inner loop while there are retriable errors part_retry_counter = defaultdict(lambda : 3) success = False while not success: success = _download_dxfile(dxid, filename, part_retry_counter, chunksize=chunksize, append=append, show_progress=show_progress, project=project, describe_output=describe_output, **kwargs) # depends on [control=['while'], data=[]]
def first(self, **kwargs): """ Retrieve the first node from the set matching supplied parameters :param kwargs: same syntax as `filter()` :return: node """ result = result = self._get(limit=1, **kwargs) if result: return result[0] else: raise self.source_class.DoesNotExist(repr(kwargs))
def function[first, parameter[self]]: constant[ Retrieve the first node from the set matching supplied parameters :param kwargs: same syntax as `filter()` :return: node ] variable[result] assign[=] call[name[self]._get, parameter[]] if name[result] begin[:] return[call[name[result]][constant[0]]]
keyword[def] identifier[first] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[result] = identifier[result] = identifier[self] . identifier[_get] ( identifier[limit] = literal[int] ,** identifier[kwargs] ) keyword[if] identifier[result] : keyword[return] identifier[result] [ literal[int] ] keyword[else] : keyword[raise] identifier[self] . identifier[source_class] . identifier[DoesNotExist] ( identifier[repr] ( identifier[kwargs] ))
def first(self, **kwargs): """ Retrieve the first node from the set matching supplied parameters :param kwargs: same syntax as `filter()` :return: node """ result = result = self._get(limit=1, **kwargs) if result: return result[0] # depends on [control=['if'], data=[]] else: raise self.source_class.DoesNotExist(repr(kwargs))
def is_cors_enabled_on_request(self, request: web.Request) -> bool: """Is `request` is a request for CORS-enabled resource.""" return self._request_resource(request) in self._resource_config
def function[is_cors_enabled_on_request, parameter[self, request]]: constant[Is `request` is a request for CORS-enabled resource.] return[compare[call[name[self]._request_resource, parameter[name[request]]] in name[self]._resource_config]]
keyword[def] identifier[is_cors_enabled_on_request] ( identifier[self] , identifier[request] : identifier[web] . identifier[Request] )-> identifier[bool] : literal[string] keyword[return] identifier[self] . identifier[_request_resource] ( identifier[request] ) keyword[in] identifier[self] . identifier[_resource_config]
def is_cors_enabled_on_request(self, request: web.Request) -> bool: """Is `request` is a request for CORS-enabled resource.""" return self._request_resource(request) in self._resource_config
def create_variable(descriptor): """ Creates a variable from a dictionary descriptor """ if descriptor['type'] == 'continuous': return ContinuousVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1)) elif descriptor['type'] == 'bandit': return BanditVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', None)) # bandits variables cannot be repeated elif descriptor['type'] == 'discrete': return DiscreteVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1)) elif descriptor['type'] == 'categorical': return CategoricalVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1)) else: raise InvalidConfigError('Unknown variable type ' + descriptor['type'])
def function[create_variable, parameter[descriptor]]: constant[ Creates a variable from a dictionary descriptor ] if compare[call[name[descriptor]][constant[type]] equal[==] constant[continuous]] begin[:] return[call[name[ContinuousVariable], parameter[call[name[descriptor]][constant[name]], call[name[descriptor]][constant[domain]], call[name[descriptor].get, parameter[constant[dimensionality], constant[1]]]]]]
keyword[def] identifier[create_variable] ( identifier[descriptor] ): literal[string] keyword[if] identifier[descriptor] [ literal[string] ]== literal[string] : keyword[return] identifier[ContinuousVariable] ( identifier[descriptor] [ literal[string] ], identifier[descriptor] [ literal[string] ], identifier[descriptor] . identifier[get] ( literal[string] , literal[int] )) keyword[elif] identifier[descriptor] [ literal[string] ]== literal[string] : keyword[return] identifier[BanditVariable] ( identifier[descriptor] [ literal[string] ], identifier[descriptor] [ literal[string] ], identifier[descriptor] . identifier[get] ( literal[string] , keyword[None] )) keyword[elif] identifier[descriptor] [ literal[string] ]== literal[string] : keyword[return] identifier[DiscreteVariable] ( identifier[descriptor] [ literal[string] ], identifier[descriptor] [ literal[string] ], identifier[descriptor] . identifier[get] ( literal[string] , literal[int] )) keyword[elif] identifier[descriptor] [ literal[string] ]== literal[string] : keyword[return] identifier[CategoricalVariable] ( identifier[descriptor] [ literal[string] ], identifier[descriptor] [ literal[string] ], identifier[descriptor] . identifier[get] ( literal[string] , literal[int] )) keyword[else] : keyword[raise] identifier[InvalidConfigError] ( literal[string] + identifier[descriptor] [ literal[string] ])
def create_variable(descriptor): """ Creates a variable from a dictionary descriptor """ if descriptor['type'] == 'continuous': return ContinuousVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1)) # depends on [control=['if'], data=[]] elif descriptor['type'] == 'bandit': return BanditVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', None)) # bandits variables cannot be repeated # depends on [control=['if'], data=[]] elif descriptor['type'] == 'discrete': return DiscreteVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1)) # depends on [control=['if'], data=[]] elif descriptor['type'] == 'categorical': return CategoricalVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1)) # depends on [control=['if'], data=[]] else: raise InvalidConfigError('Unknown variable type ' + descriptor['type'])
def dot(self, w): """Return the dotproduct between self and another vector.""" return sum([x * y for x, y in zip(self, w)])
def function[dot, parameter[self, w]]: constant[Return the dotproduct between self and another vector.] return[call[name[sum], parameter[<ast.ListComp object at 0x7da207f983a0>]]]
keyword[def] identifier[dot] ( identifier[self] , identifier[w] ): literal[string] keyword[return] identifier[sum] ([ identifier[x] * identifier[y] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[self] , identifier[w] )])
def dot(self, w): """Return the dotproduct between self and another vector.""" return sum([x * y for (x, y) in zip(self, w)])
def drop_all(self, queue_name): """ Drops all the task in the queue. :param queue_name: The name of the queue. Usually handled by the ``Gator`` instance. :type queue_name: string """ # This is very inefficient, but Beanstalk (as of v1.10) doesn't support # deleting an entire tube. queue_length = self.len(queue_name) self._only_watch_from(queue_name) for i in xrange(queue_length): job = self.conn.reserve(timeout=0) if job: job.delete()
def function[drop_all, parameter[self, queue_name]]: constant[ Drops all the task in the queue. :param queue_name: The name of the queue. Usually handled by the ``Gator`` instance. :type queue_name: string ] variable[queue_length] assign[=] call[name[self].len, parameter[name[queue_name]]] call[name[self]._only_watch_from, parameter[name[queue_name]]] for taget[name[i]] in starred[call[name[xrange], parameter[name[queue_length]]]] begin[:] variable[job] assign[=] call[name[self].conn.reserve, parameter[]] if name[job] begin[:] call[name[job].delete, parameter[]]
keyword[def] identifier[drop_all] ( identifier[self] , identifier[queue_name] ): literal[string] identifier[queue_length] = identifier[self] . identifier[len] ( identifier[queue_name] ) identifier[self] . identifier[_only_watch_from] ( identifier[queue_name] ) keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[queue_length] ): identifier[job] = identifier[self] . identifier[conn] . identifier[reserve] ( identifier[timeout] = literal[int] ) keyword[if] identifier[job] : identifier[job] . identifier[delete] ()
def drop_all(self, queue_name): """ Drops all the task in the queue. :param queue_name: The name of the queue. Usually handled by the ``Gator`` instance. :type queue_name: string """ # This is very inefficient, but Beanstalk (as of v1.10) doesn't support # deleting an entire tube. queue_length = self.len(queue_name) self._only_watch_from(queue_name) for i in xrange(queue_length): job = self.conn.reserve(timeout=0) if job: job.delete() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def abort(self, signum): """ Run all abort tasks, then all exit tasks, then exit with error return status""" self.log.info('Signal handler received abort request') self._abort(signum) self._exit(signum) os._exit(1)
def function[abort, parameter[self, signum]]: constant[ Run all abort tasks, then all exit tasks, then exit with error return status] call[name[self].log.info, parameter[constant[Signal handler received abort request]]] call[name[self]._abort, parameter[name[signum]]] call[name[self]._exit, parameter[name[signum]]] call[name[os]._exit, parameter[constant[1]]]
keyword[def] identifier[abort] ( identifier[self] , identifier[signum] ): literal[string] identifier[self] . identifier[log] . identifier[info] ( literal[string] ) identifier[self] . identifier[_abort] ( identifier[signum] ) identifier[self] . identifier[_exit] ( identifier[signum] ) identifier[os] . identifier[_exit] ( literal[int] )
def abort(self, signum): """ Run all abort tasks, then all exit tasks, then exit with error return status""" self.log.info('Signal handler received abort request') self._abort(signum) self._exit(signum) os._exit(1)
def uri_to_graph(uri, **kwargs): ''' :param string uri: :term:`URI` where the RDF data can be found. :rtype: rdflib.Graph or `False` if the URI does not exist :raises skosprovider.exceptions.ProviderUnavailableException: if the getty.edu services are down ''' s = kwargs.get('session', requests.Session()) graph = rdflib.Graph() try: res = s.get(uri) except requests.ConnectionError as e: raise ProviderUnavailableException("URI not available: %s" % uri) if res.status_code == 404: return False graph.parse(data=res.content) return graph
def function[uri_to_graph, parameter[uri]]: constant[ :param string uri: :term:`URI` where the RDF data can be found. :rtype: rdflib.Graph or `False` if the URI does not exist :raises skosprovider.exceptions.ProviderUnavailableException: if the getty.edu services are down ] variable[s] assign[=] call[name[kwargs].get, parameter[constant[session], call[name[requests].Session, parameter[]]]] variable[graph] assign[=] call[name[rdflib].Graph, parameter[]] <ast.Try object at 0x7da1b0ae3eb0> if compare[name[res].status_code equal[==] constant[404]] begin[:] return[constant[False]] call[name[graph].parse, parameter[]] return[name[graph]]
keyword[def] identifier[uri_to_graph] ( identifier[uri] ,** identifier[kwargs] ): literal[string] identifier[s] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[requests] . identifier[Session] ()) identifier[graph] = identifier[rdflib] . identifier[Graph] () keyword[try] : identifier[res] = identifier[s] . identifier[get] ( identifier[uri] ) keyword[except] identifier[requests] . identifier[ConnectionError] keyword[as] identifier[e] : keyword[raise] identifier[ProviderUnavailableException] ( literal[string] % identifier[uri] ) keyword[if] identifier[res] . identifier[status_code] == literal[int] : keyword[return] keyword[False] identifier[graph] . identifier[parse] ( identifier[data] = identifier[res] . identifier[content] ) keyword[return] identifier[graph]
def uri_to_graph(uri, **kwargs): """ :param string uri: :term:`URI` where the RDF data can be found. :rtype: rdflib.Graph or `False` if the URI does not exist :raises skosprovider.exceptions.ProviderUnavailableException: if the getty.edu services are down """ s = kwargs.get('session', requests.Session()) graph = rdflib.Graph() try: res = s.get(uri) # depends on [control=['try'], data=[]] except requests.ConnectionError as e: raise ProviderUnavailableException('URI not available: %s' % uri) # depends on [control=['except'], data=[]] if res.status_code == 404: return False # depends on [control=['if'], data=[]] graph.parse(data=res.content) return graph
def _accumulate(data_list, no_concat=()):
    """Concatenate a list of dicts `(name, array)`.

    You can specify some names which arrays should not be concatenated.
    This is necessary with lists of plots with different sizes.
    """
    accumulator = Accumulator()
    for entry in data_list:
        for key, value in entry.items():
            accumulator.add(key, value)
    # Concatenated output for every accumulated name except the excluded ones.
    combined = {key: accumulator[key]
                for key in accumulator.names if key not in no_concat}
    # Excluded names stay as plain lists: NumPy cannot represent ragged
    # arrays, so variable-length entries must not be concatenated.
    combined.update({key: accumulator.get(key) for key in no_concat})
    return combined
def function[_accumulate, parameter[data_list, no_concat]]: constant[Concatenate a list of dicts `(name, array)`. You can specify some names which arrays should not be concatenated. This is necessary with lists of plots with different sizes. ] variable[acc] assign[=] call[name[Accumulator], parameter[]] for taget[name[data]] in starred[name[data_list]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18dc07ee0>, <ast.Name object at 0x7da18dc04f10>]]] in starred[call[name[data].items, parameter[]]] begin[:] call[name[acc].add, parameter[name[name], name[val]]] variable[out] assign[=] <ast.DictComp object at 0x7da18dc06f50> call[name[out].update, parameter[<ast.DictComp object at 0x7da18dc05d20>]] return[name[out]]
keyword[def] identifier[_accumulate] ( identifier[data_list] , identifier[no_concat] =()): literal[string] identifier[acc] = identifier[Accumulator] () keyword[for] identifier[data] keyword[in] identifier[data_list] : keyword[for] identifier[name] , identifier[val] keyword[in] identifier[data] . identifier[items] (): identifier[acc] . identifier[add] ( identifier[name] , identifier[val] ) identifier[out] ={ identifier[name] : identifier[acc] [ identifier[name] ] keyword[for] identifier[name] keyword[in] identifier[acc] . identifier[names] keyword[if] identifier[name] keyword[not] keyword[in] identifier[no_concat] } identifier[out] . identifier[update] ({ identifier[name] : identifier[acc] . identifier[get] ( identifier[name] ) keyword[for] identifier[name] keyword[in] identifier[no_concat] }) keyword[return] identifier[out]
def _accumulate(data_list, no_concat=()): """Concatenate a list of dicts `(name, array)`. You can specify some names which arrays should not be concatenated. This is necessary with lists of plots with different sizes. """ acc = Accumulator() for data in data_list: for (name, val) in data.items(): acc.add(name, val) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['data']] out = {name: acc[name] for name in acc.names if name not in no_concat} # Some variables should not be concatenated but should be kept as lists. # This is when there can be several arrays of variable length (NumPy # doesn't support ragged arrays). out.update({name: acc.get(name) for name in no_concat}) return out
def print_rev_id(localRepoPath):
    """prints information about the specified local repository to STDOUT.

    Expected method of execution: command-line or shell script call

    Parameters
    ----------
    localRepoPath: string
        Local repository path.

    Returns
    =======
    Nothing as such. subroutine will exit with a state of 0 if everything ran
    OK, and a value of '111' if something went wrong.
    """
    start_path = os.getcwd()
    try:
        log.info("Local repository path: {}".format(localRepoPath))
        os.chdir(localRepoPath)
        log.info("\n== Remote URL")
        os.system('git remote -v')
        log.info("\n== Local Branches")
        os.system("git branch")
        log.info("\n== Most Recent Commit")
        os.system("git log |head -1")
        rv = 0
    except Exception:
        # Catch only genuine errors; the original bare "except:" would also
        # swallow SystemExit and KeyboardInterrupt.
        rv = 111
        log.info("WARNING! get_git_rev_info.print_rev_id() encountered a problem and cannot continue.")
    finally:
        # Always restore the caller's working directory.
        os.chdir(start_path)
    if rv != 0:
        sys.exit(rv)
def function[print_rev_id, parameter[localRepoPath]]: constant[prints information about the specified local repository to STDOUT. Expected method of execution: command-line or shell script call Parameters ---------- localRepoPath: string Local repository path. Returns ======= Nothing as such. subroutine will exit with a state of 0 if everything ran OK, and a value of '111' if something went wrong. ] variable[start_path] assign[=] call[name[os].getcwd, parameter[]] <ast.Try object at 0x7da1b1bbda20>
keyword[def] identifier[print_rev_id] ( identifier[localRepoPath] ): literal[string] identifier[start_path] = identifier[os] . identifier[getcwd] () keyword[try] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[localRepoPath] )) identifier[os] . identifier[chdir] ( identifier[localRepoPath] ) identifier[log] . identifier[info] ( literal[string] ) identifier[os] . identifier[system] ( literal[string] ) identifier[log] . identifier[info] ( literal[string] ) identifier[os] . identifier[system] ( literal[string] ) identifier[log] . identifier[info] ( literal[string] ) identifier[os] . identifier[system] ( literal[string] ) identifier[rv] = literal[int] keyword[except] : identifier[rv] = literal[int] identifier[log] . identifier[info] ( literal[string] ) keyword[finally] : identifier[os] . identifier[chdir] ( identifier[start_path] ) keyword[if] identifier[rv] != literal[int] : identifier[sys] . identifier[exit] ( identifier[rv] )
def print_rev_id(localRepoPath): """prints information about the specified local repository to STDOUT. Expected method of execution: command-line or shell script call Parameters ---------- localRepoPath: string Local repository path. Returns ======= Nothing as such. subroutine will exit with a state of 0 if everything ran OK, and a value of '111' if something went wrong. """ start_path = os.getcwd() try: log.info('Local repository path: {}'.format(localRepoPath)) os.chdir(localRepoPath) log.info('\n== Remote URL') os.system('git remote -v') # log.info("\n== Remote Branches") # os.system("git branch -r") log.info('\n== Local Branches') os.system('git branch') log.info('\n== Most Recent Commit') os.system('git log |head -1') rv = 0 # depends on [control=['try'], data=[]] except: rv = 111 log.info('WARNING! get_git_rev_info.print_rev_id() encountered a problem and cannot continue.') # depends on [control=['except'], data=[]] finally: os.chdir(start_path) if rv != 0: sys.exit(rv) # depends on [control=['if'], data=['rv']]
def dir_tails(self, rr_id: str) -> str:
    """
    Resolve the tails-file directory for a revocation registry identifier.

    :param rr_id: revocation registry identifier of interest
    :return: path to tails dir for input revocation registry identifier
    """
    LOGGER.debug('HolderProver.dir_tails >>>')

    # Reject malformed identifiers before touching the filesystem layout.
    if not ok_rev_reg_id(rr_id):
        LOGGER.debug('HolderProver.dir_tails <!< Bad rev reg id %s', rr_id)
        raise BadIdentifier('Bad rev reg id {}'.format(rr_id))

    result = Tails.dir(self._dir_tails, rr_id)
    LOGGER.debug('HolderProver.dir_tails <<< %s', result)
    return result
def function[dir_tails, parameter[self, rr_id]]: constant[ Return path to the correct directory for the tails file on input revocation registry identifier. :param rr_id: revocation registry identifier of interest :return: path to tails dir for input revocation registry identifier ] call[name[LOGGER].debug, parameter[constant[HolderProver.dir_tails >>>]]] if <ast.UnaryOp object at 0x7da18c4cd9f0> begin[:] call[name[LOGGER].debug, parameter[constant[HolderProver.dir_tails <!< Bad rev reg id %s], name[rr_id]]] <ast.Raise object at 0x7da18c4cdf90> variable[rv] assign[=] call[name[Tails].dir, parameter[name[self]._dir_tails, name[rr_id]]] call[name[LOGGER].debug, parameter[constant[HolderProver.dir_tails <<< %s], name[rv]]] return[name[rv]]
keyword[def] identifier[dir_tails] ( identifier[self] , identifier[rr_id] : identifier[str] )-> identifier[str] : literal[string] identifier[LOGGER] . identifier[debug] ( literal[string] ) keyword[if] keyword[not] identifier[ok_rev_reg_id] ( identifier[rr_id] ): identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rr_id] ) keyword[raise] identifier[BadIdentifier] ( literal[string] . identifier[format] ( identifier[rr_id] )) identifier[rv] = identifier[Tails] . identifier[dir] ( identifier[self] . identifier[_dir_tails] , identifier[rr_id] ) identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rv] ) keyword[return] identifier[rv]
def dir_tails(self, rr_id: str) -> str: """ Return path to the correct directory for the tails file on input revocation registry identifier. :param rr_id: revocation registry identifier of interest :return: path to tails dir for input revocation registry identifier """ LOGGER.debug('HolderProver.dir_tails >>>') if not ok_rev_reg_id(rr_id): LOGGER.debug('HolderProver.dir_tails <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) # depends on [control=['if'], data=[]] rv = Tails.dir(self._dir_tails, rr_id) LOGGER.debug('HolderProver.dir_tails <<< %s', rv) return rv
def get(self, idx, default=None):
    """Return the first contained placeholder whose element has a matching
    *idx* value, falling back to *default* when no placeholder matches.
    """
    matches = (ph for ph in self if ph.element.ph_idx == idx)
    return next(matches, default)
def function[get, parameter[self, idx, default]]: constant[ Return the first placeholder shape with matching *idx* value, or *default* if not found. ] for taget[name[placeholder]] in starred[name[self]] begin[:] if compare[name[placeholder].element.ph_idx equal[==] name[idx]] begin[:] return[name[placeholder]] return[name[default]]
keyword[def] identifier[get] ( identifier[self] , identifier[idx] , identifier[default] = keyword[None] ): literal[string] keyword[for] identifier[placeholder] keyword[in] identifier[self] : keyword[if] identifier[placeholder] . identifier[element] . identifier[ph_idx] == identifier[idx] : keyword[return] identifier[placeholder] keyword[return] identifier[default]
def get(self, idx, default=None): """ Return the first placeholder shape with matching *idx* value, or *default* if not found. """ for placeholder in self: if placeholder.element.ph_idx == idx: return placeholder # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['placeholder']] return default
def pop_context(self):
    """Pops the last set of keyword arguments provided to the processor."""
    processor = getattr(self, 'processor', None)
    if processor is not None:
        # Prefer an explicit pop_context hook; fall back to a plain pop.
        for attr in ('pop_context', 'pop'):
            popper = getattr(processor, attr, None)
            if popper is not None:
                return popper()
    # No processor (or it exposes neither hook): just clear the pending flag.
    if self._pop_next:
        self._pop_next = False
def function[pop_context, parameter[self]]: constant[Pops the last set of keyword arguments provided to the processor.] variable[processor] assign[=] call[name[getattr], parameter[name[self], constant[processor], constant[None]]] if compare[name[processor] is_not constant[None]] begin[:] variable[pop_context] assign[=] call[name[getattr], parameter[name[processor], constant[pop_context], constant[None]]] if compare[name[pop_context] is constant[None]] begin[:] variable[pop_context] assign[=] call[name[getattr], parameter[name[processor], constant[pop], constant[None]]] if compare[name[pop_context] is_not constant[None]] begin[:] return[call[name[pop_context], parameter[]]] if name[self]._pop_next begin[:] name[self]._pop_next assign[=] constant[False]
keyword[def] identifier[pop_context] ( identifier[self] ): literal[string] identifier[processor] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[if] identifier[processor] keyword[is] keyword[not] keyword[None] : identifier[pop_context] = identifier[getattr] ( identifier[processor] , literal[string] , keyword[None] ) keyword[if] identifier[pop_context] keyword[is] keyword[None] : identifier[pop_context] = identifier[getattr] ( identifier[processor] , literal[string] , keyword[None] ) keyword[if] identifier[pop_context] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[pop_context] () keyword[if] identifier[self] . identifier[_pop_next] : identifier[self] . identifier[_pop_next] = keyword[False]
def pop_context(self): """Pops the last set of keyword arguments provided to the processor.""" processor = getattr(self, 'processor', None) if processor is not None: pop_context = getattr(processor, 'pop_context', None) if pop_context is None: pop_context = getattr(processor, 'pop', None) # depends on [control=['if'], data=['pop_context']] if pop_context is not None: return pop_context() # depends on [control=['if'], data=['pop_context']] # depends on [control=['if'], data=['processor']] if self._pop_next: self._pop_next = False # depends on [control=['if'], data=[]]
def registerMUX(self, stm: Union[HdlStatement, Operator], sig: RtlSignal,
                inputs_cnt: int):
    """
    Record a multiplexer resource.

    A mux record has the shape (ResourceMUX, n, m) where n is the bit width
    of this mux and m is the number of possible inputs.
    """
    assert inputs_cnt > 1
    width = sig._dtype.bit_length()
    key = (ResourceMUX, width, inputs_cnt)
    # Count one more mux of this (width, input-count) shape.
    self.resources[key] = self.resources.get(key, 0) + 1
    self.resource_for_object[(stm, sig)] = key
def function[registerMUX, parameter[self, stm, sig, inputs_cnt]]: constant[ mux record is in format (self.MUX, n, m) where n is number of bits of this mux and m is number of possible inputs ] assert[compare[name[inputs_cnt] greater[>] constant[1]]] variable[res] assign[=] name[self].resources variable[w] assign[=] call[name[sig]._dtype.bit_length, parameter[]] variable[k] assign[=] tuple[[<ast.Name object at 0x7da1b03f9630>, <ast.Name object at 0x7da1b03f8ca0>, <ast.Name object at 0x7da1b03fbe50>]] call[name[res]][name[k]] assign[=] binary_operation[call[name[res].get, parameter[name[k], constant[0]]] + constant[1]] call[name[self].resource_for_object][tuple[[<ast.Name object at 0x7da1b03f8e20>, <ast.Name object at 0x7da1b03f8970>]]] assign[=] name[k]
keyword[def] identifier[registerMUX] ( identifier[self] , identifier[stm] : identifier[Union] [ identifier[HdlStatement] , identifier[Operator] ], identifier[sig] : identifier[RtlSignal] , identifier[inputs_cnt] : identifier[int] ): literal[string] keyword[assert] identifier[inputs_cnt] > literal[int] identifier[res] = identifier[self] . identifier[resources] identifier[w] = identifier[sig] . identifier[_dtype] . identifier[bit_length] () identifier[k] =( identifier[ResourceMUX] , identifier[w] , identifier[inputs_cnt] ) identifier[res] [ identifier[k] ]= identifier[res] . identifier[get] ( identifier[k] , literal[int] )+ literal[int] identifier[self] . identifier[resource_for_object] [( identifier[stm] , identifier[sig] )]= identifier[k]
def registerMUX(self, stm: Union[HdlStatement, Operator], sig: RtlSignal, inputs_cnt: int): """ mux record is in format (self.MUX, n, m) where n is number of bits of this mux and m is number of possible inputs """ assert inputs_cnt > 1 res = self.resources w = sig._dtype.bit_length() k = (ResourceMUX, w, inputs_cnt) res[k] = res.get(k, 0) + 1 self.resource_for_object[stm, sig] = k
def get_grid_spatial_dimensions(self, variable):
    """Returns (width, height) for the given variable"""
    dataset_var = self.open_dataset(self.service).variables[variable.variable]
    dims = list(dataset_var.dimensions)
    # Width is the extent along the x dimension, height along the y dimension.
    x_idx = dims.index(variable.x_dimension)
    y_idx = dims.index(variable.y_dimension)
    return dataset_var.shape[x_idx], dataset_var.shape[y_idx]
def function[get_grid_spatial_dimensions, parameter[self, variable]]: constant[Returns (width, height) for the given variable] variable[data] assign[=] call[call[name[self].open_dataset, parameter[name[self].service]].variables][name[variable].variable] variable[dimensions] assign[=] call[name[list], parameter[name[data].dimensions]] return[tuple[[<ast.Subscript object at 0x7da18fe92050>, <ast.Subscript object at 0x7da18fe910c0>]]]
keyword[def] identifier[get_grid_spatial_dimensions] ( identifier[self] , identifier[variable] ): literal[string] identifier[data] = identifier[self] . identifier[open_dataset] ( identifier[self] . identifier[service] ). identifier[variables] [ identifier[variable] . identifier[variable] ] identifier[dimensions] = identifier[list] ( identifier[data] . identifier[dimensions] ) keyword[return] identifier[data] . identifier[shape] [ identifier[dimensions] . identifier[index] ( identifier[variable] . identifier[x_dimension] )], identifier[data] . identifier[shape] [ identifier[dimensions] . identifier[index] ( identifier[variable] . identifier[y_dimension] )]
def get_grid_spatial_dimensions(self, variable): """Returns (width, height) for the given variable""" data = self.open_dataset(self.service).variables[variable.variable] dimensions = list(data.dimensions) return (data.shape[dimensions.index(variable.x_dimension)], data.shape[dimensions.index(variable.y_dimension)])
def ReadFrom(self, byte_stream):
    """Read values from a byte stream.

    Args:
      byte_stream (bytes): byte stream.

    Returns:
      tuple[object, ...]: values copies from the byte stream.

    Raises:
      IOError: if byte stream cannot be read.
      OSError: if byte stream cannot be read.
    """
    try:
        return self._struct.unpack_from(byte_stream)
    except (TypeError, struct.error) as exception:
        # Chain the struct-level error so the root cause stays in the
        # traceback instead of being discarded.
        raise IOError('Unable to read byte stream with error: {0!s}'.format(
            exception)) from exception
def function[ReadFrom, parameter[self, byte_stream]]: constant[Read values from a byte stream. Args: byte_stream (bytes): byte stream. Returns: tuple[object, ...]: values copies from the byte stream. Raises: IOError: if byte stream cannot be read. OSError: if byte stream cannot be read. ] <ast.Try object at 0x7da207f9b580>
keyword[def] identifier[ReadFrom] ( identifier[self] , identifier[byte_stream] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[_struct] . identifier[unpack_from] ( identifier[byte_stream] ) keyword[except] ( identifier[TypeError] , identifier[struct] . identifier[error] ) keyword[as] identifier[exception] : keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[exception] ))
def ReadFrom(self, byte_stream): """Read values from a byte stream. Args: byte_stream (bytes): byte stream. Returns: tuple[object, ...]: values copies from the byte stream. Raises: IOError: if byte stream cannot be read. OSError: if byte stream cannot be read. """ try: return self._struct.unpack_from(byte_stream) # depends on [control=['try'], data=[]] except (TypeError, struct.error) as exception: raise IOError('Unable to read byte stream with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']]
def run(self, message):
    """
    Send *message* over the ZMQ socket and block for the reply.

    This function needs to be fast while staying aware that ZMQ pipes can
    overflow; the timeout grows slowly elsewhere when contention is
    detected. copy=False would shave a little latency, but it drives the
    ZMQ sockets into a broken state once roughly 10k tasks are in flight
    (worse when the serialized buffers are large), so copy=True is used.
    """
    self.zmq_socket.send_pyobj(message, copy=True)
    return self.zmq_socket.recv_pyobj()
def function[run, parameter[self, message]]: constant[ This function needs to be fast at the same time aware of the possibility of ZMQ pipes overflowing. The timeout increases slowly if contention is detected on ZMQ pipes. We could set copy=False and get slightly better latency but this results in ZMQ sockets reaching a broken state once there are ~10k tasks in flight. This issue can be magnified if each the serialized buffer itself is larger. ] call[name[self].zmq_socket.send_pyobj, parameter[name[message]]] variable[reply] assign[=] call[name[self].zmq_socket.recv_pyobj, parameter[]] return[name[reply]]
keyword[def] identifier[run] ( identifier[self] , identifier[message] ): literal[string] identifier[self] . identifier[zmq_socket] . identifier[send_pyobj] ( identifier[message] , identifier[copy] = keyword[True] ) identifier[reply] = identifier[self] . identifier[zmq_socket] . identifier[recv_pyobj] () keyword[return] identifier[reply]
def run(self, message): """ This function needs to be fast at the same time aware of the possibility of ZMQ pipes overflowing. The timeout increases slowly if contention is detected on ZMQ pipes. We could set copy=False and get slightly better latency but this results in ZMQ sockets reaching a broken state once there are ~10k tasks in flight. This issue can be magnified if each the serialized buffer itself is larger. """ self.zmq_socket.send_pyobj(message, copy=True) reply = self.zmq_socket.recv_pyobj() return reply
def generate_clinamen(self, input_word, list_of_dict_words, swerve):
    """
    Generate a clinamen: words whose Damerau-Levenshtein distance from
    *input_word* equals *swerve*.
    """
    target_len = len(input_word)
    # Pre-filter by length (for efficiency) and drop multi-word entries.
    candidates = [
        word for word in list_of_dict_words
        if len(word) < target_len + 1
        and len(word) > target_len / 2
        and '_' not in word
    ]
    matched = [
        word for word in candidates
        if self.damerau_levenshtein_distance(input_word, word) == swerve
    ]
    return {'input': input_word, 'results': matched, 'category': 'clinamen'}
def function[generate_clinamen, parameter[self, input_word, list_of_dict_words, swerve]]: constant[ Generate a clinamen. Here we looks for words via the damerau levenshtein distance with a distance of 2. ] variable[results] assign[=] list[[]] variable[selected_list] assign[=] list[[]] for taget[name[i]] in starred[name[list_of_dict_words]] begin[:] if <ast.BoolOp object at 0x7da20c794670> begin[:] if compare[constant[_] <ast.NotIn object at 0x7da2590d7190> name[i]] begin[:] call[name[selected_list].append, parameter[name[i]]] for taget[name[i]] in starred[name[selected_list]] begin[:] variable[match] assign[=] call[name[self].damerau_levenshtein_distance, parameter[name[input_word], name[i]]] if compare[name[match] equal[==] name[swerve]] begin[:] call[name[results].append, parameter[name[i]]] variable[results] assign[=] dictionary[[<ast.Constant object at 0x7da20c796170>, <ast.Constant object at 0x7da20c796890>, <ast.Constant object at 0x7da20c7966b0>], [<ast.Name object at 0x7da20c7950f0>, <ast.Name object at 0x7da20c795a20>, <ast.Constant object at 0x7da20c794c10>]] return[name[results]]
keyword[def] identifier[generate_clinamen] ( identifier[self] , identifier[input_word] , identifier[list_of_dict_words] , identifier[swerve] ): literal[string] identifier[results] =[] identifier[selected_list] =[] keyword[for] identifier[i] keyword[in] identifier[list_of_dict_words] : keyword[if] identifier[len] ( identifier[i] )< identifier[len] ( identifier[input_word] )+ literal[int] keyword[and] identifier[len] ( identifier[i] )> identifier[len] ( identifier[input_word] )/ literal[int] : keyword[if] literal[string] keyword[not] keyword[in] identifier[i] : identifier[selected_list] . identifier[append] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[selected_list] : identifier[match] = identifier[self] . identifier[damerau_levenshtein_distance] ( identifier[input_word] , identifier[i] ) keyword[if] identifier[match] == identifier[swerve] : identifier[results] . identifier[append] ( identifier[i] ) identifier[results] ={ literal[string] : identifier[input_word] , literal[string] : identifier[results] , literal[string] : literal[string] } keyword[return] identifier[results]
def generate_clinamen(self, input_word, list_of_dict_words, swerve): """ Generate a clinamen. Here we looks for words via the damerau levenshtein distance with a distance of 2. """ results = [] selected_list = [] for i in list_of_dict_words: #produce a subset for efficency if len(i) < len(input_word) + 1 and len(i) > len(input_word) / 2: if '_' not in i: selected_list.append(i) # depends on [control=['if'], data=['i']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] for i in selected_list: match = self.damerau_levenshtein_distance(input_word, i) if match == swerve: results.append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] results = {'input': input_word, 'results': results, 'category': 'clinamen'} return results
def avail_locations(call=None):
    '''
    Return a list of the hypervisors (nodes) which this Proxmox PVE machine
    manages.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-proxmox-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    # Could also use get_resources_nodes, but the speed is about the same.
    return {node['node']: node for node in query('get', 'nodes')}
def function[avail_locations, parameter[call]]: constant[ Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config ] if compare[name[call] equal[==] constant[action]] begin[:] <ast.Raise object at 0x7da18f58f610> variable[nodes] assign[=] call[name[query], parameter[constant[get], constant[nodes]]] variable[ret] assign[=] dictionary[[], []] for taget[name[node]] in starred[name[nodes]] begin[:] variable[name] assign[=] call[name[node]][constant[node]] call[name[ret]][name[name]] assign[=] name[node] return[name[ret]]
keyword[def] identifier[avail_locations] ( identifier[call] = keyword[None] ): literal[string] keyword[if] identifier[call] == literal[string] : keyword[raise] identifier[SaltCloudSystemExit] ( literal[string] literal[string] ) identifier[nodes] = identifier[query] ( literal[string] , literal[string] ) identifier[ret] ={} keyword[for] identifier[node] keyword[in] identifier[nodes] : identifier[name] = identifier[node] [ literal[string] ] identifier[ret] [ identifier[name] ]= identifier[node] keyword[return] identifier[ret]
def avail_locations(call=None): """ Return a list of the hypervisors (nodes) which this Proxmox PVE machine manages CLI Example: .. code-block:: bash salt-cloud --list-locations my-proxmox-config """ if call == 'action': raise SaltCloudSystemExit('The avail_locations function must be called with -f or --function, or with the --list-locations option') # depends on [control=['if'], data=[]] # could also use the get_resources_nodes but speed is ~the same nodes = query('get', 'nodes') ret = {} for node in nodes: name = node['node'] ret[name] = node # depends on [control=['for'], data=['node']] return ret
def datapath(self):
    """ Get an item's data path. """
    raw = self._fields['path']
    if not raw:
        # A stopped item may carry no base_dir; fall back to the session
        # directory, appending the item name for single-file items.
        raw = self.fetch('directory')
        if raw and not self._fields['is_multi_file']:
            raw = os.path.join(raw, self._fields['name'])
    return os.path.expanduser(fmt.to_unicode(raw))
def function[datapath, parameter[self]]: constant[ Get an item's data path. ] variable[path] assign[=] call[name[self]._fields][constant[path]] if <ast.UnaryOp object at 0x7da1b12c7c40> begin[:] variable[path] assign[=] call[name[self].fetch, parameter[constant[directory]]] if <ast.BoolOp object at 0x7da20c6ab1f0> begin[:] variable[path] assign[=] call[name[os].path.join, parameter[name[path], call[name[self]._fields][constant[name]]]] return[call[name[os].path.expanduser, parameter[call[name[fmt].to_unicode, parameter[name[path]]]]]]
keyword[def] identifier[datapath] ( identifier[self] ): literal[string] identifier[path] = identifier[self] . identifier[_fields] [ literal[string] ] keyword[if] keyword[not] identifier[path] : identifier[path] = identifier[self] . identifier[fetch] ( literal[string] ) keyword[if] identifier[path] keyword[and] keyword[not] identifier[self] . identifier[_fields] [ literal[string] ]: identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[self] . identifier[_fields] [ literal[string] ]) keyword[return] identifier[os] . identifier[path] . identifier[expanduser] ( identifier[fmt] . identifier[to_unicode] ( identifier[path] ))
def datapath(self): """ Get an item's data path. """ path = self._fields['path'] if not path: # stopped item with no base_dir? path = self.fetch('directory') if path and (not self._fields['is_multi_file']): path = os.path.join(path, self._fields['name']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return os.path.expanduser(fmt.to_unicode(path))
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to docstrings
    if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing
    doc : list of tuple
        Numpydoc's type list section
    header : str
        Markdown header prepended to the generated list

    Returns
    -------
    list of str
        Markdown formatted type list
    """
    lines = []
    # Names already emitted in the first pass, so the second pass can skip them.
    docced = set()
    lines.append(header)
    try:
        # First pass: every parameter mentioned in the numpydoc section.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        # Treat a missing annotation like a missing attribute
                        # so it falls through to the docstring-type branch.
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation
            if len(unannotated) > 0:
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                # Fall back to the docstring-declared types for these names.
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f"  {' '.join(description)}\n\n")
        # Second pass: annotated parameters the docstring never mentioned.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        # Best-effort: documentation generation must not crash the build.
        # NOTE(review): printing and continuing hides failures; consider logging.
        print(e)

    return lines if len(lines) > 1 else []
def function[type_list, parameter[signature, doc, header]]: constant[ Construct a list of types, preferring type annotations to docstrings if they are available. Parameters ---------- signature : Signature Signature of thing doc : list of tuple Numpydoc's type list section Returns ------- list of str Markdown formatted type list ] variable[lines] assign[=] list[[]] variable[docced] assign[=] call[name[set], parameter[]] call[name[lines].append, parameter[name[header]]] <ast.Try object at 0x7da2046233d0> return[<ast.IfExp object at 0x7da207f02560>]
keyword[def] identifier[type_list] ( identifier[signature] , identifier[doc] , identifier[header] ): literal[string] identifier[lines] =[] identifier[docced] = identifier[set] () identifier[lines] . identifier[append] ( identifier[header] ) keyword[try] : keyword[for] identifier[names] , identifier[types] , identifier[description] keyword[in] identifier[doc] : identifier[names] , identifier[types] = identifier[_get_names] ( identifier[names] , identifier[types] ) identifier[unannotated] =[] keyword[for] identifier[name] keyword[in] identifier[names] : identifier[docced] . identifier[add] ( identifier[name] ) keyword[try] : identifier[typ] = identifier[signature] . identifier[parameters] [ identifier[name] ]. identifier[annotation] keyword[if] identifier[typ] == identifier[inspect] . identifier[_empty] : keyword[raise] identifier[AttributeError] identifier[default] = identifier[signature] . identifier[parameters] [ identifier[name] ]. identifier[default] identifier[type_string] = identifier[string_annotation] ( identifier[typ] , identifier[default] ) identifier[lines] . identifier[append] ( literal[string] ) identifier[lines] . identifier[append] ( literal[string] ) keyword[except] ( identifier[AttributeError] , identifier[KeyError] ): identifier[unannotated] . identifier[append] ( identifier[name] ) keyword[if] identifier[len] ( identifier[unannotated] )> literal[int] : identifier[lines] . identifier[append] ( literal[string] ) identifier[lines] . identifier[append] ( literal[string] . identifier[join] ( literal[string] keyword[for] identifier[name] keyword[in] identifier[unannotated] )) keyword[if] identifier[types] != literal[string] keyword[and] identifier[len] ( identifier[unannotated] )> literal[int] : identifier[lines] . identifier[append] ( literal[string] ) identifier[lines] . identifier[append] ( literal[string] ) identifier[lines] . 
identifier[append] ( literal[string] ) keyword[for] identifier[names] , identifier[types] , identifier[description] keyword[in] identifier[doc] : identifier[names] , identifier[types] = identifier[_get_names] ( identifier[names] , identifier[types] ) keyword[for] identifier[name] keyword[in] identifier[names] : keyword[if] identifier[name] keyword[not] keyword[in] identifier[docced] : keyword[try] : identifier[typ] = identifier[signature] . identifier[parameters] [ identifier[name] ]. identifier[annotation] identifier[default] = identifier[signature] . identifier[parameters] [ identifier[name] ]. identifier[default] identifier[type_string] = identifier[string_annotation] ( identifier[typ] , identifier[default] ) identifier[lines] . identifier[append] ( literal[string] ) identifier[lines] . identifier[append] ( literal[string] ) keyword[except] ( identifier[AttributeError] , identifier[KeyError] ): identifier[lines] . identifier[append] ( literal[string] ) identifier[lines] . identifier[append] ( literal[string] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( identifier[e] ) keyword[return] identifier[lines] keyword[if] identifier[len] ( identifier[lines] )> literal[int] keyword[else] []
def type_list(signature, doc, header): """ Construct a list of types, preferring type annotations to docstrings if they are available. Parameters ---------- signature : Signature Signature of thing doc : list of tuple Numpydoc's type list section Returns ------- list of str Markdown formatted type list """ lines = [] docced = set() lines.append(header) try: for (names, types, description) in doc: (names, types) = _get_names(names, types) unannotated = [] for name in names: docced.add(name) try: typ = signature.parameters[name].annotation if typ == inspect._empty: raise AttributeError # depends on [control=['if'], data=[]] default = signature.parameters[name].default type_string = string_annotation(typ, default) lines.append(f'- `{name}`: {type_string}') lines.append('\n\n') # depends on [control=['try'], data=[]] except (AttributeError, KeyError): unannotated.append(name) # No annotation # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['name']] if len(unannotated) > 0: lines.append('- ') lines.append(', '.join((f'`{name}`' for name in unannotated))) if types != '' and len(unannotated) > 0: lines.append(f': {mangle_types(types)}') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] lines.append('\n\n') lines.append(f" {' '.join(description)}\n\n") # depends on [control=['for'], data=[]] for (names, types, description) in doc: (names, types) = _get_names(names, types) for name in names: if name not in docced: try: typ = signature.parameters[name].annotation default = signature.parameters[name].default type_string = string_annotation(typ, default) lines.append(f'- `{name}`: {type_string}') lines.append('\n\n') # depends on [control=['try'], data=[]] except (AttributeError, KeyError): lines.append(f'- `{name}`') lines.append('\n\n') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=[]] # depends 
on [control=['try'], data=[]] except Exception as e: print(e) # depends on [control=['except'], data=['e']] return lines if len(lines) > 1 else []
def _reload_maybe(self): """ Reload the config if the config\ model has been updated. This is called\ once on every request by the middleware.\ Should not be called directly. """ ConfigModel = apps.get_model('djconfig.Config') data = dict( ConfigModel.objects .filter(key='_updated_at') .values_list('key', 'value')) if (not hasattr(self, '_updated_at') or self._updated_at != data.get('_updated_at')): self._reload()
def function[_reload_maybe, parameter[self]]: constant[ Reload the config if the config model has been updated. This is called once on every request by the middleware. Should not be called directly. ] variable[ConfigModel] assign[=] call[name[apps].get_model, parameter[constant[djconfig.Config]]] variable[data] assign[=] call[name[dict], parameter[call[call[name[ConfigModel].objects.filter, parameter[]].values_list, parameter[constant[key], constant[value]]]]] if <ast.BoolOp object at 0x7da1b1035fc0> begin[:] call[name[self]._reload, parameter[]]
keyword[def] identifier[_reload_maybe] ( identifier[self] ): literal[string] identifier[ConfigModel] = identifier[apps] . identifier[get_model] ( literal[string] ) identifier[data] = identifier[dict] ( identifier[ConfigModel] . identifier[objects] . identifier[filter] ( identifier[key] = literal[string] ) . identifier[values_list] ( literal[string] , literal[string] )) keyword[if] ( keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[or] identifier[self] . identifier[_updated_at] != identifier[data] . identifier[get] ( literal[string] )): identifier[self] . identifier[_reload] ()
def _reload_maybe(self): """ Reload the config if the config model has been updated. This is called once on every request by the middleware. Should not be called directly. """ ConfigModel = apps.get_model('djconfig.Config') data = dict(ConfigModel.objects.filter(key='_updated_at').values_list('key', 'value')) if not hasattr(self, '_updated_at') or self._updated_at != data.get('_updated_at'): self._reload() # depends on [control=['if'], data=[]]
def write(self, outfile, clobber=True, **kwargs): """ Write the current object catalog to FITS file. Parameters: ----------- filename : the FITS file to write. clobber : remove existing file kwargs : passed to fitsio.write Returns: -------- None """ fitsio.write(outfile,self.data,clobber=True,**kwargs)
def function[write, parameter[self, outfile, clobber]]: constant[ Write the current object catalog to FITS file. Parameters: ----------- filename : the FITS file to write. clobber : remove existing file kwargs : passed to fitsio.write Returns: -------- None ] call[name[fitsio].write, parameter[name[outfile], name[self].data]]
keyword[def] identifier[write] ( identifier[self] , identifier[outfile] , identifier[clobber] = keyword[True] ,** identifier[kwargs] ): literal[string] identifier[fitsio] . identifier[write] ( identifier[outfile] , identifier[self] . identifier[data] , identifier[clobber] = keyword[True] ,** identifier[kwargs] )
def write(self, outfile, clobber=True, **kwargs): """ Write the current object catalog to FITS file. Parameters: ----------- filename : the FITS file to write. clobber : remove existing file kwargs : passed to fitsio.write Returns: -------- None """ fitsio.write(outfile, self.data, clobber=True, **kwargs)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw", desc_as_label=False, label=None, high_speed=False, utc=False): """ Retrieves data from eDNA history for a given tag. :param tag_name: fully-qualified (site.service.tag) eDNA tag :param start_time: must be in format mm/dd/yy hh:mm:ss :param end_time: must be in format mm/dd/yy hh:mm:ss :param period: specify the number of seconds for the pull interval :param mode: "raw", "snap", "avg", "interp", "max", "min" See eDNA documentation for more information. :param desc_as_label: use the tag description as the column name instead of the full tag :param label: supply a custom label to use as the DataFrame column name :param high_speed: if True, pull millisecond data :param utc: if True, use the integer time format instead of DateTime :return: a pandas DataFrame with timestamp, value, and status """ # Check if the point even exists if not DoesIDExist(tag_name): warnings.warn("WARNING- " + tag_name + " does not exist or " + "connection was dropped. 
Try again if tag does exist.") return pd.DataFrame() # Define all required variables in the correct ctypes format szPoint = c_char_p(tag_name.encode('utf-8')) tStart = c_long(StringToUTCTime(start_time)) tEnd = c_long(StringToUTCTime(end_time)) tPeriod = c_long(period) pulKey = c_ulong(0) # Initialize the data pull using the specified pulKey, which is an # identifier that tells eDNA which data pull is occurring mode = mode.lower().strip() if not high_speed: if mode == "avg": nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) if mode == "interp": nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) if mode == "min": nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) if mode == "max": nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) if mode == "snap": nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) else: nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey)) time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet) else: nStartMillis = c_ushort(0) nEndMillis = c_ushort(0) nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis, tEnd, nEndMillis, byref(pulKey)) time_, val, stat = _GetNextHSHistUTC(pulKey, nRet) # The history request must be cancelled to free up network resources dna_dll.DnaCancelHistRequest(pulKey) # To construct the pandas DataFrame, the tag name will be used as the # column name, and the index (which is in the strange eDNA format) must be # converted to an actual DateTime d = {tag_name + ' Status': stat, tag_name: val} df = pd.DataFrame(data=d, index=time_) if not utc: if not high_speed: df.index = pd.to_datetime(df.index, unit="s") else: df.index = pd.to_datetime(df.index, unit="ms") if df.empty: warnings.warn('WARNING- No data retrieved for ' + tag_name + '. 
' + 'Check eDNA connection, ensure that the start time is ' + 'not later than the end time, verify that the ' + 'DateTime formatting matches eDNA requirements, and ' + 'check that data exists in the query time period.') # Check if the user would rather use the description as the column name if desc_as_label or label: if label: new_label = label else: new_label = _GetLabel(tag_name) df.rename(inplace=True, columns={tag_name: new_label, tag_name + " Status": new_label + " Status"}) return df
def function[GetHist, parameter[tag_name, start_time, end_time, period, mode, desc_as_label, label, high_speed, utc]]: constant[ Retrieves data from eDNA history for a given tag. :param tag_name: fully-qualified (site.service.tag) eDNA tag :param start_time: must be in format mm/dd/yy hh:mm:ss :param end_time: must be in format mm/dd/yy hh:mm:ss :param period: specify the number of seconds for the pull interval :param mode: "raw", "snap", "avg", "interp", "max", "min" See eDNA documentation for more information. :param desc_as_label: use the tag description as the column name instead of the full tag :param label: supply a custom label to use as the DataFrame column name :param high_speed: if True, pull millisecond data :param utc: if True, use the integer time format instead of DateTime :return: a pandas DataFrame with timestamp, value, and status ] if <ast.UnaryOp object at 0x7da1b1e5bb80> begin[:] call[name[warnings].warn, parameter[binary_operation[binary_operation[binary_operation[constant[WARNING- ] + name[tag_name]] + constant[ does not exist or ]] + constant[connection was dropped. 
Try again if tag does exist.]]]] return[call[name[pd].DataFrame, parameter[]]] variable[szPoint] assign[=] call[name[c_char_p], parameter[call[name[tag_name].encode, parameter[constant[utf-8]]]]] variable[tStart] assign[=] call[name[c_long], parameter[call[name[StringToUTCTime], parameter[name[start_time]]]]] variable[tEnd] assign[=] call[name[c_long], parameter[call[name[StringToUTCTime], parameter[name[end_time]]]]] variable[tPeriod] assign[=] call[name[c_long], parameter[name[period]]] variable[pulKey] assign[=] call[name[c_ulong], parameter[constant[0]]] variable[mode] assign[=] call[call[name[mode].lower, parameter[]].strip, parameter[]] if <ast.UnaryOp object at 0x7da1b1e5b010> begin[:] if compare[name[mode] equal[==] constant[avg]] begin[:] variable[nRet] assign[=] call[name[dna_dll].DnaGetHistAvgUTC, parameter[name[szPoint], name[tStart], name[tEnd], name[tPeriod], call[name[byref], parameter[name[pulKey]]]]] if compare[name[mode] equal[==] constant[interp]] begin[:] variable[nRet] assign[=] call[name[dna_dll].DnaGetHistInterpUTC, parameter[name[szPoint], name[tStart], name[tEnd], name[tPeriod], call[name[byref], parameter[name[pulKey]]]]] if compare[name[mode] equal[==] constant[min]] begin[:] variable[nRet] assign[=] call[name[dna_dll].DnaGetHistMinUTC, parameter[name[szPoint], name[tStart], name[tEnd], name[tPeriod], call[name[byref], parameter[name[pulKey]]]]] if compare[name[mode] equal[==] constant[max]] begin[:] variable[nRet] assign[=] call[name[dna_dll].DnaGetHistMaxUTC, parameter[name[szPoint], name[tStart], name[tEnd], name[tPeriod], call[name[byref], parameter[name[pulKey]]]]] if compare[name[mode] equal[==] constant[snap]] begin[:] variable[nRet] assign[=] call[name[dna_dll].DnaGetHistSnapUTC, parameter[name[szPoint], name[tStart], name[tEnd], name[tPeriod], call[name[byref], parameter[name[pulKey]]]]] <ast.Tuple object at 0x7da1b1e59d50> assign[=] call[name[_GetNextHistSmallUTC], parameter[name[pulKey], name[nRet]]] 
call[name[dna_dll].DnaCancelHistRequest, parameter[name[pulKey]]] variable[d] assign[=] dictionary[[<ast.BinOp object at 0x7da1b1e593f0>, <ast.Name object at 0x7da1b1e59360>], [<ast.Name object at 0x7da1b1e59330>, <ast.Name object at 0x7da1b1e59300>]] variable[df] assign[=] call[name[pd].DataFrame, parameter[]] if <ast.UnaryOp object at 0x7da1b1e590f0> begin[:] if <ast.UnaryOp object at 0x7da1b1e59060> begin[:] name[df].index assign[=] call[name[pd].to_datetime, parameter[name[df].index]] if name[df].empty begin[:] call[name[warnings].warn, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[WARNING- No data retrieved for ] + name[tag_name]] + constant[. ]] + constant[Check eDNA connection, ensure that the start time is ]] + constant[not later than the end time, verify that the ]] + constant[DateTime formatting matches eDNA requirements, and ]] + constant[check that data exists in the query time period.]]]] if <ast.BoolOp object at 0x7da1b1e58820> begin[:] if name[label] begin[:] variable[new_label] assign[=] name[label] call[name[df].rename, parameter[]] return[name[df]]
keyword[def] identifier[GetHist] ( identifier[tag_name] , identifier[start_time] , identifier[end_time] , identifier[period] = literal[int] , identifier[mode] = literal[string] , identifier[desc_as_label] = keyword[False] , identifier[label] = keyword[None] , identifier[high_speed] = keyword[False] , identifier[utc] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[DoesIDExist] ( identifier[tag_name] ): identifier[warnings] . identifier[warn] ( literal[string] + identifier[tag_name] + literal[string] + literal[string] ) keyword[return] identifier[pd] . identifier[DataFrame] () identifier[szPoint] = identifier[c_char_p] ( identifier[tag_name] . identifier[encode] ( literal[string] )) identifier[tStart] = identifier[c_long] ( identifier[StringToUTCTime] ( identifier[start_time] )) identifier[tEnd] = identifier[c_long] ( identifier[StringToUTCTime] ( identifier[end_time] )) identifier[tPeriod] = identifier[c_long] ( identifier[period] ) identifier[pulKey] = identifier[c_ulong] ( literal[int] ) identifier[mode] = identifier[mode] . identifier[lower] (). identifier[strip] () keyword[if] keyword[not] identifier[high_speed] : keyword[if] identifier[mode] == literal[string] : identifier[nRet] = identifier[dna_dll] . identifier[DnaGetHistAvgUTC] ( identifier[szPoint] , identifier[tStart] , identifier[tEnd] , identifier[tPeriod] , identifier[byref] ( identifier[pulKey] )) keyword[if] identifier[mode] == literal[string] : identifier[nRet] = identifier[dna_dll] . identifier[DnaGetHistInterpUTC] ( identifier[szPoint] , identifier[tStart] , identifier[tEnd] , identifier[tPeriod] , identifier[byref] ( identifier[pulKey] )) keyword[if] identifier[mode] == literal[string] : identifier[nRet] = identifier[dna_dll] . identifier[DnaGetHistMinUTC] ( identifier[szPoint] , identifier[tStart] , identifier[tEnd] , identifier[tPeriod] , identifier[byref] ( identifier[pulKey] )) keyword[if] identifier[mode] == literal[string] : identifier[nRet] = identifier[dna_dll] . 
identifier[DnaGetHistMaxUTC] ( identifier[szPoint] , identifier[tStart] , identifier[tEnd] , identifier[tPeriod] , identifier[byref] ( identifier[pulKey] )) keyword[if] identifier[mode] == literal[string] : identifier[nRet] = identifier[dna_dll] . identifier[DnaGetHistSnapUTC] ( identifier[szPoint] , identifier[tStart] , identifier[tEnd] , identifier[tPeriod] , identifier[byref] ( identifier[pulKey] )) keyword[else] : identifier[nRet] = identifier[dna_dll] . identifier[DnaGetHistRawUTC] ( identifier[szPoint] , identifier[tStart] , identifier[tEnd] , identifier[byref] ( identifier[pulKey] )) identifier[time_] , identifier[val] , identifier[stat] = identifier[_GetNextHistSmallUTC] ( identifier[pulKey] , identifier[nRet] ) keyword[else] : identifier[nStartMillis] = identifier[c_ushort] ( literal[int] ) identifier[nEndMillis] = identifier[c_ushort] ( literal[int] ) identifier[nRet] = identifier[dna_dll] . identifier[DnaGetHSHistRawUTC] ( identifier[szPoint] , identifier[tStart] , identifier[nStartMillis] , identifier[tEnd] , identifier[nEndMillis] , identifier[byref] ( identifier[pulKey] )) identifier[time_] , identifier[val] , identifier[stat] = identifier[_GetNextHSHistUTC] ( identifier[pulKey] , identifier[nRet] ) identifier[dna_dll] . identifier[DnaCancelHistRequest] ( identifier[pulKey] ) identifier[d] ={ identifier[tag_name] + literal[string] : identifier[stat] , identifier[tag_name] : identifier[val] } identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[d] , identifier[index] = identifier[time_] ) keyword[if] keyword[not] identifier[utc] : keyword[if] keyword[not] identifier[high_speed] : identifier[df] . identifier[index] = identifier[pd] . identifier[to_datetime] ( identifier[df] . identifier[index] , identifier[unit] = literal[string] ) keyword[else] : identifier[df] . identifier[index] = identifier[pd] . identifier[to_datetime] ( identifier[df] . 
identifier[index] , identifier[unit] = literal[string] ) keyword[if] identifier[df] . identifier[empty] : identifier[warnings] . identifier[warn] ( literal[string] + identifier[tag_name] + literal[string] + literal[string] + literal[string] + literal[string] + literal[string] ) keyword[if] identifier[desc_as_label] keyword[or] identifier[label] : keyword[if] identifier[label] : identifier[new_label] = identifier[label] keyword[else] : identifier[new_label] = identifier[_GetLabel] ( identifier[tag_name] ) identifier[df] . identifier[rename] ( identifier[inplace] = keyword[True] , identifier[columns] ={ identifier[tag_name] : identifier[new_label] , identifier[tag_name] + literal[string] : identifier[new_label] + literal[string] }) keyword[return] identifier[df]
def GetHist(tag_name, start_time, end_time, period=5, mode='raw', desc_as_label=False, label=None, high_speed=False, utc=False): """ Retrieves data from eDNA history for a given tag. :param tag_name: fully-qualified (site.service.tag) eDNA tag :param start_time: must be in format mm/dd/yy hh:mm:ss :param end_time: must be in format mm/dd/yy hh:mm:ss :param period: specify the number of seconds for the pull interval :param mode: "raw", "snap", "avg", "interp", "max", "min" See eDNA documentation for more information. :param desc_as_label: use the tag description as the column name instead of the full tag :param label: supply a custom label to use as the DataFrame column name :param high_speed: if True, pull millisecond data :param utc: if True, use the integer time format instead of DateTime :return: a pandas DataFrame with timestamp, value, and status """ # Check if the point even exists if not DoesIDExist(tag_name): warnings.warn('WARNING- ' + tag_name + ' does not exist or ' + 'connection was dropped. 
Try again if tag does exist.') return pd.DataFrame() # depends on [control=['if'], data=[]] # Define all required variables in the correct ctypes format szPoint = c_char_p(tag_name.encode('utf-8')) tStart = c_long(StringToUTCTime(start_time)) tEnd = c_long(StringToUTCTime(end_time)) tPeriod = c_long(period) pulKey = c_ulong(0) # Initialize the data pull using the specified pulKey, which is an # identifier that tells eDNA which data pull is occurring mode = mode.lower().strip() if not high_speed: if mode == 'avg': nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) # depends on [control=['if'], data=[]] if mode == 'interp': nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) # depends on [control=['if'], data=[]] if mode == 'min': nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) # depends on [control=['if'], data=[]] if mode == 'max': nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) # depends on [control=['if'], data=[]] if mode == 'snap': nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey)) # depends on [control=['if'], data=[]] else: nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey)) (time_, val, stat) = _GetNextHistSmallUTC(pulKey, nRet) # depends on [control=['if'], data=[]] else: nStartMillis = c_ushort(0) nEndMillis = c_ushort(0) nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis, tEnd, nEndMillis, byref(pulKey)) (time_, val, stat) = _GetNextHSHistUTC(pulKey, nRet) # The history request must be cancelled to free up network resources dna_dll.DnaCancelHistRequest(pulKey) # To construct the pandas DataFrame, the tag name will be used as the # column name, and the index (which is in the strange eDNA format) must be # converted to an actual DateTime d = {tag_name + ' Status': stat, tag_name: val} df = pd.DataFrame(data=d, index=time_) if not utc: if not high_speed: df.index = 
pd.to_datetime(df.index, unit='s') # depends on [control=['if'], data=[]] else: df.index = pd.to_datetime(df.index, unit='ms') # depends on [control=['if'], data=[]] if df.empty: warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' + 'Check eDNA connection, ensure that the start time is ' + 'not later than the end time, verify that the ' + 'DateTime formatting matches eDNA requirements, and ' + 'check that data exists in the query time period.') # depends on [control=['if'], data=[]] # Check if the user would rather use the description as the column name if desc_as_label or label: if label: new_label = label # depends on [control=['if'], data=[]] else: new_label = _GetLabel(tag_name) df.rename(inplace=True, columns={tag_name: new_label, tag_name + ' Status': new_label + ' Status'}) # depends on [control=['if'], data=[]] return df
def from_torch_layers(cls, module_graph, variable): """Recover something like neural net layers from PyTorch Module's and the compute graph from a Variable. Example output for a multi-layer RNN. We confusingly assign shared embedding values to the encoder, but ordered next to the decoder. rnns.0.linear.module.weight_raw rnns.0 rnns.0.linear.module.bias rnns.0 rnns.1.linear.module.weight_raw rnns.1 rnns.1.linear.module.bias rnns.1 rnns.2.linear.module.weight_raw rnns.2 rnns.2.linear.module.bias rnns.2 rnns.3.linear.module.weight_raw rnns.3 rnns.3.linear.module.bias rnns.3 decoder.weight encoder decoder.bias decoder """ # TODO: We're currently not using this, but I left it here incase we want to resurrect! - CVP torch = util.get_module("torch", "Could not import torch") module_nodes_by_hash = {id(n): n for n in module_graph.nodes} module_parameter_nodes = [ n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)] names_by_pid = {id(n.obj): n.name for n in module_parameter_nodes} reachable_param_nodes = module_graph[0].reachable_descendents() reachable_params = {} module_reachable_params = {} names = {} for pid, reachable_nodes in reachable_param_nodes.items(): node = module_nodes_by_hash[pid] if not isinstance(node.obj, torch.nn.Module): continue module = node.obj reachable_params = {} # by object id module_reachable_params[id(module)] = reachable_params names[node.name] = set() for reachable_hash in reachable_nodes: reachable = module_nodes_by_hash[reachable_hash] if isinstance(reachable.obj, torch.nn.Parameter): param = reachable.obj reachable_params[id(param)] = param names[node.name].add(names_by_pid[id(param)]) # we look for correspondences between sets of parameters used in subtrees of the # computation graph and sets of parameters contained in subtrees of the module # graph node_depths = {id(n): d for n, d in module_graph[0].descendent_bfs()} parameter_module_names = {} parameter_modules = {} for param_node in (n for n in module_graph.nodes if 
isinstance(n.obj, torch.nn.Parameter)): pid = id(param_node.obj) best_node = None best_depth = None best_reachable_params = None for node in module_graph.nodes: if not isinstance(node.obj, torch.nn.Module): continue module = node.obj reachable_params = module_reachable_params[id(module)] if pid in reachable_params: depth = node_depths[id(node)] if best_node is None or (len(reachable_params), depth) <= (len(best_reachable_params), best_depth): best_node = node best_depth = depth best_reachable_params = reachable_params parameter_modules[pid] = best_node parameter_module_names[param_node.name] = best_node.name # contains all parameters but only a minimal set of modules necessary # to contain them (and which ideally correspond to conceptual layers) reduced_module_graph = cls() rmg_ids = itertools.count() rmg_root = Node(id=next(rmg_ids), node=module_graph[0]) reduced_module_graph.add_node(rmg_root) reduced_module_graph.root = rmg_root rmg_nodes_by_pid = {} module_nodes_by_pid = {id(n.obj): n for n in module_graph.nodes} compute_graph, compute_node_vars = cls.from_torch_compute_graph( variable) for node, _ in reversed(list(compute_graph[0].ancestor_bfs())): param = compute_node_vars.get(node.id) pid = id(param) if not isinstance(param, torch.nn.Parameter): continue if pid not in module_nodes_by_pid: # not all Parameters that occur in the compute graph come from the Module graph continue # add the nodes in the order we want to display them on the frontend mid = id(parameter_modules[pid].obj) if mid in rmg_nodes_by_pid: rmg_module = rmg_nodes_by_pid[mid] else: rmg_module = rmg_nodes_by_pid[mid] = Node( id=next(rmg_ids), node=module_nodes_by_pid[mid]) reduced_module_graph.add_node(rmg_module) reduced_module_graph.add_edge(rmg_root, rmg_module) rmg_param = Node(id=next(rmg_ids), node=module_nodes_by_pid[pid]) rmg_nodes_by_pid[pid] = rmg_param reduced_module_graph.add_node(rmg_param) reduced_module_graph.add_edge(rmg_module, rmg_param) return reduced_module_graph
def function[from_torch_layers, parameter[cls, module_graph, variable]]: constant[Recover something like neural net layers from PyTorch Module's and the compute graph from a Variable. Example output for a multi-layer RNN. We confusingly assign shared embedding values to the encoder, but ordered next to the decoder. rnns.0.linear.module.weight_raw rnns.0 rnns.0.linear.module.bias rnns.0 rnns.1.linear.module.weight_raw rnns.1 rnns.1.linear.module.bias rnns.1 rnns.2.linear.module.weight_raw rnns.2 rnns.2.linear.module.bias rnns.2 rnns.3.linear.module.weight_raw rnns.3 rnns.3.linear.module.bias rnns.3 decoder.weight encoder decoder.bias decoder ] variable[torch] assign[=] call[name[util].get_module, parameter[constant[torch], constant[Could not import torch]]] variable[module_nodes_by_hash] assign[=] <ast.DictComp object at 0x7da1b08a60b0> variable[module_parameter_nodes] assign[=] <ast.ListComp object at 0x7da1b08a6740> variable[names_by_pid] assign[=] <ast.DictComp object at 0x7da1b08a4ac0> variable[reachable_param_nodes] assign[=] call[call[name[module_graph]][constant[0]].reachable_descendents, parameter[]] variable[reachable_params] assign[=] dictionary[[], []] variable[module_reachable_params] assign[=] dictionary[[], []] variable[names] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b08a4250>, <ast.Name object at 0x7da1b08a4040>]]] in starred[call[name[reachable_param_nodes].items, parameter[]]] begin[:] variable[node] assign[=] call[name[module_nodes_by_hash]][name[pid]] if <ast.UnaryOp object at 0x7da1b08a5750> begin[:] continue variable[module] assign[=] name[node].obj variable[reachable_params] assign[=] dictionary[[], []] call[name[module_reachable_params]][call[name[id], parameter[name[module]]]] assign[=] name[reachable_params] call[name[names]][name[node].name] assign[=] call[name[set], parameter[]] for taget[name[reachable_hash]] in starred[name[reachable_nodes]] begin[:] variable[reachable] assign[=] 
call[name[module_nodes_by_hash]][name[reachable_hash]] if call[name[isinstance], parameter[name[reachable].obj, name[torch].nn.Parameter]] begin[:] variable[param] assign[=] name[reachable].obj call[name[reachable_params]][call[name[id], parameter[name[param]]]] assign[=] name[param] call[call[name[names]][name[node].name].add, parameter[call[name[names_by_pid]][call[name[id], parameter[name[param]]]]]] variable[node_depths] assign[=] <ast.DictComp object at 0x7da1b08a7d90> variable[parameter_module_names] assign[=] dictionary[[], []] variable[parameter_modules] assign[=] dictionary[[], []] for taget[name[param_node]] in starred[<ast.GeneratorExp object at 0x7da1b08a74c0>] begin[:] variable[pid] assign[=] call[name[id], parameter[name[param_node].obj]] variable[best_node] assign[=] constant[None] variable[best_depth] assign[=] constant[None] variable[best_reachable_params] assign[=] constant[None] for taget[name[node]] in starred[name[module_graph].nodes] begin[:] if <ast.UnaryOp object at 0x7da1b08a4fd0> begin[:] continue variable[module] assign[=] name[node].obj variable[reachable_params] assign[=] call[name[module_reachable_params]][call[name[id], parameter[name[module]]]] if compare[name[pid] in name[reachable_params]] begin[:] variable[depth] assign[=] call[name[node_depths]][call[name[id], parameter[name[node]]]] if <ast.BoolOp object at 0x7da1b08a6800> begin[:] variable[best_node] assign[=] name[node] variable[best_depth] assign[=] name[depth] variable[best_reachable_params] assign[=] name[reachable_params] call[name[parameter_modules]][name[pid]] assign[=] name[best_node] call[name[parameter_module_names]][name[param_node].name] assign[=] name[best_node].name variable[reduced_module_graph] assign[=] call[name[cls], parameter[]] variable[rmg_ids] assign[=] call[name[itertools].count, parameter[]] variable[rmg_root] assign[=] call[name[Node], parameter[]] call[name[reduced_module_graph].add_node, parameter[name[rmg_root]]] name[reduced_module_graph].root 
assign[=] name[rmg_root] variable[rmg_nodes_by_pid] assign[=] dictionary[[], []] variable[module_nodes_by_pid] assign[=] <ast.DictComp object at 0x7da2044c3160> <ast.Tuple object at 0x7da2044c0790> assign[=] call[name[cls].from_torch_compute_graph, parameter[name[variable]]] for taget[tuple[[<ast.Name object at 0x7da2044c0cd0>, <ast.Name object at 0x7da2044c24a0>]]] in starred[call[name[reversed], parameter[call[name[list], parameter[call[call[name[compute_graph]][constant[0]].ancestor_bfs, parameter[]]]]]]] begin[:] variable[param] assign[=] call[name[compute_node_vars].get, parameter[name[node].id]] variable[pid] assign[=] call[name[id], parameter[name[param]]] if <ast.UnaryOp object at 0x7da204345cc0> begin[:] continue if compare[name[pid] <ast.NotIn object at 0x7da2590d7190> name[module_nodes_by_pid]] begin[:] continue variable[mid] assign[=] call[name[id], parameter[call[name[parameter_modules]][name[pid]].obj]] if compare[name[mid] in name[rmg_nodes_by_pid]] begin[:] variable[rmg_module] assign[=] call[name[rmg_nodes_by_pid]][name[mid]] variable[rmg_param] assign[=] call[name[Node], parameter[]] call[name[rmg_nodes_by_pid]][name[pid]] assign[=] name[rmg_param] call[name[reduced_module_graph].add_node, parameter[name[rmg_param]]] call[name[reduced_module_graph].add_edge, parameter[name[rmg_module], name[rmg_param]]] return[name[reduced_module_graph]]
keyword[def] identifier[from_torch_layers] ( identifier[cls] , identifier[module_graph] , identifier[variable] ): literal[string] identifier[torch] = identifier[util] . identifier[get_module] ( literal[string] , literal[string] ) identifier[module_nodes_by_hash] ={ identifier[id] ( identifier[n] ): identifier[n] keyword[for] identifier[n] keyword[in] identifier[module_graph] . identifier[nodes] } identifier[module_parameter_nodes] =[ identifier[n] keyword[for] identifier[n] keyword[in] identifier[module_graph] . identifier[nodes] keyword[if] identifier[isinstance] ( identifier[n] . identifier[obj] , identifier[torch] . identifier[nn] . identifier[Parameter] )] identifier[names_by_pid] ={ identifier[id] ( identifier[n] . identifier[obj] ): identifier[n] . identifier[name] keyword[for] identifier[n] keyword[in] identifier[module_parameter_nodes] } identifier[reachable_param_nodes] = identifier[module_graph] [ literal[int] ]. identifier[reachable_descendents] () identifier[reachable_params] ={} identifier[module_reachable_params] ={} identifier[names] ={} keyword[for] identifier[pid] , identifier[reachable_nodes] keyword[in] identifier[reachable_param_nodes] . identifier[items] (): identifier[node] = identifier[module_nodes_by_hash] [ identifier[pid] ] keyword[if] keyword[not] identifier[isinstance] ( identifier[node] . identifier[obj] , identifier[torch] . identifier[nn] . identifier[Module] ): keyword[continue] identifier[module] = identifier[node] . identifier[obj] identifier[reachable_params] ={} identifier[module_reachable_params] [ identifier[id] ( identifier[module] )]= identifier[reachable_params] identifier[names] [ identifier[node] . identifier[name] ]= identifier[set] () keyword[for] identifier[reachable_hash] keyword[in] identifier[reachable_nodes] : identifier[reachable] = identifier[module_nodes_by_hash] [ identifier[reachable_hash] ] keyword[if] identifier[isinstance] ( identifier[reachable] . identifier[obj] , identifier[torch] . identifier[nn] . 
identifier[Parameter] ): identifier[param] = identifier[reachable] . identifier[obj] identifier[reachable_params] [ identifier[id] ( identifier[param] )]= identifier[param] identifier[names] [ identifier[node] . identifier[name] ]. identifier[add] ( identifier[names_by_pid] [ identifier[id] ( identifier[param] )]) identifier[node_depths] ={ identifier[id] ( identifier[n] ): identifier[d] keyword[for] identifier[n] , identifier[d] keyword[in] identifier[module_graph] [ literal[int] ]. identifier[descendent_bfs] ()} identifier[parameter_module_names] ={} identifier[parameter_modules] ={} keyword[for] identifier[param_node] keyword[in] ( identifier[n] keyword[for] identifier[n] keyword[in] identifier[module_graph] . identifier[nodes] keyword[if] identifier[isinstance] ( identifier[n] . identifier[obj] , identifier[torch] . identifier[nn] . identifier[Parameter] )): identifier[pid] = identifier[id] ( identifier[param_node] . identifier[obj] ) identifier[best_node] = keyword[None] identifier[best_depth] = keyword[None] identifier[best_reachable_params] = keyword[None] keyword[for] identifier[node] keyword[in] identifier[module_graph] . identifier[nodes] : keyword[if] keyword[not] identifier[isinstance] ( identifier[node] . identifier[obj] , identifier[torch] . identifier[nn] . identifier[Module] ): keyword[continue] identifier[module] = identifier[node] . 
identifier[obj] identifier[reachable_params] = identifier[module_reachable_params] [ identifier[id] ( identifier[module] )] keyword[if] identifier[pid] keyword[in] identifier[reachable_params] : identifier[depth] = identifier[node_depths] [ identifier[id] ( identifier[node] )] keyword[if] identifier[best_node] keyword[is] keyword[None] keyword[or] ( identifier[len] ( identifier[reachable_params] ), identifier[depth] )<=( identifier[len] ( identifier[best_reachable_params] ), identifier[best_depth] ): identifier[best_node] = identifier[node] identifier[best_depth] = identifier[depth] identifier[best_reachable_params] = identifier[reachable_params] identifier[parameter_modules] [ identifier[pid] ]= identifier[best_node] identifier[parameter_module_names] [ identifier[param_node] . identifier[name] ]= identifier[best_node] . identifier[name] identifier[reduced_module_graph] = identifier[cls] () identifier[rmg_ids] = identifier[itertools] . identifier[count] () identifier[rmg_root] = identifier[Node] ( identifier[id] = identifier[next] ( identifier[rmg_ids] ), identifier[node] = identifier[module_graph] [ literal[int] ]) identifier[reduced_module_graph] . identifier[add_node] ( identifier[rmg_root] ) identifier[reduced_module_graph] . identifier[root] = identifier[rmg_root] identifier[rmg_nodes_by_pid] ={} identifier[module_nodes_by_pid] ={ identifier[id] ( identifier[n] . identifier[obj] ): identifier[n] keyword[for] identifier[n] keyword[in] identifier[module_graph] . identifier[nodes] } identifier[compute_graph] , identifier[compute_node_vars] = identifier[cls] . identifier[from_torch_compute_graph] ( identifier[variable] ) keyword[for] identifier[node] , identifier[_] keyword[in] identifier[reversed] ( identifier[list] ( identifier[compute_graph] [ literal[int] ]. identifier[ancestor_bfs] ())): identifier[param] = identifier[compute_node_vars] . identifier[get] ( identifier[node] . 
identifier[id] ) identifier[pid] = identifier[id] ( identifier[param] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[param] , identifier[torch] . identifier[nn] . identifier[Parameter] ): keyword[continue] keyword[if] identifier[pid] keyword[not] keyword[in] identifier[module_nodes_by_pid] : keyword[continue] identifier[mid] = identifier[id] ( identifier[parameter_modules] [ identifier[pid] ]. identifier[obj] ) keyword[if] identifier[mid] keyword[in] identifier[rmg_nodes_by_pid] : identifier[rmg_module] = identifier[rmg_nodes_by_pid] [ identifier[mid] ] keyword[else] : identifier[rmg_module] = identifier[rmg_nodes_by_pid] [ identifier[mid] ]= identifier[Node] ( identifier[id] = identifier[next] ( identifier[rmg_ids] ), identifier[node] = identifier[module_nodes_by_pid] [ identifier[mid] ]) identifier[reduced_module_graph] . identifier[add_node] ( identifier[rmg_module] ) identifier[reduced_module_graph] . identifier[add_edge] ( identifier[rmg_root] , identifier[rmg_module] ) identifier[rmg_param] = identifier[Node] ( identifier[id] = identifier[next] ( identifier[rmg_ids] ), identifier[node] = identifier[module_nodes_by_pid] [ identifier[pid] ]) identifier[rmg_nodes_by_pid] [ identifier[pid] ]= identifier[rmg_param] identifier[reduced_module_graph] . identifier[add_node] ( identifier[rmg_param] ) identifier[reduced_module_graph] . identifier[add_edge] ( identifier[rmg_module] , identifier[rmg_param] ) keyword[return] identifier[reduced_module_graph]
def from_torch_layers(cls, module_graph, variable): """Recover something like neural net layers from PyTorch Module's and the compute graph from a Variable. Example output for a multi-layer RNN. We confusingly assign shared embedding values to the encoder, but ordered next to the decoder. rnns.0.linear.module.weight_raw rnns.0 rnns.0.linear.module.bias rnns.0 rnns.1.linear.module.weight_raw rnns.1 rnns.1.linear.module.bias rnns.1 rnns.2.linear.module.weight_raw rnns.2 rnns.2.linear.module.bias rnns.2 rnns.3.linear.module.weight_raw rnns.3 rnns.3.linear.module.bias rnns.3 decoder.weight encoder decoder.bias decoder """ # TODO: We're currently not using this, but I left it here incase we want to resurrect! - CVP torch = util.get_module('torch', 'Could not import torch') module_nodes_by_hash = {id(n): n for n in module_graph.nodes} module_parameter_nodes = [n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)] names_by_pid = {id(n.obj): n.name for n in module_parameter_nodes} reachable_param_nodes = module_graph[0].reachable_descendents() reachable_params = {} module_reachable_params = {} names = {} for (pid, reachable_nodes) in reachable_param_nodes.items(): node = module_nodes_by_hash[pid] if not isinstance(node.obj, torch.nn.Module): continue # depends on [control=['if'], data=[]] module = node.obj reachable_params = {} # by object id module_reachable_params[id(module)] = reachable_params names[node.name] = set() for reachable_hash in reachable_nodes: reachable = module_nodes_by_hash[reachable_hash] if isinstance(reachable.obj, torch.nn.Parameter): param = reachable.obj reachable_params[id(param)] = param names[node.name].add(names_by_pid[id(param)]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['reachable_hash']] # depends on [control=['for'], data=[]] # we look for correspondences between sets of parameters used in subtrees of the # computation graph and sets of parameters contained in subtrees of the module # graph 
node_depths = {id(n): d for (n, d) in module_graph[0].descendent_bfs()} parameter_module_names = {} parameter_modules = {} for param_node in (n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)): pid = id(param_node.obj) best_node = None best_depth = None best_reachable_params = None for node in module_graph.nodes: if not isinstance(node.obj, torch.nn.Module): continue # depends on [control=['if'], data=[]] module = node.obj reachable_params = module_reachable_params[id(module)] if pid in reachable_params: depth = node_depths[id(node)] if best_node is None or (len(reachable_params), depth) <= (len(best_reachable_params), best_depth): best_node = node best_depth = depth best_reachable_params = reachable_params # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['reachable_params']] # depends on [control=['for'], data=['node']] parameter_modules[pid] = best_node parameter_module_names[param_node.name] = best_node.name # depends on [control=['for'], data=['param_node']] # contains all parameters but only a minimal set of modules necessary # to contain them (and which ideally correspond to conceptual layers) reduced_module_graph = cls() rmg_ids = itertools.count() rmg_root = Node(id=next(rmg_ids), node=module_graph[0]) reduced_module_graph.add_node(rmg_root) reduced_module_graph.root = rmg_root rmg_nodes_by_pid = {} module_nodes_by_pid = {id(n.obj): n for n in module_graph.nodes} (compute_graph, compute_node_vars) = cls.from_torch_compute_graph(variable) for (node, _) in reversed(list(compute_graph[0].ancestor_bfs())): param = compute_node_vars.get(node.id) pid = id(param) if not isinstance(param, torch.nn.Parameter): continue # depends on [control=['if'], data=[]] if pid not in module_nodes_by_pid: # not all Parameters that occur in the compute graph come from the Module graph continue # depends on [control=['if'], data=[]] # add the nodes in the order we want to display them on the frontend mid = 
id(parameter_modules[pid].obj) if mid in rmg_nodes_by_pid: rmg_module = rmg_nodes_by_pid[mid] # depends on [control=['if'], data=['mid', 'rmg_nodes_by_pid']] else: rmg_module = rmg_nodes_by_pid[mid] = Node(id=next(rmg_ids), node=module_nodes_by_pid[mid]) reduced_module_graph.add_node(rmg_module) reduced_module_graph.add_edge(rmg_root, rmg_module) rmg_param = Node(id=next(rmg_ids), node=module_nodes_by_pid[pid]) rmg_nodes_by_pid[pid] = rmg_param reduced_module_graph.add_node(rmg_param) reduced_module_graph.add_edge(rmg_module, rmg_param) # depends on [control=['for'], data=[]] return reduced_module_graph
def trigger(self): """Triggers the device. The trigger method sens a GET(group execute trigger) command byte to the device. """ ibsta = self._lib.ibtrg(self._device) self._check_status(ibsta)
def function[trigger, parameter[self]]: constant[Triggers the device. The trigger method sens a GET(group execute trigger) command byte to the device. ] variable[ibsta] assign[=] call[name[self]._lib.ibtrg, parameter[name[self]._device]] call[name[self]._check_status, parameter[name[ibsta]]]
keyword[def] identifier[trigger] ( identifier[self] ): literal[string] identifier[ibsta] = identifier[self] . identifier[_lib] . identifier[ibtrg] ( identifier[self] . identifier[_device] ) identifier[self] . identifier[_check_status] ( identifier[ibsta] )
def trigger(self): """Triggers the device. The trigger method sens a GET(group execute trigger) command byte to the device. """ ibsta = self._lib.ibtrg(self._device) self._check_status(ibsta)
def parse_control_options(controls, variable_defaults=None):
  """ Parse a set of control options.

  Args:
    controls: The dictionary of control options.
    variable_defaults: If the controls are for a Query with variables, then this is the
        default variable values defined in the Query module. The options in the controls
        parameter can override these but if a variable has no 'value' property then we
        fall back to these.

  Returns:
    - the HTML for the controls.
    - the default values for the controls as a dict.
    - the list of DIV IDs of the controls.

  Raises:
    Exception: if a control's configuration is invalid (empty choices,
        missing/inverted slider bounds, or an unknown control type).
  """
  controls_html = ''
  control_defaults = {}
  control_ids = []
  div_id = _html.Html.next_id()
  if variable_defaults is None:
    variable_defaults = {}
  for varname, control in list(controls.items()):
    label = control.get('label', varname)
    control_id = div_id + '__' + varname
    control_ids.append(control_id)
    value = control.get('value', variable_defaults.get(varname, None))
    # The user should usually specify the type but we will default to 'textbox' for strings
    # and 'set' for lists. (Renamed local from 'type' to 'control_type' so we
    # don't shadow the builtin.)
    if isinstance(value, basestring):
      control_type = 'textbox'
    elif isinstance(value, list):
      control_type = 'set'
    else:
      control_type = None
    control_type = control.get('type', control_type)

    if control_type == 'picker':
      # Single-select dropdown; defaults to the first choice when no value given.
      choices = control.get('choices', value)
      if not isinstance(choices, list) or len(choices) == 0:
        raise Exception('picker control must specify a nonempty set of choices')
      if value is None:
        value = choices[0]
      choices_html = ''
      # Iterate values directly; the enumerate index was unused here.
      for choice in choices:
        choices_html += "<option value=\"%s\" %s>%s</option>" % \
            (choice, ("selected=\"selected\"" if choice == value else ''), choice)
      control_html = "{label}<select disabled id=\"{id}\">{choices}</select>" \
          .format(label=label, id=control_id, choices=choices_html)
    elif control_type == 'set':  # Multi-picker; implemented as checkboxes.
      # TODO(gram): consider using "name" property of the control to group checkboxes. That
      # way we can save the code of constructing and parsing control Ids with sequential
      # numbers in it. Multiple checkboxes can share the same name.
      choices = control.get('choices', value)
      if not isinstance(choices, list) or len(choices) == 0:
        raise Exception('set control must specify a nonempty set of choices')
      if value is None:
        value = choices
      choices_html = ''
      control_ids[-1] = '%s:%d' % (control_id, len(choices))  # replace ID to include count.
      for i, choice in enumerate(choices):
        checked = choice in value
        choice_id = '%s:%d' % (control_id, i)
        # TODO(gram): we may want a 'Submit/Refresh button as we may not want to rerun
        # query on each checkbox change.
        choices_html += """
          <div>
            <label>
              <input type="checkbox" id="{id}" value="{choice}" {checked} disabled>
              {choice}
            </label>
          </div>
        """.format(id=choice_id, choice=choice, checked="checked" if checked else '')
      control_html = "{label}<div>{choices}</div>".format(label=label, choices=choices_html)
    elif control_type == 'checkbox':
      control_html = """
        <label>
          <input type="checkbox" id="{id}" {checked} disabled>
          {label}
        </label>
      """.format(label=label, id=control_id, checked="checked" if value else '')
    elif control_type == 'slider':
      min_ = control.get('min', None)
      max_ = control.get('max', None)
      if min_ is None or max_ is None:
        raise Exception('slider control must specify a min and max value')
      if max_ <= min_:
        raise Exception('slider control must specify a min value less than max value')
      # Integer bounds default to unit steps; float bounds to a tenth of the span.
      step = control.get('step', 1 if isinstance(min_, int) and isinstance(max_, int)
                         else (float(max_ - min_) / 10.0))
      if value is None:
        value = min_
      control_html = """
        {label}
        <input type="text" class="gchart-slider_value" id="{id}_value" value="{value}" disabled/>
        <input type="range" class="gchart-slider" id="{id}" min="{min}" max="{max}" step="{step}"
            value="{value}" disabled/>
      """.format(label=label, id=control_id, value=value, min=min_, max=max_, step=step)
    elif control_type == 'textbox':
      if value is None:
        value = ''
      control_html = "{label}<input type=\"text\" value=\"{value}\" id=\"{id}\" disabled/>" \
          .format(label=label, value=value, id=control_id)
    else:
      raise Exception(
          'Unknown control type %s (expected picker, slider, checkbox, textbox or set)'
          % control_type)

    control_defaults[varname] = value
    controls_html += "<div class=\"gchart-control\">{control}</div>\n" \
        .format(control=control_html)
  controls_html = "<div class=\"gchart-controls\">{controls}</div>".format(controls=controls_html)
  return controls_html, control_defaults, control_ids
def function[parse_control_options, parameter[controls, variable_defaults]]: constant[ Parse a set of control options. Args: controls: The dictionary of control options. variable_defaults: If the controls are for a Query with variables, then this is the default variable values defined in the Query module. The options in the controls parameter can override these but if a variable has no 'value' property then we fall back to these. Returns: - the HTML for the controls. - the default values for the controls as a dict. - the list of DIV IDs of the controls. ] variable[controls_html] assign[=] constant[] variable[control_defaults] assign[=] dictionary[[], []] variable[control_ids] assign[=] list[[]] variable[div_id] assign[=] call[name[_html].Html.next_id, parameter[]] if compare[name[variable_defaults] is constant[None]] begin[:] variable[variable_defaults] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da20e955fc0>, <ast.Name object at 0x7da20e955150>]]] in starred[call[name[list], parameter[call[name[controls].items, parameter[]]]]] begin[:] variable[label] assign[=] call[name[control].get, parameter[constant[label], name[varname]]] variable[control_id] assign[=] binary_operation[binary_operation[name[div_id] + constant[__]] + name[varname]] call[name[control_ids].append, parameter[name[control_id]]] variable[value] assign[=] call[name[control].get, parameter[constant[value], call[name[variable_defaults].get, parameter[name[varname], constant[None]]]]] if call[name[isinstance], parameter[name[value], name[basestring]]] begin[:] variable[type] assign[=] constant[textbox] variable[type] assign[=] call[name[control].get, parameter[constant[type], name[type]]] if compare[name[type] equal[==] constant[picker]] begin[:] variable[choices] assign[=] call[name[control].get, parameter[constant[choices], name[value]]] if <ast.BoolOp object at 0x7da20e9577f0> begin[:] <ast.Raise object at 0x7da20e955180> if compare[name[value] is constant[None]] begin[:] 
variable[value] assign[=] call[name[choices]][constant[0]] variable[choices_html] assign[=] constant[] for taget[tuple[[<ast.Name object at 0x7da20e957fd0>, <ast.Name object at 0x7da20e957580>]]] in starred[call[name[enumerate], parameter[name[choices]]]] begin[:] <ast.AugAssign object at 0x7da20e957ca0> variable[control_html] assign[=] call[constant[{label}<select disabled id="{id}">{choices}</select>].format, parameter[]] call[name[control_defaults]][name[varname]] assign[=] name[value] <ast.AugAssign object at 0x7da1b113e710> variable[controls_html] assign[=] call[constant[<div class="gchart-controls">{controls}</div>].format, parameter[]] return[tuple[[<ast.Name object at 0x7da1b113ef80>, <ast.Name object at 0x7da1b113f610>, <ast.Name object at 0x7da1b113fd00>]]]
keyword[def] identifier[parse_control_options] ( identifier[controls] , identifier[variable_defaults] = keyword[None] ): literal[string] identifier[controls_html] = literal[string] identifier[control_defaults] ={} identifier[control_ids] =[] identifier[div_id] = identifier[_html] . identifier[Html] . identifier[next_id] () keyword[if] identifier[variable_defaults] keyword[is] keyword[None] : identifier[variable_defaults] ={} keyword[for] identifier[varname] , identifier[control] keyword[in] identifier[list] ( identifier[controls] . identifier[items] ()): identifier[label] = identifier[control] . identifier[get] ( literal[string] , identifier[varname] ) identifier[control_id] = identifier[div_id] + literal[string] + identifier[varname] identifier[control_ids] . identifier[append] ( identifier[control_id] ) identifier[value] = identifier[control] . identifier[get] ( literal[string] , identifier[variable_defaults] . identifier[get] ( identifier[varname] , keyword[None] )) keyword[if] identifier[isinstance] ( identifier[value] , identifier[basestring] ): identifier[type] = literal[string] keyword[elif] identifier[isinstance] ( identifier[value] , identifier[list] ): identifier[type] = literal[string] keyword[else] : identifier[type] = keyword[None] identifier[type] = identifier[control] . identifier[get] ( literal[string] , identifier[type] ) keyword[if] identifier[type] == literal[string] : identifier[choices] = identifier[control] . 
identifier[get] ( literal[string] , identifier[value] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[choices] , identifier[list] ) keyword[or] identifier[len] ( identifier[choices] )== literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[value] keyword[is] keyword[None] : identifier[value] = identifier[choices] [ literal[int] ] identifier[choices_html] = literal[string] keyword[for] identifier[i] , identifier[choice] keyword[in] identifier[enumerate] ( identifier[choices] ): identifier[choices_html] += literal[string] %( identifier[choice] ,( literal[string] keyword[if] identifier[choice] == identifier[value] keyword[else] literal[string] ), identifier[choice] ) identifier[control_html] = literal[string] . identifier[format] ( identifier[label] = identifier[label] , identifier[id] = identifier[control_id] , identifier[choices] = identifier[choices_html] ) keyword[elif] identifier[type] == literal[string] : identifier[choices] = identifier[control] . identifier[get] ( literal[string] , identifier[value] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[choices] , identifier[list] ) keyword[or] identifier[len] ( identifier[choices] )== literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[value] keyword[is] keyword[None] : identifier[value] = identifier[choices] identifier[choices_html] = literal[string] identifier[control_ids] [- literal[int] ]= literal[string] %( identifier[control_id] , identifier[len] ( identifier[choices] )) keyword[for] identifier[i] , identifier[choice] keyword[in] identifier[enumerate] ( identifier[choices] ): identifier[checked] = identifier[choice] keyword[in] identifier[value] identifier[choice_id] = literal[string] %( identifier[control_id] , identifier[i] ) identifier[choices_html] += literal[string] . 
identifier[format] ( identifier[id] = identifier[choice_id] , identifier[choice] = identifier[choice] , identifier[checked] = literal[string] keyword[if] identifier[checked] keyword[else] literal[string] ) identifier[control_html] = literal[string] . identifier[format] ( identifier[label] = identifier[label] , identifier[choices] = identifier[choices_html] ) keyword[elif] identifier[type] == literal[string] : identifier[control_html] = literal[string] . identifier[format] ( identifier[label] = identifier[label] , identifier[id] = identifier[control_id] , identifier[checked] = literal[string] keyword[if] identifier[value] keyword[else] literal[string] ) keyword[elif] identifier[type] == literal[string] : identifier[min_] = identifier[control] . identifier[get] ( literal[string] , keyword[None] ) identifier[max_] = identifier[control] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[min_] keyword[is] keyword[None] keyword[or] identifier[max_] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[max_] <= identifier[min_] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[step] = identifier[control] . identifier[get] ( literal[string] , literal[int] keyword[if] identifier[isinstance] ( identifier[min_] , identifier[int] ) keyword[and] identifier[isinstance] ( identifier[max_] , identifier[int] ) keyword[else] ( identifier[float] ( identifier[max_] - identifier[min_] )/ literal[int] )) keyword[if] identifier[value] keyword[is] keyword[None] : identifier[value] = identifier[min_] identifier[control_html] = literal[string] . 
identifier[format] ( identifier[label] = identifier[label] , identifier[id] = identifier[control_id] , identifier[value] = identifier[value] , identifier[min] = identifier[min_] , identifier[max] = identifier[max_] , identifier[step] = identifier[step] ) keyword[elif] identifier[type] == literal[string] : keyword[if] identifier[value] keyword[is] keyword[None] : identifier[value] = literal[string] identifier[control_html] = literal[string] . identifier[format] ( identifier[label] = identifier[label] , identifier[value] = identifier[value] , identifier[id] = identifier[control_id] ) keyword[else] : keyword[raise] identifier[Exception] ( literal[string] % identifier[type] ) identifier[control_defaults] [ identifier[varname] ]= identifier[value] identifier[controls_html] += literal[string] . identifier[format] ( identifier[control] = identifier[control_html] ) identifier[controls_html] = literal[string] . identifier[format] ( identifier[controls] = identifier[controls_html] ) keyword[return] identifier[controls_html] , identifier[control_defaults] , identifier[control_ids]
def parse_control_options(controls, variable_defaults=None): """ Parse a set of control options. Args: controls: The dictionary of control options. variable_defaults: If the controls are for a Query with variables, then this is the default variable values defined in the Query module. The options in the controls parameter can override these but if a variable has no 'value' property then we fall back to these. Returns: - the HTML for the controls. - the default values for the controls as a dict. - the list of DIV IDs of the controls. """ controls_html = '' control_defaults = {} control_ids = [] div_id = _html.Html.next_id() if variable_defaults is None: variable_defaults = {} # depends on [control=['if'], data=['variable_defaults']] for (varname, control) in list(controls.items()): label = control.get('label', varname) control_id = div_id + '__' + varname control_ids.append(control_id) value = control.get('value', variable_defaults.get(varname, None)) # The user should usually specify the type but we will default to 'textbox' for strings # and 'set' for lists. 
if isinstance(value, basestring): type = 'textbox' # depends on [control=['if'], data=[]] elif isinstance(value, list): type = 'set' # depends on [control=['if'], data=[]] else: type = None type = control.get('type', type) if type == 'picker': choices = control.get('choices', value) if not isinstance(choices, list) or len(choices) == 0: raise Exception('picker control must specify a nonempty set of choices') # depends on [control=['if'], data=[]] if value is None: value = choices[0] # depends on [control=['if'], data=['value']] choices_html = '' for (i, choice) in enumerate(choices): choices_html += '<option value="%s" %s>%s</option>' % (choice, 'selected="selected"' if choice == value else '', choice) # depends on [control=['for'], data=[]] control_html = '{label}<select disabled id="{id}">{choices}</select>'.format(label=label, id=control_id, choices=choices_html) # depends on [control=['if'], data=[]] elif type == 'set': # Multi-picker; implemented as checkboxes. # TODO(gram): consider using "name" property of the control to group checkboxes. That # way we can save the code of constructing and parsing control Ids with sequential # numbers in it. Multiple checkboxes can share the same name. choices = control.get('choices', value) if not isinstance(choices, list) or len(choices) == 0: raise Exception('set control must specify a nonempty set of choices') # depends on [control=['if'], data=[]] if value is None: value = choices # depends on [control=['if'], data=['value']] choices_html = '' control_ids[-1] = '%s:%d' % (control_id, len(choices)) # replace ID to include count. for (i, choice) in enumerate(choices): checked = choice in value choice_id = '%s:%d' % (control_id, i) # TODO(gram): we may want a 'Submit/Refresh button as we may not want to rerun # query on each checkbox change. 
choices_html += '\n <div>\n <label>\n <input type="checkbox" id="{id}" value="{choice}" {checked} disabled>\n {choice}\n </label>\n </div>\n '.format(id=choice_id, choice=choice, checked='checked' if checked else '') # depends on [control=['for'], data=[]] control_html = '{label}<div>{choices}</div>'.format(label=label, choices=choices_html) # depends on [control=['if'], data=[]] elif type == 'checkbox': control_html = '\n <label>\n <input type="checkbox" id="{id}" {checked} disabled>\n {label}\n </label>\n '.format(label=label, id=control_id, checked='checked' if value else '') # depends on [control=['if'], data=[]] elif type == 'slider': min_ = control.get('min', None) max_ = control.get('max', None) if min_ is None or max_ is None: raise Exception('slider control must specify a min and max value') # depends on [control=['if'], data=[]] if max_ <= min_: raise Exception('slider control must specify a min value less than max value') # depends on [control=['if'], data=[]] step = control.get('step', 1 if isinstance(min_, int) and isinstance(max_, int) else float(max_ - min_) / 10.0) if value is None: value = min_ # depends on [control=['if'], data=['value']] control_html = '\n {label}\n <input type="text" class="gchart-slider_value" id="{id}_value" value="{value}" disabled/>\n <input type="range" class="gchart-slider" id="{id}" min="{min}" max="{max}" step="{step}"\n value="{value}" disabled/>\n '.format(label=label, id=control_id, value=value, min=min_, max=max_, step=step) # depends on [control=['if'], data=[]] elif type == 'textbox': if value is None: value = '' # depends on [control=['if'], data=['value']] control_html = '{label}<input type="text" value="{value}" id="{id}" disabled/>'.format(label=label, value=value, id=control_id) # depends on [control=['if'], data=[]] else: raise Exception('Unknown control type %s (expected picker, slider, checkbox, textbox or set)' % type) control_defaults[varname] = value controls_html += '<div 
class="gchart-control">{control}</div>\n'.format(control=control_html) # depends on [control=['for'], data=[]] controls_html = '<div class="gchart-controls">{controls}</div>'.format(controls=controls_html) return (controls_html, control_defaults, control_ids)
def token(cls: Type[OperatorType], keyword: str) -> OperatorType:
    """
    Build an Operator instance from an expression keyword.

    :param keyword: Operator keyword in expression
    :return: the constructed operator instance
    """
    return cls(keyword)
def function[token, parameter[cls, keyword]]: constant[ Return Operator instance from keyword :param keyword: Operator keyword in expression :return: ] variable[op] assign[=] call[name[cls], parameter[name[keyword]]] return[name[op]]
keyword[def] identifier[token] ( identifier[cls] : identifier[Type] [ identifier[OperatorType] ], identifier[keyword] : identifier[str] )-> identifier[OperatorType] : literal[string] identifier[op] = identifier[cls] ( identifier[keyword] ) keyword[return] identifier[op]
def token(cls: Type[OperatorType], keyword: str) -> OperatorType: """ Return Operator instance from keyword :param keyword: Operator keyword in expression :return: """ op = cls(keyword) return op
def polling_loop(timeout, interval=1):
    """Yield an increasing iteration counter (0, 1, 2, ...) until *timeout*
    seconds have elapsed, sleeping *interval* seconds between yields.

    Timeout is measured from start of iteration (the first ``next()``)."""
    deadline = time.time() + timeout
    iteration = 0
    while True:
        # Re-check the clock before every yield; stop once past the deadline.
        if time.time() >= deadline:
            break
        yield iteration
        iteration += 1
        time.sleep(interval)
def function[polling_loop, parameter[timeout, interval]]: constant[Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration.] variable[start_time] assign[=] call[name[time].time, parameter[]] variable[iteration] assign[=] constant[0] variable[end_time] assign[=] binary_operation[name[start_time] + name[timeout]] while compare[call[name[time].time, parameter[]] less[<] name[end_time]] begin[:] <ast.Yield object at 0x7da1b2178490> <ast.AugAssign object at 0x7da1b21780d0> call[name[time].sleep, parameter[name[interval]]]
keyword[def] identifier[polling_loop] ( identifier[timeout] , identifier[interval] = literal[int] ): literal[string] identifier[start_time] = identifier[time] . identifier[time] () identifier[iteration] = literal[int] identifier[end_time] = identifier[start_time] + identifier[timeout] keyword[while] identifier[time] . identifier[time] ()< identifier[end_time] : keyword[yield] identifier[iteration] identifier[iteration] += literal[int] identifier[time] . identifier[sleep] ( identifier[interval] )
def polling_loop(timeout, interval=1): """Returns an iterator that returns values until timeout has passed. Timeout is measured from start of iteration.""" start_time = time.time() iteration = 0 end_time = start_time + timeout while time.time() < end_time: yield iteration iteration += 1 time.sleep(interval) # depends on [control=['while'], data=[]]
def path(edges, source, sink, flavor="longest"):
    """
    Calculates shortest/longest path from list of edges in a graph

    >>> g = [(1,2,1),(2,3,9),(2,4,3),(2,5,2),(3,6,8),(4,6,10),(4,7,4)]
    >>> g += [(6,8,7),(7,9,5),(8,9,6),(9,10,11)]
    >>> path(g, 1, 8, flavor="shortest")
    ([1, 2, 4, 6, 8], 21)
    >>> path(g, 1, 8, flavor="longest")
    ([1, 2, 3, 6, 8], 25)

    :param edges: list of ``(from_node, to_node, weight)`` triples
    :param source: start node of the path
    :param sink: end node of the path
    :param flavor: ``"longest"`` or ``"shortest"`` -- selects the LP objective
    :returns: ``(node_path, objective_value)``, or ``None`` when the source
        has no outgoing edges or the sink has no incoming edges
    """
    # node_to_edge indexes the edge list by node; presumably outgoing/incoming
    # map node -> incident edge variables -- TODO confirm against the helper.
    outgoing, incoming, nodes = node_to_edge(edges)
    nedges = len(edges)
    L = LPInstance()

    assert flavor in ("longest", "shortest")
    objective = MAXIMIZE if flavor == "longest" else MINIMIZE
    # One LP variable per edge; maximize (longest) or minimize (shortest)
    # the weighted sum of selected edges.
    L.add_objective(edges, objective=objective)

    # Balancing constraint, incoming edges equal to outgoing edges except
    # source and sink
    constraints = []
    for v in nodes:
        incoming_edges = incoming[v]
        outgoing_edges = outgoing[v]
        # summation() renders the sum of edge variables as an LP expression
        # string used to build textual constraints below.
        icc = summation(incoming_edges)
        occ = summation(outgoing_edges)
        if v == source:
            # The path must leave the source exactly once; no outgoing
            # edges means no path can exist at all.
            if not outgoing_edges:
                return None
            constraints.append("{0} = 1".format(occ))
        elif v == sink:
            # The path must enter the sink exactly once.
            if not incoming_edges:
                return None
            constraints.append("{0} = 1".format(icc))
        else:
            # Balancing: in-flow minus out-flow equals zero. Flipping '+'
            # to '-' subtracts the outgoing sum inside the expression string.
            constraints.append("{0}{1} = 0".format(icc, occ.replace('+', '-')))

            # Simple path: each intermediate node is used at most once.
            if incoming_edges:
                constraints.append("{0} <= 1".format(icc))
            if outgoing_edges:
                constraints.append("{0} <= 1".format(occ))

    L.constraints = constraints
    L.add_vars(nedges)

    # lpsolve() yields the chosen edge indices and the objective value --
    # NOTE(review): assumes 'selected' supports `i in selected`; confirm.
    selected, obj_val = L.lpsolve()
    results = sorted(x for i, x in enumerate(edges) if i in selected) \
        if selected else None
    results = edges_to_path(results)
    return results, obj_val
def function[path, parameter[edges, source, sink, flavor]]: constant[ Calculates shortest/longest path from list of edges in a graph >>> g = [(1,2,1),(2,3,9),(2,4,3),(2,5,2),(3,6,8),(4,6,10),(4,7,4)] >>> g += [(6,8,7),(7,9,5),(8,9,6),(9,10,11)] >>> path(g, 1, 8, flavor="shortest") ([1, 2, 4, 6, 8], 21) >>> path(g, 1, 8, flavor="longest") ([1, 2, 3, 6, 8], 25) ] <ast.Tuple object at 0x7da20c76e830> assign[=] call[name[node_to_edge], parameter[name[edges]]] variable[nedges] assign[=] call[name[len], parameter[name[edges]]] variable[L] assign[=] call[name[LPInstance], parameter[]] assert[compare[name[flavor] in tuple[[<ast.Constant object at 0x7da20c76c0d0>, <ast.Constant object at 0x7da20c76f2b0>]]]] variable[objective] assign[=] <ast.IfExp object at 0x7da20c76cf10> call[name[L].add_objective, parameter[name[edges]]] variable[constraints] assign[=] list[[]] for taget[name[v]] in starred[name[nodes]] begin[:] variable[incoming_edges] assign[=] call[name[incoming]][name[v]] variable[outgoing_edges] assign[=] call[name[outgoing]][name[v]] variable[icc] assign[=] call[name[summation], parameter[name[incoming_edges]]] variable[occ] assign[=] call[name[summation], parameter[name[outgoing_edges]]] if compare[name[v] equal[==] name[source]] begin[:] if <ast.UnaryOp object at 0x7da20c76c5e0> begin[:] return[constant[None]] call[name[constraints].append, parameter[call[constant[{0} = 1].format, parameter[name[occ]]]]] name[L].constraints assign[=] name[constraints] call[name[L].add_vars, parameter[name[nedges]]] <ast.Tuple object at 0x7da18fe900d0> assign[=] call[name[L].lpsolve, parameter[]] variable[results] assign[=] <ast.IfExp object at 0x7da18fe91ea0> variable[results] assign[=] call[name[edges_to_path], parameter[name[results]]] return[tuple[[<ast.Name object at 0x7da18fe90130>, <ast.Name object at 0x7da18fe924d0>]]]
keyword[def] identifier[path] ( identifier[edges] , identifier[source] , identifier[sink] , identifier[flavor] = literal[string] ): literal[string] identifier[outgoing] , identifier[incoming] , identifier[nodes] = identifier[node_to_edge] ( identifier[edges] ) identifier[nedges] = identifier[len] ( identifier[edges] ) identifier[L] = identifier[LPInstance] () keyword[assert] identifier[flavor] keyword[in] ( literal[string] , literal[string] ) identifier[objective] = identifier[MAXIMIZE] keyword[if] identifier[flavor] == literal[string] keyword[else] identifier[MINIMIZE] identifier[L] . identifier[add_objective] ( identifier[edges] , identifier[objective] = identifier[objective] ) identifier[constraints] =[] keyword[for] identifier[v] keyword[in] identifier[nodes] : identifier[incoming_edges] = identifier[incoming] [ identifier[v] ] identifier[outgoing_edges] = identifier[outgoing] [ identifier[v] ] identifier[icc] = identifier[summation] ( identifier[incoming_edges] ) identifier[occ] = identifier[summation] ( identifier[outgoing_edges] ) keyword[if] identifier[v] == identifier[source] : keyword[if] keyword[not] identifier[outgoing_edges] : keyword[return] keyword[None] identifier[constraints] . identifier[append] ( literal[string] . identifier[format] ( identifier[occ] )) keyword[elif] identifier[v] == identifier[sink] : keyword[if] keyword[not] identifier[incoming_edges] : keyword[return] keyword[None] identifier[constraints] . identifier[append] ( literal[string] . identifier[format] ( identifier[icc] )) keyword[else] : identifier[constraints] . identifier[append] ( literal[string] . identifier[format] ( identifier[icc] , identifier[occ] . identifier[replace] ( literal[string] , literal[string] ))) keyword[if] identifier[incoming_edges] : identifier[constraints] . identifier[append] ( literal[string] . identifier[format] ( identifier[icc] )) keyword[if] identifier[outgoing_edges] : identifier[constraints] . identifier[append] ( literal[string] . 
identifier[format] ( identifier[occ] )) identifier[L] . identifier[constraints] = identifier[constraints] identifier[L] . identifier[add_vars] ( identifier[nedges] ) identifier[selected] , identifier[obj_val] = identifier[L] . identifier[lpsolve] () identifier[results] = identifier[sorted] ( identifier[x] keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[edges] ) keyword[if] identifier[i] keyword[in] identifier[selected] ) keyword[if] identifier[selected] keyword[else] keyword[None] identifier[results] = identifier[edges_to_path] ( identifier[results] ) keyword[return] identifier[results] , identifier[obj_val]
def path(edges, source, sink, flavor='longest'): """ Calculates shortest/longest path from list of edges in a graph >>> g = [(1,2,1),(2,3,9),(2,4,3),(2,5,2),(3,6,8),(4,6,10),(4,7,4)] >>> g += [(6,8,7),(7,9,5),(8,9,6),(9,10,11)] >>> path(g, 1, 8, flavor="shortest") ([1, 2, 4, 6, 8], 21) >>> path(g, 1, 8, flavor="longest") ([1, 2, 3, 6, 8], 25) """ (outgoing, incoming, nodes) = node_to_edge(edges) nedges = len(edges) L = LPInstance() assert flavor in ('longest', 'shortest') objective = MAXIMIZE if flavor == 'longest' else MINIMIZE L.add_objective(edges, objective=objective) # Balancing constraint, incoming edges equal to outgoing edges except # source and sink constraints = [] for v in nodes: incoming_edges = incoming[v] outgoing_edges = outgoing[v] icc = summation(incoming_edges) occ = summation(outgoing_edges) if v == source: if not outgoing_edges: return None # depends on [control=['if'], data=[]] constraints.append('{0} = 1'.format(occ)) # depends on [control=['if'], data=[]] elif v == sink: if not incoming_edges: return None # depends on [control=['if'], data=[]] constraints.append('{0} = 1'.format(icc)) # depends on [control=['if'], data=[]] else: # Balancing constraints.append('{0}{1} = 0'.format(icc, occ.replace('+', '-'))) # Simple path if incoming_edges: constraints.append('{0} <= 1'.format(icc)) # depends on [control=['if'], data=[]] if outgoing_edges: constraints.append('{0} <= 1'.format(occ)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']] L.constraints = constraints L.add_vars(nedges) (selected, obj_val) = L.lpsolve() results = sorted((x for (i, x) in enumerate(edges) if i in selected)) if selected else None results = edges_to_path(results) return (results, obj_val)
def execute(self, context):
    """Build a DiscordWebhookHook from this operator's settings and post
    the configured message."""
    hook_args = (
        self.http_conn_id,
        self.webhook_endpoint,
        self.message,
        self.username,
        self.avatar_url,
        self.tts,
        self.proxy,
    )
    self.hook = DiscordWebhookHook(*hook_args)
    self.hook.execute()
def function[execute, parameter[self, context]]: constant[ Call the DiscordWebhookHook to post message ] name[self].hook assign[=] call[name[DiscordWebhookHook], parameter[name[self].http_conn_id, name[self].webhook_endpoint, name[self].message, name[self].username, name[self].avatar_url, name[self].tts, name[self].proxy]] call[name[self].hook.execute, parameter[]]
keyword[def] identifier[execute] ( identifier[self] , identifier[context] ): literal[string] identifier[self] . identifier[hook] = identifier[DiscordWebhookHook] ( identifier[self] . identifier[http_conn_id] , identifier[self] . identifier[webhook_endpoint] , identifier[self] . identifier[message] , identifier[self] . identifier[username] , identifier[self] . identifier[avatar_url] , identifier[self] . identifier[tts] , identifier[self] . identifier[proxy] ) identifier[self] . identifier[hook] . identifier[execute] ()
def execute(self, context): """ Call the DiscordWebhookHook to post message """ self.hook = DiscordWebhookHook(self.http_conn_id, self.webhook_endpoint, self.message, self.username, self.avatar_url, self.tts, self.proxy) self.hook.execute()
def __set_cache(self, tokens):
    """
    Sets the tokens cache.

    Existing cache entries for the current language are kept as-is; the
    tokens are only stored when no (truthy) entry is present yet.

    :param tokens: Completer tokens list.
    :type tokens: tuple or list
    """
    cache = DefaultCompleter._DefaultCompleter__tokens
    if not cache.get(self.__language):
        cache[self.__language] = tokens
def function[__set_cache, parameter[self, tokens]]: constant[ Sets the tokens cache. :param tokens: Completer tokens list. :type tokens: tuple or list ] if call[name[DefaultCompleter]._DefaultCompleter__tokens.get, parameter[name[self].__language]] begin[:] return[None] call[name[DefaultCompleter]._DefaultCompleter__tokens][name[self].__language] assign[=] name[tokens]
keyword[def] identifier[__set_cache] ( identifier[self] , identifier[tokens] ): literal[string] keyword[if] identifier[DefaultCompleter] . identifier[_DefaultCompleter__tokens] . identifier[get] ( identifier[self] . identifier[__language] ): keyword[return] identifier[DefaultCompleter] . identifier[_DefaultCompleter__tokens] [ identifier[self] . identifier[__language] ]= identifier[tokens]
def __set_cache(self, tokens): """ Sets the tokens cache. :param tokens: Completer tokens list. :type tokens: tuple or list """ if DefaultCompleter._DefaultCompleter__tokens.get(self.__language): return # depends on [control=['if'], data=[]] DefaultCompleter._DefaultCompleter__tokens[self.__language] = tokens
def Run(self, args):
    """Run."""
    # Verify the executable blob against the client signing key before
    # anything is written to disk.
    signing_key = config.CONFIG["Client.executable_signing_public_key"]
    args.executable.Verify(signing_key)

    path = self.WriteBlobToFile(args)

    # Only actually run the file on the last chunk.
    if not args.more_data:
        self.ProcessFile(path, args)
        self.CleanUp(path)
def function[Run, parameter[self, args]]: constant[Run.] call[name[args].executable.Verify, parameter[call[name[config].CONFIG][constant[Client.executable_signing_public_key]]]] variable[path] assign[=] call[name[self].WriteBlobToFile, parameter[name[args]]] if <ast.UnaryOp object at 0x7da1b1b47f10> begin[:] call[name[self].ProcessFile, parameter[name[path], name[args]]] call[name[self].CleanUp, parameter[name[path]]]
keyword[def] identifier[Run] ( identifier[self] , identifier[args] ): literal[string] identifier[args] . identifier[executable] . identifier[Verify] ( identifier[config] . identifier[CONFIG] [ literal[string] ]) identifier[path] = identifier[self] . identifier[WriteBlobToFile] ( identifier[args] ) keyword[if] keyword[not] identifier[args] . identifier[more_data] : identifier[self] . identifier[ProcessFile] ( identifier[path] , identifier[args] ) identifier[self] . identifier[CleanUp] ( identifier[path] )
def Run(self, args): """Run.""" # Verify the executable blob. args.executable.Verify(config.CONFIG['Client.executable_signing_public_key']) path = self.WriteBlobToFile(args) # Only actually run the file on the last chunk. if not args.more_data: self.ProcessFile(path, args) self.CleanUp(path) # depends on [control=['if'], data=[]]
def parse_item(self, item):
    """
    Receives an item and returns a dictionary of field values.
    """
    parsed_data = {}
    for field_name in self.fields:
        # A field-name may be mapped to another identifier on the source
        # (an XML path or a CSV column name / position); it defaults to
        # the field-name itself.
        source_name = self.field_map.get(field_name, field_name)
        # Prefer a custom "parse_<field_name>" method when one exists,
        # otherwise read the value straight off the item.
        custom_parser = getattr(self, 'parse_%s' % field_name, None)
        if custom_parser is not None:
            parsed_data[field_name] = custom_parser(item, field_name, source_name)
        else:
            parsed_data[field_name] = self.get_value(item, source_name)
    return parsed_data
def function[parse_item, parameter[self, item]]: constant[ Receives an item and returns a dictionary of field values. ] variable[parsed_data] assign[=] dictionary[[], []] for taget[name[field_name]] in starred[name[self].fields] begin[:] variable[source_name] assign[=] call[name[self].field_map.get, parameter[name[field_name], name[field_name]]] variable[parse] assign[=] call[name[getattr], parameter[name[self], binary_operation[constant[parse_%s] <ast.Mod object at 0x7da2590d6920> name[field_name]], constant[None]]] if name[parse] begin[:] variable[value] assign[=] call[name[parse], parameter[name[item], name[field_name], name[source_name]]] call[name[parsed_data]][name[field_name]] assign[=] name[value] return[name[parsed_data]]
keyword[def] identifier[parse_item] ( identifier[self] , identifier[item] ): literal[string] identifier[parsed_data] ={} keyword[for] identifier[field_name] keyword[in] identifier[self] . identifier[fields] : identifier[source_name] = identifier[self] . identifier[field_map] . identifier[get] ( identifier[field_name] , identifier[field_name] ) identifier[parse] = identifier[getattr] ( identifier[self] , literal[string] % identifier[field_name] , keyword[None] ) keyword[if] identifier[parse] : identifier[value] = identifier[parse] ( identifier[item] , identifier[field_name] , identifier[source_name] ) keyword[else] : identifier[value] = identifier[self] . identifier[get_value] ( identifier[item] , identifier[source_name] ) identifier[parsed_data] [ identifier[field_name] ]= identifier[value] keyword[return] identifier[parsed_data]
def parse_item(self, item): """ Receives an item and returns a dictionary of field values. """ # Create a dictionary from values for each field parsed_data = {} for field_name in self.fields: # A field-name may be mapped to another identifier on the source, # it could be a XML path or a CSV column name / position. # Defaults to the field-name itself. source_name = self.field_map.get(field_name, field_name) # Uses a custom method "parse_%(field_name)" # or get the value from the item parse = getattr(self, 'parse_%s' % field_name, None) if parse: value = parse(item, field_name, source_name) # depends on [control=['if'], data=[]] else: value = self.get_value(item, source_name) # Add the value to the parsed data parsed_data[field_name] = value # depends on [control=['for'], data=['field_name']] return parsed_data
def get(self, channel):
    """Read single ADC Channel"""
    ch = self._check_channel_no(channel)
    self.i2c.write_raw8(ch | self._dac_enabled)
    # The first read returns the previous conversion result; discard it
    # and keep the fresh one -- TODO confirm against the device datasheet.
    self.i2c.read_raw8()
    fresh = self.i2c.read_raw8()
    # Scale the 8-bit raw value into the range [0.0, 1.0].
    return fresh / 255.0
def function[get, parameter[self, channel]]: constant[Read single ADC Channel] variable[checked_channel] assign[=] call[name[self]._check_channel_no, parameter[name[channel]]] call[name[self].i2c.write_raw8, parameter[binary_operation[name[checked_channel] <ast.BitOr object at 0x7da2590d6aa0> name[self]._dac_enabled]]] variable[reading] assign[=] call[name[self].i2c.read_raw8, parameter[]] variable[reading] assign[=] call[name[self].i2c.read_raw8, parameter[]] return[binary_operation[name[reading] / constant[255.0]]]
keyword[def] identifier[get] ( identifier[self] , identifier[channel] ): literal[string] identifier[checked_channel] = identifier[self] . identifier[_check_channel_no] ( identifier[channel] ) identifier[self] . identifier[i2c] . identifier[write_raw8] ( identifier[checked_channel] | identifier[self] . identifier[_dac_enabled] ) identifier[reading] = identifier[self] . identifier[i2c] . identifier[read_raw8] () identifier[reading] = identifier[self] . identifier[i2c] . identifier[read_raw8] () keyword[return] identifier[reading] / literal[int]
def get(self, channel): """Read single ADC Channel""" checked_channel = self._check_channel_no(channel) self.i2c.write_raw8(checked_channel | self._dac_enabled) reading = self.i2c.read_raw8() reading = self.i2c.read_raw8() return reading / 255.0
def _fact_ref_eval(cls, cpeset, wfn):
    """
    Returns True if wfn is a non-proper superset (True superset or equal to)
    any of the names in cpeset, otherwise False.

    :param CPESet cpeset: list of CPE bound Names.
    :param CPE2_3_WFN wfn: WFN CPE Name.
    :returns: True if wfn is a non-proper superset any of the names in
        cpeset, otherwise False
    :rtype: boolean
    """
    # Each n arrives in bound form; cpe_superset handles the WFN comparison.
    # any() short-circuits on the first match, like the original loop.
    return any(CPESet2_3.cpe_superset(wfn, n) for n in cpeset)
def function[_fact_ref_eval, parameter[cls, cpeset, wfn]]: constant[ Returns True if wfn is a non-proper superset (True superset or equal to) any of the names in cpeset, otherwise False. :param CPESet cpeset: list of CPE bound Names. :param CPE2_3_WFN wfn: WFN CPE Name. :returns: True if wfn is a non-proper superset any of the names in cpeset, otherwise False :rtype: boolean ] for taget[name[n]] in starred[name[cpeset]] begin[:] if call[name[CPESet2_3].cpe_superset, parameter[name[wfn], name[n]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[_fact_ref_eval] ( identifier[cls] , identifier[cpeset] , identifier[wfn] ): literal[string] keyword[for] identifier[n] keyword[in] identifier[cpeset] : keyword[if] ( identifier[CPESet2_3] . identifier[cpe_superset] ( identifier[wfn] , identifier[n] )): keyword[return] keyword[True] keyword[return] keyword[False]
def _fact_ref_eval(cls, cpeset, wfn): """ Returns True if wfn is a non-proper superset (True superset or equal to) any of the names in cpeset, otherwise False. :param CPESet cpeset: list of CPE bound Names. :param CPE2_3_WFN wfn: WFN CPE Name. :returns: True if wfn is a non-proper superset any of the names in cpeset, otherwise False :rtype: boolean """ for n in cpeset: # Need to convert each n from bound form to WFN if CPESet2_3.cpe_superset(wfn, n): return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']] return False
def get_domain(self, name):
    """Fetch information for a CDN domain.

    Docs: https://developer.qiniu.com/fusion/api/4246/the-domain-name

    Args:
        name: the domain; a wildcard domain must start with a dot "."

    Returns:
        A tuple ``(<result>, <ResponseInfo>)``:
        - result: a dict on success, ``{"error": "<errMsg string>"}`` on failure
        - ResponseInfo: the response metadata of the request
    """
    return self.__post('{0}/domain/{1}'.format(self.server, name))
def function[get_domain, parameter[self, name]]: constant[ 获取域名信息,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name Args: name: 域名, 如果是泛域名,必须以点号 . 开头 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回dict{},失败返回{"error": "<errMsg string>"} - ResponseInfo 请求的Response信息 ] variable[url] assign[=] call[constant[{0}/domain/{1}].format, parameter[name[self].server, name[name]]] return[call[name[self].__post, parameter[name[url]]]]
keyword[def] identifier[get_domain] ( identifier[self] , identifier[name] ): literal[string] identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[server] , identifier[name] ) keyword[return] identifier[self] . identifier[__post] ( identifier[url] )
def get_domain(self, name): """ 获取域名信息,文档 https://developer.qiniu.com/fusion/api/4246/the-domain-name Args: name: 域名, 如果是泛域名,必须以点号 . 开头 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回dict{},失败返回{"error": "<errMsg string>"} - ResponseInfo 请求的Response信息 """ url = '{0}/domain/{1}'.format(self.server, name) return self.__post(url)
def computed_displaywidth():
    '''Figure out a reasonable default width.

    Prefer os.environ['COLUMNS'] when it holds an integer, fall back to the
    detected terminal size, and finally to 80 when the width comes out zero.
    '''
    columns = os.environ.get('COLUMNS')
    try:
        # int(None) raises TypeError (missing var), int('abc') ValueError --
        # both fall through to terminal detection, as the original's
        # KeyError/ValueError pair did.
        width = int(columns)
    except (TypeError, ValueError):
        width = get_terminal_size().columns
    return width or 80
def function[computed_displaywidth, parameter[]]: constant[Figure out a reasonable default with. Use os.environ['COLUMNS'] if possible, and failing that use 80. ] <ast.Try object at 0x7da1b0292a10> return[<ast.BoolOp object at 0x7da1b0291210>]
keyword[def] identifier[computed_displaywidth] (): literal[string] keyword[try] : identifier[width] = identifier[int] ( identifier[os] . identifier[environ] [ literal[string] ]) keyword[except] ( identifier[KeyError] , identifier[ValueError] ): identifier[width] = identifier[get_terminal_size] (). identifier[columns] keyword[return] identifier[width] keyword[or] literal[int]
def computed_displaywidth(): """Figure out a reasonable default with. Use os.environ['COLUMNS'] if possible, and failing that use 80. """ try: width = int(os.environ['COLUMNS']) # depends on [control=['try'], data=[]] except (KeyError, ValueError): width = get_terminal_size().columns # depends on [control=['except'], data=[]] return width or 80
def calculate_linescan_psf(x, y, z, normalize=False, kfki=0.889, zint=100.,
        polar_angle=0., wrap=True, **kwargs):
    """
    Calculates the point spread function of a line-scanning confocal.

    Make x,y,z __1D__ numpy.arrays, with x the direction along the scan
    line. (to make the calculation faster since I dont' need the line ilm
    for each x).

    Parameters
    ----------
        x : numpy.ndarray
            _One_dimensional_ array of the x grid points (along the line
            illumination) at which to evaluate the psf. In units of
            1/k_incoming.
        y : numpy.ndarray
            _One_dimensional_ array of the y grid points (in plane,
            perpendicular to the line illumination) at which to evaluate
            the psf. In units of 1/k_incoming.
        z : numpy.ndarray
            _One_dimensional_ array of the z grid points (along the optical
            axis) at which to evaluate the psf. In units of 1/k_incoming.
        normalize : Bool, optional
            Set to True to include the effects of PSF normalization on the
            image intensity. Default is False.
        kfki : Float, optional
            The ratio of the final light's wavevector to the incoming.
            Default is 0.889
        zint : Float, optional
            The position of the optical interface, in units of 1/k_incoming
            Default is 100.
        wrap : Bool, optional
            If True, wraps the psf calculation for speed, assuming that the
            input x, y are regularly-spaced points. If x,y are not regularly
            spaced then `wrap` must be set to False. Default is True.
        polar_angle : Float, optional
            The polarization angle of the light (radians) with respect to
            the line direction (x). Default is 0.

    Other Parameters
    ----------------
        alpha : Float
            The opening angle of the lens. Default is 1.
        n2n1 : Float
            The ratio of the index in the 2nd medium to that in the first.
            Default is 0.95

    Returns
    -------
        numpy.ndarray
            A 3D- numpy.array of the point-spread function. Indexing is
            psf[x,y,z]; shape is [x.size, y,size, z.size]
    """
    #0. Set up vecs
    if wrap:
        # With wrapping, only half of each (symmetric) axis is evaluated;
        # vec_to_halfvec presumably returns the nonnegative half of the
        # grid -- TODO confirm in the helper.
        xpts = vec_to_halfvec(x)
        ypts = vec_to_halfvec(y)
        x3, y3, z3 = np.meshgrid(xpts, ypts, z, indexing='ij')
    else:
        x3,y3,z3 = np.meshgrid(x, y, z, indexing='ij')
    # In-plane radial distance; used by the detection PSF below.
    rho3 = np.sqrt(x3*x3 + y3*y3)

    #1. Hilm -- the line-illumination PSF, evaluated only over (y, z) since
    # the illumination is uniform along the scan direction x.
    if wrap:
        y2,z2 = np.meshgrid(ypts, z, indexing='ij')
        hilm0 = calculate_linescan_ilm_psf(y2, z2, zint=zint,
                polar_angle=polar_angle, **kwargs)
        # Mirror the half-axis result back onto the full y axis; when the
        # half-grid starts at 0, skip duplicating the y=0 sample.
        if ypts[0] == 0:
            hilm = np.append(hilm0[-1:0:-1], hilm0, axis=0)
        else:
            hilm = np.append(hilm0[::-1], hilm0, axis=0)
    else:
        y2,z2 = np.meshgrid(y, z, indexing='ij')
        hilm = calculate_linescan_ilm_psf(y2, z2, zint=zint,
                polar_angle=polar_angle, **kwargs)

    #2. Hdet -- the detection PSF; spatial arguments are rescaled by kfki
    # since the detected light has a different wavevector.
    if wrap:
        #Lambda function that ignores its args but still returns correct values
        func = lambda *args: get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,
                get_hdet=True, **kwargs)[0]
        hdet = wrap_and_calc_psf(xpts, ypts, z, func)
    else:
        hdet, toss = get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,
                get_hdet=True, **kwargs)

    if normalize:
        hilm /= hilm.sum()
        hdet /= hdet.sum()

    # Total PSF is the product of illumination and detection; hilm is
    # constant along x, so multiply it into every x-slice of hdet in place.
    for a in range(x.size):
        hdet[a] *= hilm

    return hdet if normalize else hdet / hdet.sum()
def function[calculate_linescan_psf, parameter[x, y, z, normalize, kfki, zint, polar_angle, wrap]]: constant[ Calculates the point spread function of a line-scanning confocal. Make x,y,z __1D__ numpy.arrays, with x the direction along the scan line. (to make the calculation faster since I dont' need the line ilm for each x). Parameters ---------- x : numpy.ndarray _One_dimensional_ array of the x grid points (along the line illumination) at which to evaluate the psf. In units of 1/k_incoming. y : numpy.ndarray _One_dimensional_ array of the y grid points (in plane, perpendicular to the line illumination) at which to evaluate the psf. In units of 1/k_incoming. z : numpy.ndarray _One_dimensional_ array of the z grid points (along the optical axis) at which to evaluate the psf. In units of 1/k_incoming. normalize : Bool, optional Set to True to include the effects of PSF normalization on the image intensity. Default is False. kfki : Float, optional The ratio of the final light's wavevector to the incoming. Default is 0.889 zint : Float, optional The position of the optical interface, in units of 1/k_incoming Default is 100. wrap : Bool, optional If True, wraps the psf calculation for speed, assuming that the input x, y are regularly-spaced points. If x,y are not regularly spaced then `wrap` must be set to False. Default is True. polar_angle : Float, optional The polarization angle of the light (radians) with respect to the line direction (x). Default is 0. Other Parameters ---------------- alpha : Float The opening angle of the lens. Default is 1. n2n1 : Float The ratio of the index in the 2nd medium to that in the first. Default is 0.95 Returns ------- numpy.ndarray A 3D- numpy.array of the point-spread function. 
Indexing is psf[x,y,z]; shape is [x.size, y,size, z.size] ] if name[wrap] begin[:] variable[xpts] assign[=] call[name[vec_to_halfvec], parameter[name[x]]] variable[ypts] assign[=] call[name[vec_to_halfvec], parameter[name[y]]] <ast.Tuple object at 0x7da1b2346ad0> assign[=] call[name[np].meshgrid, parameter[name[xpts], name[ypts], name[z]]] variable[rho3] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[x3] * name[x3]] + binary_operation[name[y3] * name[y3]]]]] if name[wrap] begin[:] <ast.Tuple object at 0x7da1b2347190> assign[=] call[name[np].meshgrid, parameter[name[ypts], name[z]]] variable[hilm0] assign[=] call[name[calculate_linescan_ilm_psf], parameter[name[y2], name[z2]]] if compare[call[name[ypts]][constant[0]] equal[==] constant[0]] begin[:] variable[hilm] assign[=] call[name[np].append, parameter[call[name[hilm0]][<ast.Slice object at 0x7da1b2347e20>], name[hilm0]]] if name[wrap] begin[:] variable[func] assign[=] <ast.Lambda object at 0x7da1b26acbb0> variable[hdet] assign[=] call[name[wrap_and_calc_psf], parameter[name[xpts], name[ypts], name[z], name[func]]] if name[normalize] begin[:] <ast.AugAssign object at 0x7da1b021c550> <ast.AugAssign object at 0x7da1b021c8b0> for taget[name[a]] in starred[call[name[range], parameter[name[x].size]]] begin[:] <ast.AugAssign object at 0x7da1b021ca90> return[<ast.IfExp object at 0x7da1b021e1a0>]
keyword[def] identifier[calculate_linescan_psf] ( identifier[x] , identifier[y] , identifier[z] , identifier[normalize] = keyword[False] , identifier[kfki] = literal[int] , identifier[zint] = literal[int] , identifier[polar_angle] = literal[int] , identifier[wrap] = keyword[True] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[wrap] : identifier[xpts] = identifier[vec_to_halfvec] ( identifier[x] ) identifier[ypts] = identifier[vec_to_halfvec] ( identifier[y] ) identifier[x3] , identifier[y3] , identifier[z3] = identifier[np] . identifier[meshgrid] ( identifier[xpts] , identifier[ypts] , identifier[z] , identifier[indexing] = literal[string] ) keyword[else] : identifier[x3] , identifier[y3] , identifier[z3] = identifier[np] . identifier[meshgrid] ( identifier[x] , identifier[y] , identifier[z] , identifier[indexing] = literal[string] ) identifier[rho3] = identifier[np] . identifier[sqrt] ( identifier[x3] * identifier[x3] + identifier[y3] * identifier[y3] ) keyword[if] identifier[wrap] : identifier[y2] , identifier[z2] = identifier[np] . identifier[meshgrid] ( identifier[ypts] , identifier[z] , identifier[indexing] = literal[string] ) identifier[hilm0] = identifier[calculate_linescan_ilm_psf] ( identifier[y2] , identifier[z2] , identifier[zint] = identifier[zint] , identifier[polar_angle] = identifier[polar_angle] ,** identifier[kwargs] ) keyword[if] identifier[ypts] [ literal[int] ]== literal[int] : identifier[hilm] = identifier[np] . identifier[append] ( identifier[hilm0] [- literal[int] : literal[int] :- literal[int] ], identifier[hilm0] , identifier[axis] = literal[int] ) keyword[else] : identifier[hilm] = identifier[np] . identifier[append] ( identifier[hilm0] [::- literal[int] ], identifier[hilm0] , identifier[axis] = literal[int] ) keyword[else] : identifier[y2] , identifier[z2] = identifier[np] . 
identifier[meshgrid] ( identifier[y] , identifier[z] , identifier[indexing] = literal[string] ) identifier[hilm] = identifier[calculate_linescan_ilm_psf] ( identifier[y2] , identifier[z2] , identifier[zint] = identifier[zint] , identifier[polar_angle] = identifier[polar_angle] ,** identifier[kwargs] ) keyword[if] identifier[wrap] : identifier[func] = keyword[lambda] * identifier[args] : identifier[get_hsym_asym] ( identifier[rho3] * identifier[kfki] , identifier[z3] * identifier[kfki] , identifier[zint] = identifier[kfki] * identifier[zint] , identifier[get_hdet] = keyword[True] ,** identifier[kwargs] )[ literal[int] ] identifier[hdet] = identifier[wrap_and_calc_psf] ( identifier[xpts] , identifier[ypts] , identifier[z] , identifier[func] ) keyword[else] : identifier[hdet] , identifier[toss] = identifier[get_hsym_asym] ( identifier[rho3] * identifier[kfki] , identifier[z3] * identifier[kfki] , identifier[zint] = identifier[kfki] * identifier[zint] , identifier[get_hdet] = keyword[True] ,** identifier[kwargs] ) keyword[if] identifier[normalize] : identifier[hilm] /= identifier[hilm] . identifier[sum] () identifier[hdet] /= identifier[hdet] . identifier[sum] () keyword[for] identifier[a] keyword[in] identifier[range] ( identifier[x] . identifier[size] ): identifier[hdet] [ identifier[a] ]*= identifier[hilm] keyword[return] identifier[hdet] keyword[if] identifier[normalize] keyword[else] identifier[hdet] / identifier[hdet] . identifier[sum] ()
def calculate_linescan_psf(x, y, z, normalize=False, kfki=0.889, zint=100.0, polar_angle=0.0, wrap=True, **kwargs): """ Calculates the point spread function of a line-scanning confocal. Make x,y,z __1D__ numpy.arrays, with x the direction along the scan line. (to make the calculation faster since I dont' need the line ilm for each x). Parameters ---------- x : numpy.ndarray _One_dimensional_ array of the x grid points (along the line illumination) at which to evaluate the psf. In units of 1/k_incoming. y : numpy.ndarray _One_dimensional_ array of the y grid points (in plane, perpendicular to the line illumination) at which to evaluate the psf. In units of 1/k_incoming. z : numpy.ndarray _One_dimensional_ array of the z grid points (along the optical axis) at which to evaluate the psf. In units of 1/k_incoming. normalize : Bool, optional Set to True to include the effects of PSF normalization on the image intensity. Default is False. kfki : Float, optional The ratio of the final light's wavevector to the incoming. Default is 0.889 zint : Float, optional The position of the optical interface, in units of 1/k_incoming Default is 100. wrap : Bool, optional If True, wraps the psf calculation for speed, assuming that the input x, y are regularly-spaced points. If x,y are not regularly spaced then `wrap` must be set to False. Default is True. polar_angle : Float, optional The polarization angle of the light (radians) with respect to the line direction (x). Default is 0. Other Parameters ---------------- alpha : Float The opening angle of the lens. Default is 1. n2n1 : Float The ratio of the index in the 2nd medium to that in the first. Default is 0.95 Returns ------- numpy.ndarray A 3D- numpy.array of the point-spread function. Indexing is psf[x,y,z]; shape is [x.size, y,size, z.size] """ #0. 
Set up vecs if wrap: xpts = vec_to_halfvec(x) ypts = vec_to_halfvec(y) (x3, y3, z3) = np.meshgrid(xpts, ypts, z, indexing='ij') # depends on [control=['if'], data=[]] else: (x3, y3, z3) = np.meshgrid(x, y, z, indexing='ij') rho3 = np.sqrt(x3 * x3 + y3 * y3) #1. Hilm if wrap: (y2, z2) = np.meshgrid(ypts, z, indexing='ij') hilm0 = calculate_linescan_ilm_psf(y2, z2, zint=zint, polar_angle=polar_angle, **kwargs) if ypts[0] == 0: hilm = np.append(hilm0[-1:0:-1], hilm0, axis=0) # depends on [control=['if'], data=[]] else: hilm = np.append(hilm0[::-1], hilm0, axis=0) # depends on [control=['if'], data=[]] else: (y2, z2) = np.meshgrid(y, z, indexing='ij') hilm = calculate_linescan_ilm_psf(y2, z2, zint=zint, polar_angle=polar_angle, **kwargs) #2. Hdet if wrap: #Lambda function that ignores its args but still returns correct values func = lambda *args: get_hsym_asym(rho3 * kfki, z3 * kfki, zint=kfki * zint, get_hdet=True, **kwargs)[0] hdet = wrap_and_calc_psf(xpts, ypts, z, func) # depends on [control=['if'], data=[]] else: (hdet, toss) = get_hsym_asym(rho3 * kfki, z3 * kfki, zint=kfki * zint, get_hdet=True, **kwargs) if normalize: hilm /= hilm.sum() hdet /= hdet.sum() # depends on [control=['if'], data=[]] for a in range(x.size): hdet[a] *= hilm # depends on [control=['for'], data=['a']] return hdet if normalize else hdet / hdet.sum()
def wait_for_redis_to_start(redis_ip_address, redis_port, password=None, num_retries=5): """Wait for a Redis server to be available. This is accomplished by creating a Redis client and sending a random command to the server until the command gets through. Args: redis_ip_address (str): The IP address of the redis server. redis_port (int): The port of the redis server. password (str): The password of the redis server. num_retries (int): The number of times to try connecting with redis. The client will sleep for one second between attempts. Raises: Exception: An exception is raised if we could not connect with Redis. """ redis_client = redis.StrictRedis( host=redis_ip_address, port=redis_port, password=password) # Wait for the Redis server to start. counter = 0 while counter < num_retries: try: # Run some random command and see if it worked. logger.info( "Waiting for redis server at {}:{} to respond...".format( redis_ip_address, redis_port)) redis_client.client_list() except redis.ConnectionError: # Wait a little bit. time.sleep(1) logger.info("Failed to connect to the redis server, retrying.") counter += 1 else: break if counter == num_retries: raise Exception("Unable to connect to Redis. If the Redis instance is " "on a different machine, check that your firewall is " "configured properly.")
def function[wait_for_redis_to_start, parameter[redis_ip_address, redis_port, password, num_retries]]: constant[Wait for a Redis server to be available. This is accomplished by creating a Redis client and sending a random command to the server until the command gets through. Args: redis_ip_address (str): The IP address of the redis server. redis_port (int): The port of the redis server. password (str): The password of the redis server. num_retries (int): The number of times to try connecting with redis. The client will sleep for one second between attempts. Raises: Exception: An exception is raised if we could not connect with Redis. ] variable[redis_client] assign[=] call[name[redis].StrictRedis, parameter[]] variable[counter] assign[=] constant[0] while compare[name[counter] less[<] name[num_retries]] begin[:] <ast.Try object at 0x7da18fe93be0> if compare[name[counter] equal[==] name[num_retries]] begin[:] <ast.Raise object at 0x7da18fe92500>
keyword[def] identifier[wait_for_redis_to_start] ( identifier[redis_ip_address] , identifier[redis_port] , identifier[password] = keyword[None] , identifier[num_retries] = literal[int] ): literal[string] identifier[redis_client] = identifier[redis] . identifier[StrictRedis] ( identifier[host] = identifier[redis_ip_address] , identifier[port] = identifier[redis_port] , identifier[password] = identifier[password] ) identifier[counter] = literal[int] keyword[while] identifier[counter] < identifier[num_retries] : keyword[try] : identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[redis_ip_address] , identifier[redis_port] )) identifier[redis_client] . identifier[client_list] () keyword[except] identifier[redis] . identifier[ConnectionError] : identifier[time] . identifier[sleep] ( literal[int] ) identifier[logger] . identifier[info] ( literal[string] ) identifier[counter] += literal[int] keyword[else] : keyword[break] keyword[if] identifier[counter] == identifier[num_retries] : keyword[raise] identifier[Exception] ( literal[string] literal[string] literal[string] )
def wait_for_redis_to_start(redis_ip_address, redis_port, password=None, num_retries=5): """Wait for a Redis server to be available. This is accomplished by creating a Redis client and sending a random command to the server until the command gets through. Args: redis_ip_address (str): The IP address of the redis server. redis_port (int): The port of the redis server. password (str): The password of the redis server. num_retries (int): The number of times to try connecting with redis. The client will sleep for one second between attempts. Raises: Exception: An exception is raised if we could not connect with Redis. """ redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port, password=password) # Wait for the Redis server to start. counter = 0 while counter < num_retries: try: # Run some random command and see if it worked. logger.info('Waiting for redis server at {}:{} to respond...'.format(redis_ip_address, redis_port)) redis_client.client_list() # depends on [control=['try'], data=[]] except redis.ConnectionError: # Wait a little bit. time.sleep(1) logger.info('Failed to connect to the redis server, retrying.') counter += 1 # depends on [control=['except'], data=[]] else: break # depends on [control=['while'], data=['counter']] if counter == num_retries: raise Exception('Unable to connect to Redis. If the Redis instance is on a different machine, check that your firewall is configured properly.') # depends on [control=['if'], data=[]]
def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob)
def function[Print, parameter[self]]: constant[Prints the hypotheses and their probabilities.] for taget[tuple[[<ast.Name object at 0x7da1b03c80d0>, <ast.Name object at 0x7da1b03cae00>]]] in starred[call[name[sorted], parameter[call[name[self].Items, parameter[]]]]] begin[:] call[name[print], parameter[name[hypo], name[prob]]]
keyword[def] identifier[Print] ( identifier[self] ): literal[string] keyword[for] identifier[hypo] , identifier[prob] keyword[in] identifier[sorted] ( identifier[self] . identifier[Items] ()): identifier[print] ( identifier[hypo] , identifier[prob] )
def Print(self): """Prints the hypotheses and their probabilities.""" for (hypo, prob) in sorted(self.Items()): print(hypo, prob) # depends on [control=['for'], data=[]]
def getClosest(self, inputPattern, topKCategories=3): """Returns the index of the pattern that is closest to inputPattern, the distances of all patterns to inputPattern, and the indices of the k closest categories. """ inferenceResult = numpy.zeros(max(self._categoryList)+1) dist = self._getDistances(inputPattern) sorted = dist.argsort() validVectorCount = len(self._categoryList) - self._categoryList.count(-1) for j in sorted[:min(self.k, validVectorCount)]: inferenceResult[self._categoryList[j]] += 1.0 winner = inferenceResult.argmax() topNCats = [] for i in range(topKCategories): topNCats.append((self._categoryList[sorted[i]], dist[sorted[i]] )) return winner, dist, topNCats
def function[getClosest, parameter[self, inputPattern, topKCategories]]: constant[Returns the index of the pattern that is closest to inputPattern, the distances of all patterns to inputPattern, and the indices of the k closest categories. ] variable[inferenceResult] assign[=] call[name[numpy].zeros, parameter[binary_operation[call[name[max], parameter[name[self]._categoryList]] + constant[1]]]] variable[dist] assign[=] call[name[self]._getDistances, parameter[name[inputPattern]]] variable[sorted] assign[=] call[name[dist].argsort, parameter[]] variable[validVectorCount] assign[=] binary_operation[call[name[len], parameter[name[self]._categoryList]] - call[name[self]._categoryList.count, parameter[<ast.UnaryOp object at 0x7da20e9b0700>]]] for taget[name[j]] in starred[call[name[sorted]][<ast.Slice object at 0x7da20e9b2860>]] begin[:] <ast.AugAssign object at 0x7da20e9b0e20> variable[winner] assign[=] call[name[inferenceResult].argmax, parameter[]] variable[topNCats] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[name[topKCategories]]]] begin[:] call[name[topNCats].append, parameter[tuple[[<ast.Subscript object at 0x7da20e9b1d50>, <ast.Subscript object at 0x7da1b2346380>]]]] return[tuple[[<ast.Name object at 0x7da1b2346140>, <ast.Name object at 0x7da1b2347ee0>, <ast.Name object at 0x7da1b2346890>]]]
keyword[def] identifier[getClosest] ( identifier[self] , identifier[inputPattern] , identifier[topKCategories] = literal[int] ): literal[string] identifier[inferenceResult] = identifier[numpy] . identifier[zeros] ( identifier[max] ( identifier[self] . identifier[_categoryList] )+ literal[int] ) identifier[dist] = identifier[self] . identifier[_getDistances] ( identifier[inputPattern] ) identifier[sorted] = identifier[dist] . identifier[argsort] () identifier[validVectorCount] = identifier[len] ( identifier[self] . identifier[_categoryList] )- identifier[self] . identifier[_categoryList] . identifier[count] (- literal[int] ) keyword[for] identifier[j] keyword[in] identifier[sorted] [: identifier[min] ( identifier[self] . identifier[k] , identifier[validVectorCount] )]: identifier[inferenceResult] [ identifier[self] . identifier[_categoryList] [ identifier[j] ]]+= literal[int] identifier[winner] = identifier[inferenceResult] . identifier[argmax] () identifier[topNCats] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[topKCategories] ): identifier[topNCats] . identifier[append] (( identifier[self] . identifier[_categoryList] [ identifier[sorted] [ identifier[i] ]], identifier[dist] [ identifier[sorted] [ identifier[i] ]])) keyword[return] identifier[winner] , identifier[dist] , identifier[topNCats]
def getClosest(self, inputPattern, topKCategories=3): """Returns the index of the pattern that is closest to inputPattern, the distances of all patterns to inputPattern, and the indices of the k closest categories. """ inferenceResult = numpy.zeros(max(self._categoryList) + 1) dist = self._getDistances(inputPattern) sorted = dist.argsort() validVectorCount = len(self._categoryList) - self._categoryList.count(-1) for j in sorted[:min(self.k, validVectorCount)]: inferenceResult[self._categoryList[j]] += 1.0 # depends on [control=['for'], data=['j']] winner = inferenceResult.argmax() topNCats = [] for i in range(topKCategories): topNCats.append((self._categoryList[sorted[i]], dist[sorted[i]])) # depends on [control=['for'], data=['i']] return (winner, dist, topNCats)
def widget_kwargs_for_field(self, field_name): """ Returns widget kwargs for given field_name. """ if self._meta.widgets: return self._meta.widgets.get(field_name, {}) return {}
def function[widget_kwargs_for_field, parameter[self, field_name]]: constant[ Returns widget kwargs for given field_name. ] if name[self]._meta.widgets begin[:] return[call[name[self]._meta.widgets.get, parameter[name[field_name], dictionary[[], []]]]] return[dictionary[[], []]]
keyword[def] identifier[widget_kwargs_for_field] ( identifier[self] , identifier[field_name] ): literal[string] keyword[if] identifier[self] . identifier[_meta] . identifier[widgets] : keyword[return] identifier[self] . identifier[_meta] . identifier[widgets] . identifier[get] ( identifier[field_name] ,{}) keyword[return] {}
def widget_kwargs_for_field(self, field_name): """ Returns widget kwargs for given field_name. """ if self._meta.widgets: return self._meta.widgets.get(field_name, {}) # depends on [control=['if'], data=[]] return {}
def order(self, did, service_definition_id, consumer_account, auto_consume=False): """ Sign service agreement. Sign the service agreement defined in the service section identified by `service_definition_id` in the ddo and send the signed agreement to the purchase endpoint associated with this service. :param did: str starting with the prefix `did:op:` and followed by the asset id which is a hex str :param service_definition_id: str the service definition id identifying a specific service in the DDO (DID document) :param consumer_account: Account instance of the consumer :param auto_consume: boolean :return: tuple(agreement_id, signature) the service agreement id (can be used to query the keeper-contracts for the status of the service agreement) and signed agreement hash """ assert consumer_account.address in self._keeper.accounts, f'Unrecognized consumer ' \ f'address `consumer_account`' agreement_id, signature = self._agreements.prepare( did, service_definition_id, consumer_account ) logger.debug(f'about to request create agreement: {agreement_id}') self._agreements.send( did, agreement_id, service_definition_id, signature, consumer_account, auto_consume=auto_consume ) return agreement_id
def function[order, parameter[self, did, service_definition_id, consumer_account, auto_consume]]: constant[ Sign service agreement. Sign the service agreement defined in the service section identified by `service_definition_id` in the ddo and send the signed agreement to the purchase endpoint associated with this service. :param did: str starting with the prefix `did:op:` and followed by the asset id which is a hex str :param service_definition_id: str the service definition id identifying a specific service in the DDO (DID document) :param consumer_account: Account instance of the consumer :param auto_consume: boolean :return: tuple(agreement_id, signature) the service agreement id (can be used to query the keeper-contracts for the status of the service agreement) and signed agreement hash ] assert[compare[name[consumer_account].address in name[self]._keeper.accounts]] <ast.Tuple object at 0x7da18f00d570> assign[=] call[name[self]._agreements.prepare, parameter[name[did], name[service_definition_id], name[consumer_account]]] call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da18f00db40>]] call[name[self]._agreements.send, parameter[name[did], name[agreement_id], name[service_definition_id], name[signature], name[consumer_account]]] return[name[agreement_id]]
keyword[def] identifier[order] ( identifier[self] , identifier[did] , identifier[service_definition_id] , identifier[consumer_account] , identifier[auto_consume] = keyword[False] ): literal[string] keyword[assert] identifier[consumer_account] . identifier[address] keyword[in] identifier[self] . identifier[_keeper] . identifier[accounts] , literal[string] literal[string] identifier[agreement_id] , identifier[signature] = identifier[self] . identifier[_agreements] . identifier[prepare] ( identifier[did] , identifier[service_definition_id] , identifier[consumer_account] ) identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[_agreements] . identifier[send] ( identifier[did] , identifier[agreement_id] , identifier[service_definition_id] , identifier[signature] , identifier[consumer_account] , identifier[auto_consume] = identifier[auto_consume] ) keyword[return] identifier[agreement_id]
def order(self, did, service_definition_id, consumer_account, auto_consume=False): """ Sign service agreement. Sign the service agreement defined in the service section identified by `service_definition_id` in the ddo and send the signed agreement to the purchase endpoint associated with this service. :param did: str starting with the prefix `did:op:` and followed by the asset id which is a hex str :param service_definition_id: str the service definition id identifying a specific service in the DDO (DID document) :param consumer_account: Account instance of the consumer :param auto_consume: boolean :return: tuple(agreement_id, signature) the service agreement id (can be used to query the keeper-contracts for the status of the service agreement) and signed agreement hash """ assert consumer_account.address in self._keeper.accounts, f'Unrecognized consumer address `consumer_account`' (agreement_id, signature) = self._agreements.prepare(did, service_definition_id, consumer_account) logger.debug(f'about to request create agreement: {agreement_id}') self._agreements.send(did, agreement_id, service_definition_id, signature, consumer_account, auto_consume=auto_consume) return agreement_id
def set_right_table(self, table): """ Sets the right table for this join clause and try to automatically set the condition if one isn't specified """ self.right_table = table if self.left_table is None: return # find table prefix if type(self.left_table) is ModelTable and type(self.right_table) is ModelTable: # loop through fields to find the field for this model # check if this join type is for a related field for field in self.get_all_related_objects(self.left_table): related_model = field.model if hasattr(field, 'related_model'): related_model = field.related_model if related_model == self.right_table.model: if self.right_table.field_prefix is None: self.right_table.field_prefix = field.get_accessor_name() if len(self.right_table.field_prefix) > 4 and self.right_table.field_prefix[-4:] == '_set': self.right_table.field_prefix = self.right_table.field_prefix[:-4] return # check if this join type is for a foreign key for field in self.left_table.model._meta.fields: if ( field.get_internal_type() == 'OneToOneField' or field.get_internal_type() == 'ForeignKey' ): if field.remote_field.model == self.right_table.model: if self.right_table.field_prefix is None: self.right_table.field_prefix = field.name return
def function[set_right_table, parameter[self, table]]: constant[ Sets the right table for this join clause and try to automatically set the condition if one isn't specified ] name[self].right_table assign[=] name[table] if compare[name[self].left_table is constant[None]] begin[:] return[None] if <ast.BoolOp object at 0x7da1b28df5b0> begin[:] for taget[name[field]] in starred[call[name[self].get_all_related_objects, parameter[name[self].left_table]]] begin[:] variable[related_model] assign[=] name[field].model if call[name[hasattr], parameter[name[field], constant[related_model]]] begin[:] variable[related_model] assign[=] name[field].related_model if compare[name[related_model] equal[==] name[self].right_table.model] begin[:] if compare[name[self].right_table.field_prefix is constant[None]] begin[:] name[self].right_table.field_prefix assign[=] call[name[field].get_accessor_name, parameter[]] if <ast.BoolOp object at 0x7da1b28445e0> begin[:] name[self].right_table.field_prefix assign[=] call[name[self].right_table.field_prefix][<ast.Slice object at 0x7da1b2844e80>] return[None] for taget[name[field]] in starred[name[self].left_table.model._meta.fields] begin[:] if <ast.BoolOp object at 0x7da1b2844760> begin[:] if compare[name[field].remote_field.model equal[==] name[self].right_table.model] begin[:] if compare[name[self].right_table.field_prefix is constant[None]] begin[:] name[self].right_table.field_prefix assign[=] name[field].name return[None]
keyword[def] identifier[set_right_table] ( identifier[self] , identifier[table] ): literal[string] identifier[self] . identifier[right_table] = identifier[table] keyword[if] identifier[self] . identifier[left_table] keyword[is] keyword[None] : keyword[return] keyword[if] identifier[type] ( identifier[self] . identifier[left_table] ) keyword[is] identifier[ModelTable] keyword[and] identifier[type] ( identifier[self] . identifier[right_table] ) keyword[is] identifier[ModelTable] : keyword[for] identifier[field] keyword[in] identifier[self] . identifier[get_all_related_objects] ( identifier[self] . identifier[left_table] ): identifier[related_model] = identifier[field] . identifier[model] keyword[if] identifier[hasattr] ( identifier[field] , literal[string] ): identifier[related_model] = identifier[field] . identifier[related_model] keyword[if] identifier[related_model] == identifier[self] . identifier[right_table] . identifier[model] : keyword[if] identifier[self] . identifier[right_table] . identifier[field_prefix] keyword[is] keyword[None] : identifier[self] . identifier[right_table] . identifier[field_prefix] = identifier[field] . identifier[get_accessor_name] () keyword[if] identifier[len] ( identifier[self] . identifier[right_table] . identifier[field_prefix] )> literal[int] keyword[and] identifier[self] . identifier[right_table] . identifier[field_prefix] [- literal[int] :]== literal[string] : identifier[self] . identifier[right_table] . identifier[field_prefix] = identifier[self] . identifier[right_table] . identifier[field_prefix] [:- literal[int] ] keyword[return] keyword[for] identifier[field] keyword[in] identifier[self] . identifier[left_table] . identifier[model] . identifier[_meta] . identifier[fields] : keyword[if] ( identifier[field] . identifier[get_internal_type] ()== literal[string] keyword[or] identifier[field] . identifier[get_internal_type] ()== literal[string] ): keyword[if] identifier[field] . identifier[remote_field] . 
identifier[model] == identifier[self] . identifier[right_table] . identifier[model] : keyword[if] identifier[self] . identifier[right_table] . identifier[field_prefix] keyword[is] keyword[None] : identifier[self] . identifier[right_table] . identifier[field_prefix] = identifier[field] . identifier[name] keyword[return]
def set_right_table(self, table): """ Sets the right table for this join clause and try to automatically set the condition if one isn't specified """ self.right_table = table if self.left_table is None: return # depends on [control=['if'], data=[]] # find table prefix if type(self.left_table) is ModelTable and type(self.right_table) is ModelTable: # loop through fields to find the field for this model # check if this join type is for a related field for field in self.get_all_related_objects(self.left_table): related_model = field.model if hasattr(field, 'related_model'): related_model = field.related_model # depends on [control=['if'], data=[]] if related_model == self.right_table.model: if self.right_table.field_prefix is None: self.right_table.field_prefix = field.get_accessor_name() if len(self.right_table.field_prefix) > 4 and self.right_table.field_prefix[-4:] == '_set': self.right_table.field_prefix = self.right_table.field_prefix[:-4] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']] # check if this join type is for a foreign key for field in self.left_table.model._meta.fields: if field.get_internal_type() == 'OneToOneField' or field.get_internal_type() == 'ForeignKey': if field.remote_field.model == self.right_table.model: if self.right_table.field_prefix is None: self.right_table.field_prefix = field.name # depends on [control=['if'], data=[]] return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']] # depends on [control=['if'], data=[]]
def integrate_days(self, days=1.0, verbose=True): """Integrates the model forward for a specified number of days. It convertes the given number of days into years and calls :func:`integrate_years`. :param float days: integration time for the model in days [default: 1.0] :param bool verbose: information whether model time details should be printed [default: True] :Example: :: >>> import climlab >>> model = climlab.EBM() >>> model.global_mean_temperature() Field(11.997968598413685) >>> model.integrate_days(80.) Integrating for 19 steps, 80.0 days, or 0.219032740466 years. Total elapsed time is 0.211111111111 years. >>> model.global_mean_temperature() Field(11.873680783355553) """ years = days / const.days_per_year self.integrate_years(years=years, verbose=verbose)
def function[integrate_days, parameter[self, days, verbose]]: constant[Integrates the model forward for a specified number of days. It convertes the given number of days into years and calls :func:`integrate_years`. :param float days: integration time for the model in days [default: 1.0] :param bool verbose: information whether model time details should be printed [default: True] :Example: :: >>> import climlab >>> model = climlab.EBM() >>> model.global_mean_temperature() Field(11.997968598413685) >>> model.integrate_days(80.) Integrating for 19 steps, 80.0 days, or 0.219032740466 years. Total elapsed time is 0.211111111111 years. >>> model.global_mean_temperature() Field(11.873680783355553) ] variable[years] assign[=] binary_operation[name[days] / name[const].days_per_year] call[name[self].integrate_years, parameter[]]
keyword[def] identifier[integrate_days] ( identifier[self] , identifier[days] = literal[int] , identifier[verbose] = keyword[True] ): literal[string] identifier[years] = identifier[days] / identifier[const] . identifier[days_per_year] identifier[self] . identifier[integrate_years] ( identifier[years] = identifier[years] , identifier[verbose] = identifier[verbose] )
def integrate_days(self, days=1.0, verbose=True): """Integrates the model forward for a specified number of days. It convertes the given number of days into years and calls :func:`integrate_years`. :param float days: integration time for the model in days [default: 1.0] :param bool verbose: information whether model time details should be printed [default: True] :Example: :: >>> import climlab >>> model = climlab.EBM() >>> model.global_mean_temperature() Field(11.997968598413685) >>> model.integrate_days(80.) Integrating for 19 steps, 80.0 days, or 0.219032740466 years. Total elapsed time is 0.211111111111 years. >>> model.global_mean_temperature() Field(11.873680783355553) """ years = days / const.days_per_year self.integrate_years(years=years, verbose=verbose)
def mouse_move_event(self, event): """ Forward mouse cursor position events to the example """ self.example.mouse_position_event(event.x(), event.y())
def function[mouse_move_event, parameter[self, event]]: constant[ Forward mouse cursor position events to the example ] call[name[self].example.mouse_position_event, parameter[call[name[event].x, parameter[]], call[name[event].y, parameter[]]]]
keyword[def] identifier[mouse_move_event] ( identifier[self] , identifier[event] ): literal[string] identifier[self] . identifier[example] . identifier[mouse_position_event] ( identifier[event] . identifier[x] (), identifier[event] . identifier[y] ())
def mouse_move_event(self, event): """ Forward mouse cursor position events to the example """ self.example.mouse_position_event(event.x(), event.y())
def model_counts_spectrum(self, name, logemin, logemax, weighted=False): """Return the model counts spectrum of a source. Parameters ---------- name : str Source name. """ # EAC, we need this b/c older version of the ST don't have the right signature try: cs = np.array(self.like.logLike.modelCountsSpectrum( str(name), weighted)) except (TypeError, NotImplementedError): cs = np.array(self.like.logLike.modelCountsSpectrum(str(name))) imin = utils.val_to_edge(self.log_energies, logemin)[0] imax = utils.val_to_edge(self.log_energies, logemax)[0] if imax <= imin: raise Exception('Invalid energy range.') return cs[imin:imax]
def function[model_counts_spectrum, parameter[self, name, logemin, logemax, weighted]]: constant[Return the model counts spectrum of a source. Parameters ---------- name : str Source name. ] <ast.Try object at 0x7da20c6a8d60> variable[imin] assign[=] call[call[name[utils].val_to_edge, parameter[name[self].log_energies, name[logemin]]]][constant[0]] variable[imax] assign[=] call[call[name[utils].val_to_edge, parameter[name[self].log_energies, name[logemax]]]][constant[0]] if compare[name[imax] less_or_equal[<=] name[imin]] begin[:] <ast.Raise object at 0x7da207f995d0> return[call[name[cs]][<ast.Slice object at 0x7da207f9b250>]]
keyword[def] identifier[model_counts_spectrum] ( identifier[self] , identifier[name] , identifier[logemin] , identifier[logemax] , identifier[weighted] = keyword[False] ): literal[string] keyword[try] : identifier[cs] = identifier[np] . identifier[array] ( identifier[self] . identifier[like] . identifier[logLike] . identifier[modelCountsSpectrum] ( identifier[str] ( identifier[name] ), identifier[weighted] )) keyword[except] ( identifier[TypeError] , identifier[NotImplementedError] ): identifier[cs] = identifier[np] . identifier[array] ( identifier[self] . identifier[like] . identifier[logLike] . identifier[modelCountsSpectrum] ( identifier[str] ( identifier[name] ))) identifier[imin] = identifier[utils] . identifier[val_to_edge] ( identifier[self] . identifier[log_energies] , identifier[logemin] )[ literal[int] ] identifier[imax] = identifier[utils] . identifier[val_to_edge] ( identifier[self] . identifier[log_energies] , identifier[logemax] )[ literal[int] ] keyword[if] identifier[imax] <= identifier[imin] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[return] identifier[cs] [ identifier[imin] : identifier[imax] ]
def model_counts_spectrum(self, name, logemin, logemax, weighted=False): """Return the model counts spectrum of a source. Parameters ---------- name : str Source name. """ # EAC, we need this b/c older version of the ST don't have the right signature try: cs = np.array(self.like.logLike.modelCountsSpectrum(str(name), weighted)) # depends on [control=['try'], data=[]] except (TypeError, NotImplementedError): cs = np.array(self.like.logLike.modelCountsSpectrum(str(name))) # depends on [control=['except'], data=[]] imin = utils.val_to_edge(self.log_energies, logemin)[0] imax = utils.val_to_edge(self.log_energies, logemax)[0] if imax <= imin: raise Exception('Invalid energy range.') # depends on [control=['if'], data=[]] return cs[imin:imax]
def _set_cir(self, v, load=False): """ Setter method for cir, mapped from YANG variable /policy_map/class/police/cir (uint64) If this variable is read-only (config: false) in the source YANG file, then _set_cir is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cir() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """cir must be of a type compatible with uint64""", 'defined-type': "uint64", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True)""", }) self.__cir = t if hasattr(self, '_set'): self._set()
def function[_set_cir, parameter[self, v, load]]: constant[ Setter method for cir, mapped from YANG variable /policy_map/class/police/cir (uint64) If this variable is read-only (config: false) in the source YANG file, then _set_cir is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cir() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18fe91750> name[self].__cir assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_cir] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[long] , identifier[restriction_dict] ={ literal[string] :[ literal[string] ]}, identifier[int_size] = literal[int] ), identifier[restriction_dict] ={ literal[string] :[ literal[string] ]}), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__cir] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_cir(self, v, load=False): """ Setter method for cir, mapped from YANG variable /policy_map/class/police/cir (uint64) If this variable is read-only (config: false) in the source YANG file, then _set_cir is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cir() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), restriction_dict={'range': [u'40000..100000000000']}), is_leaf=True, yang_name='cir', rest_name='cir', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Committed Information Rate.', u'cli-suppress-no': None, u'cli-hide-in-submode': None}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='uint64', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'cir must be of a type compatible with uint64', 'defined-type': 'uint64', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={\'range\': [\'0..18446744073709551615\']}, int_size=64), restriction_dict={\'range\': [u\'40000..100000000000\']}), is_leaf=True, yang_name="cir", rest_name="cir", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Committed Information Rate.\', u\'cli-suppress-no\': None, u\'cli-hide-in-submode\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-policer\', defining_module=\'brocade-policer\', yang_type=\'uint64\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__cir = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], 
data=[]]
def activate_next(self, _previous=False): """ Activate next value. """ current = self.get_current_value() options = sorted(self.values.keys()) # Get current index. try: index = options.index(current) except ValueError: index = 0 # Go to previous/next index. if _previous: index -= 1 else: index += 1 # Call handler for this option. next_option = options[index % len(options)] self.values[next_option]()
def function[activate_next, parameter[self, _previous]]: constant[ Activate next value. ] variable[current] assign[=] call[name[self].get_current_value, parameter[]] variable[options] assign[=] call[name[sorted], parameter[call[name[self].values.keys, parameter[]]]] <ast.Try object at 0x7da1b08a4a90> if name[_previous] begin[:] <ast.AugAssign object at 0x7da1b0856620> variable[next_option] assign[=] call[name[options]][binary_operation[name[index] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[options]]]]] call[call[name[self].values][name[next_option]], parameter[]]
keyword[def] identifier[activate_next] ( identifier[self] , identifier[_previous] = keyword[False] ): literal[string] identifier[current] = identifier[self] . identifier[get_current_value] () identifier[options] = identifier[sorted] ( identifier[self] . identifier[values] . identifier[keys] ()) keyword[try] : identifier[index] = identifier[options] . identifier[index] ( identifier[current] ) keyword[except] identifier[ValueError] : identifier[index] = literal[int] keyword[if] identifier[_previous] : identifier[index] -= literal[int] keyword[else] : identifier[index] += literal[int] identifier[next_option] = identifier[options] [ identifier[index] % identifier[len] ( identifier[options] )] identifier[self] . identifier[values] [ identifier[next_option] ]()
def activate_next(self, _previous=False): """ Activate next value. """ current = self.get_current_value() options = sorted(self.values.keys()) # Get current index. try: index = options.index(current) # depends on [control=['try'], data=[]] except ValueError: index = 0 # depends on [control=['except'], data=[]] # Go to previous/next index. if _previous: index -= 1 # depends on [control=['if'], data=[]] else: index += 1 # Call handler for this option. next_option = options[index % len(options)] self.values[next_option]()
def agent_delete(self, agent_id, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/agents#delete-agent" api_path = "/api/v2/agents/{agent_id}" api_path = api_path.format(agent_id=agent_id) return self.call(api_path, method="DELETE", **kwargs)
def function[agent_delete, parameter[self, agent_id]]: constant[https://developer.zendesk.com/rest_api/docs/chat/agents#delete-agent] variable[api_path] assign[=] constant[/api/v2/agents/{agent_id}] variable[api_path] assign[=] call[name[api_path].format, parameter[]] return[call[name[self].call, parameter[name[api_path]]]]
keyword[def] identifier[agent_delete] ( identifier[self] , identifier[agent_id] ,** identifier[kwargs] ): literal[string] identifier[api_path] = literal[string] identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[agent_id] = identifier[agent_id] ) keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[method] = literal[string] ,** identifier[kwargs] )
def agent_delete(self, agent_id, **kwargs): """https://developer.zendesk.com/rest_api/docs/chat/agents#delete-agent""" api_path = '/api/v2/agents/{agent_id}' api_path = api_path.format(agent_id=agent_id) return self.call(api_path, method='DELETE', **kwargs)
def bound(self, instance): """ Return a new dispatcher, which will switch all command functions with bounded methods of given instance matched by name. It will match only regular methods. :param instance: object instance :type instance: object :return: new Dispatcher :rtype: CommandDispatcher """ bounded_dispatcher = CommandDispatcher() bounded_dispatcher.commands = self.commands.copy() for name in self.commands: method = getattr(instance, name, None) if method and inspect.ismethod(method) and method.__self__ == instance: bounded_dispatcher.commands[name] = method return bounded_dispatcher
def function[bound, parameter[self, instance]]: constant[ Return a new dispatcher, which will switch all command functions with bounded methods of given instance matched by name. It will match only regular methods. :param instance: object instance :type instance: object :return: new Dispatcher :rtype: CommandDispatcher ] variable[bounded_dispatcher] assign[=] call[name[CommandDispatcher], parameter[]] name[bounded_dispatcher].commands assign[=] call[name[self].commands.copy, parameter[]] for taget[name[name]] in starred[name[self].commands] begin[:] variable[method] assign[=] call[name[getattr], parameter[name[instance], name[name], constant[None]]] if <ast.BoolOp object at 0x7da18dc993f0> begin[:] call[name[bounded_dispatcher].commands][name[name]] assign[=] name[method] return[name[bounded_dispatcher]]
keyword[def] identifier[bound] ( identifier[self] , identifier[instance] ): literal[string] identifier[bounded_dispatcher] = identifier[CommandDispatcher] () identifier[bounded_dispatcher] . identifier[commands] = identifier[self] . identifier[commands] . identifier[copy] () keyword[for] identifier[name] keyword[in] identifier[self] . identifier[commands] : identifier[method] = identifier[getattr] ( identifier[instance] , identifier[name] , keyword[None] ) keyword[if] identifier[method] keyword[and] identifier[inspect] . identifier[ismethod] ( identifier[method] ) keyword[and] identifier[method] . identifier[__self__] == identifier[instance] : identifier[bounded_dispatcher] . identifier[commands] [ identifier[name] ]= identifier[method] keyword[return] identifier[bounded_dispatcher]
def bound(self, instance): """ Return a new dispatcher, which will switch all command functions with bounded methods of given instance matched by name. It will match only regular methods. :param instance: object instance :type instance: object :return: new Dispatcher :rtype: CommandDispatcher """ bounded_dispatcher = CommandDispatcher() bounded_dispatcher.commands = self.commands.copy() for name in self.commands: method = getattr(instance, name, None) if method and inspect.ismethod(method) and (method.__self__ == instance): bounded_dispatcher.commands[name] = method # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] return bounded_dispatcher
def read_header_at( cls, f): """ Given an open file-like object, read a block header from it and return it as a dict containing: * version (int) * prev_block_hash (hex str) * merkle_root (hex str) * timestamp (int) * bits (int) * nonce (ini) * hash (hex str) """ header_parser = BlockHeaderSerializer() hdr = header_parser.deserialize( f ) h = {} h['version'] = hdr.version h['prev_block_hash'] = "%064x" % hdr.prev_block h['merkle_root'] = "%064x" % hdr.merkle_root h['timestamp'] = hdr.timestamp h['bits'] = hdr.bits h['nonce'] = hdr.nonce h['hash'] = hdr.calculate_hash() return h
def function[read_header_at, parameter[cls, f]]: constant[ Given an open file-like object, read a block header from it and return it as a dict containing: * version (int) * prev_block_hash (hex str) * merkle_root (hex str) * timestamp (int) * bits (int) * nonce (ini) * hash (hex str) ] variable[header_parser] assign[=] call[name[BlockHeaderSerializer], parameter[]] variable[hdr] assign[=] call[name[header_parser].deserialize, parameter[name[f]]] variable[h] assign[=] dictionary[[], []] call[name[h]][constant[version]] assign[=] name[hdr].version call[name[h]][constant[prev_block_hash]] assign[=] binary_operation[constant[%064x] <ast.Mod object at 0x7da2590d6920> name[hdr].prev_block] call[name[h]][constant[merkle_root]] assign[=] binary_operation[constant[%064x] <ast.Mod object at 0x7da2590d6920> name[hdr].merkle_root] call[name[h]][constant[timestamp]] assign[=] name[hdr].timestamp call[name[h]][constant[bits]] assign[=] name[hdr].bits call[name[h]][constant[nonce]] assign[=] name[hdr].nonce call[name[h]][constant[hash]] assign[=] call[name[hdr].calculate_hash, parameter[]] return[name[h]]
keyword[def] identifier[read_header_at] ( identifier[cls] , identifier[f] ): literal[string] identifier[header_parser] = identifier[BlockHeaderSerializer] () identifier[hdr] = identifier[header_parser] . identifier[deserialize] ( identifier[f] ) identifier[h] ={} identifier[h] [ literal[string] ]= identifier[hdr] . identifier[version] identifier[h] [ literal[string] ]= literal[string] % identifier[hdr] . identifier[prev_block] identifier[h] [ literal[string] ]= literal[string] % identifier[hdr] . identifier[merkle_root] identifier[h] [ literal[string] ]= identifier[hdr] . identifier[timestamp] identifier[h] [ literal[string] ]= identifier[hdr] . identifier[bits] identifier[h] [ literal[string] ]= identifier[hdr] . identifier[nonce] identifier[h] [ literal[string] ]= identifier[hdr] . identifier[calculate_hash] () keyword[return] identifier[h]
def read_header_at(cls, f): """ Given an open file-like object, read a block header from it and return it as a dict containing: * version (int) * prev_block_hash (hex str) * merkle_root (hex str) * timestamp (int) * bits (int) * nonce (ini) * hash (hex str) """ header_parser = BlockHeaderSerializer() hdr = header_parser.deserialize(f) h = {} h['version'] = hdr.version h['prev_block_hash'] = '%064x' % hdr.prev_block h['merkle_root'] = '%064x' % hdr.merkle_root h['timestamp'] = hdr.timestamp h['bits'] = hdr.bits h['nonce'] = hdr.nonce h['hash'] = hdr.calculate_hash() return h
def top_intent( results: RecognizerResult, default_intent: str = "None", min_score: float = 0.0 ) -> str: """Returns the name of the top scoring intent from a set of LUIS results. :param results: Result set to be searched. :type results: RecognizerResult :param default_intent: Intent name to return should a top intent be found, defaults to "None" :param default_intent: str, optional :param min_score: Minimum score needed for an intent to be considered as a top intent. If all intents in the set are below this threshold then the `defaultIntent` will be returned, defaults to 0.0 :param min_score: float, optional :raises TypeError: :return: The top scoring intent name. :rtype: str """ if results is None: raise TypeError("LuisRecognizer.top_intent(): results cannot be None.") top_intent: str = None top_score: float = -1.0 if results.intents: for intent_name, intent_score in results.intents.items(): score = intent_score.score if score > top_score and score >= min_score: top_intent = intent_name top_score = score return top_intent or default_intent
def function[top_intent, parameter[results, default_intent, min_score]]: constant[Returns the name of the top scoring intent from a set of LUIS results. :param results: Result set to be searched. :type results: RecognizerResult :param default_intent: Intent name to return should a top intent be found, defaults to "None" :param default_intent: str, optional :param min_score: Minimum score needed for an intent to be considered as a top intent. If all intents in the set are below this threshold then the `defaultIntent` will be returned, defaults to 0.0 :param min_score: float, optional :raises TypeError: :return: The top scoring intent name. :rtype: str ] if compare[name[results] is constant[None]] begin[:] <ast.Raise object at 0x7da1b055ea10> <ast.AnnAssign object at 0x7da1b055d960> <ast.AnnAssign object at 0x7da1b055d6c0> if name[results].intents begin[:] for taget[tuple[[<ast.Name object at 0x7da1b055c2b0>, <ast.Name object at 0x7da1b055d720>]]] in starred[call[name[results].intents.items, parameter[]]] begin[:] variable[score] assign[=] name[intent_score].score if <ast.BoolOp object at 0x7da1b055ed40> begin[:] variable[top_intent] assign[=] name[intent_name] variable[top_score] assign[=] name[score] return[<ast.BoolOp object at 0x7da20c993640>]
keyword[def] identifier[top_intent] ( identifier[results] : identifier[RecognizerResult] , identifier[default_intent] : identifier[str] = literal[string] , identifier[min_score] : identifier[float] = literal[int] )-> identifier[str] : literal[string] keyword[if] identifier[results] keyword[is] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[top_intent] : identifier[str] = keyword[None] identifier[top_score] : identifier[float] =- literal[int] keyword[if] identifier[results] . identifier[intents] : keyword[for] identifier[intent_name] , identifier[intent_score] keyword[in] identifier[results] . identifier[intents] . identifier[items] (): identifier[score] = identifier[intent_score] . identifier[score] keyword[if] identifier[score] > identifier[top_score] keyword[and] identifier[score] >= identifier[min_score] : identifier[top_intent] = identifier[intent_name] identifier[top_score] = identifier[score] keyword[return] identifier[top_intent] keyword[or] identifier[default_intent]
def top_intent(results: RecognizerResult, default_intent: str='None', min_score: float=0.0) -> str: """Returns the name of the top scoring intent from a set of LUIS results. :param results: Result set to be searched. :type results: RecognizerResult :param default_intent: Intent name to return should a top intent be found, defaults to "None" :param default_intent: str, optional :param min_score: Minimum score needed for an intent to be considered as a top intent. If all intents in the set are below this threshold then the `defaultIntent` will be returned, defaults to 0.0 :param min_score: float, optional :raises TypeError: :return: The top scoring intent name. :rtype: str """ if results is None: raise TypeError('LuisRecognizer.top_intent(): results cannot be None.') # depends on [control=['if'], data=[]] top_intent: str = None top_score: float = -1.0 if results.intents: for (intent_name, intent_score) in results.intents.items(): score = intent_score.score if score > top_score and score >= min_score: top_intent = intent_name top_score = score # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] return top_intent or default_intent
def readline(self): """ Get the next line from the input buffer. """ self.line_number += 1 if self.line_number > len(self.lines): return '' return self.lines[self.line_number - 1]
def function[readline, parameter[self]]: constant[ Get the next line from the input buffer. ] <ast.AugAssign object at 0x7da1b0a22140> if compare[name[self].line_number greater[>] call[name[len], parameter[name[self].lines]]] begin[:] return[constant[]] return[call[name[self].lines][binary_operation[name[self].line_number - constant[1]]]]
keyword[def] identifier[readline] ( identifier[self] ): literal[string] identifier[self] . identifier[line_number] += literal[int] keyword[if] identifier[self] . identifier[line_number] > identifier[len] ( identifier[self] . identifier[lines] ): keyword[return] literal[string] keyword[return] identifier[self] . identifier[lines] [ identifier[self] . identifier[line_number] - literal[int] ]
def readline(self): """ Get the next line from the input buffer. """ self.line_number += 1 if self.line_number > len(self.lines): return '' # depends on [control=['if'], data=[]] return self.lines[self.line_number - 1]
def exit_statistics(hostname, start_time, count_sent, count_received, min_time, avg_time, max_time, deviation): """ Print ping exit statistics """ end_time = datetime.datetime.now() duration = end_time - start_time duration_sec = float(duration.seconds * 1000) duration_ms = float(duration.microseconds / 1000) duration = duration_sec + duration_ms package_loss = 100 - ((float(count_received) / float(count_sent)) * 100) print(f'\b\b--- {hostname} ping statistics ---') try: print(f'{count_sent} packages transmitted, {count_received} received, {package_loss}% package loss, time {duration}ms') except ZeroDivisionError: print(f'{count_sent} packets transmitted, {count_received} received, 100% packet loss, time {duration}ms') print( 'rtt min/avg/max/dev = %.2f/%.2f/%.2f/%.2f ms' % ( min_time.seconds*1000 + float(min_time.microseconds)/1000, float(avg_time) / 1000, max_time.seconds*1000 + float(max_time.microseconds)/1000, float(deviation) ) )
def function[exit_statistics, parameter[hostname, start_time, count_sent, count_received, min_time, avg_time, max_time, deviation]]: constant[ Print ping exit statistics ] variable[end_time] assign[=] call[name[datetime].datetime.now, parameter[]] variable[duration] assign[=] binary_operation[name[end_time] - name[start_time]] variable[duration_sec] assign[=] call[name[float], parameter[binary_operation[name[duration].seconds * constant[1000]]]] variable[duration_ms] assign[=] call[name[float], parameter[binary_operation[name[duration].microseconds / constant[1000]]]] variable[duration] assign[=] binary_operation[name[duration_sec] + name[duration_ms]] variable[package_loss] assign[=] binary_operation[constant[100] - binary_operation[binary_operation[call[name[float], parameter[name[count_received]]] / call[name[float], parameter[name[count_sent]]]] * constant[100]]] call[name[print], parameter[<ast.JoinedStr object at 0x7da1b257c310>]] <ast.Try object at 0x7da1b257f580> call[name[print], parameter[binary_operation[constant[rtt min/avg/max/dev = %.2f/%.2f/%.2f/%.2f ms] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da20c6abd00>, <ast.BinOp object at 0x7da20c6a9030>, <ast.BinOp object at 0x7da20c6aaa70>, <ast.Call object at 0x7da1b253b700>]]]]]
keyword[def] identifier[exit_statistics] ( identifier[hostname] , identifier[start_time] , identifier[count_sent] , identifier[count_received] , identifier[min_time] , identifier[avg_time] , identifier[max_time] , identifier[deviation] ): literal[string] identifier[end_time] = identifier[datetime] . identifier[datetime] . identifier[now] () identifier[duration] = identifier[end_time] - identifier[start_time] identifier[duration_sec] = identifier[float] ( identifier[duration] . identifier[seconds] * literal[int] ) identifier[duration_ms] = identifier[float] ( identifier[duration] . identifier[microseconds] / literal[int] ) identifier[duration] = identifier[duration_sec] + identifier[duration_ms] identifier[package_loss] = literal[int] -(( identifier[float] ( identifier[count_received] )/ identifier[float] ( identifier[count_sent] ))* literal[int] ) identifier[print] ( literal[string] ) keyword[try] : identifier[print] ( literal[string] ) keyword[except] identifier[ZeroDivisionError] : identifier[print] ( literal[string] ) identifier[print] ( literal[string] %( identifier[min_time] . identifier[seconds] * literal[int] + identifier[float] ( identifier[min_time] . identifier[microseconds] )/ literal[int] , identifier[float] ( identifier[avg_time] )/ literal[int] , identifier[max_time] . identifier[seconds] * literal[int] + identifier[float] ( identifier[max_time] . identifier[microseconds] )/ literal[int] , identifier[float] ( identifier[deviation] ) ) )
def exit_statistics(hostname, start_time, count_sent, count_received, min_time, avg_time, max_time, deviation): """ Print ping exit statistics """ end_time = datetime.datetime.now() duration = end_time - start_time duration_sec = float(duration.seconds * 1000) duration_ms = float(duration.microseconds / 1000) duration = duration_sec + duration_ms package_loss = 100 - float(count_received) / float(count_sent) * 100 print(f'\x08\x08--- {hostname} ping statistics ---') try: print(f'{count_sent} packages transmitted, {count_received} received, {package_loss}% package loss, time {duration}ms') # depends on [control=['try'], data=[]] except ZeroDivisionError: print(f'{count_sent} packets transmitted, {count_received} received, 100% packet loss, time {duration}ms') # depends on [control=['except'], data=[]] print('rtt min/avg/max/dev = %.2f/%.2f/%.2f/%.2f ms' % (min_time.seconds * 1000 + float(min_time.microseconds) / 1000, float(avg_time) / 1000, max_time.seconds * 1000 + float(max_time.microseconds) / 1000, float(deviation)))
def init_date_constraints(self, ancestral_inference=False, clock_rate=None, **kwarks):
    """
    Get the conversion coefficients between the dates and the branch
    lengths as they are used in ML computations. The conversion formula is
    assumed to be 'length = k*numdate + b'. For convenience, these
    coefficients as well as regression parameters are stored in the
    'dates2dist' object.

    .. Note:: The tree must have dates set to all nodes
              before calling this function.

    Parameters
    ----------
     ancestral_inference: bool
        If True, reinfer ancestral sequences
        when ancestral sequences are missing

     clock_rate: float
        If specified, timetree optimization will be done assuming a
        fixed clock rate as specified
    """
    self.logger("ClockTree.init_date_constraints...",2)
    # Reset the joint likelihood contribution of the coalescent model.
    self.tree.coalescent_joint_LH = 0
    # Reconstruct ancestral sequences when requested, or when the root has
    # none yet (required before branch lengths can be re-estimated).
    if self.aln and (ancestral_inference or (not hasattr(self.tree.root, 'sequence'))):
        self.infer_ancestral_sequences('probabilistic', marginal=self.branch_length_mode=='marginal',
                                       sample_from_profile='root',**kwarks)

    # set the None for the date-related attributes in the internal nodes.
    # make interpolation objects for the branches
    self.logger('ClockTree.init_date_constraints: Initializing branch length interpolation objects...',3)
    has_clock_length = []
    for node in self.tree.find_clades(order='postorder'):
        if node.up is None:
            # The root has no branch above it, hence no interpolator.
            node.branch_length_interpolator = None
        else:
            has_clock_length.append(hasattr(node, 'clock_length'))
            # copy the merger rate and gamma if they are set
            if hasattr(node,'branch_length_interpolator') and node.branch_length_interpolator is not None:
                gamma = node.branch_length_interpolator.gamma
                merger_cost = node.branch_length_interpolator.merger_cost
            else:
                gamma = 1.0
                merger_cost = None
            if self.branch_length_mode=='marginal':
                node.profile_pair = self.marginal_branch_profile(node)
            # Fresh interpolator for this branch; previously saved gamma and
            # merger_cost are restored onto it below.
            node.branch_length_interpolator = BranchLenInterpolator(node, self.gtr, pattern_multiplicity = self.multiplicity, min_width=self.min_width, one_mutation=self.one_mutation, branch_length_mode=self.branch_length_mode)
            node.branch_length_interpolator.merger_cost = merger_cost
            node.branch_length_interpolator.gamma = gamma

    # use covariance in clock model only after initial timetree estimation is done
    # (heuristic: at least 70% of non-root nodes must already carry a clock_length)
    use_cov = (np.sum(has_clock_length) > len(has_clock_length)*0.7) and self.use_covariation
    self.get_clock_model(covariation=use_cov, slope=clock_rate)

    # make node distribution objects
    for node in self.tree.find_clades(order="postorder"):
        # node is constrained
        if hasattr(node, 'raw_date_constraint') and node.raw_date_constraint is not None:
            # set the absolute time before present in branch length units
            if np.isscalar(node.raw_date_constraint):
                # A single date becomes a delta-function constraint.
                tbp = self.date2dist.get_time_before_present(node.raw_date_constraint)
                node.date_constraint = Distribution.delta_function(tbp, weight=1.0, min_width=self.min_width)
            else:
                # An interval/array of dates becomes a flat distribution over it.
                tbp = self.date2dist.get_time_before_present(np.array(node.raw_date_constraint))
                node.date_constraint = Distribution(tbp, np.ones_like(tbp), is_log=False, min_width=self.min_width)

            if hasattr(node, 'bad_branch') and node.bad_branch is True:
                self.logger("ClockTree.init_date_constraints -- WARNING: Branch is marked as bad"
                            ", excluding it from the optimization process.\n"
                            "\t\tDate constraint will be ignored!", 4, warn=True)
        else: # node without sampling date set
            node.raw_date_constraint = None
            node.date_constraint = None
def function[init_date_constraints, parameter[self, ancestral_inference, clock_rate]]: constant[ Get the conversion coefficients between the dates and the branch lengths as they are used in ML computations. The conversion formula is assumed to be 'length = k*numdate + b'. For convenience, these coefficients as well as regression parameters are stored in the 'dates2dist' object. .. Note:: The tree must have dates set to all nodes before calling this function. Parameters ---------- ancestral_inference: bool If True, reinfer ancestral sequences when ancestral sequences are missing clock_rate: float If specified, timetree optimization will be done assuming a fixed clock rate as specified ] call[name[self].logger, parameter[constant[ClockTree.init_date_constraints...], constant[2]]] name[self].tree.coalescent_joint_LH assign[=] constant[0] if <ast.BoolOp object at 0x7da2054a74f0> begin[:] call[name[self].infer_ancestral_sequences, parameter[constant[probabilistic]]] call[name[self].logger, parameter[constant[ClockTree.init_date_constraints: Initializing branch length interpolation objects...], constant[3]]] variable[has_clock_length] assign[=] list[[]] for taget[name[node]] in starred[call[name[self].tree.find_clades, parameter[]]] begin[:] if compare[name[node].up is constant[None]] begin[:] name[node].branch_length_interpolator assign[=] constant[None] variable[use_cov] assign[=] <ast.BoolOp object at 0x7da2054a4af0> call[name[self].get_clock_model, parameter[]] for taget[name[node]] in starred[call[name[self].tree.find_clades, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da2054a4100> begin[:] if call[name[np].isscalar, parameter[name[node].raw_date_constraint]] begin[:] variable[tbp] assign[=] call[name[self].date2dist.get_time_before_present, parameter[name[node].raw_date_constraint]] name[node].date_constraint assign[=] call[name[Distribution].delta_function, parameter[name[tbp]]] if <ast.BoolOp object at 0x7da2054a4fa0> begin[:] call[name[self].logger, 
parameter[constant[ClockTree.init_date_constraints -- WARNING: Branch is marked as bad, excluding it from the optimization process. Date constraint will be ignored!], constant[4]]]
keyword[def] identifier[init_date_constraints] ( identifier[self] , identifier[ancestral_inference] = keyword[False] , identifier[clock_rate] = keyword[None] ,** identifier[kwarks] ): literal[string] identifier[self] . identifier[logger] ( literal[string] , literal[int] ) identifier[self] . identifier[tree] . identifier[coalescent_joint_LH] = literal[int] keyword[if] identifier[self] . identifier[aln] keyword[and] ( identifier[ancestral_inference] keyword[or] ( keyword[not] identifier[hasattr] ( identifier[self] . identifier[tree] . identifier[root] , literal[string] ))): identifier[self] . identifier[infer_ancestral_sequences] ( literal[string] , identifier[marginal] = identifier[self] . identifier[branch_length_mode] == literal[string] , identifier[sample_from_profile] = literal[string] ,** identifier[kwarks] ) identifier[self] . identifier[logger] ( literal[string] , literal[int] ) identifier[has_clock_length] =[] keyword[for] identifier[node] keyword[in] identifier[self] . identifier[tree] . identifier[find_clades] ( identifier[order] = literal[string] ): keyword[if] identifier[node] . identifier[up] keyword[is] keyword[None] : identifier[node] . identifier[branch_length_interpolator] = keyword[None] keyword[else] : identifier[has_clock_length] . identifier[append] ( identifier[hasattr] ( identifier[node] , literal[string] )) keyword[if] identifier[hasattr] ( identifier[node] , literal[string] ) keyword[and] identifier[node] . identifier[branch_length_interpolator] keyword[is] keyword[not] keyword[None] : identifier[gamma] = identifier[node] . identifier[branch_length_interpolator] . identifier[gamma] identifier[merger_cost] = identifier[node] . identifier[branch_length_interpolator] . identifier[merger_cost] keyword[else] : identifier[gamma] = literal[int] identifier[merger_cost] = keyword[None] keyword[if] identifier[self] . identifier[branch_length_mode] == literal[string] : identifier[node] . identifier[profile_pair] = identifier[self] . 
identifier[marginal_branch_profile] ( identifier[node] ) identifier[node] . identifier[branch_length_interpolator] = identifier[BranchLenInterpolator] ( identifier[node] , identifier[self] . identifier[gtr] , identifier[pattern_multiplicity] = identifier[self] . identifier[multiplicity] , identifier[min_width] = identifier[self] . identifier[min_width] , identifier[one_mutation] = identifier[self] . identifier[one_mutation] , identifier[branch_length_mode] = identifier[self] . identifier[branch_length_mode] ) identifier[node] . identifier[branch_length_interpolator] . identifier[merger_cost] = identifier[merger_cost] identifier[node] . identifier[branch_length_interpolator] . identifier[gamma] = identifier[gamma] identifier[use_cov] =( identifier[np] . identifier[sum] ( identifier[has_clock_length] )> identifier[len] ( identifier[has_clock_length] )* literal[int] ) keyword[and] identifier[self] . identifier[use_covariation] identifier[self] . identifier[get_clock_model] ( identifier[covariation] = identifier[use_cov] , identifier[slope] = identifier[clock_rate] ) keyword[for] identifier[node] keyword[in] identifier[self] . identifier[tree] . identifier[find_clades] ( identifier[order] = literal[string] ): keyword[if] identifier[hasattr] ( identifier[node] , literal[string] ) keyword[and] identifier[node] . identifier[raw_date_constraint] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[np] . identifier[isscalar] ( identifier[node] . identifier[raw_date_constraint] ): identifier[tbp] = identifier[self] . identifier[date2dist] . identifier[get_time_before_present] ( identifier[node] . identifier[raw_date_constraint] ) identifier[node] . identifier[date_constraint] = identifier[Distribution] . identifier[delta_function] ( identifier[tbp] , identifier[weight] = literal[int] , identifier[min_width] = identifier[self] . identifier[min_width] ) keyword[else] : identifier[tbp] = identifier[self] . identifier[date2dist] . 
identifier[get_time_before_present] ( identifier[np] . identifier[array] ( identifier[node] . identifier[raw_date_constraint] )) identifier[node] . identifier[date_constraint] = identifier[Distribution] ( identifier[tbp] , identifier[np] . identifier[ones_like] ( identifier[tbp] ), identifier[is_log] = keyword[False] , identifier[min_width] = identifier[self] . identifier[min_width] ) keyword[if] identifier[hasattr] ( identifier[node] , literal[string] ) keyword[and] identifier[node] . identifier[bad_branch] keyword[is] keyword[True] : identifier[self] . identifier[logger] ( literal[string] literal[string] literal[string] , literal[int] , identifier[warn] = keyword[True] ) keyword[else] : identifier[node] . identifier[raw_date_constraint] = keyword[None] identifier[node] . identifier[date_constraint] = keyword[None]
def init_date_constraints(self, ancestral_inference=False, clock_rate=None, **kwarks): """ Get the conversion coefficients between the dates and the branch lengths as they are used in ML computations. The conversion formula is assumed to be 'length = k*numdate + b'. For convenience, these coefficients as well as regression parameters are stored in the 'dates2dist' object. .. Note:: The tree must have dates set to all nodes before calling this function. Parameters ---------- ancestral_inference: bool If True, reinfer ancestral sequences when ancestral sequences are missing clock_rate: float If specified, timetree optimization will be done assuming a fixed clock rate as specified """ self.logger('ClockTree.init_date_constraints...', 2) self.tree.coalescent_joint_LH = 0 if self.aln and (ancestral_inference or not hasattr(self.tree.root, 'sequence')): self.infer_ancestral_sequences('probabilistic', marginal=self.branch_length_mode == 'marginal', sample_from_profile='root', **kwarks) # depends on [control=['if'], data=[]] # set the None for the date-related attributes in the internal nodes. 
# make interpolation objects for the branches self.logger('ClockTree.init_date_constraints: Initializing branch length interpolation objects...', 3) has_clock_length = [] for node in self.tree.find_clades(order='postorder'): if node.up is None: node.branch_length_interpolator = None # depends on [control=['if'], data=[]] else: has_clock_length.append(hasattr(node, 'clock_length')) # copy the merger rate and gamma if they are set if hasattr(node, 'branch_length_interpolator') and node.branch_length_interpolator is not None: gamma = node.branch_length_interpolator.gamma merger_cost = node.branch_length_interpolator.merger_cost # depends on [control=['if'], data=[]] else: gamma = 1.0 merger_cost = None if self.branch_length_mode == 'marginal': node.profile_pair = self.marginal_branch_profile(node) # depends on [control=['if'], data=[]] node.branch_length_interpolator = BranchLenInterpolator(node, self.gtr, pattern_multiplicity=self.multiplicity, min_width=self.min_width, one_mutation=self.one_mutation, branch_length_mode=self.branch_length_mode) node.branch_length_interpolator.merger_cost = merger_cost node.branch_length_interpolator.gamma = gamma # depends on [control=['for'], data=['node']] # use covariance in clock model only after initial timetree estimation is done use_cov = np.sum(has_clock_length) > len(has_clock_length) * 0.7 and self.use_covariation self.get_clock_model(covariation=use_cov, slope=clock_rate) # make node distribution objects for node in self.tree.find_clades(order='postorder'): # node is constrained if hasattr(node, 'raw_date_constraint') and node.raw_date_constraint is not None: # set the absolute time before present in branch length units if np.isscalar(node.raw_date_constraint): tbp = self.date2dist.get_time_before_present(node.raw_date_constraint) node.date_constraint = Distribution.delta_function(tbp, weight=1.0, min_width=self.min_width) # depends on [control=['if'], data=[]] else: tbp = 
self.date2dist.get_time_before_present(np.array(node.raw_date_constraint)) node.date_constraint = Distribution(tbp, np.ones_like(tbp), is_log=False, min_width=self.min_width) if hasattr(node, 'bad_branch') and node.bad_branch is True: self.logger('ClockTree.init_date_constraints -- WARNING: Branch is marked as bad, excluding it from the optimization process.\n\t\tDate constraint will be ignored!', 4, warn=True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # node without sampling date set node.raw_date_constraint = None node.date_constraint = None # depends on [control=['for'], data=['node']]
def commonancestors(Class, *args):
    """Generator yielding the common ancestors of a particular type shared
    by two or more FoLiA element instances.

    Ancestors of the requested type are produced in order, from the closest
    shared ancestor up to the most distant one.

    Parameters:
        Class: The type of ancestor to find, should be the
            :class:`AbstractElement` class or any subclass thereof
            (not an instance!)
        *args: The elements to find the common ancestors of, elements are
            instances derived from :class:`AbstractElement`

    Yields:
        instance derived from :class:`AbstractElement`: A common ancestor
        of the arguments, an instance of the specified ``Class``.
    """
    shared = None
    for element in args:
        lineage = list(element.ancestors(Class))
        if shared is None:
            # First element: start from its complete ancestor chain.
            shared = list(lineage)
        else:
            # Keep only ancestors this element also has, preserving order.
            shared = [ancestor for ancestor in shared if ancestor in lineage]
    if shared:
        for ancestor in shared:
            yield ancestor
def function[commonancestors, parameter[Class]]: constant[Generator function to find common ancestors of a particular type for any two or more FoLiA element instances. The function produces all common ancestors of the type specified, starting from the closest one up to the most distant one. Parameters: Class: The type of ancestor to find, should be the :class:`AbstractElement` class or any subclass thereof (not an instance!) *args: The elements to find the common ancestors of, elements are instances derived from :class:`AbstractElement` Yields: instance derived from :class:`AbstractElement`: A common ancestor of the arguments, an instance of the specified ``Class``. ] variable[commonancestors] assign[=] constant[None] for taget[name[sibling]] in starred[name[args]] begin[:] variable[ancestors] assign[=] call[name[list], parameter[call[name[sibling].ancestors, parameter[name[Class]]]]] if compare[name[commonancestors] is constant[None]] begin[:] variable[commonancestors] assign[=] call[name[copy], parameter[name[ancestors]]] if name[commonancestors] begin[:] for taget[name[commonancestor]] in starred[name[commonancestors]] begin[:] <ast.Yield object at 0x7da20c990d90>
keyword[def] identifier[commonancestors] ( identifier[Class] ,* identifier[args] ): literal[string] identifier[commonancestors] = keyword[None] keyword[for] identifier[sibling] keyword[in] identifier[args] : identifier[ancestors] = identifier[list] ( identifier[sibling] . identifier[ancestors] ( identifier[Class] )) keyword[if] identifier[commonancestors] keyword[is] keyword[None] : identifier[commonancestors] = identifier[copy] ( identifier[ancestors] ) keyword[else] : identifier[removeancestors] =[] keyword[for] identifier[a] keyword[in] identifier[commonancestors] : keyword[if] keyword[not] identifier[a] keyword[in] identifier[ancestors] : identifier[removeancestors] . identifier[append] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[removeancestors] : identifier[commonancestors] . identifier[remove] ( identifier[a] ) keyword[if] identifier[commonancestors] : keyword[for] identifier[commonancestor] keyword[in] identifier[commonancestors] : keyword[yield] identifier[commonancestor]
def commonancestors(Class, *args): """Generator function to find common ancestors of a particular type for any two or more FoLiA element instances. The function produces all common ancestors of the type specified, starting from the closest one up to the most distant one. Parameters: Class: The type of ancestor to find, should be the :class:`AbstractElement` class or any subclass thereof (not an instance!) *args: The elements to find the common ancestors of, elements are instances derived from :class:`AbstractElement` Yields: instance derived from :class:`AbstractElement`: A common ancestor of the arguments, an instance of the specified ``Class``. """ commonancestors = None #pylint: disable=redefined-outer-name for sibling in args: ancestors = list(sibling.ancestors(Class)) if commonancestors is None: commonancestors = copy(ancestors) # depends on [control=['if'], data=['commonancestors']] else: removeancestors = [] for a in commonancestors: #pylint: disable=not-an-iterable if not a in ancestors: removeancestors.append(a) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']] for a in removeancestors: commonancestors.remove(a) # depends on [control=['for'], data=['a']] # depends on [control=['for'], data=['sibling']] if commonancestors: for commonancestor in commonancestors: yield commonancestor # depends on [control=['for'], data=['commonancestor']] # depends on [control=['if'], data=[]]
def copy(self):
    """Create a copy of the current instance.

    :returns: A safely editable copy of the current sequence.
    :rtype: coral.Peptide
    """
    # Instantiate the same (possibly derived) class; checks are skipped
    # because the source instance was already validated.
    cls = type(self)
    sequence_str = str(self._sequence)
    return cls(sequence_str, features=self.features, run_checks=False)
def function[copy, parameter[self]]: constant[Create a copy of the current instance. :returns: A safely editable copy of the current sequence. :rtype: coral.Peptide ] return[call[call[name[type], parameter[name[self]]], parameter[call[name[str], parameter[name[self]._sequence]]]]]
keyword[def] identifier[copy] ( identifier[self] ): literal[string] keyword[return] identifier[type] ( identifier[self] )( identifier[str] ( identifier[self] . identifier[_sequence] ), identifier[features] = identifier[self] . identifier[features] , identifier[run_checks] = keyword[False] )
def copy(self): """Create a copy of the current instance. :returns: A safely editable copy of the current sequence. :rtype: coral.Peptide """ return type(self)(str(self._sequence), features=self.features, run_checks=False)
def get_total_size(self, entries):
    """
    Returns the total size of a collection of entries.

    :param entries: ``list`` of entries to calculate the total size of.
    """
    # Only strictly positive body sizes count; non-positive values are
    # skipped — presumably the HAR "-1 = unknown size" convention, TODO confirm.
    return sum(
        entry['response']['bodySize']
        for entry in entries
        if entry['response']['bodySize'] > 0
    )
def function[get_total_size, parameter[self, entries]]: constant[ Returns the total size of a collection of entries. :param entries: ``list`` of entries to calculate the total size of. ] variable[size] assign[=] constant[0] for taget[name[entry]] in starred[name[entries]] begin[:] if compare[call[call[name[entry]][constant[response]]][constant[bodySize]] greater[>] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b26af8e0> return[name[size]]
keyword[def] identifier[get_total_size] ( identifier[self] , identifier[entries] ): literal[string] identifier[size] = literal[int] keyword[for] identifier[entry] keyword[in] identifier[entries] : keyword[if] identifier[entry] [ literal[string] ][ literal[string] ]> literal[int] : identifier[size] += identifier[entry] [ literal[string] ][ literal[string] ] keyword[return] identifier[size]
def get_total_size(self, entries): """ Returns the total size of a collection of entries. :param entries: ``list`` of entries to calculate the total size of. """ size = 0 for entry in entries: if entry['response']['bodySize'] > 0: size += entry['response']['bodySize'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']] return size
def pad(a, desiredlength):
    """
    Pad an n-dimensional numpy array with zeros along the zero-th dimension
    so that it is the desired length. Return it unchanged if it is greater
    than or equal to the desired length.
    """
    if len(a) >= desiredlength:
        # Already long enough — hand back the input untouched.
        return a
    was_list = isinstance(a, list)
    arr = np.array(a)
    # The zero block matches the trailing dimensions; only axis 0 grows.
    fill_shape = (desiredlength - len(arr),) + arr.shape[1:]
    padded = np.concatenate([arr, np.zeros(fill_shape, dtype=arr.dtype)])
    # Lists in, lists out: mirror the input container type.
    return padded.tolist() if was_list else padded
def function[pad, parameter[a, desiredlength]]: constant[ Pad an n-dimensional numpy array with zeros along the zero-th dimension so that it is the desired length. Return it unchanged if it is greater than or equal to the desired length ] if compare[call[name[len], parameter[name[a]]] greater_or_equal[>=] name[desiredlength]] begin[:] return[name[a]] variable[islist] assign[=] call[name[isinstance], parameter[name[a], name[list]]] variable[a] assign[=] call[name[np].array, parameter[name[a]]] variable[diff] assign[=] binary_operation[name[desiredlength] - call[name[len], parameter[name[a]]]] variable[shape] assign[=] call[name[list], parameter[name[a].shape]] call[name[shape]][constant[0]] assign[=] name[diff] variable[padded] assign[=] call[name[np].concatenate, parameter[list[[<ast.Name object at 0x7da1b19d2b30>, <ast.Call object at 0x7da1b19d2b60>]]]] return[<ast.IfExp object at 0x7da1b19d0490>]
keyword[def] identifier[pad] ( identifier[a] , identifier[desiredlength] ): literal[string] keyword[if] identifier[len] ( identifier[a] )>= identifier[desiredlength] : keyword[return] identifier[a] identifier[islist] = identifier[isinstance] ( identifier[a] , identifier[list] ) identifier[a] = identifier[np] . identifier[array] ( identifier[a] ) identifier[diff] = identifier[desiredlength] - identifier[len] ( identifier[a] ) identifier[shape] = identifier[list] ( identifier[a] . identifier[shape] ) identifier[shape] [ literal[int] ]= identifier[diff] identifier[padded] = identifier[np] . identifier[concatenate] ([ identifier[a] , identifier[np] . identifier[zeros] ( identifier[shape] , identifier[dtype] = identifier[a] . identifier[dtype] )]) keyword[return] identifier[padded] . identifier[tolist] () keyword[if] identifier[islist] keyword[else] identifier[padded]
def pad(a, desiredlength): """ Pad an n-dimensional numpy array with zeros along the zero-th dimension so that it is the desired length. Return it unchanged if it is greater than or equal to the desired length """ if len(a) >= desiredlength: return a # depends on [control=['if'], data=[]] islist = isinstance(a, list) a = np.array(a) diff = desiredlength - len(a) shape = list(a.shape) shape[0] = diff padded = np.concatenate([a, np.zeros(shape, dtype=a.dtype)]) return padded.tolist() if islist else padded
def get_supported_versions(self):
    """
    Gets a list of supported U2F versions from the device.
    """
    if hasattr(self, '_versions'):
        # Cached from an earlier call; the device is not queried again.
        return self._versions
    try:
        versions = [self.send_apdu(INS_GET_VERSION).decode()]
    except exc.APDUError as e:
        # v0 didn't support the instruction.
        versions = ['v0'] if e.code == 0x6d00 else []
    self._versions = versions
    return self._versions
def function[get_supported_versions, parameter[self]]: constant[ Gets a list of supported U2F versions from the device. ] if <ast.UnaryOp object at 0x7da1b033ded0> begin[:] <ast.Try object at 0x7da1b033e590> return[name[self]._versions]
keyword[def] identifier[get_supported_versions] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): keyword[try] : identifier[self] . identifier[_versions] =[ identifier[self] . identifier[send_apdu] ( identifier[INS_GET_VERSION] ). identifier[decode] ()] keyword[except] identifier[exc] . identifier[APDUError] keyword[as] identifier[e] : identifier[self] . identifier[_versions] =[ literal[string] ] keyword[if] identifier[e] . identifier[code] == literal[int] keyword[else] [] keyword[return] identifier[self] . identifier[_versions]
def get_supported_versions(self): """ Gets a list of supported U2F versions from the device. """ if not hasattr(self, '_versions'): try: self._versions = [self.send_apdu(INS_GET_VERSION).decode()] # depends on [control=['try'], data=[]] except exc.APDUError as e: # v0 didn't support the instruction. self._versions = ['v0'] if e.code == 27904 else [] # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] return self._versions
def fill(self, name_or_slot, value):
    """Fills an output slot required by this Pipeline.

    Args:
      name_or_slot: The name of the slot (a string) or Slot record to fill.
      value: The serializable value to assign to this slot.

    Raises:
      UnexpectedPipelineError if the Slot no longer exists.
      SlotNotDeclaredError if trying to output to a slot that was not
          declared ahead of time.
    """
    if isinstance(name_or_slot, Slot):
        slot = name_or_slot
    elif isinstance(name_or_slot, basestring):
        # Resolve the named slot on this pipeline's declared outputs.
        slot = getattr(self.outputs, name_or_slot)
    else:
        raise UnexpectedPipelineError(
            'Could not fill invalid output name: %r' % name_or_slot)

    if not slot._exists:
        raise SlotNotDeclaredError(
            'Cannot fill output with name "%s" that was just '
            'declared within the Pipeline context.' % slot.name)

    self._context.fill_slot(self._pipeline_key, slot, value)
def function[fill, parameter[self, name_or_slot, value]]: constant[Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time. ] if call[name[isinstance], parameter[name[name_or_slot], name[basestring]]] begin[:] variable[slot] assign[=] call[name[getattr], parameter[name[self].outputs, name[name_or_slot]]] if <ast.UnaryOp object at 0x7da1b031e020> begin[:] <ast.Raise object at 0x7da1b031ed40> call[name[self]._context.fill_slot, parameter[name[self]._pipeline_key, name[slot], name[value]]]
keyword[def] identifier[fill] ( identifier[self] , identifier[name_or_slot] , identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[name_or_slot] , identifier[basestring] ): identifier[slot] = identifier[getattr] ( identifier[self] . identifier[outputs] , identifier[name_or_slot] ) keyword[elif] identifier[isinstance] ( identifier[name_or_slot] , identifier[Slot] ): identifier[slot] = identifier[name_or_slot] keyword[else] : keyword[raise] identifier[UnexpectedPipelineError] ( literal[string] % identifier[name_or_slot] ) keyword[if] keyword[not] identifier[slot] . identifier[_exists] : keyword[raise] identifier[SlotNotDeclaredError] ( literal[string] literal[string] % identifier[slot] . identifier[name] ) identifier[self] . identifier[_context] . identifier[fill_slot] ( identifier[self] . identifier[_pipeline_key] , identifier[slot] , identifier[value] )
def fill(self, name_or_slot, value): """Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time. """ if isinstance(name_or_slot, basestring): slot = getattr(self.outputs, name_or_slot) # depends on [control=['if'], data=[]] elif isinstance(name_or_slot, Slot): slot = name_or_slot # depends on [control=['if'], data=[]] else: raise UnexpectedPipelineError('Could not fill invalid output name: %r' % name_or_slot) if not slot._exists: raise SlotNotDeclaredError('Cannot fill output with name "%s" that was just declared within the Pipeline context.' % slot.name) # depends on [control=['if'], data=[]] self._context.fill_slot(self._pipeline_key, slot, value)
def lookup(self, keys, read_consistency=None, transaction=None):
    """
    Lookup some entities by key.

    .. seealso::
        https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup

    :param keys: the keys to lookup.
    :type keys: list
    :param read_consistency: the read consistency to use. default, strong or eventual.
            Cannot be used with a transaction.
    :type read_consistency: str
    :param transaction: the transaction to use, if any.
    :type transaction: str
    :return: the response body of the lookup request.
    :rtype: dict
    """
    conn = self.get_conn()

    # Optional read options are only included when explicitly provided
    # (truthy), matching the REST API's expectation of absent fields.
    body = {'keys': keys}
    optional = {'readConsistency': read_consistency, 'transaction': transaction}
    body.update({key: val for key, val in optional.items() if val})

    request = conn.projects().lookup(projectId=self.project_id, body=body)
    return request.execute(num_retries=self.num_retries)
def function[lookup, parameter[self, keys, read_consistency, transaction]]: constant[ Lookup some entities by key. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup :param keys: the keys to lookup. :type keys: list :param read_consistency: the read consistency to use. default, strong or eventual. Cannot be used with a transaction. :type read_consistency: str :param transaction: the transaction to use, if any. :type transaction: str :return: the response body of the lookup request. :rtype: dict ] variable[conn] assign[=] call[name[self].get_conn, parameter[]] variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18bccad10>], [<ast.Name object at 0x7da18bccb7c0>]] if name[read_consistency] begin[:] call[name[body]][constant[readConsistency]] assign[=] name[read_consistency] if name[transaction] begin[:] call[name[body]][constant[transaction]] assign[=] name[transaction] variable[resp] assign[=] call[call[call[name[conn].projects, parameter[]].lookup, parameter[]].execute, parameter[]] return[name[resp]]
keyword[def] identifier[lookup] ( identifier[self] , identifier[keys] , identifier[read_consistency] = keyword[None] , identifier[transaction] = keyword[None] ): literal[string] identifier[conn] = identifier[self] . identifier[get_conn] () identifier[body] ={ literal[string] : identifier[keys] } keyword[if] identifier[read_consistency] : identifier[body] [ literal[string] ]= identifier[read_consistency] keyword[if] identifier[transaction] : identifier[body] [ literal[string] ]= identifier[transaction] identifier[resp] =( identifier[conn] . identifier[projects] () . identifier[lookup] ( identifier[projectId] = identifier[self] . identifier[project_id] , identifier[body] = identifier[body] ) . identifier[execute] ( identifier[num_retries] = identifier[self] . identifier[num_retries] )) keyword[return] identifier[resp]
def lookup(self, keys, read_consistency=None, transaction=None): """ Lookup some entities by key. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup :param keys: the keys to lookup. :type keys: list :param read_consistency: the read consistency to use. default, strong or eventual. Cannot be used with a transaction. :type read_consistency: str :param transaction: the transaction to use, if any. :type transaction: str :return: the response body of the lookup request. :rtype: dict """ conn = self.get_conn() body = {'keys': keys} if read_consistency: body['readConsistency'] = read_consistency # depends on [control=['if'], data=[]] if transaction: body['transaction'] = transaction # depends on [control=['if'], data=[]] resp = conn.projects().lookup(projectId=self.project_id, body=body).execute(num_retries=self.num_retries) return resp
def _get_min_val(spaceshift, *params): r"""Calculate minimum resolved amplitude or maximum r.""" # Get parameters from tuples spacing, shift = spaceshift n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params # Get filter for these parameters dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt') # Calculate rhs-response with this filter k = dlf.base/r[:, None] # Loop over transforms for i, f in enumerate(fC): # Calculate lhs and rhs; rhs depends on ftype lhs = f.lhs(k) if f.name == 'j2': rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2 rhs = rhs0 + rhs1 else: rhs = np.dot(lhs, getattr(dlf, f.name))/r # Get relative error rel_error = np.abs((rhs - f.rhs)/f.rhs) # Get indices where relative error is bigger than error imin0 = np.where(rel_error > error)[0] # Find first occurrence of failure if np.all(rhs == 0) or np.all(np.isnan(rhs)): # if all rhs are zeros or nans, the filter is useless imin0 = 0 elif imin0.size == 0: # if imin0.size == 0: # empty array, all rel_error < error. imin0 = rhs.size-1 # set to last r if verb > 0 and log['warn-r'] == 0: print('* WARNING :: all data have error < ' + str(error) + '; choose larger r or set error-level higher.') log['warn-r'] = 1 # Only do this once else: # Kind of a dirty hack: Permit to jump up to four bad values, # resulting for instance from high rel_error from zero crossings # of the transform pair. Should be made an input argument or # generally improved. if imin0.size > 4: imin0 = np.max([0, imin0[4]-5]) else: # just take the first one (no jumping allowed; normal case) imin0 = np.max([0, imin0[0]-1]) # Note that both version yield the same result if the failure is # consistent. 
# Depending on cvar, store minimum amplitude or 1/maxr if cvar == 'amp': min_val0 = np.abs(rhs[imin0]) else: min_val0 = 1/r[imin0] # Check if this inversion is better than previous ones if i == 0: # First run, store these values imin = dc(imin0) min_val = dc(min_val0) else: # Replace imin, min_val if this one is better if min_val0 > min_val: min_val = dc(min_val0) imin = dc(imin0) # QC plot if plot > 2: _plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar) # If verbose, print progress if verb > 1: log = _print_count(log) # If there is no point with rel_error < error (imin=0) it returns np.inf. return np.where(imin == 0, np.inf, min_val)
def function[_get_min_val, parameter[spaceshift]]: constant[Calculate minimum resolved amplitude or maximum r.] <ast.Tuple object at 0x7da18f812bf0> assign[=] name[spaceshift] <ast.Tuple object at 0x7da18f8103d0> assign[=] name[params] variable[dlf] assign[=] call[name[_calculate_filter], parameter[name[n], name[spacing], name[shift], name[fI], name[r_def], name[reim], constant[filt]]] variable[k] assign[=] binary_operation[name[dlf].base / call[name[r]][tuple[[<ast.Slice object at 0x7da18f813250>, <ast.Constant object at 0x7da18f8103a0>]]]] for taget[tuple[[<ast.Name object at 0x7da18f812950>, <ast.Name object at 0x7da18f812440>]]] in starred[call[name[enumerate], parameter[name[fC]]]] begin[:] variable[lhs] assign[=] call[name[f].lhs, parameter[name[k]]] if compare[name[f].name equal[==] constant[j2]] begin[:] variable[rhs0] assign[=] binary_operation[call[name[np].dot, parameter[call[name[lhs]][constant[0]], call[name[getattr], parameter[name[dlf], constant[j0]]]]] / name[r]] variable[rhs1] assign[=] binary_operation[call[name[np].dot, parameter[call[name[lhs]][constant[1]], call[name[getattr], parameter[name[dlf], constant[j1]]]]] / binary_operation[name[r] ** constant[2]]] variable[rhs] assign[=] binary_operation[name[rhs0] + name[rhs1]] variable[rel_error] assign[=] call[name[np].abs, parameter[binary_operation[binary_operation[name[rhs] - name[f].rhs] / name[f].rhs]]] variable[imin0] assign[=] call[call[name[np].where, parameter[compare[name[rel_error] greater[>] name[error]]]]][constant[0]] if <ast.BoolOp object at 0x7da18f813f70> begin[:] variable[imin0] assign[=] constant[0] if compare[name[cvar] equal[==] constant[amp]] begin[:] variable[min_val0] assign[=] call[name[np].abs, parameter[call[name[rhs]][name[imin0]]]] if compare[name[i] equal[==] constant[0]] begin[:] variable[imin] assign[=] call[name[dc], parameter[name[imin0]]] variable[min_val] assign[=] call[name[dc], parameter[name[min_val0]]] if compare[name[plot] greater[>] constant[2]] begin[:] 
call[name[_plot_inversion], parameter[name[f], name[rhs], name[r], name[k], name[imin0], name[spacing], name[shift], name[cvar]]] if compare[name[verb] greater[>] constant[1]] begin[:] variable[log] assign[=] call[name[_print_count], parameter[name[log]]] return[call[name[np].where, parameter[compare[name[imin] equal[==] constant[0]], name[np].inf, name[min_val]]]]
keyword[def] identifier[_get_min_val] ( identifier[spaceshift] ,* identifier[params] ): literal[string] identifier[spacing] , identifier[shift] = identifier[spaceshift] identifier[n] , identifier[fI] , identifier[fC] , identifier[r] , identifier[r_def] , identifier[error] , identifier[reim] , identifier[cvar] , identifier[verb] , identifier[plot] , identifier[log] = identifier[params] identifier[dlf] = identifier[_calculate_filter] ( identifier[n] , identifier[spacing] , identifier[shift] , identifier[fI] , identifier[r_def] , identifier[reim] , literal[string] ) identifier[k] = identifier[dlf] . identifier[base] / identifier[r] [:, keyword[None] ] keyword[for] identifier[i] , identifier[f] keyword[in] identifier[enumerate] ( identifier[fC] ): identifier[lhs] = identifier[f] . identifier[lhs] ( identifier[k] ) keyword[if] identifier[f] . identifier[name] == literal[string] : identifier[rhs0] = identifier[np] . identifier[dot] ( identifier[lhs] [ literal[int] ], identifier[getattr] ( identifier[dlf] , literal[string] ))/ identifier[r] identifier[rhs1] = identifier[np] . identifier[dot] ( identifier[lhs] [ literal[int] ], identifier[getattr] ( identifier[dlf] , literal[string] ))/ identifier[r] ** literal[int] identifier[rhs] = identifier[rhs0] + identifier[rhs1] keyword[else] : identifier[rhs] = identifier[np] . identifier[dot] ( identifier[lhs] , identifier[getattr] ( identifier[dlf] , identifier[f] . identifier[name] ))/ identifier[r] identifier[rel_error] = identifier[np] . identifier[abs] (( identifier[rhs] - identifier[f] . identifier[rhs] )/ identifier[f] . identifier[rhs] ) identifier[imin0] = identifier[np] . identifier[where] ( identifier[rel_error] > identifier[error] )[ literal[int] ] keyword[if] identifier[np] . identifier[all] ( identifier[rhs] == literal[int] ) keyword[or] identifier[np] . identifier[all] ( identifier[np] . identifier[isnan] ( identifier[rhs] )): identifier[imin0] = literal[int] keyword[elif] identifier[imin0] . 
identifier[size] == literal[int] : identifier[imin0] = identifier[rhs] . identifier[size] - literal[int] keyword[if] identifier[verb] > literal[int] keyword[and] identifier[log] [ literal[string] ]== literal[int] : identifier[print] ( literal[string] + identifier[str] ( identifier[error] )+ literal[string] ) identifier[log] [ literal[string] ]= literal[int] keyword[else] : keyword[if] identifier[imin0] . identifier[size] > literal[int] : identifier[imin0] = identifier[np] . identifier[max] ([ literal[int] , identifier[imin0] [ literal[int] ]- literal[int] ]) keyword[else] : identifier[imin0] = identifier[np] . identifier[max] ([ literal[int] , identifier[imin0] [ literal[int] ]- literal[int] ]) keyword[if] identifier[cvar] == literal[string] : identifier[min_val0] = identifier[np] . identifier[abs] ( identifier[rhs] [ identifier[imin0] ]) keyword[else] : identifier[min_val0] = literal[int] / identifier[r] [ identifier[imin0] ] keyword[if] identifier[i] == literal[int] : identifier[imin] = identifier[dc] ( identifier[imin0] ) identifier[min_val] = identifier[dc] ( identifier[min_val0] ) keyword[else] : keyword[if] identifier[min_val0] > identifier[min_val] : identifier[min_val] = identifier[dc] ( identifier[min_val0] ) identifier[imin] = identifier[dc] ( identifier[imin0] ) keyword[if] identifier[plot] > literal[int] : identifier[_plot_inversion] ( identifier[f] , identifier[rhs] , identifier[r] , identifier[k] , identifier[imin0] , identifier[spacing] , identifier[shift] , identifier[cvar] ) keyword[if] identifier[verb] > literal[int] : identifier[log] = identifier[_print_count] ( identifier[log] ) keyword[return] identifier[np] . identifier[where] ( identifier[imin] == literal[int] , identifier[np] . identifier[inf] , identifier[min_val] )
def _get_min_val(spaceshift, *params): """Calculate minimum resolved amplitude or maximum r.""" # Get parameters from tuples (spacing, shift) = spaceshift (n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log) = params # Get filter for these parameters dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt') # Calculate rhs-response with this filter k = dlf.base / r[:, None] # Loop over transforms for (i, f) in enumerate(fC): # Calculate lhs and rhs; rhs depends on ftype lhs = f.lhs(k) if f.name == 'j2': rhs0 = np.dot(lhs[0], getattr(dlf, 'j0')) / r rhs1 = np.dot(lhs[1], getattr(dlf, 'j1')) / r ** 2 rhs = rhs0 + rhs1 # depends on [control=['if'], data=[]] else: rhs = np.dot(lhs, getattr(dlf, f.name)) / r # Get relative error rel_error = np.abs((rhs - f.rhs) / f.rhs) # Get indices where relative error is bigger than error imin0 = np.where(rel_error > error)[0] # Find first occurrence of failure if np.all(rhs == 0) or np.all(np.isnan(rhs)): # if all rhs are zeros or nans, the filter is useless imin0 = 0 # depends on [control=['if'], data=[]] elif imin0.size == 0: # if imin0.size == 0: # empty array, all rel_error < error. imin0 = rhs.size - 1 # set to last r if verb > 0 and log['warn-r'] == 0: print('* WARNING :: all data have error < ' + str(error) + '; choose larger r or set error-level higher.') log['warn-r'] = 1 # Only do this once # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Kind of a dirty hack: Permit to jump up to four bad values, # resulting for instance from high rel_error from zero crossings # of the transform pair. Should be made an input argument or # generally improved. elif imin0.size > 4: imin0 = np.max([0, imin0[4] - 5]) # depends on [control=['if'], data=[]] else: # just take the first one (no jumping allowed; normal case) imin0 = np.max([0, imin0[0] - 1]) # Note that both version yield the same result if the failure is # consistent. 
# Depending on cvar, store minimum amplitude or 1/maxr if cvar == 'amp': min_val0 = np.abs(rhs[imin0]) # depends on [control=['if'], data=[]] else: min_val0 = 1 / r[imin0] # Check if this inversion is better than previous ones if i == 0: # First run, store these values imin = dc(imin0) min_val = dc(min_val0) # depends on [control=['if'], data=[]] # Replace imin, min_val if this one is better elif min_val0 > min_val: min_val = dc(min_val0) imin = dc(imin0) # depends on [control=['if'], data=['min_val0', 'min_val']] # QC plot if plot > 2: _plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # If verbose, print progress if verb > 1: log = _print_count(log) # depends on [control=['if'], data=[]] # If there is no point with rel_error < error (imin=0) it returns np.inf. return np.where(imin == 0, np.inf, min_val)
def to_cortex(c): ''' to_cortex(c) yields a Cortex object if the argument c can be coerced to one and otherwise raises an error. An object can be coerced to a Cortex object if: * it is a cortex object * it is a tuple (subject, h) where subject is a subject object and h is a subject hemisphere. ''' if is_cortex(c): return c elif pimms.is_vector(c) and len(c) == 2: (s,h) = c if is_subject(s) and pimms.is_str(h): if h in s.hemis: return s.hemis[h] else: raise ValueError('to_cortex: hemi %s not found in given subject' % h) raise ValueError('Could not coerce argument to Cortex object')
def function[to_cortex, parameter[c]]: constant[ to_cortex(c) yields a Cortex object if the argument c can be coerced to one and otherwise raises an error. An object can be coerced to a Cortex object if: * it is a cortex object * it is a tuple (subject, h) where subject is a subject object and h is a subject hemisphere. ] if call[name[is_cortex], parameter[name[c]]] begin[:] return[name[c]] <ast.Raise object at 0x7da18f09c2b0>
keyword[def] identifier[to_cortex] ( identifier[c] ): literal[string] keyword[if] identifier[is_cortex] ( identifier[c] ): keyword[return] identifier[c] keyword[elif] identifier[pimms] . identifier[is_vector] ( identifier[c] ) keyword[and] identifier[len] ( identifier[c] )== literal[int] : ( identifier[s] , identifier[h] )= identifier[c] keyword[if] identifier[is_subject] ( identifier[s] ) keyword[and] identifier[pimms] . identifier[is_str] ( identifier[h] ): keyword[if] identifier[h] keyword[in] identifier[s] . identifier[hemis] : keyword[return] identifier[s] . identifier[hemis] [ identifier[h] ] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[h] ) keyword[raise] identifier[ValueError] ( literal[string] )
def to_cortex(c): """ to_cortex(c) yields a Cortex object if the argument c can be coerced to one and otherwise raises an error. An object can be coerced to a Cortex object if: * it is a cortex object * it is a tuple (subject, h) where subject is a subject object and h is a subject hemisphere. """ if is_cortex(c): return c # depends on [control=['if'], data=[]] elif pimms.is_vector(c) and len(c) == 2: (s, h) = c if is_subject(s) and pimms.is_str(h): if h in s.hemis: return s.hemis[h] # depends on [control=['if'], data=['h']] else: raise ValueError('to_cortex: hemi %s not found in given subject' % h) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] raise ValueError('Could not coerce argument to Cortex object')
def get_signature(self, req): """calculate the signature of the oss request Returns the signatue """ oss_url = url.URL(req.url) oss_headers = [ "{0}:{1}\n".format(key, val) for key, val in req.headers.lower_items() if key.startswith(self.X_OSS_PREFIX) ] canonicalized_headers = "".join(sorted(oss_headers)) logger.debug( "canonicalized header : [{0}]".format(canonicalized_headers) ) oss_url.params = { key: val for key, val in oss_url.params.items() if key in self.SUB_RESOURCES or key in self.OVERRIDE_QUERIES } oss_url.forge(key=lambda x: x[0]) canonicalized_str = "{0}/{1}{2}".format( canonicalized_headers, self.get_bucket(oss_url.host), oss_url.uri ) str_to_sign = "\n".join([ req.method, req.headers["content-md5"], req.headers["content-type"], req.headers["date"], canonicalized_str ]) logger.debug( "signature str is \n{0}\n{1}\n{0}\n".format("#" * 20, str_to_sign) ) if isinstance(str_to_sign, requests.compat.str): str_to_sign = str_to_sign.encode("utf8") signature_bin = hmac.new(self._secret_key, str_to_sign, hashlib.sha1) signature = base64.b64encode(signature_bin.digest()).decode("utf8") logger.debug("signature is [{0}]".format(signature)) return signature
def function[get_signature, parameter[self, req]]: constant[calculate the signature of the oss request Returns the signatue ] variable[oss_url] assign[=] call[name[url].URL, parameter[name[req].url]] variable[oss_headers] assign[=] <ast.ListComp object at 0x7da1b0ac9f30> variable[canonicalized_headers] assign[=] call[constant[].join, parameter[call[name[sorted], parameter[name[oss_headers]]]]] call[name[logger].debug, parameter[call[constant[canonicalized header : [{0}]].format, parameter[name[canonicalized_headers]]]]] name[oss_url].params assign[=] <ast.DictComp object at 0x7da1b0ac94b0> call[name[oss_url].forge, parameter[]] variable[canonicalized_str] assign[=] call[constant[{0}/{1}{2}].format, parameter[name[canonicalized_headers], call[name[self].get_bucket, parameter[name[oss_url].host]], name[oss_url].uri]] variable[str_to_sign] assign[=] call[constant[ ].join, parameter[list[[<ast.Attribute object at 0x7da1b0af8d90>, <ast.Subscript object at 0x7da1b0af8c10>, <ast.Subscript object at 0x7da1b0af8af0>, <ast.Subscript object at 0x7da1b0af8bb0>, <ast.Name object at 0x7da1b0af89a0>]]]] call[name[logger].debug, parameter[call[constant[signature str is {0} {1} {0} ].format, parameter[binary_operation[constant[#] * constant[20]], name[str_to_sign]]]]] if call[name[isinstance], parameter[name[str_to_sign], name[requests].compat.str]] begin[:] variable[str_to_sign] assign[=] call[name[str_to_sign].encode, parameter[constant[utf8]]] variable[signature_bin] assign[=] call[name[hmac].new, parameter[name[self]._secret_key, name[str_to_sign], name[hashlib].sha1]] variable[signature] assign[=] call[call[name[base64].b64encode, parameter[call[name[signature_bin].digest, parameter[]]]].decode, parameter[constant[utf8]]] call[name[logger].debug, parameter[call[constant[signature is [{0}]].format, parameter[name[signature]]]]] return[name[signature]]
keyword[def] identifier[get_signature] ( identifier[self] , identifier[req] ): literal[string] identifier[oss_url] = identifier[url] . identifier[URL] ( identifier[req] . identifier[url] ) identifier[oss_headers] =[ literal[string] . identifier[format] ( identifier[key] , identifier[val] ) keyword[for] identifier[key] , identifier[val] keyword[in] identifier[req] . identifier[headers] . identifier[lower_items] () keyword[if] identifier[key] . identifier[startswith] ( identifier[self] . identifier[X_OSS_PREFIX] ) ] identifier[canonicalized_headers] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[oss_headers] )) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[canonicalized_headers] ) ) identifier[oss_url] . identifier[params] ={ identifier[key] : identifier[val] keyword[for] identifier[key] , identifier[val] keyword[in] identifier[oss_url] . identifier[params] . identifier[items] () keyword[if] identifier[key] keyword[in] identifier[self] . identifier[SUB_RESOURCES] keyword[or] identifier[key] keyword[in] identifier[self] . identifier[OVERRIDE_QUERIES] } identifier[oss_url] . identifier[forge] ( identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]) identifier[canonicalized_str] = literal[string] . identifier[format] ( identifier[canonicalized_headers] , identifier[self] . identifier[get_bucket] ( identifier[oss_url] . identifier[host] ), identifier[oss_url] . identifier[uri] ) identifier[str_to_sign] = literal[string] . identifier[join] ([ identifier[req] . identifier[method] , identifier[req] . identifier[headers] [ literal[string] ], identifier[req] . identifier[headers] [ literal[string] ], identifier[req] . identifier[headers] [ literal[string] ], identifier[canonicalized_str] ]) identifier[logger] . identifier[debug] ( literal[string] . 
identifier[format] ( literal[string] * literal[int] , identifier[str_to_sign] ) ) keyword[if] identifier[isinstance] ( identifier[str_to_sign] , identifier[requests] . identifier[compat] . identifier[str] ): identifier[str_to_sign] = identifier[str_to_sign] . identifier[encode] ( literal[string] ) identifier[signature_bin] = identifier[hmac] . identifier[new] ( identifier[self] . identifier[_secret_key] , identifier[str_to_sign] , identifier[hashlib] . identifier[sha1] ) identifier[signature] = identifier[base64] . identifier[b64encode] ( identifier[signature_bin] . identifier[digest] ()). identifier[decode] ( literal[string] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[signature] )) keyword[return] identifier[signature]
def get_signature(self, req): """calculate the signature of the oss request Returns the signatue """ oss_url = url.URL(req.url) oss_headers = ['{0}:{1}\n'.format(key, val) for (key, val) in req.headers.lower_items() if key.startswith(self.X_OSS_PREFIX)] canonicalized_headers = ''.join(sorted(oss_headers)) logger.debug('canonicalized header : [{0}]'.format(canonicalized_headers)) oss_url.params = {key: val for (key, val) in oss_url.params.items() if key in self.SUB_RESOURCES or key in self.OVERRIDE_QUERIES} oss_url.forge(key=lambda x: x[0]) canonicalized_str = '{0}/{1}{2}'.format(canonicalized_headers, self.get_bucket(oss_url.host), oss_url.uri) str_to_sign = '\n'.join([req.method, req.headers['content-md5'], req.headers['content-type'], req.headers['date'], canonicalized_str]) logger.debug('signature str is \n{0}\n{1}\n{0}\n'.format('#' * 20, str_to_sign)) if isinstance(str_to_sign, requests.compat.str): str_to_sign = str_to_sign.encode('utf8') # depends on [control=['if'], data=[]] signature_bin = hmac.new(self._secret_key, str_to_sign, hashlib.sha1) signature = base64.b64encode(signature_bin.digest()).decode('utf8') logger.debug('signature is [{0}]'.format(signature)) return signature
def trigger_hook(self, name, *args, **kwargs): """Recursively call a method named ``name`` with the provided args and keyword args if defined.""" method = getattr(self, name, None) if is_callable(method): method(*args, **kwargs) try: children = self.children except AttributeError: return else: if children is not None: for child in children: method = getattr(child, 'trigger_hook', None) if is_callable(method): method(name, *args, **kwargs) else: method = getattr(child, name, None) if is_callable(method): method(*args, **kwargs)
def function[trigger_hook, parameter[self, name]]: constant[Recursively call a method named ``name`` with the provided args and keyword args if defined.] variable[method] assign[=] call[name[getattr], parameter[name[self], name[name], constant[None]]] if call[name[is_callable], parameter[name[method]]] begin[:] call[name[method], parameter[<ast.Starred object at 0x7da20e9b1c60>]] <ast.Try object at 0x7da20e9b2c80>
keyword[def] identifier[trigger_hook] ( identifier[self] , identifier[name] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[method] = identifier[getattr] ( identifier[self] , identifier[name] , keyword[None] ) keyword[if] identifier[is_callable] ( identifier[method] ): identifier[method] (* identifier[args] ,** identifier[kwargs] ) keyword[try] : identifier[children] = identifier[self] . identifier[children] keyword[except] identifier[AttributeError] : keyword[return] keyword[else] : keyword[if] identifier[children] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[child] keyword[in] identifier[children] : identifier[method] = identifier[getattr] ( identifier[child] , literal[string] , keyword[None] ) keyword[if] identifier[is_callable] ( identifier[method] ): identifier[method] ( identifier[name] ,* identifier[args] ,** identifier[kwargs] ) keyword[else] : identifier[method] = identifier[getattr] ( identifier[child] , identifier[name] , keyword[None] ) keyword[if] identifier[is_callable] ( identifier[method] ): identifier[method] (* identifier[args] ,** identifier[kwargs] )
def trigger_hook(self, name, *args, **kwargs): """Recursively call a method named ``name`` with the provided args and keyword args if defined.""" method = getattr(self, name, None) if is_callable(method): method(*args, **kwargs) # depends on [control=['if'], data=[]] try: children = self.children # depends on [control=['try'], data=[]] except AttributeError: return # depends on [control=['except'], data=[]] else: if children is not None: for child in children: method = getattr(child, 'trigger_hook', None) if is_callable(method): method(name, *args, **kwargs) # depends on [control=['if'], data=[]] else: method = getattr(child, name, None) if is_callable(method): method(*args, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=['children']]
def copy(self): """ returns a copy of the pca analysis object """ cp = copy.deepcopy(self) cp.genotypes = allel.GenotypeArray(self.genotypes, copy=True) return cp
def function[copy, parameter[self]]: constant[ returns a copy of the pca analysis object ] variable[cp] assign[=] call[name[copy].deepcopy, parameter[name[self]]] name[cp].genotypes assign[=] call[name[allel].GenotypeArray, parameter[name[self].genotypes]] return[name[cp]]
keyword[def] identifier[copy] ( identifier[self] ): literal[string] identifier[cp] = identifier[copy] . identifier[deepcopy] ( identifier[self] ) identifier[cp] . identifier[genotypes] = identifier[allel] . identifier[GenotypeArray] ( identifier[self] . identifier[genotypes] , identifier[copy] = keyword[True] ) keyword[return] identifier[cp]
def copy(self): """ returns a copy of the pca analysis object """ cp = copy.deepcopy(self) cp.genotypes = allel.GenotypeArray(self.genotypes, copy=True) return cp
def cmd_output_sysid(self, args): '''add new output for a specific MAVLink sysID''' sysid = int(args[0]) device = args[1] print("Adding output %s for sysid %u" % (device, sysid)) try: conn = mavutil.mavlink_connection(device, input=False, source_system=self.settings.source_system) conn.mav.srcComponent = self.settings.source_component except Exception: print("Failed to connect to %s" % device) return try: mp_util.child_fd_list_add(conn.port.fileno()) except Exception: pass if sysid in self.mpstate.sysid_outputs: self.mpstate.sysid_outputs[sysid].close() self.mpstate.sysid_outputs[sysid] = conn
def function[cmd_output_sysid, parameter[self, args]]: constant[add new output for a specific MAVLink sysID] variable[sysid] assign[=] call[name[int], parameter[call[name[args]][constant[0]]]] variable[device] assign[=] call[name[args]][constant[1]] call[name[print], parameter[binary_operation[constant[Adding output %s for sysid %u] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204623b80>, <ast.Name object at 0x7da204623eb0>]]]]] <ast.Try object at 0x7da204623e20> <ast.Try object at 0x7da204620040> if compare[name[sysid] in name[self].mpstate.sysid_outputs] begin[:] call[call[name[self].mpstate.sysid_outputs][name[sysid]].close, parameter[]] call[name[self].mpstate.sysid_outputs][name[sysid]] assign[=] name[conn]
keyword[def] identifier[cmd_output_sysid] ( identifier[self] , identifier[args] ): literal[string] identifier[sysid] = identifier[int] ( identifier[args] [ literal[int] ]) identifier[device] = identifier[args] [ literal[int] ] identifier[print] ( literal[string] %( identifier[device] , identifier[sysid] )) keyword[try] : identifier[conn] = identifier[mavutil] . identifier[mavlink_connection] ( identifier[device] , identifier[input] = keyword[False] , identifier[source_system] = identifier[self] . identifier[settings] . identifier[source_system] ) identifier[conn] . identifier[mav] . identifier[srcComponent] = identifier[self] . identifier[settings] . identifier[source_component] keyword[except] identifier[Exception] : identifier[print] ( literal[string] % identifier[device] ) keyword[return] keyword[try] : identifier[mp_util] . identifier[child_fd_list_add] ( identifier[conn] . identifier[port] . identifier[fileno] ()) keyword[except] identifier[Exception] : keyword[pass] keyword[if] identifier[sysid] keyword[in] identifier[self] . identifier[mpstate] . identifier[sysid_outputs] : identifier[self] . identifier[mpstate] . identifier[sysid_outputs] [ identifier[sysid] ]. identifier[close] () identifier[self] . identifier[mpstate] . identifier[sysid_outputs] [ identifier[sysid] ]= identifier[conn]
def cmd_output_sysid(self, args): """add new output for a specific MAVLink sysID""" sysid = int(args[0]) device = args[1] print('Adding output %s for sysid %u' % (device, sysid)) try: conn = mavutil.mavlink_connection(device, input=False, source_system=self.settings.source_system) conn.mav.srcComponent = self.settings.source_component # depends on [control=['try'], data=[]] except Exception: print('Failed to connect to %s' % device) return # depends on [control=['except'], data=[]] try: mp_util.child_fd_list_add(conn.port.fileno()) # depends on [control=['try'], data=[]] except Exception: pass # depends on [control=['except'], data=[]] if sysid in self.mpstate.sysid_outputs: self.mpstate.sysid_outputs[sysid].close() # depends on [control=['if'], data=['sysid']] self.mpstate.sysid_outputs[sysid] = conn
def main(): """Provide the program's entry point when directly executed.""" if len(sys.argv) != 2: print("Usage: {} USERNAME".format(sys.argv[0])) return 1 caching_requestor = prawcore.Requestor( "prawcore_device_id_auth_example", session=CachingSession() ) authenticator = prawcore.TrustedAuthenticator( caching_requestor, os.environ["PRAWCORE_CLIENT_ID"], os.environ["PRAWCORE_CLIENT_SECRET"], ) authorizer = prawcore.ReadOnlyAuthorizer(authenticator) authorizer.refresh() user = sys.argv[1] with prawcore.session(authorizer) as session: data1 = session.request("GET", "/api/v1/user/{}/trophies".format(user)) with prawcore.session(authorizer) as session: data2 = session.request("GET", "/api/v1/user/{}/trophies".format(user)) for trophy in data1["data"]["trophies"]: description = trophy["data"]["description"] print( "Original:", trophy["data"]["name"] + (" ({})".format(description) if description else ""), ) for trophy in data2["data"]["trophies"]: description = trophy["data"]["description"] print( "Cached:", trophy["data"]["name"] + (" ({})".format(description) if description else ""), ) print( "----\nCached == Original:", data2["data"]["trophies"] == data2["data"]["trophies"], ) return 0
def function[main, parameter[]]: constant[Provide the program's entry point when directly executed.] if compare[call[name[len], parameter[name[sys].argv]] not_equal[!=] constant[2]] begin[:] call[name[print], parameter[call[constant[Usage: {} USERNAME].format, parameter[call[name[sys].argv][constant[0]]]]]] return[constant[1]] variable[caching_requestor] assign[=] call[name[prawcore].Requestor, parameter[constant[prawcore_device_id_auth_example]]] variable[authenticator] assign[=] call[name[prawcore].TrustedAuthenticator, parameter[name[caching_requestor], call[name[os].environ][constant[PRAWCORE_CLIENT_ID]], call[name[os].environ][constant[PRAWCORE_CLIENT_SECRET]]]] variable[authorizer] assign[=] call[name[prawcore].ReadOnlyAuthorizer, parameter[name[authenticator]]] call[name[authorizer].refresh, parameter[]] variable[user] assign[=] call[name[sys].argv][constant[1]] with call[name[prawcore].session, parameter[name[authorizer]]] begin[:] variable[data1] assign[=] call[name[session].request, parameter[constant[GET], call[constant[/api/v1/user/{}/trophies].format, parameter[name[user]]]]] with call[name[prawcore].session, parameter[name[authorizer]]] begin[:] variable[data2] assign[=] call[name[session].request, parameter[constant[GET], call[constant[/api/v1/user/{}/trophies].format, parameter[name[user]]]]] for taget[name[trophy]] in starred[call[call[name[data1]][constant[data]]][constant[trophies]]] begin[:] variable[description] assign[=] call[call[name[trophy]][constant[data]]][constant[description]] call[name[print], parameter[constant[Original:], binary_operation[call[call[name[trophy]][constant[data]]][constant[name]] + <ast.IfExp object at 0x7da204963340>]]] for taget[name[trophy]] in starred[call[call[name[data2]][constant[data]]][constant[trophies]]] begin[:] variable[description] assign[=] call[call[name[trophy]][constant[data]]][constant[description]] call[name[print], parameter[constant[Cached:], 
binary_operation[call[call[name[trophy]][constant[data]]][constant[name]] + <ast.IfExp object at 0x7da204960ca0>]]] call[name[print], parameter[constant[---- Cached == Original:], compare[call[call[name[data2]][constant[data]]][constant[trophies]] equal[==] call[call[name[data2]][constant[data]]][constant[trophies]]]]] return[constant[0]]
keyword[def] identifier[main] (): literal[string] keyword[if] identifier[len] ( identifier[sys] . identifier[argv] )!= literal[int] : identifier[print] ( literal[string] . identifier[format] ( identifier[sys] . identifier[argv] [ literal[int] ])) keyword[return] literal[int] identifier[caching_requestor] = identifier[prawcore] . identifier[Requestor] ( literal[string] , identifier[session] = identifier[CachingSession] () ) identifier[authenticator] = identifier[prawcore] . identifier[TrustedAuthenticator] ( identifier[caching_requestor] , identifier[os] . identifier[environ] [ literal[string] ], identifier[os] . identifier[environ] [ literal[string] ], ) identifier[authorizer] = identifier[prawcore] . identifier[ReadOnlyAuthorizer] ( identifier[authenticator] ) identifier[authorizer] . identifier[refresh] () identifier[user] = identifier[sys] . identifier[argv] [ literal[int] ] keyword[with] identifier[prawcore] . identifier[session] ( identifier[authorizer] ) keyword[as] identifier[session] : identifier[data1] = identifier[session] . identifier[request] ( literal[string] , literal[string] . identifier[format] ( identifier[user] )) keyword[with] identifier[prawcore] . identifier[session] ( identifier[authorizer] ) keyword[as] identifier[session] : identifier[data2] = identifier[session] . identifier[request] ( literal[string] , literal[string] . identifier[format] ( identifier[user] )) keyword[for] identifier[trophy] keyword[in] identifier[data1] [ literal[string] ][ literal[string] ]: identifier[description] = identifier[trophy] [ literal[string] ][ literal[string] ] identifier[print] ( literal[string] , identifier[trophy] [ literal[string] ][ literal[string] ] +( literal[string] . 
identifier[format] ( identifier[description] ) keyword[if] identifier[description] keyword[else] literal[string] ), ) keyword[for] identifier[trophy] keyword[in] identifier[data2] [ literal[string] ][ literal[string] ]: identifier[description] = identifier[trophy] [ literal[string] ][ literal[string] ] identifier[print] ( literal[string] , identifier[trophy] [ literal[string] ][ literal[string] ] +( literal[string] . identifier[format] ( identifier[description] ) keyword[if] identifier[description] keyword[else] literal[string] ), ) identifier[print] ( literal[string] , identifier[data2] [ literal[string] ][ literal[string] ]== identifier[data2] [ literal[string] ][ literal[string] ], ) keyword[return] literal[int]
def main(): """Provide the program's entry point when directly executed.""" if len(sys.argv) != 2: print('Usage: {} USERNAME'.format(sys.argv[0])) return 1 # depends on [control=['if'], data=[]] caching_requestor = prawcore.Requestor('prawcore_device_id_auth_example', session=CachingSession()) authenticator = prawcore.TrustedAuthenticator(caching_requestor, os.environ['PRAWCORE_CLIENT_ID'], os.environ['PRAWCORE_CLIENT_SECRET']) authorizer = prawcore.ReadOnlyAuthorizer(authenticator) authorizer.refresh() user = sys.argv[1] with prawcore.session(authorizer) as session: data1 = session.request('GET', '/api/v1/user/{}/trophies'.format(user)) # depends on [control=['with'], data=['session']] with prawcore.session(authorizer) as session: data2 = session.request('GET', '/api/v1/user/{}/trophies'.format(user)) # depends on [control=['with'], data=['session']] for trophy in data1['data']['trophies']: description = trophy['data']['description'] print('Original:', trophy['data']['name'] + (' ({})'.format(description) if description else '')) # depends on [control=['for'], data=['trophy']] for trophy in data2['data']['trophies']: description = trophy['data']['description'] print('Cached:', trophy['data']['name'] + (' ({})'.format(description) if description else '')) # depends on [control=['for'], data=['trophy']] print('----\nCached == Original:', data2['data']['trophies'] == data2['data']['trophies']) return 0
def count_unread_messages_between(self, um_to_user, um_from_user): """ Returns the amount of unread messages between two users :param um_to_user: A Django :class:`User` for who the messages are for. :param um_from_user: A Django :class:`User` from whom the messages originate from. :return: An integer with the amount of unread messages. """ unread_total = self.filter(message__sender=um_from_user, user=um_to_user, read_at__isnull=True, deleted_at__isnull=True).count() return unread_total
def function[count_unread_messages_between, parameter[self, um_to_user, um_from_user]]: constant[ Returns the amount of unread messages between two users :param um_to_user: A Django :class:`User` for who the messages are for. :param um_from_user: A Django :class:`User` from whom the messages originate from. :return: An integer with the amount of unread messages. ] variable[unread_total] assign[=] call[call[name[self].filter, parameter[]].count, parameter[]] return[name[unread_total]]
keyword[def] identifier[count_unread_messages_between] ( identifier[self] , identifier[um_to_user] , identifier[um_from_user] ): literal[string] identifier[unread_total] = identifier[self] . identifier[filter] ( identifier[message__sender] = identifier[um_from_user] , identifier[user] = identifier[um_to_user] , identifier[read_at__isnull] = keyword[True] , identifier[deleted_at__isnull] = keyword[True] ). identifier[count] () keyword[return] identifier[unread_total]
def count_unread_messages_between(self, um_to_user, um_from_user): """ Returns the amount of unread messages between two users :param um_to_user: A Django :class:`User` for who the messages are for. :param um_from_user: A Django :class:`User` from whom the messages originate from. :return: An integer with the amount of unread messages. """ unread_total = self.filter(message__sender=um_from_user, user=um_to_user, read_at__isnull=True, deleted_at__isnull=True).count() return unread_total
def db_value(self, value): """ Convert UUID to binary blob """ # ensure we have a valid UUID if not isinstance(value, UUID): value = UUID(value) # reconstruct for optimal indexing parts = str(value).split("-") reordered = ''.join([parts[2], parts[1], parts[0], parts[3], parts[4]]) value = binascii.unhexlify(reordered) return super(OrderedUUIDField, self).db_value(value)
def function[db_value, parameter[self, value]]: constant[ Convert UUID to binary blob ] if <ast.UnaryOp object at 0x7da20c6e60b0> begin[:] variable[value] assign[=] call[name[UUID], parameter[name[value]]] variable[parts] assign[=] call[call[name[str], parameter[name[value]]].split, parameter[constant[-]]] variable[reordered] assign[=] call[constant[].join, parameter[list[[<ast.Subscript object at 0x7da20c6e68c0>, <ast.Subscript object at 0x7da20c6e71c0>, <ast.Subscript object at 0x7da20c6e7ac0>, <ast.Subscript object at 0x7da20c6e54e0>, <ast.Subscript object at 0x7da20c6e59c0>]]]] variable[value] assign[=] call[name[binascii].unhexlify, parameter[name[reordered]]] return[call[call[name[super], parameter[name[OrderedUUIDField], name[self]]].db_value, parameter[name[value]]]]
keyword[def] identifier[db_value] ( identifier[self] , identifier[value] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[UUID] ): identifier[value] = identifier[UUID] ( identifier[value] ) identifier[parts] = identifier[str] ( identifier[value] ). identifier[split] ( literal[string] ) identifier[reordered] = literal[string] . identifier[join] ([ identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ]]) identifier[value] = identifier[binascii] . identifier[unhexlify] ( identifier[reordered] ) keyword[return] identifier[super] ( identifier[OrderedUUIDField] , identifier[self] ). identifier[db_value] ( identifier[value] )
def db_value(self, value): """ Convert UUID to binary blob """ # ensure we have a valid UUID if not isinstance(value, UUID): value = UUID(value) # depends on [control=['if'], data=[]] # reconstruct for optimal indexing parts = str(value).split('-') reordered = ''.join([parts[2], parts[1], parts[0], parts[3], parts[4]]) value = binascii.unhexlify(reordered) return super(OrderedUUIDField, self).db_value(value)
def ls(args): """ List sites ---------- Show list of installed sites. :: usage: makesite ls [-h] [-v] [-p PATH] Show list of installed sites. optional arguments: -p PATH, --path PATH path to makesite sites instalation dir. you can set it in $makesite_home env variable. Examples: :: makesite ls """ assert args.path, "Not finded MAKESITE HOME." print_header("Installed sites:") for site in gen_sites(args.path): LOGGER.debug(site.get_info()) return True
def function[ls, parameter[args]]: constant[ List sites ---------- Show list of installed sites. :: usage: makesite ls [-h] [-v] [-p PATH] Show list of installed sites. optional arguments: -p PATH, --path PATH path to makesite sites instalation dir. you can set it in $makesite_home env variable. Examples: :: makesite ls ] assert[name[args].path] call[name[print_header], parameter[constant[Installed sites:]]] for taget[name[site]] in starred[call[name[gen_sites], parameter[name[args].path]]] begin[:] call[name[LOGGER].debug, parameter[call[name[site].get_info, parameter[]]]] return[constant[True]]
keyword[def] identifier[ls] ( identifier[args] ): literal[string] keyword[assert] identifier[args] . identifier[path] , literal[string] identifier[print_header] ( literal[string] ) keyword[for] identifier[site] keyword[in] identifier[gen_sites] ( identifier[args] . identifier[path] ): identifier[LOGGER] . identifier[debug] ( identifier[site] . identifier[get_info] ()) keyword[return] keyword[True]
def ls(args): """ List sites ---------- Show list of installed sites. :: usage: makesite ls [-h] [-v] [-p PATH] Show list of installed sites. optional arguments: -p PATH, --path PATH path to makesite sites instalation dir. you can set it in $makesite_home env variable. Examples: :: makesite ls """ assert args.path, 'Not finded MAKESITE HOME.' print_header('Installed sites:') for site in gen_sites(args.path): LOGGER.debug(site.get_info()) # depends on [control=['for'], data=['site']] return True
def repack_archive (archive, archive_new, verbosity=0, interactive=True): """Repack archive to different file and/or format.""" util.check_existing_filename(archive) util.check_new_filename(archive_new) if verbosity >= 0: util.log_info("Repacking %s to %s ..." % (archive, archive_new)) res = _repack_archive(archive, archive_new, verbosity=verbosity, interactive=interactive) if verbosity >= 0: util.log_info("... repacking successful.") return res
def function[repack_archive, parameter[archive, archive_new, verbosity, interactive]]: constant[Repack archive to different file and/or format.] call[name[util].check_existing_filename, parameter[name[archive]]] call[name[util].check_new_filename, parameter[name[archive_new]]] if compare[name[verbosity] greater_or_equal[>=] constant[0]] begin[:] call[name[util].log_info, parameter[binary_operation[constant[Repacking %s to %s ...] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0677580>, <ast.Name object at 0x7da1b0677790>]]]]] variable[res] assign[=] call[name[_repack_archive], parameter[name[archive], name[archive_new]]] if compare[name[verbosity] greater_or_equal[>=] constant[0]] begin[:] call[name[util].log_info, parameter[constant[... repacking successful.]]] return[name[res]]
keyword[def] identifier[repack_archive] ( identifier[archive] , identifier[archive_new] , identifier[verbosity] = literal[int] , identifier[interactive] = keyword[True] ): literal[string] identifier[util] . identifier[check_existing_filename] ( identifier[archive] ) identifier[util] . identifier[check_new_filename] ( identifier[archive_new] ) keyword[if] identifier[verbosity] >= literal[int] : identifier[util] . identifier[log_info] ( literal[string] %( identifier[archive] , identifier[archive_new] )) identifier[res] = identifier[_repack_archive] ( identifier[archive] , identifier[archive_new] , identifier[verbosity] = identifier[verbosity] , identifier[interactive] = identifier[interactive] ) keyword[if] identifier[verbosity] >= literal[int] : identifier[util] . identifier[log_info] ( literal[string] ) keyword[return] identifier[res]
def repack_archive(archive, archive_new, verbosity=0, interactive=True): """Repack archive to different file and/or format.""" util.check_existing_filename(archive) util.check_new_filename(archive_new) if verbosity >= 0: util.log_info('Repacking %s to %s ...' % (archive, archive_new)) # depends on [control=['if'], data=[]] res = _repack_archive(archive, archive_new, verbosity=verbosity, interactive=interactive) if verbosity >= 0: util.log_info('... repacking successful.') # depends on [control=['if'], data=[]] return res
def _install(self, name, version, repos): '''Check existence and version match of R library. cran and bioc packages are unique yet might overlap with github. Therefore if the input name is {repo}/{pkg} the package will be installed from github if not available, else from cran or bioc ''' from sos.pattern import glob_wildcards import tempfile import subprocess output_file = tempfile.NamedTemporaryFile( mode='w+t', suffix='.txt', delete=False).name script_file = tempfile.NamedTemporaryFile( mode='w+t', suffix='.R', delete=False).name # package_loaded = 'suppressMessages(require(package, character.only=TRUE, quietly=TRUE))' version_satisfied = 'TRUE' for opt in ('==', '>=', '>', '<=', '<', '!='): if opt in name: if version is not None: raise ValueError( f"Specifying 'version=' option in addition to '{name}' is not allowed" ) name, version = [x.strip() for x in name.split(opt, 1)] if ',' in version: raise ValueError( f'SoS does not yet support multiple version comparisons. {version} provided' ) version = (opt + version,) break if version is not None: version = list(version) operators = [] for idx, value in enumerate(version): value = str(value) if value.startswith('>='): operators.append('>=') version[idx] = value[2:] elif value.startswith('>'): operators.append('>') version[idx] = value[1:] elif value.startswith('<='): operators.append('<=') version[idx] = value[2:] elif value.startswith('<'): operators.append('<') version[idx] = value[1:] elif value.startswith('=='): operators.append('==') version[idx] = value[2:] elif value.startswith('!='): operators.append('!=') version[idx] = value[2:] else: operators.append('==') # check version and mark version mismatch # if current version satisfies any of the # requirement the check program quits version_satisfied = '||'.join([ f'(cur_version {y} {repr(x)})' for x, y in zip(version, operators) ]) # if len(glob_wildcards('{repo}@{pkg}', [name])['repo']): # package is from github self._install('remotes', None, repos) 
install_script = f''' options(warn=-1) package_repo <-strsplit("{name}", split="@")[[1]][2] package <-strsplit("{name}", split="@")[[1]][1] if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL if (!is.null(cur_version) && {version_satisfied}) {{ write(paste(package, cur_version, "AVAILABLE"), file={repr(output_file)}) }} else if ({"TRUE" if self._autoinstall else "FALSE"}) {{ remotes::install_github(package_repo, force = TRUE) if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL # if it still does not exist, write the package name to output if (!is.null(cur_version)) {{ if ({version_satisfied}) write(paste(package, cur_version, "INSTALLED"), file={repr(output_file)}) else write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) }} else {{ write(paste(package, "NA", "MISSING"), file={repr(output_file)}) quit("no") }} }} else {{ if (!is.null(cur_version)) write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) else write(paste(package, cur_version, "UNAVAILABLE"), file={repr(output_file)}) }} ''' else: # package is from cran or bioc install_script = f''' options(warn=-1) package <- "{name}" if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL if (!is.null(cur_version) && {version_satisfied}) {{ write(paste(package, cur_version, "AVAILABLE"), file={repr(output_file)}) }} else if ({"TRUE" if self._autoinstall else "FALSE"}) {{ install.packages(package, repos="{repos}", quiet=FALSE) # if the package still does not exist if (!{package_loaded}) {{ source("http://bioconductor.org/biocLite.R") biocLite(package, suppressUpdates=TRUE, suppressAutoUpdate=TRUE, ask=FALSE) }} if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL # if it still does not exist, write the package name to output if (!is.null(cur_version)) {{ if ({version_satisfied}) write(paste(package, cur_version, "INSTALLED"), 
file={repr(output_file)}) else write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) }} else {{ write(paste(package, "NA", "MISSING"), file={repr(output_file)}) quit("no") }} }} else {{ if (!is.null(cur_version)) write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) else write(paste(package, cur_version, "UNAVAILABLE"), file={repr(output_file)}) }} ''' # temporarily change the run mode to run to execute script try: with open(script_file, 'w') as sfile: sfile.write(install_script) # p = subprocess.Popen( ['Rscript', '--default-packages=utils', script_file]) ret = p.wait() if ret != 0: env.logger.warning( f'Failed to detect or install R library {name}') return False except Exception as e: env.logger.error(f'Failed to execute script: {e}') return False finally: os.remove(script_file) ret_val = False with open(output_file) as tmp: for line in tmp: lib, cur_version, status = line.split(' ', 2) if status.strip() == "MISSING": env.logger.warning( f'R library {lib} is not available and cannot be installed.' ) elif status.strip() == "UNAVAILABLE": env.logger.warning(f'R library {lib} is not available.') elif status.strip() == 'AVAILABLE': env.logger.debug( f'R library {lib} ({cur_version}) is available') ret_val = True elif status.strip() == 'INSTALLED': env.logger.debug( f'R library {lib} ({cur_version}) has been installed') ret_val = True elif status.strip() == 'VERSION_MISMATCH': env.logger.warning( f'R library {lib} ({cur_version}) does not satisfy version requirement ({"/".join(version)})!' ) else: raise RuntimeError(f'This should not happen: {line}') try: os.remove(output_file) except Exception: pass return ret_val
def function[_install, parameter[self, name, version, repos]]: constant[Check existence and version match of R library. cran and bioc packages are unique yet might overlap with github. Therefore if the input name is {repo}/{pkg} the package will be installed from github if not available, else from cran or bioc ] from relative_module[sos.pattern] import module[glob_wildcards] import module[tempfile] import module[subprocess] variable[output_file] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]].name variable[script_file] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]].name variable[package_loaded] assign[=] constant[suppressMessages(require(package, character.only=TRUE, quietly=TRUE))] variable[version_satisfied] assign[=] constant[TRUE] for taget[name[opt]] in starred[tuple[[<ast.Constant object at 0x7da1b1395d50>, <ast.Constant object at 0x7da1b1395d20>, <ast.Constant object at 0x7da1b1395cf0>, <ast.Constant object at 0x7da1b1395cc0>, <ast.Constant object at 0x7da1b1395c90>, <ast.Constant object at 0x7da1b1395c60>]]] begin[:] if compare[name[opt] in name[name]] begin[:] if compare[name[version] is_not constant[None]] begin[:] <ast.Raise object at 0x7da1b1395a80> <ast.Tuple object at 0x7da1b13958a0> assign[=] <ast.ListComp object at 0x7da1b1395810> if compare[constant[,] in name[version]] begin[:] <ast.Raise object at 0x7da1b1395510> variable[version] assign[=] tuple[[<ast.BinOp object at 0x7da1b13952d0>]] break if compare[name[version] is_not constant[None]] begin[:] variable[version] assign[=] call[name[list], parameter[name[version]]] variable[operators] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b1394f40>, <ast.Name object at 0x7da1b1394f10>]]] in starred[call[name[enumerate], parameter[name[version]]]] begin[:] variable[value] assign[=] call[name[str], parameter[name[value]]] if call[name[value].startswith, parameter[constant[>=]]] begin[:] call[name[operators].append, parameter[constant[>=]]] 
call[name[version]][name[idx]] assign[=] call[name[value]][<ast.Slice object at 0x7da1b1394a30>] variable[version_satisfied] assign[=] call[constant[||].join, parameter[<ast.ListComp object at 0x7da1b2344be0>]] if call[name[len], parameter[call[call[name[glob_wildcards], parameter[constant[{repo}@{pkg}], list[[<ast.Name object at 0x7da1b2345ed0>]]]]][constant[repo]]]] begin[:] call[name[self]._install, parameter[constant[remotes], constant[None], name[repos]]] variable[install_script] assign[=] <ast.JoinedStr object at 0x7da1b1321180> <ast.Try object at 0x7da1b11792a0> variable[ret_val] assign[=] constant[False] with call[name[open], parameter[name[output_file]]] begin[:] for taget[name[line]] in starred[name[tmp]] begin[:] <ast.Tuple object at 0x7da18dc06ec0> assign[=] call[name[line].split, parameter[constant[ ], constant[2]]] if compare[call[name[status].strip, parameter[]] equal[==] constant[MISSING]] begin[:] call[name[env].logger.warning, parameter[<ast.JoinedStr object at 0x7da18dc05000>]] <ast.Try object at 0x7da18bccaa10> return[name[ret_val]]
keyword[def] identifier[_install] ( identifier[self] , identifier[name] , identifier[version] , identifier[repos] ): literal[string] keyword[from] identifier[sos] . identifier[pattern] keyword[import] identifier[glob_wildcards] keyword[import] identifier[tempfile] keyword[import] identifier[subprocess] identifier[output_file] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[mode] = literal[string] , identifier[suffix] = literal[string] , identifier[delete] = keyword[False] ). identifier[name] identifier[script_file] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[mode] = literal[string] , identifier[suffix] = literal[string] , identifier[delete] = keyword[False] ). identifier[name] identifier[package_loaded] = literal[string] identifier[version_satisfied] = literal[string] keyword[for] identifier[opt] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ): keyword[if] identifier[opt] keyword[in] identifier[name] : keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[name] , identifier[version] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[name] . identifier[split] ( identifier[opt] , literal[int] )] keyword[if] literal[string] keyword[in] identifier[version] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[version] =( identifier[opt] + identifier[version] ,) keyword[break] keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] : identifier[version] = identifier[list] ( identifier[version] ) identifier[operators] =[] keyword[for] identifier[idx] , identifier[value] keyword[in] identifier[enumerate] ( identifier[version] ): identifier[value] = identifier[str] ( identifier[value] ) keyword[if] identifier[value] . identifier[startswith] ( literal[string] ): identifier[operators] . 
identifier[append] ( literal[string] ) identifier[version] [ identifier[idx] ]= identifier[value] [ literal[int] :] keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ): identifier[operators] . identifier[append] ( literal[string] ) identifier[version] [ identifier[idx] ]= identifier[value] [ literal[int] :] keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ): identifier[operators] . identifier[append] ( literal[string] ) identifier[version] [ identifier[idx] ]= identifier[value] [ literal[int] :] keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ): identifier[operators] . identifier[append] ( literal[string] ) identifier[version] [ identifier[idx] ]= identifier[value] [ literal[int] :] keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ): identifier[operators] . identifier[append] ( literal[string] ) identifier[version] [ identifier[idx] ]= identifier[value] [ literal[int] :] keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ): identifier[operators] . identifier[append] ( literal[string] ) identifier[version] [ identifier[idx] ]= identifier[value] [ literal[int] :] keyword[else] : identifier[operators] . identifier[append] ( literal[string] ) identifier[version_satisfied] = literal[string] . identifier[join] ([ literal[string] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[version] , identifier[operators] ) ]) keyword[if] identifier[len] ( identifier[glob_wildcards] ( literal[string] ,[ identifier[name] ])[ literal[string] ]): identifier[self] . identifier[_install] ( literal[string] , keyword[None] , identifier[repos] ) identifier[install_script] = literal[string] keyword[else] : identifier[install_script] = literal[string] keyword[try] : keyword[with] identifier[open] ( identifier[script_file] , literal[string] ) keyword[as] identifier[sfile] : identifier[sfile] . 
identifier[write] ( identifier[install_script] ) identifier[p] = identifier[subprocess] . identifier[Popen] ( [ literal[string] , literal[string] , identifier[script_file] ]) identifier[ret] = identifier[p] . identifier[wait] () keyword[if] identifier[ret] != literal[int] : identifier[env] . identifier[logger] . identifier[warning] ( literal[string] ) keyword[return] keyword[False] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[env] . identifier[logger] . identifier[error] ( literal[string] ) keyword[return] keyword[False] keyword[finally] : identifier[os] . identifier[remove] ( identifier[script_file] ) identifier[ret_val] = keyword[False] keyword[with] identifier[open] ( identifier[output_file] ) keyword[as] identifier[tmp] : keyword[for] identifier[line] keyword[in] identifier[tmp] : identifier[lib] , identifier[cur_version] , identifier[status] = identifier[line] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[status] . identifier[strip] ()== literal[string] : identifier[env] . identifier[logger] . identifier[warning] ( literal[string] ) keyword[elif] identifier[status] . identifier[strip] ()== literal[string] : identifier[env] . identifier[logger] . identifier[warning] ( literal[string] ) keyword[elif] identifier[status] . identifier[strip] ()== literal[string] : identifier[env] . identifier[logger] . identifier[debug] ( literal[string] ) identifier[ret_val] = keyword[True] keyword[elif] identifier[status] . identifier[strip] ()== literal[string] : identifier[env] . identifier[logger] . identifier[debug] ( literal[string] ) identifier[ret_val] = keyword[True] keyword[elif] identifier[status] . identifier[strip] ()== literal[string] : identifier[env] . identifier[logger] . identifier[warning] ( literal[string] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[try] : identifier[os] . 
identifier[remove] ( identifier[output_file] ) keyword[except] identifier[Exception] : keyword[pass] keyword[return] identifier[ret_val]
def _install(self, name, version, repos): """Check existence and version match of R library. cran and bioc packages are unique yet might overlap with github. Therefore if the input name is {repo}/{pkg} the package will be installed from github if not available, else from cran or bioc """ from sos.pattern import glob_wildcards import tempfile import subprocess output_file = tempfile.NamedTemporaryFile(mode='w+t', suffix='.txt', delete=False).name script_file = tempfile.NamedTemporaryFile(mode='w+t', suffix='.R', delete=False).name # package_loaded = 'suppressMessages(require(package, character.only=TRUE, quietly=TRUE))' version_satisfied = 'TRUE' for opt in ('==', '>=', '>', '<=', '<', '!='): if opt in name: if version is not None: raise ValueError(f"Specifying 'version=' option in addition to '{name}' is not allowed") # depends on [control=['if'], data=[]] (name, version) = [x.strip() for x in name.split(opt, 1)] if ',' in version: raise ValueError(f'SoS does not yet support multiple version comparisons. 
{version} provided') # depends on [control=['if'], data=['version']] version = (opt + version,) break # depends on [control=['if'], data=['opt', 'name']] # depends on [control=['for'], data=['opt']] if version is not None: version = list(version) operators = [] for (idx, value) in enumerate(version): value = str(value) if value.startswith('>='): operators.append('>=') version[idx] = value[2:] # depends on [control=['if'], data=[]] elif value.startswith('>'): operators.append('>') version[idx] = value[1:] # depends on [control=['if'], data=[]] elif value.startswith('<='): operators.append('<=') version[idx] = value[2:] # depends on [control=['if'], data=[]] elif value.startswith('<'): operators.append('<') version[idx] = value[1:] # depends on [control=['if'], data=[]] elif value.startswith('=='): operators.append('==') version[idx] = value[2:] # depends on [control=['if'], data=[]] elif value.startswith('!='): operators.append('!=') version[idx] = value[2:] # depends on [control=['if'], data=[]] else: operators.append('==') # depends on [control=['for'], data=[]] # check version and mark version mismatch # if current version satisfies any of the # requirement the check program quits version_satisfied = '||'.join([f'(cur_version {y} {repr(x)})' for (x, y) in zip(version, operators)]) # depends on [control=['if'], data=['version']] # if len(glob_wildcards('{repo}@{pkg}', [name])['repo']): # package is from github self._install('remotes', None, repos) install_script = f'''\n options(warn=-1)\n package_repo <-strsplit("{name}", split="@")[[1]][2]\n package <-strsplit("{name}", split="@")[[1]][1]\n if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL\n if (!is.null(cur_version) && {version_satisfied}) {{\n write(paste(package, cur_version, "AVAILABLE"), file={repr(output_file)})\n }} else if ({('TRUE' if self._autoinstall else 'FALSE')}) {{\n remotes::install_github(package_repo, force = TRUE)\n if ({package_loaded}) cur_version <- 
packageVersion(package) else cur_version <- NULL\n # if it still does not exist, write the package name to output\n if (!is.null(cur_version)) {{\n if ({version_satisfied}) write(paste(package, cur_version, "INSTALLED"), file={repr(output_file)})\n else write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)})\n }} else {{\n write(paste(package, "NA", "MISSING"), file={repr(output_file)})\n quit("no")\n }}\n }} else {{\n if (!is.null(cur_version)) write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) else write(paste(package, cur_version, "UNAVAILABLE"), file={repr(output_file)})\n }}\n ''' # depends on [control=['if'], data=[]] else: # package is from cran or bioc install_script = f'''\n options(warn=-1)\n package <- "{name}"\n if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL\n if (!is.null(cur_version) && {version_satisfied}) {{\n write(paste(package, cur_version, "AVAILABLE"), file={repr(output_file)})\n }} else if ({('TRUE' if self._autoinstall else 'FALSE')}) {{\n install.packages(package, repos="{repos}", quiet=FALSE)\n # if the package still does not exist\n if (!{package_loaded}) {{\n source("http://bioconductor.org/biocLite.R")\n biocLite(package, suppressUpdates=TRUE, suppressAutoUpdate=TRUE, ask=FALSE)\n }}\n if ({package_loaded}) cur_version <- packageVersion(package) else cur_version <- NULL\n # if it still does not exist, write the package name to output\n if (!is.null(cur_version)) {{\n if ({version_satisfied}) write(paste(package, cur_version, "INSTALLED"), file={repr(output_file)}) else write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)})\n }} else {{\n write(paste(package, "NA", "MISSING"), file={repr(output_file)})\n quit("no")\n }}\n }} else {{\n if (!is.null(cur_version)) write(paste(package, cur_version, "VERSION_MISMATCH"), file={repr(output_file)}) else write(paste(package, cur_version, "UNAVAILABLE"), 
file={repr(output_file)})\n }}\n ''' # temporarily change the run mode to run to execute script try: with open(script_file, 'w') as sfile: sfile.write(install_script) # depends on [control=['with'], data=['sfile']] # p = subprocess.Popen(['Rscript', '--default-packages=utils', script_file]) ret = p.wait() if ret != 0: env.logger.warning(f'Failed to detect or install R library {name}') return False # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception as e: env.logger.error(f'Failed to execute script: {e}') return False # depends on [control=['except'], data=['e']] finally: os.remove(script_file) ret_val = False with open(output_file) as tmp: for line in tmp: (lib, cur_version, status) = line.split(' ', 2) if status.strip() == 'MISSING': env.logger.warning(f'R library {lib} is not available and cannot be installed.') # depends on [control=['if'], data=[]] elif status.strip() == 'UNAVAILABLE': env.logger.warning(f'R library {lib} is not available.') # depends on [control=['if'], data=[]] elif status.strip() == 'AVAILABLE': env.logger.debug(f'R library {lib} ({cur_version}) is available') ret_val = True # depends on [control=['if'], data=[]] elif status.strip() == 'INSTALLED': env.logger.debug(f'R library {lib} ({cur_version}) has been installed') ret_val = True # depends on [control=['if'], data=[]] elif status.strip() == 'VERSION_MISMATCH': env.logger.warning(f"R library {lib} ({cur_version}) does not satisfy version requirement ({'/'.join(version)})!") # depends on [control=['if'], data=[]] else: raise RuntimeError(f'This should not happen: {line}') # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['tmp']] try: os.remove(output_file) # depends on [control=['try'], data=[]] except Exception: pass # depends on [control=['except'], data=[]] return ret_val
def Parse(self, stat, file_object, knowledge_base): """Parse a ntp config into rdf.""" _, _ = stat, knowledge_base # TODO(hanuszczak): This parser only allows single use because it messes # with its state. This should be fixed. field_parser = NtpdFieldParser() for line in field_parser.ParseEntries( utils.ReadFileBytesAsUnicode(file_object)): field_parser.ParseLine(line) yield rdf_config_file.NtpConfig( config=field_parser.config, server=field_parser.keyed.get("server"), restrict=field_parser.keyed.get("restrict"), fudge=field_parser.keyed.get("fudge"), trap=field_parser.keyed.get("trap"), peer=field_parser.keyed.get("peer"), broadcast=field_parser.keyed.get("broadcast"), manycastclient=field_parser.keyed.get("manycastclient"))
def function[Parse, parameter[self, stat, file_object, knowledge_base]]: constant[Parse a ntp config into rdf.] <ast.Tuple object at 0x7da1b1b85810> assign[=] tuple[[<ast.Name object at 0x7da1b1b86ce0>, <ast.Name object at 0x7da1b1b87310>]] variable[field_parser] assign[=] call[name[NtpdFieldParser], parameter[]] for taget[name[line]] in starred[call[name[field_parser].ParseEntries, parameter[call[name[utils].ReadFileBytesAsUnicode, parameter[name[file_object]]]]]] begin[:] call[name[field_parser].ParseLine, parameter[name[line]]] <ast.Yield object at 0x7da1b1b87490>
keyword[def] identifier[Parse] ( identifier[self] , identifier[stat] , identifier[file_object] , identifier[knowledge_base] ): literal[string] identifier[_] , identifier[_] = identifier[stat] , identifier[knowledge_base] identifier[field_parser] = identifier[NtpdFieldParser] () keyword[for] identifier[line] keyword[in] identifier[field_parser] . identifier[ParseEntries] ( identifier[utils] . identifier[ReadFileBytesAsUnicode] ( identifier[file_object] )): identifier[field_parser] . identifier[ParseLine] ( identifier[line] ) keyword[yield] identifier[rdf_config_file] . identifier[NtpConfig] ( identifier[config] = identifier[field_parser] . identifier[config] , identifier[server] = identifier[field_parser] . identifier[keyed] . identifier[get] ( literal[string] ), identifier[restrict] = identifier[field_parser] . identifier[keyed] . identifier[get] ( literal[string] ), identifier[fudge] = identifier[field_parser] . identifier[keyed] . identifier[get] ( literal[string] ), identifier[trap] = identifier[field_parser] . identifier[keyed] . identifier[get] ( literal[string] ), identifier[peer] = identifier[field_parser] . identifier[keyed] . identifier[get] ( literal[string] ), identifier[broadcast] = identifier[field_parser] . identifier[keyed] . identifier[get] ( literal[string] ), identifier[manycastclient] = identifier[field_parser] . identifier[keyed] . identifier[get] ( literal[string] ))
def Parse(self, stat, file_object, knowledge_base): """Parse a ntp config into rdf.""" (_, _) = (stat, knowledge_base) # TODO(hanuszczak): This parser only allows single use because it messes # with its state. This should be fixed. field_parser = NtpdFieldParser() for line in field_parser.ParseEntries(utils.ReadFileBytesAsUnicode(file_object)): field_parser.ParseLine(line) # depends on [control=['for'], data=['line']] yield rdf_config_file.NtpConfig(config=field_parser.config, server=field_parser.keyed.get('server'), restrict=field_parser.keyed.get('restrict'), fudge=field_parser.keyed.get('fudge'), trap=field_parser.keyed.get('trap'), peer=field_parser.keyed.get('peer'), broadcast=field_parser.keyed.get('broadcast'), manycastclient=field_parser.keyed.get('manycastclient'))
def _parseline(line): """ Parse a line of Java properties file. :param line: A string to parse, must not start with ' ', '#' or '!' (comment) :return: A tuple of (key, value), both key and value may be None >>> _parseline(" ") (None, '') >>> _parseline("aaa:") ('aaa', '') >>> _parseline(" aaa:") ('aaa', '') >>> _parseline("aaa") ('aaa', '') >>> _parseline("url = http://localhost") ('url', 'http://localhost') >>> _parseline("calendar.japanese.type: LocalGregorianCalendar") ('calendar.japanese.type', 'LocalGregorianCalendar') """ pair = re.split(r"(?:\s+)?(?:(?<!\\)[=:])", line.strip(), 1) key = pair[0].rstrip() if len(pair) < 2: LOGGER.warning("Invalid line found: %s", line) return (key or None, '') return (key, pair[1].strip())
def function[_parseline, parameter[line]]: constant[ Parse a line of Java properties file. :param line: A string to parse, must not start with ' ', '#' or '!' (comment) :return: A tuple of (key, value), both key and value may be None >>> _parseline(" ") (None, '') >>> _parseline("aaa:") ('aaa', '') >>> _parseline(" aaa:") ('aaa', '') >>> _parseline("aaa") ('aaa', '') >>> _parseline("url = http://localhost") ('url', 'http://localhost') >>> _parseline("calendar.japanese.type: LocalGregorianCalendar") ('calendar.japanese.type', 'LocalGregorianCalendar') ] variable[pair] assign[=] call[name[re].split, parameter[constant[(?:\s+)?(?:(?<!\\)[=:])], call[name[line].strip, parameter[]], constant[1]]] variable[key] assign[=] call[call[name[pair]][constant[0]].rstrip, parameter[]] if compare[call[name[len], parameter[name[pair]]] less[<] constant[2]] begin[:] call[name[LOGGER].warning, parameter[constant[Invalid line found: %s], name[line]]] return[tuple[[<ast.BoolOp object at 0x7da18fe93100>, <ast.Constant object at 0x7da18fe92a10>]]] return[tuple[[<ast.Name object at 0x7da18fe925f0>, <ast.Call object at 0x7da18fe93d30>]]]
keyword[def] identifier[_parseline] ( identifier[line] ): literal[string] identifier[pair] = identifier[re] . identifier[split] ( literal[string] , identifier[line] . identifier[strip] (), literal[int] ) identifier[key] = identifier[pair] [ literal[int] ]. identifier[rstrip] () keyword[if] identifier[len] ( identifier[pair] )< literal[int] : identifier[LOGGER] . identifier[warning] ( literal[string] , identifier[line] ) keyword[return] ( identifier[key] keyword[or] keyword[None] , literal[string] ) keyword[return] ( identifier[key] , identifier[pair] [ literal[int] ]. identifier[strip] ())
def _parseline(line): """ Parse a line of Java properties file. :param line: A string to parse, must not start with ' ', '#' or '!' (comment) :return: A tuple of (key, value), both key and value may be None >>> _parseline(" ") (None, '') >>> _parseline("aaa:") ('aaa', '') >>> _parseline(" aaa:") ('aaa', '') >>> _parseline("aaa") ('aaa', '') >>> _parseline("url = http://localhost") ('url', 'http://localhost') >>> _parseline("calendar.japanese.type: LocalGregorianCalendar") ('calendar.japanese.type', 'LocalGregorianCalendar') """ pair = re.split('(?:\\s+)?(?:(?<!\\\\)[=:])', line.strip(), 1) key = pair[0].rstrip() if len(pair) < 2: LOGGER.warning('Invalid line found: %s', line) return (key or None, '') # depends on [control=['if'], data=[]] return (key, pair[1].strip())
def basis_function(degree, knot_vector, span, knot):
    """ Computes the non-vanishing basis functions for a single parameter.

    Implementation of Algorithm A2.2 from The NURBS Book by Piegl & Tiller.

    :param degree: degree, :math:`p`
    :type degree: int
    :param knot_vector: knot vector, :math:`U`
    :type knot_vector: list, tuple
    :param span: knot span, :math:`i`
    :type span: int
    :param knot: knot or parameter, :math:`u`
    :type knot: float
    :return: basis functions
    :rtype: list
    """
    size = degree + 1
    left = [0.0] * size
    right = [0.0] * size
    # The zeroth-degree basis function is identically 1.0 on its span.
    basis = [1.0] * size

    # Build up the degree-j functions from the degree-(j-1) ones.
    for j in range(1, size):
        left[j] = knot - knot_vector[span + 1 - j]
        right[j] = knot_vector[span + j] - knot
        saved = 0.0
        for r in range(j):
            term = basis[r] / (right[r + 1] + left[j - r])
            basis[r] = saved + right[r + 1] * term
            saved = left[j - r] * term
        basis[j] = saved

    return basis
def function[basis_function, parameter[degree, knot_vector, span, knot]]: constant[ Computes the non-vanishing basis functions for a single parameter. Implementation of Algorithm A2.2 from The NURBS Book by Piegl & Tiller. :param degree: degree, :math:`p` :type degree: int :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :param span: knot span, :math:`i` :type span: int :param knot: knot or parameter, :math:`u` :type knot: float :return: basis functions :rtype: list ] variable[left] assign[=] <ast.ListComp object at 0x7da1b17836a0> variable[right] assign[=] <ast.ListComp object at 0x7da1b1783910> variable[N] assign[=] <ast.ListComp object at 0x7da1b1782920> for taget[name[j]] in starred[call[name[range], parameter[constant[1], binary_operation[name[degree] + constant[1]]]]] begin[:] call[name[left]][name[j]] assign[=] binary_operation[name[knot] - call[name[knot_vector]][binary_operation[binary_operation[name[span] + constant[1]] - name[j]]]] call[name[right]][name[j]] assign[=] binary_operation[call[name[knot_vector]][binary_operation[name[span] + name[j]]] - name[knot]] variable[saved] assign[=] constant[0.0] for taget[name[r]] in starred[call[name[range], parameter[constant[0], name[j]]]] begin[:] variable[temp] assign[=] binary_operation[call[name[N]][name[r]] / binary_operation[call[name[right]][binary_operation[name[r] + constant[1]]] + call[name[left]][binary_operation[name[j] - name[r]]]]] call[name[N]][name[r]] assign[=] binary_operation[name[saved] + binary_operation[call[name[right]][binary_operation[name[r] + constant[1]]] * name[temp]]] variable[saved] assign[=] binary_operation[call[name[left]][binary_operation[name[j] - name[r]]] * name[temp]] call[name[N]][name[j]] assign[=] name[saved] return[name[N]]
keyword[def] identifier[basis_function] ( identifier[degree] , identifier[knot_vector] , identifier[span] , identifier[knot] ): literal[string] identifier[left] =[ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[degree] + literal[int] )] identifier[right] =[ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[degree] + literal[int] )] identifier[N] =[ literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[degree] + literal[int] )] keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[degree] + literal[int] ): identifier[left] [ identifier[j] ]= identifier[knot] - identifier[knot_vector] [ identifier[span] + literal[int] - identifier[j] ] identifier[right] [ identifier[j] ]= identifier[knot_vector] [ identifier[span] + identifier[j] ]- identifier[knot] identifier[saved] = literal[int] keyword[for] identifier[r] keyword[in] identifier[range] ( literal[int] , identifier[j] ): identifier[temp] = identifier[N] [ identifier[r] ]/( identifier[right] [ identifier[r] + literal[int] ]+ identifier[left] [ identifier[j] - identifier[r] ]) identifier[N] [ identifier[r] ]= identifier[saved] + identifier[right] [ identifier[r] + literal[int] ]* identifier[temp] identifier[saved] = identifier[left] [ identifier[j] - identifier[r] ]* identifier[temp] identifier[N] [ identifier[j] ]= identifier[saved] keyword[return] identifier[N]
def basis_function(degree, knot_vector, span, knot): """ Computes the non-vanishing basis functions for a single parameter. Implementation of Algorithm A2.2 from The NURBS Book by Piegl & Tiller. :param degree: degree, :math:`p` :type degree: int :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :param span: knot span, :math:`i` :type span: int :param knot: knot or parameter, :math:`u` :type knot: float :return: basis functions :rtype: list """ left = [0.0 for _ in range(degree + 1)] right = [0.0 for _ in range(degree + 1)] N = [1.0 for _ in range(degree + 1)] # N[0] = 1.0 by definition for j in range(1, degree + 1): left[j] = knot - knot_vector[span + 1 - j] right[j] = knot_vector[span + j] - knot saved = 0.0 for r in range(0, j): temp = N[r] / (right[r + 1] + left[j - r]) N[r] = saved + right[r + 1] * temp saved = left[j - r] * temp # depends on [control=['for'], data=['r']] N[j] = saved # depends on [control=['for'], data=['j']] return N
def close(self):
    """
    Sends a shutdown signal to the unity environment, and closes the socket connection.
    """
    if self._socket is not None and self._conn is not None:
        # Politely ask the environment to shut down (status 400) before
        # tearing down the transport.
        message_input = UnityMessage()
        message_input.header.status = 400
        self._communicator_send(message_input.SerializeToString())
    if self._socket is not None:
        self._socket.close()
        self._socket = None
    # Bug fix: this guard previously re-checked self._socket, which is
    # always None by this point, so the connection was never closed.
    if self._conn is not None:
        self._conn.close()
        self._conn = None
def function[close, parameter[self]]: constant[ Sends a shutdown signal to the unity environment, and closes the socket connection. ] if <ast.BoolOp object at 0x7da1b1e12ce0> begin[:] variable[message_input] assign[=] call[name[UnityMessage], parameter[]] name[message_input].header.status assign[=] constant[400] call[name[self]._communicator_send, parameter[call[name[message_input].SerializeToString, parameter[]]]] if compare[name[self]._socket is_not constant[None]] begin[:] call[name[self]._socket.close, parameter[]] name[self]._socket assign[=] constant[None] if compare[name[self]._socket is_not constant[None]] begin[:] call[name[self]._conn.close, parameter[]] name[self]._conn assign[=] constant[None]
keyword[def] identifier[close] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_socket] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_conn] keyword[is] keyword[not] keyword[None] : identifier[message_input] = identifier[UnityMessage] () identifier[message_input] . identifier[header] . identifier[status] = literal[int] identifier[self] . identifier[_communicator_send] ( identifier[message_input] . identifier[SerializeToString] ()) keyword[if] identifier[self] . identifier[_socket] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_socket] . identifier[close] () identifier[self] . identifier[_socket] = keyword[None] keyword[if] identifier[self] . identifier[_socket] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_conn] . identifier[close] () identifier[self] . identifier[_conn] = keyword[None]
def close(self): """ Sends a shutdown signal to the unity environment, and closes the socket connection. """ if self._socket is not None and self._conn is not None: message_input = UnityMessage() message_input.header.status = 400 self._communicator_send(message_input.SerializeToString()) # depends on [control=['if'], data=[]] if self._socket is not None: self._socket.close() self._socket = None # depends on [control=['if'], data=[]] if self._socket is not None: self._conn.close() self._conn = None # depends on [control=['if'], data=[]]
def terminal_unreserve(progress_obj, terminal_obj=None, verbose=0, identifier=None):
    """
    Unregisters the terminal (stdout) for printing.

    an instance (progress_obj) can only unreserve the tty (terminal_obj) when it also reserved it

    see terminal_reserved for more information

    Returns
    -------
    None
    """
    if terminal_obj is None:
        terminal_obj = sys.stdout

    # NOTE(review): the formatted identifier is built but never used below.
    identifier = '' if identifier is None else identifier + ': '

    owner = TERMINAL_RESERVATION.get(terminal_obj)
    if owner is None:
        log.debug("terminal %s was not reserved, nothing happens", terminal_obj)
        return

    if owner is progress_obj:
        # Only the instance that made the reservation may release it.
        log.debug("terminal %s now unreserned", terminal_obj)
        del TERMINAL_RESERVATION[terminal_obj]
    else:
        log.debug("you %s can NOT unreserve terminal %s be cause it was reserved by %s",
                  progress_obj, terminal_obj, owner)
def function[terminal_unreserve, parameter[progress_obj, terminal_obj, verbose, identifier]]: constant[ Unregisters the terminal (stdout) for printing. an instance (progress_obj) can only unreserve the tty (terminal_obj) when it also reserved it see terminal_reserved for more information Returns ------- None ] if compare[name[terminal_obj] is constant[None]] begin[:] variable[terminal_obj] assign[=] name[sys].stdout if compare[name[identifier] is constant[None]] begin[:] variable[identifier] assign[=] constant[] variable[po] assign[=] call[name[TERMINAL_RESERVATION].get, parameter[name[terminal_obj]]] if compare[name[po] is constant[None]] begin[:] call[name[log].debug, parameter[constant[terminal %s was not reserved, nothing happens], name[terminal_obj]]]
keyword[def] identifier[terminal_unreserve] ( identifier[progress_obj] , identifier[terminal_obj] = keyword[None] , identifier[verbose] = literal[int] , identifier[identifier] = keyword[None] ): literal[string] keyword[if] identifier[terminal_obj] keyword[is] keyword[None] : identifier[terminal_obj] = identifier[sys] . identifier[stdout] keyword[if] identifier[identifier] keyword[is] keyword[None] : identifier[identifier] = literal[string] keyword[else] : identifier[identifier] = identifier[identifier] + literal[string] identifier[po] = identifier[TERMINAL_RESERVATION] . identifier[get] ( identifier[terminal_obj] ) keyword[if] identifier[po] keyword[is] keyword[None] : identifier[log] . identifier[debug] ( literal[string] , identifier[terminal_obj] ) keyword[else] : keyword[if] identifier[po] keyword[is] identifier[progress_obj] : identifier[log] . identifier[debug] ( literal[string] , identifier[terminal_obj] ) keyword[del] identifier[TERMINAL_RESERVATION] [ identifier[terminal_obj] ] keyword[else] : identifier[log] . identifier[debug] ( literal[string] , identifier[progress_obj] , identifier[terminal_obj] , identifier[po] )
def terminal_unreserve(progress_obj, terminal_obj=None, verbose=0, identifier=None): """ Unregisters the terminal (stdout) for printing. an instance (progress_obj) can only unreserve the tty (terminal_obj) when it also reserved it see terminal_reserved for more information Returns ------- None """ if terminal_obj is None: terminal_obj = sys.stdout # depends on [control=['if'], data=['terminal_obj']] if identifier is None: identifier = '' # depends on [control=['if'], data=['identifier']] else: identifier = identifier + ': ' po = TERMINAL_RESERVATION.get(terminal_obj) if po is None: log.debug('terminal %s was not reserved, nothing happens', terminal_obj) # depends on [control=['if'], data=[]] elif po is progress_obj: log.debug('terminal %s now unreserned', terminal_obj) del TERMINAL_RESERVATION[terminal_obj] # depends on [control=['if'], data=[]] else: log.debug('you %s can NOT unreserve terminal %s be cause it was reserved by %s', progress_obj, terminal_obj, po)
def patch_fromText(self, textline):
    """Parse a textual representation of patches and return a list of patch
    objects.

    Args:
      textline: Text representation of patches.

    Returns:
      Array of Patch objects.

    Raises:
      ValueError: If invalid input.
    """
    if type(textline) == unicode:
      # Patches should be composed of a subset of ascii chars, Unicode not
      # required. If this encode raises UnicodeEncodeError, patch is invalid.
      textline = textline.encode("ascii")
    patches = []
    if not textline:
      return patches
    # Process line by line; every patch starts with a unidiff-style header:
    # "@@ -start1,length1 +start2,length2 @@" (the ",length" part is optional).
    text = textline.split('\n')
    while len(text) != 0:
      m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
      if not m:
        raise ValueError("Invalid patch string: " + text[0])
      patch = patch_obj()
      patches.append(patch)
      # Decode the (start1, length1) pair. An omitted length means 1; the
      # printed start is converted from 1-based to 0-based, except when the
      # length is 0 (insertion point).
      patch.start1 = int(m.group(1))
      if m.group(2) == '':
        patch.start1 -= 1
        patch.length1 = 1
      elif m.group(2) == '0':
        patch.length1 = 0
      else:
        patch.start1 -= 1
        patch.length1 = int(m.group(2))

      # Same decoding rules for the (start2, length2) coordinates.
      patch.start2 = int(m.group(3))
      if m.group(4) == '':
        patch.start2 -= 1
        patch.length2 = 1
      elif m.group(4) == '0':
        patch.length2 = 0
      else:
        patch.start2 -= 1
        patch.length2 = int(m.group(4))
      del text[0]

      # Consume this patch's body lines until the next "@@" header (or EOF).
      # The first character of each line is the operation sign; the rest is
      # the %xx-encoded payload.
      while len(text) != 0:
        if text[0]:
          sign = text[0][0]
        else:
          sign = ''
        line = urllib.unquote(text[0][1:])
        line = line.decode("utf-8")
        if sign == '+':
          # Insertion.
          patch.diffs.append((self.DIFF_INSERT, line))
        elif sign == '-':
          # Deletion.
          patch.diffs.append((self.DIFF_DELETE, line))
        elif sign == ' ':
          # Minor equality.
          patch.diffs.append((self.DIFF_EQUAL, line))
        elif sign == '@':
          # Start of next patch.
          break
        elif sign == '':
          # Blank line? Whatever.
          pass
        else:
          # WTF?
          raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
        del text[0]
    return patches
def function[patch_fromText, parameter[self, textline]]: constant[Parse a textual representation of patches and return a list of patch objects. Args: textline: Text representation of patches. Returns: Array of Patch objects. Raises: ValueError: If invalid input. ] if compare[call[name[type], parameter[name[textline]]] equal[==] name[unicode]] begin[:] variable[textline] assign[=] call[name[textline].encode, parameter[constant[ascii]]] variable[patches] assign[=] list[[]] if <ast.UnaryOp object at 0x7da1b0705a50> begin[:] return[name[patches]] variable[text] assign[=] call[name[textline].split, parameter[constant[ ]]] while compare[call[name[len], parameter[name[text]]] not_equal[!=] constant[0]] begin[:] variable[m] assign[=] call[name[re].match, parameter[constant[^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$], call[name[text]][constant[0]]]] if <ast.UnaryOp object at 0x7da1b0704ac0> begin[:] <ast.Raise object at 0x7da1b0704940> variable[patch] assign[=] call[name[patch_obj], parameter[]] call[name[patches].append, parameter[name[patch]]] name[patch].start1 assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[1]]]]] if compare[call[name[m].group, parameter[constant[2]]] equal[==] constant[]] begin[:] <ast.AugAssign object at 0x7da1b0705d50> name[patch].length1 assign[=] constant[1] name[patch].start2 assign[=] call[name[int], parameter[call[name[m].group, parameter[constant[3]]]]] if compare[call[name[m].group, parameter[constant[4]]] equal[==] constant[]] begin[:] <ast.AugAssign object at 0x7da1b0705330> name[patch].length2 assign[=] constant[1] <ast.Delete object at 0x7da20e9b0580> while compare[call[name[len], parameter[name[text]]] not_equal[!=] constant[0]] begin[:] if call[name[text]][constant[0]] begin[:] variable[sign] assign[=] call[call[name[text]][constant[0]]][constant[0]] variable[line] assign[=] call[name[urllib].unquote, parameter[call[call[name[text]][constant[0]]][<ast.Slice object at 0x7da1b0704f40>]]] variable[line] assign[=] 
call[name[line].decode, parameter[constant[utf-8]]] if compare[name[sign] equal[==] constant[+]] begin[:] call[name[patch].diffs.append, parameter[tuple[[<ast.Attribute object at 0x7da1b0706200>, <ast.Name object at 0x7da1b0706230>]]]] <ast.Delete object at 0x7da1b0841ae0> return[name[patches]]
keyword[def] identifier[patch_fromText] ( identifier[self] , identifier[textline] ): literal[string] keyword[if] identifier[type] ( identifier[textline] )== identifier[unicode] : identifier[textline] = identifier[textline] . identifier[encode] ( literal[string] ) identifier[patches] =[] keyword[if] keyword[not] identifier[textline] : keyword[return] identifier[patches] identifier[text] = identifier[textline] . identifier[split] ( literal[string] ) keyword[while] identifier[len] ( identifier[text] )!= literal[int] : identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[text] [ literal[int] ]) keyword[if] keyword[not] identifier[m] : keyword[raise] identifier[ValueError] ( literal[string] + identifier[text] [ literal[int] ]) identifier[patch] = identifier[patch_obj] () identifier[patches] . identifier[append] ( identifier[patch] ) identifier[patch] . identifier[start1] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] )) keyword[if] identifier[m] . identifier[group] ( literal[int] )== literal[string] : identifier[patch] . identifier[start1] -= literal[int] identifier[patch] . identifier[length1] = literal[int] keyword[elif] identifier[m] . identifier[group] ( literal[int] )== literal[string] : identifier[patch] . identifier[length1] = literal[int] keyword[else] : identifier[patch] . identifier[start1] -= literal[int] identifier[patch] . identifier[length1] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] )) identifier[patch] . identifier[start2] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] )) keyword[if] identifier[m] . identifier[group] ( literal[int] )== literal[string] : identifier[patch] . identifier[start2] -= literal[int] identifier[patch] . identifier[length2] = literal[int] keyword[elif] identifier[m] . identifier[group] ( literal[int] )== literal[string] : identifier[patch] . identifier[length2] = literal[int] keyword[else] : identifier[patch] . 
identifier[start2] -= literal[int] identifier[patch] . identifier[length2] = identifier[int] ( identifier[m] . identifier[group] ( literal[int] )) keyword[del] identifier[text] [ literal[int] ] keyword[while] identifier[len] ( identifier[text] )!= literal[int] : keyword[if] identifier[text] [ literal[int] ]: identifier[sign] = identifier[text] [ literal[int] ][ literal[int] ] keyword[else] : identifier[sign] = literal[string] identifier[line] = identifier[urllib] . identifier[unquote] ( identifier[text] [ literal[int] ][ literal[int] :]) identifier[line] = identifier[line] . identifier[decode] ( literal[string] ) keyword[if] identifier[sign] == literal[string] : identifier[patch] . identifier[diffs] . identifier[append] (( identifier[self] . identifier[DIFF_INSERT] , identifier[line] )) keyword[elif] identifier[sign] == literal[string] : identifier[patch] . identifier[diffs] . identifier[append] (( identifier[self] . identifier[DIFF_DELETE] , identifier[line] )) keyword[elif] identifier[sign] == literal[string] : identifier[patch] . identifier[diffs] . identifier[append] (( identifier[self] . identifier[DIFF_EQUAL] , identifier[line] )) keyword[elif] identifier[sign] == literal[string] : keyword[break] keyword[elif] identifier[sign] == literal[string] : keyword[pass] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[sign] , identifier[line] )) keyword[del] identifier[text] [ literal[int] ] keyword[return] identifier[patches]
def patch_fromText(self, textline): """Parse a textual representation of patches and return a list of patch objects. Args: textline: Text representation of patches. Returns: Array of Patch objects. Raises: ValueError: If invalid input. """ if type(textline) == unicode: # Patches should be composed of a subset of ascii chars, Unicode not # required. If this encode raises UnicodeEncodeError, patch is invalid. textline = textline.encode('ascii') # depends on [control=['if'], data=[]] patches = [] if not textline: return patches # depends on [control=['if'], data=[]] text = textline.split('\n') while len(text) != 0: m = re.match('^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$', text[0]) if not m: raise ValueError('Invalid patch string: ' + text[0]) # depends on [control=['if'], data=[]] patch = patch_obj() patches.append(patch) patch.start1 = int(m.group(1)) if m.group(2) == '': patch.start1 -= 1 patch.length1 = 1 # depends on [control=['if'], data=[]] elif m.group(2) == '0': patch.length1 = 0 # depends on [control=['if'], data=[]] else: patch.start1 -= 1 patch.length1 = int(m.group(2)) patch.start2 = int(m.group(3)) if m.group(4) == '': patch.start2 -= 1 patch.length2 = 1 # depends on [control=['if'], data=[]] elif m.group(4) == '0': patch.length2 = 0 # depends on [control=['if'], data=[]] else: patch.start2 -= 1 patch.length2 = int(m.group(4)) del text[0] while len(text) != 0: if text[0]: sign = text[0][0] # depends on [control=['if'], data=[]] else: sign = '' line = urllib.unquote(text[0][1:]) line = line.decode('utf-8') if sign == '+': # Insertion. patch.diffs.append((self.DIFF_INSERT, line)) # depends on [control=['if'], data=[]] elif sign == '-': # Deletion. patch.diffs.append((self.DIFF_DELETE, line)) # depends on [control=['if'], data=[]] elif sign == ' ': # Minor equality. patch.diffs.append((self.DIFF_EQUAL, line)) # depends on [control=['if'], data=[]] elif sign == '@': # Start of next patch. 
break # depends on [control=['if'], data=[]] elif sign == '': # Blank line? Whatever. pass # depends on [control=['if'], data=[]] else: # WTF? raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line)) del text[0] # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]] return patches
def from_textfile(cls, textfile, workers=1, job_size=1000):
    """ Count the set of words appeared in a text file.

    Args:
      textfile (string): The name of the text file or `TextFile` object.
      workers (integer): Number of parallel workers to read the file
                         simulatenously.
      job_size (integer): Size of the batch send to each worker.

    Returns:
      A vocabulary of the words appeared in the document with their counts.
    """
    word_counts = Counter()
    # Accept either a filename or an already-constructed TextFile.
    if isinstance(textfile, string_types):
        textfile = TextFile(textfile)
    # Each worker returns a partial count; merge them all into one Counter.
    for partial in textfile.apply(count, workers, job_size):
        word_counts.update(partial)
    return CountedVocabulary(word_count=word_counts)
def function[from_textfile, parameter[cls, textfile, workers, job_size]]: constant[ Count the set of words appeared in a text file. Args: textfile (string): The name of the text file or `TextFile` object. min_count (integer): Minimum number of times a word/token appeared in the document to be considered part of the vocabulary. workers (integer): Number of parallel workers to read the file simulatenously. job_size (integer): Size of the batch send to each worker. most_frequent (integer): if no min_count is specified, consider the most frequent k words for the vocabulary. Returns: A vocabulary of the most frequent words appeared in the document. ] variable[c] assign[=] call[name[Counter], parameter[]] if call[name[isinstance], parameter[name[textfile], name[string_types]]] begin[:] variable[textfile] assign[=] call[name[TextFile], parameter[name[textfile]]] for taget[name[result]] in starred[call[name[textfile].apply, parameter[name[count], name[workers], name[job_size]]]] begin[:] call[name[c].update, parameter[name[result]]] return[call[name[CountedVocabulary], parameter[]]]
keyword[def] identifier[from_textfile] ( identifier[cls] , identifier[textfile] , identifier[workers] = literal[int] , identifier[job_size] = literal[int] ): literal[string] identifier[c] = identifier[Counter] () keyword[if] identifier[isinstance] ( identifier[textfile] , identifier[string_types] ): identifier[textfile] = identifier[TextFile] ( identifier[textfile] ) keyword[for] identifier[result] keyword[in] identifier[textfile] . identifier[apply] ( identifier[count] , identifier[workers] , identifier[job_size] ): identifier[c] . identifier[update] ( identifier[result] ) keyword[return] identifier[CountedVocabulary] ( identifier[word_count] = identifier[c] )
def from_textfile(cls, textfile, workers=1, job_size=1000): """ Count the set of words appeared in a text file. Args: textfile (string): The name of the text file or `TextFile` object. min_count (integer): Minimum number of times a word/token appeared in the document to be considered part of the vocabulary. workers (integer): Number of parallel workers to read the file simulatenously. job_size (integer): Size of the batch send to each worker. most_frequent (integer): if no min_count is specified, consider the most frequent k words for the vocabulary. Returns: A vocabulary of the most frequent words appeared in the document. """ c = Counter() if isinstance(textfile, string_types): textfile = TextFile(textfile) # depends on [control=['if'], data=[]] for result in textfile.apply(count, workers, job_size): c.update(result) # depends on [control=['for'], data=['result']] return CountedVocabulary(word_count=c)
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor):
  """Wait for job and retry any tasks that fail.

  Stops retrying an individual task when: it succeeds, is canceled, or has been
  retried "retries" times.

  This function exits when there are no tasks running and there are no tasks
  eligible to be retried.

  Args:
    provider: job service provider
    job_id: a single job ID (string) to wait for
    poll_interval: integer seconds to wait between iterations
    retries: number of retries
    job_descriptor: job descriptor used to originally submit job

  Returns:
    Empty list if there was no error, a list containing an error message from a
    failed task otherwise.
  """
  while True:
    # Fetch every task (all statuses) for this job on each poll; the
    # failure counts below are recomputed from scratch each iteration.
    tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id])

    running_tasks = set()
    completed_tasks = set()
    canceled_tasks = set()
    fully_failed_tasks = set()
    # task_id -> number of FAILURE records seen for that task this poll.
    task_fail_count = dict()

    # This is an arbitrary task that is either fully failed or canceled (with
    # preference for the former).
    message_task = None

    # task_id -> most recently seen task object, used to pull the failure
    # message when announcing a retry.
    task_dict = dict()

    for t in tasks:
      task_id = job_model.numeric_task_id(t.get_field('task-id'))
      task_dict[task_id] = t

      status = t.get_field('task-status')
      if status == 'FAILURE':
        # Could compute this from task-attempt as well.
        task_fail_count[task_id] = task_fail_count.get(task_id, 0) + 1
        if task_fail_count[task_id] > retries:
          fully_failed_tasks.add(task_id)
          # Fully-failed wins over canceled for the completion message:
          # assign unconditionally here, conditionally below.
          message_task = t
      elif status == 'CANCELED':
        canceled_tasks.add(task_id)
        if not message_task:
          message_task = t
      elif status == 'SUCCESS':
        completed_tasks.add(task_id)
      elif status == 'RUNNING':
        running_tasks.add(task_id)

    # A task is retryable iff it has at least one failure record and is not
    # already exhausted, running, succeeded, or canceled.
    retry_tasks = (
        set(task_fail_count).difference(fully_failed_tasks)
        .difference(running_tasks).difference(completed_tasks)
        .difference(canceled_tasks))

    # job completed.
    if not retry_tasks and not running_tasks:
      # If there are any fully failed tasks, return the completion message of an
      # arbitrary one.
      # If not, but there are canceled tasks, return the completion message of
      # an arbitrary one.
      if message_task:
        return [provider.get_tasks_completion_messages([message_task])]

      # Otherwise successful completion.
      return []

    for task_id in retry_tasks:
      # NOTE(review): a falsy task_id (e.g. 0 or None for a task-less job)
      # falls back to the bare job_id as the display identifier — confirm
      # numeric_task_id never returns 0 for a real task.
      identifier = '{}.{}'.format(job_id, task_id) if task_id else job_id
      print('  {} (attempt {}) failed. Retrying.'.format(
          identifier, task_fail_count[task_id]))
      msg = task_dict[task_id].get_field('status-message')
      print('  Failure message: {}'.format(msg))

      # Resubmit the task; attempt number is the failure count plus one.
      _retry_task(provider, job_descriptor, task_id,
                  task_fail_count[task_id] + 1)

    SLEEP_FUNCTION(poll_interval)
def function[_wait_and_retry, parameter[provider, job_id, poll_interval, retries, job_descriptor]]: constant[Wait for job and retry any tasks that fail. Stops retrying an individual task when: it succeeds, is canceled, or has been retried "retries" times. This function exits when there are no tasks running and there are no tasks eligible to be retried. Args: provider: job service provider job_id: a single job ID (string) to wait for poll_interval: integer seconds to wait between iterations retries: number of retries job_descriptor: job descriptor used to originally submit job Returns: Empty list if there was no error, a list containing an error message from a failed task otherwise. ] while constant[True] begin[:] variable[tasks] assign[=] call[name[provider].lookup_job_tasks, parameter[<ast.Set object at 0x7da1b0109ae0>]] variable[running_tasks] assign[=] call[name[set], parameter[]] variable[completed_tasks] assign[=] call[name[set], parameter[]] variable[canceled_tasks] assign[=] call[name[set], parameter[]] variable[fully_failed_tasks] assign[=] call[name[set], parameter[]] variable[task_fail_count] assign[=] call[name[dict], parameter[]] variable[message_task] assign[=] constant[None] variable[task_dict] assign[=] call[name[dict], parameter[]] for taget[name[t]] in starred[name[tasks]] begin[:] variable[task_id] assign[=] call[name[job_model].numeric_task_id, parameter[call[name[t].get_field, parameter[constant[task-id]]]]] call[name[task_dict]][name[task_id]] assign[=] name[t] variable[status] assign[=] call[name[t].get_field, parameter[constant[task-status]]] if compare[name[status] equal[==] constant[FAILURE]] begin[:] call[name[task_fail_count]][name[task_id]] assign[=] binary_operation[call[name[task_fail_count].get, parameter[name[task_id], constant[0]]] + constant[1]] if compare[call[name[task_fail_count]][name[task_id]] greater[>] name[retries]] begin[:] call[name[fully_failed_tasks].add, parameter[name[task_id]]] variable[message_task] assign[=] 
name[t] variable[retry_tasks] assign[=] call[call[call[call[call[name[set], parameter[name[task_fail_count]]].difference, parameter[name[fully_failed_tasks]]].difference, parameter[name[running_tasks]]].difference, parameter[name[completed_tasks]]].difference, parameter[name[canceled_tasks]]] if <ast.BoolOp object at 0x7da1b00540d0> begin[:] if name[message_task] begin[:] return[list[[<ast.Call object at 0x7da1b0057e20>]]] return[list[[]]] for taget[name[task_id]] in starred[name[retry_tasks]] begin[:] variable[identifier] assign[=] <ast.IfExp object at 0x7da1b0055900> call[name[print], parameter[call[constant[ {} (attempt {}) failed. Retrying.].format, parameter[name[identifier], call[name[task_fail_count]][name[task_id]]]]]] variable[msg] assign[=] call[call[name[task_dict]][name[task_id]].get_field, parameter[constant[status-message]]] call[name[print], parameter[call[constant[ Failure message: {}].format, parameter[name[msg]]]]] call[name[_retry_task], parameter[name[provider], name[job_descriptor], name[task_id], binary_operation[call[name[task_fail_count]][name[task_id]] + constant[1]]]] call[name[SLEEP_FUNCTION], parameter[name[poll_interval]]]
keyword[def] identifier[_wait_and_retry] ( identifier[provider] , identifier[job_id] , identifier[poll_interval] , identifier[retries] , identifier[job_descriptor] ): literal[string] keyword[while] keyword[True] : identifier[tasks] = identifier[provider] . identifier[lookup_job_tasks] ({ literal[string] }, identifier[job_ids] =[ identifier[job_id] ]) identifier[running_tasks] = identifier[set] () identifier[completed_tasks] = identifier[set] () identifier[canceled_tasks] = identifier[set] () identifier[fully_failed_tasks] = identifier[set] () identifier[task_fail_count] = identifier[dict] () identifier[message_task] = keyword[None] identifier[task_dict] = identifier[dict] () keyword[for] identifier[t] keyword[in] identifier[tasks] : identifier[task_id] = identifier[job_model] . identifier[numeric_task_id] ( identifier[t] . identifier[get_field] ( literal[string] )) identifier[task_dict] [ identifier[task_id] ]= identifier[t] identifier[status] = identifier[t] . identifier[get_field] ( literal[string] ) keyword[if] identifier[status] == literal[string] : identifier[task_fail_count] [ identifier[task_id] ]= identifier[task_fail_count] . identifier[get] ( identifier[task_id] , literal[int] )+ literal[int] keyword[if] identifier[task_fail_count] [ identifier[task_id] ]> identifier[retries] : identifier[fully_failed_tasks] . identifier[add] ( identifier[task_id] ) identifier[message_task] = identifier[t] keyword[elif] identifier[status] == literal[string] : identifier[canceled_tasks] . identifier[add] ( identifier[task_id] ) keyword[if] keyword[not] identifier[message_task] : identifier[message_task] = identifier[t] keyword[elif] identifier[status] == literal[string] : identifier[completed_tasks] . identifier[add] ( identifier[task_id] ) keyword[elif] identifier[status] == literal[string] : identifier[running_tasks] . identifier[add] ( identifier[task_id] ) identifier[retry_tasks] =( identifier[set] ( identifier[task_fail_count] ). 
identifier[difference] ( identifier[fully_failed_tasks] ) . identifier[difference] ( identifier[running_tasks] ). identifier[difference] ( identifier[completed_tasks] ) . identifier[difference] ( identifier[canceled_tasks] )) keyword[if] keyword[not] identifier[retry_tasks] keyword[and] keyword[not] identifier[running_tasks] : keyword[if] identifier[message_task] : keyword[return] [ identifier[provider] . identifier[get_tasks_completion_messages] ([ identifier[message_task] ])] keyword[return] [] keyword[for] identifier[task_id] keyword[in] identifier[retry_tasks] : identifier[identifier] = literal[string] . identifier[format] ( identifier[job_id] , identifier[task_id] ) keyword[if] identifier[task_id] keyword[else] identifier[job_id] identifier[print] ( literal[string] . identifier[format] ( identifier[identifier] , identifier[task_fail_count] [ identifier[task_id] ])) identifier[msg] = identifier[task_dict] [ identifier[task_id] ]. identifier[get_field] ( literal[string] ) identifier[print] ( literal[string] . identifier[format] ( identifier[msg] )) identifier[_retry_task] ( identifier[provider] , identifier[job_descriptor] , identifier[task_id] , identifier[task_fail_count] [ identifier[task_id] ]+ literal[int] ) identifier[SLEEP_FUNCTION] ( identifier[poll_interval] )
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor): """Wait for job and retry any tasks that fail. Stops retrying an individual task when: it succeeds, is canceled, or has been retried "retries" times. This function exits when there are no tasks running and there are no tasks eligible to be retried. Args: provider: job service provider job_id: a single job ID (string) to wait for poll_interval: integer seconds to wait between iterations retries: number of retries job_descriptor: job descriptor used to originally submit job Returns: Empty list if there was no error, a list containing an error message from a failed task otherwise. """ while True: tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id]) running_tasks = set() completed_tasks = set() canceled_tasks = set() fully_failed_tasks = set() task_fail_count = dict() # This is an arbitrary task that is either fully failed or canceled (with # preference for the former). message_task = None task_dict = dict() for t in tasks: task_id = job_model.numeric_task_id(t.get_field('task-id')) task_dict[task_id] = t status = t.get_field('task-status') if status == 'FAILURE': # Could compute this from task-attempt as well. 
task_fail_count[task_id] = task_fail_count.get(task_id, 0) + 1 if task_fail_count[task_id] > retries: fully_failed_tasks.add(task_id) message_task = t # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif status == 'CANCELED': canceled_tasks.add(task_id) if not message_task: message_task = t # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif status == 'SUCCESS': completed_tasks.add(task_id) # depends on [control=['if'], data=[]] elif status == 'RUNNING': running_tasks.add(task_id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] retry_tasks = set(task_fail_count).difference(fully_failed_tasks).difference(running_tasks).difference(completed_tasks).difference(canceled_tasks) # job completed. if not retry_tasks and (not running_tasks): # If there are any fully failed tasks, return the completion message of an # arbitrary one. # If not, but there are canceled tasks, return the completion message of # an arbitrary one. if message_task: return [provider.get_tasks_completion_messages([message_task])] # depends on [control=['if'], data=[]] # Otherwise successful completion. return [] # depends on [control=['if'], data=[]] for task_id in retry_tasks: identifier = '{}.{}'.format(job_id, task_id) if task_id else job_id print(' {} (attempt {}) failed. Retrying.'.format(identifier, task_fail_count[task_id])) msg = task_dict[task_id].get_field('status-message') print(' Failure message: {}'.format(msg)) _retry_task(provider, job_descriptor, task_id, task_fail_count[task_id] + 1) # depends on [control=['for'], data=['task_id']] SLEEP_FUNCTION(poll_interval) # depends on [control=['while'], data=[]]
def change(connect_spec, dn, before, after):
    '''Modify an entry in an LDAP database.

    A simpler interface to :py:func:`modify`: instead of a list of
    directives, it takes the expected state of the entry before the
    modification and the desired state after it, computes the difference
    between the two, and executes the resulting directives.

    * A value present in ``before`` but absent from ``after`` is deleted.
    * A value present in ``after`` but absent from ``before`` is added.
    * A value mentioned in neither dict is left untouched in the database.
    * A value present in both dicts is ignored, whether or not it exists
      in the database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry.

    :param before:
        The expected state of the entry before modification.  A dict
        mapping each attribute name to an iterable of values.

    :param after:
        The desired state of the entry after modification.  A dict
        mapping each attribute name to an iterable of values.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.change "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret'}
        }" dn='cn=admin,dc=example,dc=com'
        before="{'example_value': 'before_val'}"
        after="{'example_value': 'after_val'}"
    '''
    conn = connect(connect_spec)

    # Copy the caller's dicts (so they are never mutated) and turn each
    # "iterable of values" into a list, in case that is what
    # modifyModlist() expects.
    before = {attr: salt.utils.data.encode(list(vals))
              for attr, vals in six.iteritems(before)}
    after = {attr: salt.utils.data.encode(list(vals))
             for attr, vals in six.iteritems(after)}

    # Active Directory passwords must be sent in their special encoding.
    if 'unicodePwd' in after:
        after['unicodePwd'] = [_format_unicode_password(x)
                               for x in after['unicodePwd']]

    modlist = ldap.modlist.modifyModlist(before, after)
    try:
        conn.c.modify_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)
    return True
def function[change, parameter[connect_spec, dn, before, after]]: constant[Modify an entry in an LDAP database. This does the same thing as :py:func:`modify`, but with a simpler interface. Instead of taking a list of directives, it takes a before and after view of an entry, determines the differences between the two, computes the directives, and executes them. Any attribute value present in ``before`` but missing in ``after`` is deleted. Any attribute value present in ``after`` but missing in ``before`` is added. Any attribute value in the database that is not mentioned in either ``before`` or ``after`` is not altered. Any attribute value that is present in both ``before`` and ``after`` is ignored, regardless of whether that attribute value exists in the database. :param connect_spec: See the documentation for the ``connect_spec`` parameter for :py:func:`connect`. :param dn: Distinguished name of the entry. :param before: The expected state of the entry before modification. This is a dict mapping each attribute name to an iterable of values. :param after: The desired state of the entry after modification. This is a dict mapping each attribute name to an iterable of values. :returns: ``True`` if successful, raises an exception otherwise. CLI example: .. 
code-block:: bash salt '*' ldap3.change "{ 'url': 'ldaps://ldap.example.com/', 'bind': { 'method': 'simple', 'password': 'secret'} }" dn='cn=admin,dc=example,dc=com' before="{'example_value': 'before_val'}" after="{'example_value': 'after_val'}" ] variable[l] assign[=] call[name[connect], parameter[name[connect_spec]]] variable[before] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b1c46d40>]] variable[after] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b1c46c20>]] if compare[constant[unicodePwd] in name[after]] begin[:] call[name[after]][constant[unicodePwd]] assign[=] <ast.ListComp object at 0x7da1b1c47670> variable[modlist] assign[=] call[name[ldap].modlist.modifyModlist, parameter[name[before], name[after]]] <ast.Try object at 0x7da1b215feb0> return[constant[True]]
keyword[def] identifier[change] ( identifier[connect_spec] , identifier[dn] , identifier[before] , identifier[after] ): literal[string] identifier[l] = identifier[connect] ( identifier[connect_spec] ) identifier[before] = identifier[dict] ((( identifier[attr] , identifier[salt] . identifier[utils] . identifier[data] . identifier[encode] ( identifier[list] ( identifier[vals] ))) keyword[for] identifier[attr] , identifier[vals] keyword[in] identifier[six] . identifier[iteritems] ( identifier[before] ))) identifier[after] = identifier[dict] ((( identifier[attr] , identifier[salt] . identifier[utils] . identifier[data] . identifier[encode] ( identifier[list] ( identifier[vals] ))) keyword[for] identifier[attr] , identifier[vals] keyword[in] identifier[six] . identifier[iteritems] ( identifier[after] ))) keyword[if] literal[string] keyword[in] identifier[after] : identifier[after] [ literal[string] ]=[ identifier[_format_unicode_password] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[after] [ literal[string] ]] identifier[modlist] = identifier[ldap] . identifier[modlist] . identifier[modifyModlist] ( identifier[before] , identifier[after] ) keyword[try] : identifier[l] . identifier[c] . identifier[modify_s] ( identifier[dn] , identifier[modlist] ) keyword[except] identifier[ldap] . identifier[LDAPError] keyword[as] identifier[e] : identifier[_convert_exception] ( identifier[e] ) keyword[return] keyword[True]
def change(connect_spec, dn, before, after): """Modify an entry in an LDAP database. This does the same thing as :py:func:`modify`, but with a simpler interface. Instead of taking a list of directives, it takes a before and after view of an entry, determines the differences between the two, computes the directives, and executes them. Any attribute value present in ``before`` but missing in ``after`` is deleted. Any attribute value present in ``after`` but missing in ``before`` is added. Any attribute value in the database that is not mentioned in either ``before`` or ``after`` is not altered. Any attribute value that is present in both ``before`` and ``after`` is ignored, regardless of whether that attribute value exists in the database. :param connect_spec: See the documentation for the ``connect_spec`` parameter for :py:func:`connect`. :param dn: Distinguished name of the entry. :param before: The expected state of the entry before modification. This is a dict mapping each attribute name to an iterable of values. :param after: The desired state of the entry after modification. This is a dict mapping each attribute name to an iterable of values. :returns: ``True`` if successful, raises an exception otherwise. CLI example: .. 
code-block:: bash salt '*' ldap3.change "{ 'url': 'ldaps://ldap.example.com/', 'bind': { 'method': 'simple', 'password': 'secret'} }" dn='cn=admin,dc=example,dc=com' before="{'example_value': 'before_val'}" after="{'example_value': 'after_val'}" """ l = connect(connect_spec) # convert the "iterable of values" to lists in case that's what # modifyModlist() expects (also to ensure that the caller's dicts # are not modified) before = dict(((attr, salt.utils.data.encode(list(vals))) for (attr, vals) in six.iteritems(before))) after = dict(((attr, salt.utils.data.encode(list(vals))) for (attr, vals) in six.iteritems(after))) if 'unicodePwd' in after: after['unicodePwd'] = [_format_unicode_password(x) for x in after['unicodePwd']] # depends on [control=['if'], data=['after']] modlist = ldap.modlist.modifyModlist(before, after) try: l.c.modify_s(dn, modlist) # depends on [control=['try'], data=[]] except ldap.LDAPError as e: _convert_exception(e) # depends on [control=['except'], data=['e']] return True
def remove(name=None, pkgs=None, **kwargs):
    '''Remove packages using ``apt-get remove``.

    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any apt-get/dpkg commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage
        of `systemd-run(1)`_ can be suppressed by setting a :mod:`config
        option <salt.modules.config.get>` called ``systemd.scope``, with a
        value of ``False`` (no quotes).

    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html

    name
        The name of the package to be deleted.

    Multiple Package Options:

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    .. versionadded:: 0.16.0

    Returns a dict containing the changes.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    # Thin wrapper: all of the real work (including the systemd-run
    # isolation described above) happens in _uninstall().
    return _uninstall(action='remove', name=name, pkgs=pkgs, **kwargs)
def function[remove, parameter[name, pkgs]]: constant[ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any apt-get/dpkg commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Remove packages using ``apt-get remove``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ] return[call[name[_uninstall], parameter[]]]
keyword[def] identifier[remove] ( identifier[name] = keyword[None] , identifier[pkgs] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[_uninstall] ( identifier[action] = literal[string] , identifier[name] = identifier[name] , identifier[pkgs] = identifier[pkgs] ,** identifier[kwargs] )
def remove(name=None, pkgs=None, **kwargs): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any apt-get/dpkg commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Remove packages using ``apt-get remove``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' """ return _uninstall(action='remove', name=name, pkgs=pkgs, **kwargs)
def phon(self, cls='current', previousdelimiter="", strict=False, correctionhandling=CorrectionHandling.CURRENT):
    """Get the phonetic representation associated with this element (of the specified class)

    The phonetic content will be constructed from child-elements wherever
    possible, as they are more specific. If no phonetic content can be
    obtained from the children and the element has itself phonetic content
    associated with it, then that will be used.

    Parameters:
        cls (str): The class of the phonetic content to obtain, defaults to ``current``.
        previousdelimiter (str): Can be set to a delimiter that was last outputted, useful when chaining calls to :meth:`phon`. Defaults to an empty string.
        strict (bool): Set this if you are strictly interested in the phonetic content explicitly associated with the element, without recursing into children. Defaults to ``False``.
        correctionhandling: Specifies what phonetic content to retrieve when corrections are encountered. The default is ``CorrectionHandling.CURRENT``, which will retrieve the corrected/current phonetic content. You can set this to ``CorrectionHandling.ORIGINAL`` if you want the phonetic content prior to correction, and ``CorrectionHandling.EITHER`` if you don't care.

    Example::

        word.phon()

    Returns:
        The phonetic content of the element (``unicode`` instance in Python 2, ``str`` in Python 3)

    Raises:
        :class:`NoSuchPhon`: if no phonetic content is found at all.

    See also:
        :meth:`phoncontent`: Retrieves the phonetic content as an element rather than a string
        :meth:`text`
        :meth:`textcontent`
    """
    if strict:
        # Only this element's own phonetic content; never recurse.
        return self.phoncontent(cls, correctionhandling).phon()

    if self.PHONCONTAINER:
        # This element directly contains a mix of raw strings and child
        # elements; concatenate them in document order.
        s = ""
        for e in self:
            if isstring(e):
                s += e
            else:
                try:
                    if s:
                        s += e.TEXTDELIMITER  # We use TEXTDELIMITER for phon too
                except AttributeError:
                    # Child has no delimiter attribute; join without one.
                    pass
                s += e.phon()
        return s
    elif not self.SPEAKABLE:  # only speakable elements can hold phonetic content
        raise NoSuchPhon
    else:
        # Get text from children first
        delimiter = ""
        s = ""
        for e in self:
            # Skip PhonContent/String children here; the element's own
            # phonetic content is handled as a fallback below.
            if e.SPEAKABLE and not isinstance(e, PhonContent) and not isinstance(e, String):
                try:
                    # delimiter will be buffered and only printed upon next
                    # iteration; this prevents the delimiter being outputted
                    # at the end of a sequence and being compounded with
                    # other delimiters.
                    s += e.phon(cls, delimiter, False, correctionhandling)
                    delimiter = e.gettextdelimiter()  # We use TEXTDELIMITER for phon too
                except NoSuchPhon:
                    # No phonetic content in this child, that's okay; continue.
                    continue

        if not s and self.hasphon(cls):
            # Fall back to this element's own phonetic content.
            s = self.phoncontent(cls, correctionhandling).phon()

        if s and previousdelimiter:
            return previousdelimiter + s
        elif s:
            return s
        else:
            # No phonetic content found at all :`(
            raise NoSuchPhon
def function[phon, parameter[self, cls, previousdelimiter, strict, correctionhandling]]: constant[Get the phonetic representation associated with this element (of the specified class) The phonetic content will be constructed from child-elements whereever possible, as they are more specific. If no phonetic content can be obtained from the children and the element has itself phonetic content associated with it, then that will be used. Parameters: cls (str): The class of the phonetic content to obtain, defaults to ``current``. retaintokenisation (bool): If set, the space attribute on words will be ignored, otherwise it will be adhered to and phonetic content will be detokenised as much as possible. Defaults to ``False``. previousdelimiter (str): Can be set to a delimiter that was last outputed, useful when chaining calls to :meth:`phon`. Defaults to an empty string. strict (bool): Set this if you are strictly interested in the phonetic content explicitly associated with the element, without recursing into children. Defaults to ``False``. correctionhandling: Specifies what phonetic content to retrieve when corrections are encountered. The default is ``CorrectionHandling.CURRENT``, which will retrieve the corrected/current phonetic content. You can set this to ``CorrectionHandling.ORIGINAL`` if you want the phonetic content prior to correction, and ``CorrectionHandling.EITHER`` if you don't care. Example:: word.phon() Returns: The phonetic content of the element (``unicode`` instance in Python 2, ``str`` in Python 3) Raises: :class:`NoSuchPhon`: if no phonetic conent is found at all. 
See also: :meth:`phoncontent`: Retrieves the phonetic content as an element rather than a string :meth:`text` :meth:`textcontent` ] if name[strict] begin[:] return[call[call[name[self].phoncontent, parameter[name[cls], name[correctionhandling]]].phon, parameter[]]] if name[self].PHONCONTAINER begin[:] variable[s] assign[=] constant[] for taget[name[e]] in starred[name[self]] begin[:] if call[name[isstring], parameter[name[e]]] begin[:] <ast.AugAssign object at 0x7da18f00ea10> return[name[s]]
keyword[def] identifier[phon] ( identifier[self] , identifier[cls] = literal[string] , identifier[previousdelimiter] = literal[string] , identifier[strict] = keyword[False] , identifier[correctionhandling] = identifier[CorrectionHandling] . identifier[CURRENT] ): literal[string] keyword[if] identifier[strict] : keyword[return] identifier[self] . identifier[phoncontent] ( identifier[cls] , identifier[correctionhandling] ). identifier[phon] () keyword[if] identifier[self] . identifier[PHONCONTAINER] : identifier[s] = literal[string] keyword[for] identifier[e] keyword[in] identifier[self] : keyword[if] identifier[isstring] ( identifier[e] ): identifier[s] += identifier[e] keyword[else] : keyword[try] : keyword[if] identifier[s] : identifier[s] += identifier[e] . identifier[TEXTDELIMITER] keyword[except] identifier[AttributeError] : keyword[pass] identifier[s] += identifier[e] . identifier[phon] () keyword[return] identifier[s] keyword[elif] keyword[not] identifier[self] . identifier[SPEAKABLE] : keyword[raise] identifier[NoSuchPhon] keyword[else] : identifier[delimiter] = literal[string] identifier[s] = literal[string] keyword[for] identifier[e] keyword[in] identifier[self] : keyword[if] identifier[e] . identifier[SPEAKABLE] keyword[and] keyword[not] identifier[isinstance] ( identifier[e] , identifier[PhonContent] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[e] , identifier[String] ): keyword[try] : identifier[s] += identifier[e] . identifier[phon] ( identifier[cls] , identifier[delimiter] , keyword[False] , identifier[correctionhandling] ) identifier[delimiter] = identifier[e] . identifier[gettextdelimiter] () keyword[except] identifier[NoSuchPhon] : keyword[continue] keyword[if] keyword[not] identifier[s] keyword[and] identifier[self] . identifier[hasphon] ( identifier[cls] ): identifier[s] = identifier[self] . identifier[phoncontent] ( identifier[cls] , identifier[correctionhandling] ). 
identifier[phon] () keyword[if] identifier[s] keyword[and] identifier[previousdelimiter] : keyword[return] identifier[previousdelimiter] + identifier[s] keyword[elif] identifier[s] : keyword[return] identifier[s] keyword[else] : keyword[raise] identifier[NoSuchPhon]
def phon(self, cls='current', previousdelimiter='', strict=False, correctionhandling=CorrectionHandling.CURRENT): """Get the phonetic representation associated with this element (of the specified class) The phonetic content will be constructed from child-elements whereever possible, as they are more specific. If no phonetic content can be obtained from the children and the element has itself phonetic content associated with it, then that will be used. Parameters: cls (str): The class of the phonetic content to obtain, defaults to ``current``. retaintokenisation (bool): If set, the space attribute on words will be ignored, otherwise it will be adhered to and phonetic content will be detokenised as much as possible. Defaults to ``False``. previousdelimiter (str): Can be set to a delimiter that was last outputed, useful when chaining calls to :meth:`phon`. Defaults to an empty string. strict (bool): Set this if you are strictly interested in the phonetic content explicitly associated with the element, without recursing into children. Defaults to ``False``. correctionhandling: Specifies what phonetic content to retrieve when corrections are encountered. The default is ``CorrectionHandling.CURRENT``, which will retrieve the corrected/current phonetic content. You can set this to ``CorrectionHandling.ORIGINAL`` if you want the phonetic content prior to correction, and ``CorrectionHandling.EITHER`` if you don't care. Example:: word.phon() Returns: The phonetic content of the element (``unicode`` instance in Python 2, ``str`` in Python 3) Raises: :class:`NoSuchPhon`: if no phonetic conent is found at all. 
See also: :meth:`phoncontent`: Retrieves the phonetic content as an element rather than a string :meth:`text` :meth:`textcontent` """ if strict: return self.phoncontent(cls, correctionhandling).phon() # depends on [control=['if'], data=[]] if self.PHONCONTAINER: s = '' for e in self: if isstring(e): s += e # depends on [control=['if'], data=[]] else: try: if s: s += e.TEXTDELIMITER #We use TEXTDELIMITER for phon too # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] s += e.phon() # depends on [control=['for'], data=['e']] return s # depends on [control=['if'], data=[]] elif not self.SPEAKABLE: #only readable elements can hold phonetic content raise NoSuchPhon # depends on [control=['if'], data=[]] else: #Get text from children first delimiter = '' s = '' for e in self: if e.SPEAKABLE and (not isinstance(e, PhonContent)) and (not isinstance(e, String)): try: s += e.phon(cls, delimiter, False, correctionhandling) #delimiter will be buffered and only printed upon next iteration, this prevents the delimiter being outputted at the end of a sequence and to be compounded with other delimiters delimiter = e.gettextdelimiter() #We use TEXTDELIMITER for phon too # depends on [control=['try'], data=[]] except NoSuchPhon: #No text, that's okay, just continue continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['e']] if not s and self.hasphon(cls): s = self.phoncontent(cls, correctionhandling).phon() # depends on [control=['if'], data=[]] if s and previousdelimiter: return previousdelimiter + s # depends on [control=['if'], data=[]] elif s: return s # depends on [control=['if'], data=[]] else: #No text found at all :`( raise NoSuchPhon
def deleteprojectmember(self, project_id, user_id):
    """Delete a project member.

    :param project_id: project id
    :param user_id: user id
    :return: True if the member was deleted (HTTP 200), False otherwise
    """
    request = requests.delete(
        '{0}/{1}/members/{2}'.format(self.projects_url, project_id, user_id),
        headers=self.headers, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    # Return an explicit boolean instead of falling through to an implicit
    # None on non-200 responses (the old docstring claimed "always true").
    # Truthiness is unchanged for existing callers.
    return request.status_code == 200
def function[deleteprojectmember, parameter[self, project_id, user_id]]: constant[Delete a project member :param project_id: project id :param user_id: user id :return: always true ] variable[request] assign[=] call[name[requests].delete, parameter[call[constant[{0}/{1}/members/{2}].format, parameter[name[self].projects_url, name[project_id], name[user_id]]]]] if compare[name[request].status_code equal[==] constant[200]] begin[:] return[constant[True]]
keyword[def] identifier[deleteprojectmember] ( identifier[self] , identifier[project_id] , identifier[user_id] ): literal[string] identifier[request] = identifier[requests] . identifier[delete] ( literal[string] . identifier[format] ( identifier[self] . identifier[projects_url] , identifier[project_id] , identifier[user_id] ), identifier[headers] = identifier[self] . identifier[headers] , identifier[verify] = identifier[self] . identifier[verify_ssl] , identifier[auth] = identifier[self] . identifier[auth] , identifier[timeout] = identifier[self] . identifier[timeout] ) keyword[if] identifier[request] . identifier[status_code] == literal[int] : keyword[return] keyword[True]
def deleteprojectmember(self, project_id, user_id): """Delete a project member :param project_id: project id :param user_id: user id :return: always true """ request = requests.delete('{0}/{1}/members/{2}'.format(self.projects_url, project_id, user_id), headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) if request.status_code == 200: return True # depends on [control=['if'], data=[]]
def load_suffixes(self, filename):
    """Build the suffix dictionary from a file of ``long,abbreviation`` lines.

    The keys will be possible long versions, and the values will be the
    accepted abbreviations. Everything should be stored using the value
    version, and you can search all by building a set of
    self.suffixes.keys() and self.suffixes.values().

    :param filename: path to a file with one comma-separated
        ``long,abbreviation`` pair per line; malformed lines (not exactly
        two fields) are skipped.
    """
    with open(filename, 'r') as f:
        for line in f:
            # Strip the newline and split once instead of re-parsing the
            # same line three times as before.
            fields = line.strip().split(',')
            # Make sure we have exactly a key and a value.
            if len(fields) != 2:
                continue
            long_form, abbreviation = fields
            self.suffixes[long_form] = abbreviation
def function[load_suffixes, parameter[self, filename]]: constant[ Build the suffix dictionary. The keys will be possible long versions, and the values will be the accepted abbreviations. Everything should be stored using the value version, and you can search all by using building a set of self.suffixes.keys() and self.suffixes.values(). ] with call[name[open], parameter[name[filename], constant[r]]] begin[:] for taget[name[line]] in starred[name[f]] begin[:] if compare[call[name[len], parameter[call[name[line].split, parameter[constant[,]]]]] not_equal[!=] constant[2]] begin[:] continue call[name[self].suffixes][call[call[call[name[line].strip, parameter[]].split, parameter[constant[,]]]][constant[0]]] assign[=] call[call[call[name[line].strip, parameter[]].split, parameter[constant[,]]]][constant[1]]
keyword[def] identifier[load_suffixes] ( identifier[self] , identifier[filename] ): literal[string] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : keyword[for] identifier[line] keyword[in] identifier[f] : keyword[if] identifier[len] ( identifier[line] . identifier[split] ( literal[string] ))!= literal[int] : keyword[continue] identifier[self] . identifier[suffixes] [ identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ]]= identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ]
def load_suffixes(self, filename): """ Build the suffix dictionary. The keys will be possible long versions, and the values will be the accepted abbreviations. Everything should be stored using the value version, and you can search all by using building a set of self.suffixes.keys() and self.suffixes.values(). """ with open(filename, 'r') as f: for line in f: # Make sure we have key and value if len(line.split(',')) != 2: continue # depends on [control=['if'], data=[]] # Strip off newlines. self.suffixes[line.strip().split(',')[0]] = line.strip().split(',')[1] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
def parseTree(self, root, state: ParseState) -> List[Dict]:
    """Recursively walk the XML AST and emit a JSON-style AST.

    Dispatches on the tag of *root*: known tags go to their registered
    handler, library routines get special processing, and anything else
    is flattened by recursing into the children and concatenating their
    results.

    Args:
        root: The current root of the tree.
        state: The current state of the tree defined by an object of the
            ParseState class.

    Returns:
        ast: A JSON ast that defines the structure of the Fortran file.
    """
    # Registered tag? Delegate straight to its handler.
    if root.tag in self.AST_TAG_HANDLERS:
        return self.AST_TAG_HANDLERS[root.tag](root, state)

    # Library routines need dedicated processing.
    if root.tag in self.libRtns:
        return self.process_libRtn(root, state)

    # Unknown tag: flatten the children's results into one list.
    collected = []
    for child in root:
        collected.extend(self.parseTree(child, state))
    return collected
def function[parseTree, parameter[self, root, state]]: constant[ Parses the XML ast tree recursively to generate a JSON AST which can be ingested by other scripts to generate Python scripts. Args: root: The current root of the tree. state: The current state of the tree defined by an object of the ParseState class. Returns: ast: A JSON ast that defines the structure of the Fortran file. ] if compare[name[root].tag in name[self].AST_TAG_HANDLERS] begin[:] return[call[call[name[self].AST_TAG_HANDLERS][name[root].tag], parameter[name[root], name[state]]]]
keyword[def] identifier[parseTree] ( identifier[self] , identifier[root] , identifier[state] : identifier[ParseState] )-> identifier[List] [ identifier[Dict] ]: literal[string] keyword[if] identifier[root] . identifier[tag] keyword[in] identifier[self] . identifier[AST_TAG_HANDLERS] : keyword[return] identifier[self] . identifier[AST_TAG_HANDLERS] [ identifier[root] . identifier[tag] ]( identifier[root] , identifier[state] ) keyword[elif] identifier[root] . identifier[tag] keyword[in] identifier[self] . identifier[libRtns] : keyword[return] identifier[self] . identifier[process_libRtn] ( identifier[root] , identifier[state] ) keyword[else] : identifier[prog] =[] keyword[for] identifier[node] keyword[in] identifier[root] : identifier[prog] += identifier[self] . identifier[parseTree] ( identifier[node] , identifier[state] ) keyword[return] identifier[prog]
def parseTree(self, root, state: ParseState) -> List[Dict]: """ Parses the XML ast tree recursively to generate a JSON AST which can be ingested by other scripts to generate Python scripts. Args: root: The current root of the tree. state: The current state of the tree defined by an object of the ParseState class. Returns: ast: A JSON ast that defines the structure of the Fortran file. """ if root.tag in self.AST_TAG_HANDLERS: return self.AST_TAG_HANDLERS[root.tag](root, state) # depends on [control=['if'], data=[]] elif root.tag in self.libRtns: return self.process_libRtn(root, state) # depends on [control=['if'], data=[]] else: prog = [] for node in root: prog += self.parseTree(node, state) # depends on [control=['for'], data=['node']] return prog
def set_id(self, dxid):
    '''
    :param dxid: New ID to be associated with the handler
    :type dxid: string

    Discards the currently stored ID and associates the handler with *dxid*
    '''
    if dxid is None:
        # A None ID simply clears the association; nothing to validate.
        self._dxid = None
        return
    # Non-None IDs must be well-formed for this handler's class.
    verify_string_dxid(dxid, self._class)
    self._dxid = dxid
def function[set_id, parameter[self, dxid]]: constant[ :param dxid: New ID to be associated with the handler :type dxid: string Discards the currently stored ID and associates the handler with *dxid* ] if compare[name[dxid] is_not constant[None]] begin[:] call[name[verify_string_dxid], parameter[name[dxid], name[self]._class]] name[self]._dxid assign[=] name[dxid]
keyword[def] identifier[set_id] ( identifier[self] , identifier[dxid] ): literal[string] keyword[if] identifier[dxid] keyword[is] keyword[not] keyword[None] : identifier[verify_string_dxid] ( identifier[dxid] , identifier[self] . identifier[_class] ) identifier[self] . identifier[_dxid] = identifier[dxid]
def set_id(self, dxid): """ :param dxid: New ID to be associated with the handler :type dxid: string Discards the currently stored ID and associates the handler with *dxid* """ if dxid is not None: verify_string_dxid(dxid, self._class) # depends on [control=['if'], data=['dxid']] self._dxid = dxid
def satisifesShapeOr(cntxt: Context, n: Node, se: ShExJ.ShapeOr, _: DebugContext) -> bool:
    """ Se is a ShapeOr and there is some shape expression se2 in shapeExprs such that satisfies(n, se2, G, m). """
    # Short-circuit on the first alternative that n satisfies; if none do,
    # the disjunction fails.
    for alternative in se.shapeExprs:
        if satisfies(cntxt, n, alternative):
            return True
    return False
def function[satisifesShapeOr, parameter[cntxt, n, se, _]]: constant[ Se is a ShapeOr and there is some shape expression se2 in shapeExprs such that satisfies(n, se2, G, m). ] return[call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b0f0cfa0>]]]
keyword[def] identifier[satisifesShapeOr] ( identifier[cntxt] : identifier[Context] , identifier[n] : identifier[Node] , identifier[se] : identifier[ShExJ] . identifier[ShapeOr] , identifier[_] : identifier[DebugContext] )-> identifier[bool] : literal[string] keyword[return] identifier[any] ( identifier[satisfies] ( identifier[cntxt] , identifier[n] , identifier[se2] ) keyword[for] identifier[se2] keyword[in] identifier[se] . identifier[shapeExprs] )
def satisifesShapeOr(cntxt: Context, n: Node, se: ShExJ.ShapeOr, _: DebugContext) -> bool: """ Se is a ShapeOr and there is some shape expression se2 in shapeExprs such that satisfies(n, se2, G, m). """ return any((satisfies(cntxt, n, se2) for se2 in se.shapeExprs))
def _check_reads_hit(self, alignment_io, min_aligned_fraction):
    '''Given an alignment return a list of sequence names whose number of
    aligned (non-gap) positions is at or below min_aligned_fraction of the
    alignment length.

    :param alignment_io: handle (or path) of an aligned FASTA file; all
        records must have the same length.
    :param min_aligned_fraction: fraction (0..1) of the alignment length a
        sequence must exceed to be considered aligned.
    :raises Exception: if the records are not of uniform length.
    '''
    to_return = []
    alignment_length = None
    min_length = None
    for s in SeqIO.parse(alignment_io, "fasta"):
        # Compare against None rather than truthiness: the original
        # `if not alignment_length` re-ran this setup on every record when
        # the first record had length 0.
        if alignment_length is None:
            alignment_length = len(s.seq)
            min_length = int(min_aligned_fraction * alignment_length)
            logging.debug("Determined min number of aligned bases to be %s" % min_length)
        elif len(s.seq) != alignment_length:
            raise Exception("Alignment file appears to not be of uniform length")
        num_unaligned = s.seq.count('-')
        num_aligned = alignment_length - num_unaligned
        logging.debug("Sequence %s has %d aligned positions" % (s.name, num_aligned))
        if num_aligned <= min_length:
            to_return.append(s.name)
    return to_return
def function[_check_reads_hit, parameter[self, alignment_io, min_aligned_fraction]]: constant[Given an alignment return a list of sequence names that are less than the min_aligned_fraction] variable[to_return] assign[=] list[[]] variable[alignment_length] assign[=] constant[None] for taget[name[s]] in starred[call[name[SeqIO].parse, parameter[name[alignment_io], constant[fasta]]]] begin[:] if <ast.UnaryOp object at 0x7da20c76fe50> begin[:] variable[alignment_length] assign[=] call[name[len], parameter[name[s].seq]] variable[min_length] assign[=] call[name[int], parameter[binary_operation[name[min_aligned_fraction] * name[alignment_length]]]] call[name[logging].debug, parameter[binary_operation[constant[Determined min number of aligned bases to be %s] <ast.Mod object at 0x7da2590d6920> name[min_length]]]] variable[num_unaligned] assign[=] call[name[s].seq.count, parameter[constant[-]]] variable[num_aligned] assign[=] binary_operation[name[alignment_length] - name[num_unaligned]] call[name[logging].debug, parameter[binary_operation[constant[Sequence %s has %d aligned positions] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c76d9c0>, <ast.BinOp object at 0x7da20c76c820>]]]]] if compare[name[num_aligned] less_or_equal[<=] name[min_length]] begin[:] call[name[to_return].append, parameter[name[s].name]] return[name[to_return]]
keyword[def] identifier[_check_reads_hit] ( identifier[self] , identifier[alignment_io] , identifier[min_aligned_fraction] ): literal[string] identifier[to_return] =[] identifier[alignment_length] = keyword[None] keyword[for] identifier[s] keyword[in] identifier[SeqIO] . identifier[parse] ( identifier[alignment_io] , literal[string] ): keyword[if] keyword[not] identifier[alignment_length] : identifier[alignment_length] = identifier[len] ( identifier[s] . identifier[seq] ) identifier[min_length] = identifier[int] ( identifier[min_aligned_fraction] * identifier[alignment_length] ) identifier[logging] . identifier[debug] ( literal[string] % identifier[min_length] ) keyword[elif] identifier[len] ( identifier[s] . identifier[seq] )!= identifier[alignment_length] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[num_unaligned] = identifier[s] . identifier[seq] . identifier[count] ( literal[string] ) identifier[num_aligned] = identifier[alignment_length] - identifier[num_unaligned] identifier[logging] . identifier[debug] ( literal[string] %( identifier[s] . identifier[name] , identifier[alignment_length] - identifier[num_unaligned] )) keyword[if] identifier[num_aligned] <= identifier[min_length] : identifier[to_return] . identifier[append] ( identifier[s] . identifier[name] ) keyword[return] identifier[to_return]
def _check_reads_hit(self, alignment_io, min_aligned_fraction): """Given an alignment return a list of sequence names that are less than the min_aligned_fraction""" to_return = [] alignment_length = None for s in SeqIO.parse(alignment_io, 'fasta'): if not alignment_length: alignment_length = len(s.seq) min_length = int(min_aligned_fraction * alignment_length) logging.debug('Determined min number of aligned bases to be %s' % min_length) # depends on [control=['if'], data=[]] elif len(s.seq) != alignment_length: raise Exception('Alignment file appears to not be of uniform length') # depends on [control=['if'], data=[]] num_unaligned = s.seq.count('-') num_aligned = alignment_length - num_unaligned logging.debug('Sequence %s has %d aligned positions' % (s.name, alignment_length - num_unaligned)) if num_aligned <= min_length: to_return.append(s.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] return to_return
def check_settings(self, settings):
    """
    Checks the settings info.

    :param settings: Dict with settings data
    :type settings: dict

    :returns: Errors found on the settings data
    :rtype: list
    """
    errors = []
    # Report invalid input through the documented error list instead of the
    # previous `assert isinstance(settings, dict)`, which raised
    # AssertionError (making the isinstance branch below unreachable) and
    # silently disappeared under `python -O`.
    if not isinstance(settings, dict) or len(settings) == 0:
        errors.append('invalid_syntax')
    else:
        # In SP-validation-only mode the IdP section is not checked.
        if not self.__sp_validation_only:
            errors += self.check_idp_settings(settings)
        sp_errors = self.check_sp_settings(settings)
        errors += sp_errors
    return errors
def function[check_settings, parameter[self, settings]]: constant[ Checks the settings info. :param settings: Dict with settings data :type settings: dict :returns: Errors found on the settings data :rtype: list ] assert[call[name[isinstance], parameter[name[settings], name[dict]]]] variable[errors] assign[=] list[[]] if <ast.BoolOp object at 0x7da1b170ef50> begin[:] call[name[errors].append, parameter[constant[invalid_syntax]]] return[name[errors]]
keyword[def] identifier[check_settings] ( identifier[self] , identifier[settings] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[settings] , identifier[dict] ) identifier[errors] =[] keyword[if] keyword[not] identifier[isinstance] ( identifier[settings] , identifier[dict] ) keyword[or] identifier[len] ( identifier[settings] )== literal[int] : identifier[errors] . identifier[append] ( literal[string] ) keyword[else] : keyword[if] keyword[not] identifier[self] . identifier[__sp_validation_only] : identifier[errors] += identifier[self] . identifier[check_idp_settings] ( identifier[settings] ) identifier[sp_errors] = identifier[self] . identifier[check_sp_settings] ( identifier[settings] ) identifier[errors] += identifier[sp_errors] keyword[return] identifier[errors]
def check_settings(self, settings): """ Checks the settings info. :param settings: Dict with settings data :type settings: dict :returns: Errors found on the settings data :rtype: list """ assert isinstance(settings, dict) errors = [] if not isinstance(settings, dict) or len(settings) == 0: errors.append('invalid_syntax') # depends on [control=['if'], data=[]] else: if not self.__sp_validation_only: errors += self.check_idp_settings(settings) # depends on [control=['if'], data=[]] sp_errors = self.check_sp_settings(settings) errors += sp_errors return errors