code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def RIBSystemRouteLimitExceeded_originator_switch_info_switchVcsId(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") RIBSystemRouteLimitExceeded = ET.SubElement(config, "RIBSystemRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream") originator_switch_info = ET.SubElement(RIBSystemRouteLimitExceeded, "originator-switch-info") switchVcsId = ET.SubElement(originator_switch_info, "switchVcsId") switchVcsId.text = kwargs.pop('switchVcsId') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[RIBSystemRouteLimitExceeded_originator_switch_info_switchVcsId, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[RIBSystemRouteLimitExceeded] assign[=] call[name[ET].SubElement, parameter[name[config], constant[RIBSystemRouteLimitExceeded]]] variable[originator_switch_info] assign[=] call[name[ET].SubElement, parameter[name[RIBSystemRouteLimitExceeded], constant[originator-switch-info]]] variable[switchVcsId] assign[=] call[name[ET].SubElement, parameter[name[originator_switch_info], constant[switchVcsId]]] name[switchVcsId].text assign[=] call[name[kwargs].pop, parameter[constant[switchVcsId]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[RIBSystemRouteLimitExceeded_originator_switch_info_switchVcsId] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[RIBSystemRouteLimitExceeded] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[originator_switch_info] = identifier[ET] . identifier[SubElement] ( identifier[RIBSystemRouteLimitExceeded] , literal[string] ) identifier[switchVcsId] = identifier[ET] . identifier[SubElement] ( identifier[originator_switch_info] , literal[string] ) identifier[switchVcsId] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def RIBSystemRouteLimitExceeded_originator_switch_info_switchVcsId(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') RIBSystemRouteLimitExceeded = ET.SubElement(config, 'RIBSystemRouteLimitExceeded', xmlns='http://brocade.com/ns/brocade-notification-stream') originator_switch_info = ET.SubElement(RIBSystemRouteLimitExceeded, 'originator-switch-info') switchVcsId = ET.SubElement(originator_switch_info, 'switchVcsId') switchVcsId.text = kwargs.pop('switchVcsId') callback = kwargs.pop('callback', self._callback) return callback(config)
def _fit(
    indexed_params,
    penalization,
    lam,
    lam_perturb,
    lam_scale_,
    estimator,
    penalty_name,
    subsample,
    bootstrap,
    prng,
    X=None,
):
    """Wrapper function outside of instance for fitting a single model
    average trial.

    If X is None, then we assume we are using a broadcast spark object.
    Else, we expect X to get passed into this function.

    Returns ``(index, (boot_lam, rp, fitted_estimator))`` where ``boot_lam``
    is the randomized penalty (or None for plain subsampling) and ``rp`` is
    the row subsample used for the fit.
    """
    index = indexed_params

    # A Spark broadcast variable exposes its payload via ``.value``.
    if isinstance(X, np.ndarray):
        local_X = X
    else:
        local_X = X.value

    n_samples, n_features = local_X.shape

    # Keep redrawing penalties/subsamples until the estimator produces a
    # real-valued precision matrix.
    prec_is_real = False
    while not prec_is_real:
        boot_lam = None
        if penalization == "subsampling":
            # Plain subsampling: leave the estimator's own penalty untouched.
            pass
        elif penalization == "random":
            boot_lam = _fix_weights(_random_weights, n_features, lam, lam_perturb, prng)
        elif penalization == "fully-random":
            boot_lam = _fix_weights(_fully_random_weights, n_features, lam_scale_, prng)
        else:
            raise NotImplementedError(
                (
                    "Only penalization = 'subsampling', "
                    "'random', and 'fully-random' have "
                    "been implemented. Found {}.".format(penalization)
                )
            )

        # new instance of estimator
        new_estimator = clone(estimator)
        if boot_lam is not None:
            new_estimator.set_params(**{penalty_name: boot_lam})

        # fit estimator on a random subsample of the rows
        num_subsamples = int(subsample * n_samples)
        rp = bootstrap(n_samples, num_subsamples, prng)
        new_estimator.fit(local_X[rp, :])

        # check that new_estimator.precision_ is real
        # if not, skip this boot_lam and try again
        if isinstance(new_estimator.precision_, list):
            prec_real_bools = [
                np.all(np.isreal(prec)) for prec in new_estimator.precision_
            ]
            # BUG FIX: the original computed ``np.all(np.array(bools) is True)``.
            # An ndarray is never the ``True`` singleton, so that identity test
            # was always False and this loop could never terminate for
            # list-valued precisions.  Reduce the booleans directly instead.
            prec_is_real = np.all(prec_real_bools)
        elif isinstance(new_estimator.precision_, np.ndarray):
            prec_is_real = np.all(np.isreal(new_estimator.precision_))
        else:
            raise ValueError("Estimator returned invalid precision_.")

    return index, (boot_lam, rp, new_estimator)
def function[_fit, parameter[indexed_params, penalization, lam, lam_perturb, lam_scale_, estimator, penalty_name, subsample, bootstrap, prng, X]]: constant[Wrapper function outside of instance for fitting a single model average trial. If X is None, then we assume we are using a broadcast spark object. Else, we expect X to get passed into this function. ] variable[index] assign[=] name[indexed_params] if call[name[isinstance], parameter[name[X], name[np].ndarray]] begin[:] variable[local_X] assign[=] name[X] <ast.Tuple object at 0x7da18ede6740> assign[=] name[local_X].shape variable[prec_is_real] assign[=] constant[False] while <ast.UnaryOp object at 0x7da18ede52d0> begin[:] variable[boot_lam] assign[=] constant[None] if compare[name[penalization] equal[==] constant[subsampling]] begin[:] pass variable[new_estimator] assign[=] call[name[clone], parameter[name[estimator]]] if compare[name[boot_lam] is_not constant[None]] begin[:] call[name[new_estimator].set_params, parameter[]] variable[num_subsamples] assign[=] call[name[int], parameter[binary_operation[name[subsample] * name[n_samples]]]] variable[rp] assign[=] call[name[bootstrap], parameter[name[n_samples], name[num_subsamples], name[prng]]] call[name[new_estimator].fit, parameter[call[name[local_X]][tuple[[<ast.Name object at 0x7da18ede6140>, <ast.Slice object at 0x7da18ede6230>]]]]] if call[name[isinstance], parameter[name[new_estimator].precision_, name[list]]] begin[:] variable[prec_real_bools] assign[=] list[[]] for taget[name[prec]] in starred[name[new_estimator].precision_] begin[:] call[name[prec_real_bools].append, parameter[call[name[np].all, parameter[call[name[np].isreal, parameter[name[prec]]]]]]] variable[prec_is_real] assign[=] call[name[np].all, parameter[compare[call[name[np].array, parameter[name[prec_real_bools]]] is constant[True]]]] return[tuple[[<ast.Name object at 0x7da20c6e7550>, <ast.Tuple object at 0x7da20c6e7c40>]]]
keyword[def] identifier[_fit] ( identifier[indexed_params] , identifier[penalization] , identifier[lam] , identifier[lam_perturb] , identifier[lam_scale_] , identifier[estimator] , identifier[penalty_name] , identifier[subsample] , identifier[bootstrap] , identifier[prng] , identifier[X] = keyword[None] , ): literal[string] identifier[index] = identifier[indexed_params] keyword[if] identifier[isinstance] ( identifier[X] , identifier[np] . identifier[ndarray] ): identifier[local_X] = identifier[X] keyword[else] : identifier[local_X] = identifier[X] . identifier[value] identifier[n_samples] , identifier[n_features] = identifier[local_X] . identifier[shape] identifier[prec_is_real] = keyword[False] keyword[while] keyword[not] identifier[prec_is_real] : identifier[boot_lam] = keyword[None] keyword[if] identifier[penalization] == literal[string] : keyword[pass] keyword[elif] identifier[penalization] == literal[string] : identifier[boot_lam] = identifier[_fix_weights] ( identifier[_random_weights] , identifier[n_features] , identifier[lam] , identifier[lam_perturb] , identifier[prng] ) keyword[elif] identifier[penalization] == literal[string] : identifier[boot_lam] = identifier[_fix_weights] ( identifier[_fully_random_weights] , identifier[n_features] , identifier[lam_scale_] , identifier[prng] ) keyword[else] : keyword[raise] identifier[NotImplementedError] ( ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[penalization] ) ) ) identifier[new_estimator] = identifier[clone] ( identifier[estimator] ) keyword[if] identifier[boot_lam] keyword[is] keyword[not] keyword[None] : identifier[new_estimator] . identifier[set_params] (**{ identifier[penalty_name] : identifier[boot_lam] }) identifier[num_subsamples] = identifier[int] ( identifier[subsample] * identifier[n_samples] ) identifier[rp] = identifier[bootstrap] ( identifier[n_samples] , identifier[num_subsamples] , identifier[prng] ) identifier[new_estimator] . 
identifier[fit] ( identifier[local_X] [ identifier[rp] ,:]) keyword[if] identifier[isinstance] ( identifier[new_estimator] . identifier[precision_] , identifier[list] ): identifier[prec_real_bools] =[] keyword[for] identifier[prec] keyword[in] identifier[new_estimator] . identifier[precision_] : identifier[prec_real_bools] . identifier[append] ( identifier[np] . identifier[all] ( identifier[np] . identifier[isreal] ( identifier[prec] ))) identifier[prec_is_real] = identifier[np] . identifier[all] ( identifier[np] . identifier[array] ( identifier[prec_real_bools] ) keyword[is] keyword[True] ) keyword[elif] identifier[isinstance] ( identifier[new_estimator] . identifier[precision_] , identifier[np] . identifier[ndarray] ): identifier[prec_is_real] = identifier[np] . identifier[all] ( identifier[np] . identifier[isreal] ( identifier[new_estimator] . identifier[precision_] )) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[index] ,( identifier[boot_lam] , identifier[rp] , identifier[new_estimator] )
def _fit(indexed_params, penalization, lam, lam_perturb, lam_scale_, estimator, penalty_name, subsample, bootstrap, prng, X=None): """Wrapper function outside of instance for fitting a single model average trial. If X is None, then we assume we are using a broadcast spark object. Else, we expect X to get passed into this function. """ index = indexed_params if isinstance(X, np.ndarray): local_X = X # depends on [control=['if'], data=[]] else: local_X = X.value (n_samples, n_features) = local_X.shape prec_is_real = False while not prec_is_real: boot_lam = None if penalization == 'subsampling': pass # depends on [control=['if'], data=[]] elif penalization == 'random': boot_lam = _fix_weights(_random_weights, n_features, lam, lam_perturb, prng) # depends on [control=['if'], data=[]] elif penalization == 'fully-random': boot_lam = _fix_weights(_fully_random_weights, n_features, lam_scale_, prng) # depends on [control=['if'], data=[]] else: raise NotImplementedError("Only penalization = 'subsampling', 'random', and 'fully-random' have been implemented. 
Found {}.".format(penalization)) # new instance of estimator new_estimator = clone(estimator) if boot_lam is not None: new_estimator.set_params(**{penalty_name: boot_lam}) # depends on [control=['if'], data=['boot_lam']] # fit estimator num_subsamples = int(subsample * n_samples) rp = bootstrap(n_samples, num_subsamples, prng) new_estimator.fit(local_X[rp, :]) # check that new_estimator.precision_ is real # if not, skip this boot_lam and try again if isinstance(new_estimator.precision_, list): prec_real_bools = [] for prec in new_estimator.precision_: prec_real_bools.append(np.all(np.isreal(prec))) # depends on [control=['for'], data=['prec']] prec_is_real = np.all(np.array(prec_real_bools) is True) # depends on [control=['if'], data=[]] elif isinstance(new_estimator.precision_, np.ndarray): prec_is_real = np.all(np.isreal(new_estimator.precision_)) # depends on [control=['if'], data=[]] else: raise ValueError('Estimator returned invalid precision_.') # depends on [control=['while'], data=[]] return (index, (boot_lam, rp, new_estimator))
def calc_freefree_eta(ne, t, hz):
    """Dulk (1985) equations 7 and 20, assuming pure hydrogen."""
    # Kirchhoff's law: emissivity = absorption coefficient times the
    # Rayleigh-Jeans source term (k T nu^2 / c^2).
    ff_kappa = calc_freefree_kappa(ne, t, hz)
    return ff_kappa * cgs.k * t * hz ** 2 / cgs.c ** 2
def function[calc_freefree_eta, parameter[ne, t, hz]]: constant[Dulk (1985) equations 7 and 20, assuming pure hydrogen.] variable[kappa] assign[=] call[name[calc_freefree_kappa], parameter[name[ne], name[t], name[hz]]] return[binary_operation[binary_operation[binary_operation[binary_operation[name[kappa] * name[cgs].k] * name[t]] * binary_operation[name[hz] ** constant[2]]] / binary_operation[name[cgs].c ** constant[2]]]]
keyword[def] identifier[calc_freefree_eta] ( identifier[ne] , identifier[t] , identifier[hz] ): literal[string] identifier[kappa] = identifier[calc_freefree_kappa] ( identifier[ne] , identifier[t] , identifier[hz] ) keyword[return] identifier[kappa] * identifier[cgs] . identifier[k] * identifier[t] * identifier[hz] ** literal[int] / identifier[cgs] . identifier[c] ** literal[int]
def calc_freefree_eta(ne, t, hz): """Dulk (1985) equations 7 and 20, assuming pure hydrogen.""" kappa = calc_freefree_kappa(ne, t, hz) return kappa * cgs.k * t * hz ** 2 / cgs.c ** 2
def post(self, request, uri):
    """
    Set node data for uri, return rendered content.

    JSON Response:
        {uri: x, content: y}
    """
    decoded_uri = self.decode_uri(uri)
    payload, meta = self.get_post_data(request)
    # Stamp the request's authenticated user as the author of this edit.
    meta['author'] = auth.get_username(request)
    saved_node = cio.set(decoded_uri, payload, publish=False, **meta)
    return self.render_to_json(saved_node)
def function[post, parameter[self, request, uri]]: constant[ Set node data for uri, return rendered content. JSON Response: {uri: x, content: y} ] variable[uri] assign[=] call[name[self].decode_uri, parameter[name[uri]]] <ast.Tuple object at 0x7da1b0ebde70> assign[=] call[name[self].get_post_data, parameter[name[request]]] call[name[meta]][constant[author]] assign[=] call[name[auth].get_username, parameter[name[request]]] variable[node] assign[=] call[name[cio].set, parameter[name[uri], name[data]]] return[call[name[self].render_to_json, parameter[name[node]]]]
keyword[def] identifier[post] ( identifier[self] , identifier[request] , identifier[uri] ): literal[string] identifier[uri] = identifier[self] . identifier[decode_uri] ( identifier[uri] ) identifier[data] , identifier[meta] = identifier[self] . identifier[get_post_data] ( identifier[request] ) identifier[meta] [ literal[string] ]= identifier[auth] . identifier[get_username] ( identifier[request] ) identifier[node] = identifier[cio] . identifier[set] ( identifier[uri] , identifier[data] , identifier[publish] = keyword[False] ,** identifier[meta] ) keyword[return] identifier[self] . identifier[render_to_json] ( identifier[node] )
def post(self, request, uri): """ Set node data for uri, return rendered content. JSON Response: {uri: x, content: y} """ uri = self.decode_uri(uri) (data, meta) = self.get_post_data(request) meta['author'] = auth.get_username(request) node = cio.set(uri, data, publish=False, **meta) return self.render_to_json(node)
def download_manifest(self, manifest, replica, num_retries=10, min_delay_seconds=0.25, download_dir=''):
    """
    Process the given manifest file in TSV (tab-separated values) format and download the files
    referenced by it.

    :param str manifest: path to a TSV (tab-separated values) file listing files to download
    :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon
        Web Services, and `gcp` for Google Cloud Platform. [aws, gcp]
    :param int num_retries: The initial quota of download failures to accept before exiting due
        to failures. The number of retries increase and decrease as file chunks succeed and fail.
    :param float min_delay_seconds: The minimum number of seconds to wait in between retries.

    Process the given manifest file in TSV (tab-separated values) format and download the files
    referenced by it. Each row in the manifest represents one file in DSS. The manifest must have a
    header row. The header row must declare the following columns:

    * `bundle_uuid` - the UUID of the bundle containing the file in DSS.
    * `bundle_version` - the version of the bundle containing the file in DSS.
    * `file_name` - the name of the file as specified in the bundle.

    The TSV may have additional columns. Those columns will be ignored. The ordering of the columns
    is insignificant because the TSV is required to have a header row.
    """
    # Count of individual file downloads that raised; bundle-level failures are
    # tallied separately by _download_manifest_tasks.
    file_errors = 0
    file_task, bundle_errors = self._download_manifest_tasks(manifest, replica, num_retries,
                                                             min_delay_seconds, download_dir)
    with concurrent.futures.ThreadPoolExecutor(self.threads) as executor:
        # Map each submitted future back to its DSS file so a failure can be
        # attributed to a concrete uuid/version in the warning below.
        futures_to_dss_file = {executor.submit(task): dss_file for dss_file, task in file_task}
        for future in concurrent.futures.as_completed(futures_to_dss_file):
            dss_file = futures_to_dss_file[future]
            try:
                future.result()
            except Exception as e:
                # Best-effort: keep downloading the remaining files and report
                # the aggregate failure count at the end.
                file_errors += 1
                logger.warning('Failed to download file %s version %s from replica %s',
                               dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
    if file_errors or bundle_errors:
        # Compose a combined message covering whichever error categories occurred.
        bundle_error_str = '{} bundle(s) failed to download'.format(bundle_errors) if bundle_errors else ''
        file_error_str = '{} file(s) failed to download'.format(file_errors) if file_errors else ''
        raise RuntimeError(bundle_error_str + (' and ' if bundle_errors and file_errors else '') + file_error_str)
    else:
        # Only write the output manifest when every download succeeded.
        self._write_output_manifest(manifest, download_dir)
        logger.info('Primary copies of the files have been downloaded to `.hca` and linked '
                    'into per-bundle subdirectories of the current directory.')
def function[download_manifest, parameter[self, manifest, replica, num_retries, min_delay_seconds, download_dir]]: constant[ Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it. :param str manifest: path to a TSV (tab-separated values) file listing files to download :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and `gcp` for Google Cloud Platform. [aws, gcp] :param int num_retries: The initial quota of download failures to accept before exiting due to failures. The number of retries increase and decrease as file chucks succeed and fail. :param float min_delay_seconds: The minimum number of seconds to wait in between retries. Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it. Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row must declare the following columns: * `bundle_uuid` - the UUID of the bundle containing the file in DSS. * `bundle_version` - the version of the bundle containing the file in DSS. * `file_name` - the name of the file as specified in the bundle. The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is insignificant because the TSV is required to have a header row. 
] variable[file_errors] assign[=] constant[0] <ast.Tuple object at 0x7da20c993fa0> assign[=] call[name[self]._download_manifest_tasks, parameter[name[manifest], name[replica], name[num_retries], name[min_delay_seconds], name[download_dir]]] with call[name[concurrent].futures.ThreadPoolExecutor, parameter[name[self].threads]] begin[:] variable[futures_to_dss_file] assign[=] <ast.DictComp object at 0x7da20c9939a0> for taget[name[future]] in starred[call[name[concurrent].futures.as_completed, parameter[name[futures_to_dss_file]]]] begin[:] variable[dss_file] assign[=] call[name[futures_to_dss_file]][name[future]] <ast.Try object at 0x7da20c991c00> if <ast.BoolOp object at 0x7da20c991000> begin[:] variable[bundle_error_str] assign[=] <ast.IfExp object at 0x7da20c993970> variable[file_error_str] assign[=] <ast.IfExp object at 0x7da20c990430> <ast.Raise object at 0x7da20c993850>
keyword[def] identifier[download_manifest] ( identifier[self] , identifier[manifest] , identifier[replica] , identifier[num_retries] = literal[int] , identifier[min_delay_seconds] = literal[int] , identifier[download_dir] = literal[string] ): literal[string] identifier[file_errors] = literal[int] identifier[file_task] , identifier[bundle_errors] = identifier[self] . identifier[_download_manifest_tasks] ( identifier[manifest] , identifier[replica] , identifier[num_retries] , identifier[min_delay_seconds] , identifier[download_dir] ) keyword[with] identifier[concurrent] . identifier[futures] . identifier[ThreadPoolExecutor] ( identifier[self] . identifier[threads] ) keyword[as] identifier[executor] : identifier[futures_to_dss_file] ={ identifier[executor] . identifier[submit] ( identifier[task] ): identifier[dss_file] keyword[for] identifier[dss_file] , identifier[task] keyword[in] identifier[file_task] } keyword[for] identifier[future] keyword[in] identifier[concurrent] . identifier[futures] . identifier[as_completed] ( identifier[futures_to_dss_file] ): identifier[dss_file] = identifier[futures_to_dss_file] [ identifier[future] ] keyword[try] : identifier[future] . identifier[result] () keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[file_errors] += literal[int] identifier[logger] . identifier[warning] ( literal[string] , identifier[dss_file] . identifier[uuid] , identifier[dss_file] . identifier[version] , identifier[dss_file] . identifier[replica] , identifier[exc_info] = identifier[e] ) keyword[if] identifier[file_errors] keyword[or] identifier[bundle_errors] : identifier[bundle_error_str] = literal[string] . identifier[format] ( identifier[bundle_errors] ) keyword[if] identifier[bundle_errors] keyword[else] literal[string] identifier[file_error_str] = literal[string] . 
identifier[format] ( identifier[file_errors] ) keyword[if] identifier[file_errors] keyword[else] literal[string] keyword[raise] identifier[RuntimeError] ( identifier[bundle_error_str] +( literal[string] keyword[if] identifier[bundle_errors] keyword[and] identifier[file_errors] keyword[else] literal[string] )+ identifier[file_error_str] ) keyword[else] : identifier[self] . identifier[_write_output_manifest] ( identifier[manifest] , identifier[download_dir] ) identifier[logger] . identifier[info] ( literal[string] literal[string] )
def download_manifest(self, manifest, replica, num_retries=10, min_delay_seconds=0.25, download_dir=''): """ Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it. :param str manifest: path to a TSV (tab-separated values) file listing files to download :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and `gcp` for Google Cloud Platform. [aws, gcp] :param int num_retries: The initial quota of download failures to accept before exiting due to failures. The number of retries increase and decrease as file chucks succeed and fail. :param float min_delay_seconds: The minimum number of seconds to wait in between retries. Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it. Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row must declare the following columns: * `bundle_uuid` - the UUID of the bundle containing the file in DSS. * `bundle_version` - the version of the bundle containing the file in DSS. * `file_name` - the name of the file as specified in the bundle. The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is insignificant because the TSV is required to have a header row. 
""" file_errors = 0 (file_task, bundle_errors) = self._download_manifest_tasks(manifest, replica, num_retries, min_delay_seconds, download_dir) with concurrent.futures.ThreadPoolExecutor(self.threads) as executor: futures_to_dss_file = {executor.submit(task): dss_file for (dss_file, task) in file_task} for future in concurrent.futures.as_completed(futures_to_dss_file): dss_file = futures_to_dss_file[future] try: future.result() # depends on [control=['try'], data=[]] except Exception as e: file_errors += 1 logger.warning('Failed to download file %s version %s from replica %s', dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['future']] # depends on [control=['with'], data=['executor']] if file_errors or bundle_errors: bundle_error_str = '{} bundle(s) failed to download'.format(bundle_errors) if bundle_errors else '' file_error_str = '{} file(s) failed to download'.format(file_errors) if file_errors else '' raise RuntimeError(bundle_error_str + (' and ' if bundle_errors and file_errors else '') + file_error_str) # depends on [control=['if'], data=[]] else: self._write_output_manifest(manifest, download_dir) logger.info('Primary copies of the files have been downloaded to `.hca` and linked into per-bundle subdirectories of the current directory.')
def translation(self, bundleId, languages, priority='gp', domain=None,
                localedir=None, class_=None, codeset=None):
    """Returns the ``Translations`` instance to be used for obtaining translations.

    ``bundleId`` is the name of the bundle to use. ``languages`` is the list of
    languages to use, with subsequent ones being fallbacks. Additionally, based
    on the value of ``priority``, local translated values can be given
    precedence over Globalization Pipeline translated values.

    For example, to fallback to Spanish if French translated values are not
    found, ``languages=['fr', 'es']``. And if ``priority=gp``, the fallback
    chain will be as follows:

    - use ``gp`` French translated value, if not found:
    - use ``local`` French translated value, if not found:
    - use ``gp`` Spanish translated value, if not found:
    - use ``local`` Spanish translated value, if not found:
    - use source value, if not found:
    - use provided key

    In order to search for local translated values, the optional parameters
    must be provided according to `gettext.translation
    <https://docs.python.org/2/library/gettext.html#gettext.translation>`_
    """
    availableLangs = self.get_avaliable_languages(bundleId)
    translations = None
    for language in languages:
        # get local translation
        localTranslations = None
        if domain:
            t = local_translation(domain=domain, localedir=localedir,
                                  languages=[language], class_=class_,
                                  fallback=True, codeset=codeset)
            # only use t if it's not NullTranslations - NullTranslations
            # indicates that a translation file was not found
            # NOTE(review): gettext with fallback=True normally returns a
            # NullTranslations *instance*, while this tests identity against
            # the NullTranslations *class* (always "is not") -- confirm
            # local_translation's contract before relying on this guard.
            if t is not NullTranslations:
                localTranslations = t
        gpTranslations = None
        # get gp translation if the bundle has the language
        match = self.__get_language_match(languageCode=language,
                                          languageIds=availableLangs)
        if match:
            gpTranslations = GPTranslations(bundleId=bundleId,
                                            languageId=match,
                                            client=self,
                                            cacheTimeout=self.__cacheTimeout)
        # create the fallback chain
        if not translations:
            # set the first translation in the chain, preferring the source
            # indicated by ``priority`` and chaining the other behind it
            if priority == 'local':
                if not localTranslations:
                    translations = gpTranslations
                else:
                    translations = localTranslations
                    if gpTranslations:
                        translations.add_fallback(gpTranslations)
            elif priority == 'gp':
                if not gpTranslations:
                    translations = localTranslations
                else:
                    translations = gpTranslations
                    if localTranslations:
                        translations.add_fallback(localTranslations)
        else:
            # add fallback in the preferred order
            if priority == 'local':
                if localTranslations:
                    translations.add_fallback(localTranslations)
                if gpTranslations:
                    translations.add_fallback(gpTranslations)
            elif priority == 'gp':
                if gpTranslations:
                    translations.add_fallback(gpTranslations)
                if localTranslations:
                    translations.add_fallback(localTranslations)
    if not translations:
        # nothing matched any requested language: fall through to keys/source
        logging.warning('No translations were found for bundleID <%s>' \
            + ' and languages <%s> ', bundleId, languages)
        translations = NullTranslations()
    return translations
def function[translation, parameter[self, bundleId, languages, priority, domain, localedir, class_, codeset]]: constant[Returns the ``Translations`` instance to be used for obtaining translations. ``bundleId`` is the name of the bundle to use. ``languages`` is the list of languages to use, with subsequent ones being fallbacks. Additionally, based on the value of ``priority``, local translated values can be given precedence over Globalization Pipeline translated values. For example, to fallback to Spanish if French translated values are not found, ``languages=['fr', 'es']``. And if ``priority=gp``, the fallback chain will be as follows: - use ``gp`` French translated value, if not found: - use ``local`` French translated value, if not found: - use ``gp`` Spanish translated value, if not found: - use ``local`` Spanish translated value, if not found: - use source value, if not found: - use provided key In order to search for local translated values, the optional parameters must be provided according to `gettext.translation <https://docs.python.org/2/library/gettext.html#gettext.translation>`_ ] variable[availableLangs] assign[=] call[name[self].get_avaliable_languages, parameter[name[bundleId]]] variable[translations] assign[=] constant[None] for taget[name[language]] in starred[name[languages]] begin[:] variable[localTranslations] assign[=] constant[None] if name[domain] begin[:] variable[t] assign[=] call[name[local_translation], parameter[]] if compare[name[t] is_not name[NullTranslations]] begin[:] variable[localTranslations] assign[=] name[t] variable[gpTranslations] assign[=] constant[None] variable[match] assign[=] call[name[self].__get_language_match, parameter[]] if name[match] begin[:] variable[gpTranslations] assign[=] call[name[GPTranslations], parameter[]] if <ast.UnaryOp object at 0x7da18bc72a70> begin[:] if compare[name[priority] equal[==] constant[local]] begin[:] if <ast.UnaryOp object at 0x7da18bc718d0> begin[:] variable[translations] assign[=] 
name[gpTranslations] if <ast.UnaryOp object at 0x7da18bc73a60> begin[:] call[name[logging].warning, parameter[binary_operation[constant[No translations were found for bundleID <%s>] + constant[ and languages <%s> ]], name[bundleId], name[languages]]] variable[translations] assign[=] call[name[NullTranslations], parameter[]] return[name[translations]]
keyword[def] identifier[translation] ( identifier[self] , identifier[bundleId] , identifier[languages] , identifier[priority] = literal[string] , identifier[domain] = keyword[None] , identifier[localedir] = keyword[None] , identifier[class_] = keyword[None] , identifier[codeset] = keyword[None] ): literal[string] identifier[availableLangs] = identifier[self] . identifier[get_avaliable_languages] ( identifier[bundleId] ) identifier[translations] = keyword[None] keyword[for] identifier[language] keyword[in] identifier[languages] : identifier[localTranslations] = keyword[None] keyword[if] identifier[domain] : identifier[t] = identifier[local_translation] ( identifier[domain] = identifier[domain] , identifier[localedir] = identifier[localedir] , identifier[languages] =[ identifier[language] ], identifier[class_] = identifier[class_] , identifier[fallback] = keyword[True] , identifier[codeset] = identifier[codeset] ) keyword[if] identifier[t] keyword[is] keyword[not] identifier[NullTranslations] : identifier[localTranslations] = identifier[t] identifier[gpTranslations] = keyword[None] identifier[match] = identifier[self] . identifier[__get_language_match] ( identifier[languageCode] = identifier[language] , identifier[languageIds] = identifier[availableLangs] ) keyword[if] identifier[match] : identifier[gpTranslations] = identifier[GPTranslations] ( identifier[bundleId] = identifier[bundleId] , identifier[languageId] = identifier[match] , identifier[client] = identifier[self] , identifier[cacheTimeout] = identifier[self] . identifier[__cacheTimeout] ) keyword[if] keyword[not] identifier[translations] : keyword[if] identifier[priority] == literal[string] : keyword[if] keyword[not] identifier[localTranslations] : identifier[translations] = identifier[gpTranslations] keyword[else] : identifier[translations] = identifier[localTranslations] keyword[if] identifier[gpTranslations] : identifier[translations] . 
identifier[add_fallback] ( identifier[gpTranslations] ) keyword[elif] identifier[priority] == literal[string] : keyword[if] keyword[not] identifier[gpTranslations] : identifier[translations] = identifier[localTranslations] keyword[else] : identifier[translations] = identifier[gpTranslations] keyword[if] identifier[localTranslations] : identifier[translations] . identifier[add_fallback] ( identifier[localTranslations] ) keyword[else] : keyword[if] identifier[priority] == literal[string] : keyword[if] identifier[localTranslations] : identifier[translations] . identifier[add_fallback] ( identifier[localTranslations] ) keyword[if] identifier[gpTranslations] : identifier[translations] . identifier[add_fallback] ( identifier[gpTranslations] ) keyword[elif] identifier[priority] == literal[string] : keyword[if] identifier[gpTranslations] : identifier[translations] . identifier[add_fallback] ( identifier[gpTranslations] ) keyword[if] identifier[localTranslations] : identifier[translations] . identifier[add_fallback] ( identifier[localTranslations] ) keyword[if] keyword[not] identifier[translations] : identifier[logging] . identifier[warning] ( literal[string] + literal[string] , identifier[bundleId] , identifier[languages] ) identifier[translations] = identifier[NullTranslations] () keyword[return] identifier[translations]
def translation(self, bundleId, languages, priority='gp', domain=None,
                localedir=None, class_=None, codeset=None):
    """Return the ``Translations`` instance to be used for obtaining
    translations.

    ``bundleId`` is the name of the bundle to use. ``languages`` is the
    list of languages to use, with subsequent ones being fallbacks.
    Additionally, based on the value of ``priority`` (``'gp'`` or
    ``'local'``), local translated values can be given precedence over
    Globalization Pipeline translated values.

    For example, to fall back to Spanish if French translated values are
    not found, use ``languages=['fr', 'es']``. With ``priority='gp'``, the
    fallback chain will be as follows:

    - use ``gp`` French translated value, if not found:
    - use ``local`` French translated value, if not found:
    - use ``gp`` Spanish translated value, if not found:
    - use ``local`` Spanish translated value, if not found:
    - use source value, if not found:
    - use provided key

    In order to search for local translated values, the optional
    parameters must be provided according to `gettext.translation
    <https://docs.python.org/2/library/gettext.html#gettext.translation>`_
    """
    availableLangs = self.get_avaliable_languages(bundleId)
    translations = None
    for language in languages:
        # get local translation
        localTranslations = None
        if domain:
            t = local_translation(domain=domain, localedir=localedir,
                                  languages=[language], class_=class_,
                                  fallback=True, codeset=codeset)
            # BUGFIX: with fallback=True, gettext returns a NullTranslations
            # *instance* (not the class) when no catalog file is found, so
            # the old check ``t is not NullTranslations`` was always true.
            # Compare the concrete type instead; real catalogs are proper
            # subclasses of NullTranslations (e.g. GNUTranslations) and
            # must be kept.
            if type(t) is not NullTranslations:
                localTranslations = t

        gpTranslations = None
        # get gp translation if the bundle has the language
        match = self.__get_language_match(languageCode=language,
                                          languageIds=availableLangs)
        if match:
            gpTranslations = GPTranslations(bundleId=bundleId,
                                            languageId=match, client=self,
                                            cacheTimeout=self.__cacheTimeout)

        # create the fallback chain
        if not translations:
            # set the first translation in the chain
            if priority == 'local':
                if not localTranslations:
                    translations = gpTranslations
                else:
                    translations = localTranslations
                    if gpTranslations:
                        translations.add_fallback(gpTranslations)
            elif priority == 'gp':
                if not gpTranslations:
                    translations = localTranslations
                else:
                    translations = gpTranslations
                    if localTranslations:
                        translations.add_fallback(localTranslations)
        else:
            # add fallbacks for this (lower-priority) language, in the
            # preferred order
            if priority == 'local':
                if localTranslations:
                    translations.add_fallback(localTranslations)
                if gpTranslations:
                    translations.add_fallback(gpTranslations)
            elif priority == 'gp':
                if gpTranslations:
                    translations.add_fallback(gpTranslations)
                if localTranslations:
                    translations.add_fallback(localTranslations)

    if not translations:
        logging.warning('No translations were found for bundleID <%s>'
                        ' and languages <%s> ', bundleId, languages)
        translations = NullTranslations()
    return translations
def parse_date(self, value):
    """Coerce *value* into a :class:`date`, accepting several input types.

    Supported inputs, checked in this order:

    - string: delegated to ``self.str2date``
    - ``None``: rejected with :exc:`TypeError`
    - integer: interpreted as a proleptic Gregorian ordinal
    - :class:`datetime`: truncated to its date part
    - :class:`date`: returned unchanged

    Anything else raises :exc:`ValueError`.
    """
    # Guard-clause cascade; order matters because datetime is a
    # subclass of date and must be tested first.
    if isinstance(value, sixmini.string_types):
        return self.str2date(value)
    if value is None:
        raise TypeError("Unable to parse date from %r" % value)
    if isinstance(value, sixmini.integer_types):
        return date.fromordinal(value)
    if isinstance(value, datetime):
        return value.date()
    if isinstance(value, date):
        return value
    raise ValueError("Unable to parse date from %r" % value)
def function[parse_date, parameter[self, value]]: constant[ A lazy method to parse anything to date. If input data type is: - string: parse date from it - integer: use from ordinal - datetime: use date part - date: just return it ] if call[name[isinstance], parameter[name[value], name[sixmini].string_types]] begin[:] return[call[name[self].str2date, parameter[name[value]]]]
keyword[def] identifier[parse_date] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[sixmini] . identifier[string_types] ): keyword[return] identifier[self] . identifier[str2date] ( identifier[value] ) keyword[elif] identifier[value] keyword[is] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[value] ) keyword[elif] identifier[isinstance] ( identifier[value] , identifier[sixmini] . identifier[integer_types] ): keyword[return] identifier[date] . identifier[fromordinal] ( identifier[value] ) keyword[elif] identifier[isinstance] ( identifier[value] , identifier[datetime] ): keyword[return] identifier[value] . identifier[date] () keyword[elif] identifier[isinstance] ( identifier[value] , identifier[date] ): keyword[return] identifier[value] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[value] )
def parse_date(self, value): """ A lazy method to parse anything to date. If input data type is: - string: parse date from it - integer: use from ordinal - datetime: use date part - date: just return it """ if isinstance(value, sixmini.string_types): return self.str2date(value) # depends on [control=['if'], data=[]] elif value is None: raise TypeError('Unable to parse date from %r' % value) # depends on [control=['if'], data=['value']] elif isinstance(value, sixmini.integer_types): return date.fromordinal(value) # depends on [control=['if'], data=[]] elif isinstance(value, datetime): return value.date() # depends on [control=['if'], data=[]] elif isinstance(value, date): return value # depends on [control=['if'], data=[]] else: raise ValueError('Unable to parse date from %r' % value)
def __public_objs(self):
    """
    Returns a dictionary mapping a public identifier name to a
    Python object.
    """
    # Walk every member of the wrapped module and keep only the names
    # that the export filter accepts.
    public = {}
    for member_name, member_obj in inspect.getmembers(self.module):
        if self.__is_exported(member_name, inspect.getmodule(member_obj)):
            public[member_name] = member_obj
    return public
def function[__public_objs, parameter[self]]: constant[ Returns a dictionary mapping a public identifier name to a Python object. ] variable[members] assign[=] call[name[dict], parameter[call[name[inspect].getmembers, parameter[name[self].module]]]] return[call[name[dict], parameter[<ast.ListComp object at 0x7da1b149e2c0>]]]
keyword[def] identifier[__public_objs] ( identifier[self] ): literal[string] identifier[members] = identifier[dict] ( identifier[inspect] . identifier[getmembers] ( identifier[self] . identifier[module] )) keyword[return] identifier[dict] ([( identifier[name] , identifier[obj] ) keyword[for] identifier[name] , identifier[obj] keyword[in] identifier[members] . identifier[items] () keyword[if] identifier[self] . identifier[__is_exported] ( identifier[name] , identifier[inspect] . identifier[getmodule] ( identifier[obj] ))])
def __public_objs(self): """ Returns a dictionary mapping a public identifier name to a Python object. """ members = dict(inspect.getmembers(self.module)) return dict([(name, obj) for (name, obj) in members.items() if self.__is_exported(name, inspect.getmodule(obj))])
def translate_filenames(filenames):
    """Convert filenames from Linux (vboxsf) form to Windows form.

    Mutates ``filenames`` in place and also returns it.

    BUGFIX: the original returned ``filenames`` only on the Windows
    branch and fell off the end (returning ``None``) after the
    translation loop; the return value is now consistent in both paths.
    """
    if is_windows():
        # Already on Windows: paths need no translation.
        return filenames
    for index, filename in enumerate(filenames):
        filenames[index] = vboxsf_to_windows(filename)
    return filenames
def function[translate_filenames, parameter[filenames]]: constant[Convert filenames from Linux to Windows.] if call[name[is_windows], parameter[]] begin[:] return[name[filenames]] for taget[tuple[[<ast.Name object at 0x7da2047e9ae0>, <ast.Name object at 0x7da2047e9ba0>]]] in starred[call[name[enumerate], parameter[name[filenames]]]] begin[:] call[name[filenames]][name[index]] assign[=] call[name[vboxsf_to_windows], parameter[name[filename]]]
keyword[def] identifier[translate_filenames] ( identifier[filenames] ): literal[string] keyword[if] identifier[is_windows] (): keyword[return] identifier[filenames] keyword[for] identifier[index] , identifier[filename] keyword[in] identifier[enumerate] ( identifier[filenames] ): identifier[filenames] [ identifier[index] ]= identifier[vboxsf_to_windows] ( identifier[filename] )
def translate_filenames(filenames): """Convert filenames from Linux to Windows.""" if is_windows(): return filenames # depends on [control=['if'], data=[]] for (index, filename) in enumerate(filenames): filenames[index] = vboxsf_to_windows(filename) # depends on [control=['for'], data=[]]
def xml_to_json(root):
    """Convert an Open511 XML document or document fragment to JSON.

    Takes an lxml Element object. Returns a dict ready to be JSON-serialized.

    Mapping rules (as implemented below):

    - A childless element collapses to its text (intified where possible).
    - A single GML child is converted to GeoJSON.
    - ``<link rel="X" href="...">`` becomes an ``X_url`` key (``self`` -> ``url``).
    - Protected-namespace tags get a ``!`` key prefix; any other namespaced
      tag gets a ``+`` prefix.
    - A container whose children all pluralize to its own tag name
      serializes to a JSON array; duplicates of an already-seen key are
      skipped (first occurrence wins).
    """
    j = {}

    if len(root) == 0:  # Tag with no children, return str/int
        return _maybe_intify(root.text)

    if len(root) == 1 and root[0].tag.startswith('{' + NS_GML):  # GML
        return gml_to_geojson(root[0])

    if root.tag == 'open511':
        # Top-level document: record the protocol version in the meta block.
        j['meta'] = {'version': root.get('version')}

    for elem in root:
        name = elem.tag
        if name == 'link' and elem.get('rel'):
            # <link rel="X"> serializes under the key "X_url".
            name = elem.get('rel') + '_url'
            if name == 'self_url':
                name = 'url'
            if root.tag == 'open511':
                # Links at the document root live in the meta block,
                # not alongside the data keys.
                j['meta'][name] = elem.get('href')
                continue
        elif name.startswith('{' + NS_PROTECTED):
            # Protected namespace: mark the key with a leading "!".
            name = '!' + name[name.index('}') + 1:]
        elif name[0] == '{':
            # Namespace! Any other namespaced tag gets a "+" prefix.
            name = '+' + name[name.index('}') + 1:]

        if name in j:
            continue  # duplicate
        elif elem.tag == 'link' and not elem.text:
            # Bare link element: keep only its href.
            j[name] = elem.get('href')
        elif len(elem):
            if name == 'grouped_events':
                # An array of URLs
                j[name] = [xml_link_to_json(child, to_dict=False) for child in elem]
            elif name in ('attachments', 'media_files'):
                # An array of JSON objects
                j[name] = [xml_link_to_json(child, to_dict=True) for child in elem]
            elif all((name == pluralize(child.tag) for child in elem)):
                # <something><somethings> serializes to a JSON array
                j[name] = [xml_to_json(child) for child in elem]
            else:
                # Anything else with children becomes a nested object.
                j[name] = xml_to_json(elem)
        else:
            if root.tag == 'open511' and name.endswith('s') and not elem.text:
                # Special case: an empty e.g. <events /> container at the root level
                # should be serialized to [], not null
                j[name] = []
            else:
                j[name] = _maybe_intify(elem.text)

    return j
def function[xml_to_json, parameter[root]]: constant[Convert an Open511 XML document or document fragment to JSON. Takes an lxml Element object. Returns a dict ready to be JSON-serialized.] variable[j] assign[=] dictionary[[], []] if compare[call[name[len], parameter[name[root]]] equal[==] constant[0]] begin[:] return[call[name[_maybe_intify], parameter[name[root].text]]] if <ast.BoolOp object at 0x7da1b004a710> begin[:] return[call[name[gml_to_geojson], parameter[call[name[root]][constant[0]]]]] if compare[name[root].tag equal[==] constant[open511]] begin[:] call[name[j]][constant[meta]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0048850>], [<ast.Call object at 0x7da1b0048220>]] for taget[name[elem]] in starred[name[root]] begin[:] variable[name] assign[=] name[elem].tag if <ast.BoolOp object at 0x7da1b0048d60> begin[:] variable[name] assign[=] binary_operation[call[name[elem].get, parameter[constant[rel]]] + constant[_url]] if compare[name[name] equal[==] constant[self_url]] begin[:] variable[name] assign[=] constant[url] if compare[name[root].tag equal[==] constant[open511]] begin[:] call[call[name[j]][constant[meta]]][name[name]] assign[=] call[name[elem].get, parameter[constant[href]]] continue if compare[name[name] in name[j]] begin[:] continue return[name[j]]
keyword[def] identifier[xml_to_json] ( identifier[root] ): literal[string] identifier[j] ={} keyword[if] identifier[len] ( identifier[root] )== literal[int] : keyword[return] identifier[_maybe_intify] ( identifier[root] . identifier[text] ) keyword[if] identifier[len] ( identifier[root] )== literal[int] keyword[and] identifier[root] [ literal[int] ]. identifier[tag] . identifier[startswith] ( literal[string] + identifier[NS_GML] ): keyword[return] identifier[gml_to_geojson] ( identifier[root] [ literal[int] ]) keyword[if] identifier[root] . identifier[tag] == literal[string] : identifier[j] [ literal[string] ]={ literal[string] : identifier[root] . identifier[get] ( literal[string] )} keyword[for] identifier[elem] keyword[in] identifier[root] : identifier[name] = identifier[elem] . identifier[tag] keyword[if] identifier[name] == literal[string] keyword[and] identifier[elem] . identifier[get] ( literal[string] ): identifier[name] = identifier[elem] . identifier[get] ( literal[string] )+ literal[string] keyword[if] identifier[name] == literal[string] : identifier[name] = literal[string] keyword[if] identifier[root] . identifier[tag] == literal[string] : identifier[j] [ literal[string] ][ identifier[name] ]= identifier[elem] . identifier[get] ( literal[string] ) keyword[continue] keyword[elif] identifier[name] . identifier[startswith] ( literal[string] + identifier[NS_PROTECTED] ): identifier[name] = literal[string] + identifier[name] [ identifier[name] . identifier[index] ( literal[string] )+ literal[int] :] keyword[elif] identifier[name] [ literal[int] ]== literal[string] : identifier[name] = literal[string] + identifier[name] [ identifier[name] . identifier[index] ( literal[string] )+ literal[int] :] keyword[if] identifier[name] keyword[in] identifier[j] : keyword[continue] keyword[elif] identifier[elem] . identifier[tag] == literal[string] keyword[and] keyword[not] identifier[elem] . identifier[text] : identifier[j] [ identifier[name] ]= identifier[elem] . 
identifier[get] ( literal[string] ) keyword[elif] identifier[len] ( identifier[elem] ): keyword[if] identifier[name] == literal[string] : identifier[j] [ identifier[name] ]=[ identifier[xml_link_to_json] ( identifier[child] , identifier[to_dict] = keyword[False] ) keyword[for] identifier[child] keyword[in] identifier[elem] ] keyword[elif] identifier[name] keyword[in] ( literal[string] , literal[string] ): identifier[j] [ identifier[name] ]=[ identifier[xml_link_to_json] ( identifier[child] , identifier[to_dict] = keyword[True] ) keyword[for] identifier[child] keyword[in] identifier[elem] ] keyword[elif] identifier[all] (( identifier[name] == identifier[pluralize] ( identifier[child] . identifier[tag] ) keyword[for] identifier[child] keyword[in] identifier[elem] )): identifier[j] [ identifier[name] ]=[ identifier[xml_to_json] ( identifier[child] ) keyword[for] identifier[child] keyword[in] identifier[elem] ] keyword[else] : identifier[j] [ identifier[name] ]= identifier[xml_to_json] ( identifier[elem] ) keyword[else] : keyword[if] identifier[root] . identifier[tag] == literal[string] keyword[and] identifier[name] . identifier[endswith] ( literal[string] ) keyword[and] keyword[not] identifier[elem] . identifier[text] : identifier[j] [ identifier[name] ]=[] keyword[else] : identifier[j] [ identifier[name] ]= identifier[_maybe_intify] ( identifier[elem] . identifier[text] ) keyword[return] identifier[j]
def xml_to_json(root): """Convert an Open511 XML document or document fragment to JSON. Takes an lxml Element object. Returns a dict ready to be JSON-serialized.""" j = {} if len(root) == 0: # Tag with no children, return str/int return _maybe_intify(root.text) # depends on [control=['if'], data=[]] if len(root) == 1 and root[0].tag.startswith('{' + NS_GML): # GML return gml_to_geojson(root[0]) # depends on [control=['if'], data=[]] if root.tag == 'open511': j['meta'] = {'version': root.get('version')} # depends on [control=['if'], data=[]] for elem in root: name = elem.tag if name == 'link' and elem.get('rel'): name = elem.get('rel') + '_url' if name == 'self_url': name = 'url' # depends on [control=['if'], data=['name']] if root.tag == 'open511': j['meta'][name] = elem.get('href') continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif name.startswith('{' + NS_PROTECTED): name = '!' + name[name.index('}') + 1:] # depends on [control=['if'], data=[]] elif name[0] == '{': # Namespace! 
name = '+' + name[name.index('}') + 1:] # depends on [control=['if'], data=[]] if name in j: continue # duplicate # depends on [control=['if'], data=[]] elif elem.tag == 'link' and (not elem.text): j[name] = elem.get('href') # depends on [control=['if'], data=[]] elif len(elem): if name == 'grouped_events': # An array of URLs j[name] = [xml_link_to_json(child, to_dict=False) for child in elem] # depends on [control=['if'], data=['name']] elif name in ('attachments', 'media_files'): # An array of JSON objects j[name] = [xml_link_to_json(child, to_dict=True) for child in elem] # depends on [control=['if'], data=['name']] elif all((name == pluralize(child.tag) for child in elem)): # <something><somethings> serializes to a JSON array j[name] = [xml_to_json(child) for child in elem] # depends on [control=['if'], data=[]] else: j[name] = xml_to_json(elem) # depends on [control=['if'], data=[]] elif root.tag == 'open511' and name.endswith('s') and (not elem.text): # Special case: an empty e.g. <events /> container at the root level # should be serialized to [], not null j[name] = [] # depends on [control=['if'], data=[]] else: j[name] = _maybe_intify(elem.text) # depends on [control=['for'], data=['elem']] return j
def get_waittime(self):
    """Return the appropriate time to wait, if we sent too many messages

    :returns: the time to wait in seconds
    :rtype: :class:`float`
    :raises: None
    """
    timestamp = time.time()
    history = self.sentmessages
    history.appendleft(timestamp)
    # The rate-limit window is only "full" once maxlen sends have been
    # recorded; until then there is nothing to throttle.
    if len(history) != history.maxlen:
        return 0
    # The oldest recorded send sits at the right end of the deque.
    remaining = self.limitinterval - (timestamp - history[-1])
    # Add one second on top of the strictly required wait as a buffer.
    return remaining + 1 if remaining > 0 else 0
def function[get_waittime, parameter[self]]: constant[Return the appropriate time to wait, if we sent too many messages :returns: the time to wait in seconds :rtype: :class:`float` :raises: None ] variable[now] assign[=] call[name[time].time, parameter[]] call[name[self].sentmessages.appendleft, parameter[name[now]]] if compare[call[name[len], parameter[name[self].sentmessages]] equal[==] name[self].sentmessages.maxlen] begin[:] variable[oldest] assign[=] call[name[self].sentmessages][<ast.UnaryOp object at 0x7da1b0bd4430>] variable[waittime] assign[=] binary_operation[name[self].limitinterval - binary_operation[name[now] - name[oldest]]] if compare[name[waittime] greater[>] constant[0]] begin[:] return[binary_operation[name[waittime] + constant[1]]] return[constant[0]]
keyword[def] identifier[get_waittime] ( identifier[self] ): literal[string] identifier[now] = identifier[time] . identifier[time] () identifier[self] . identifier[sentmessages] . identifier[appendleft] ( identifier[now] ) keyword[if] identifier[len] ( identifier[self] . identifier[sentmessages] )== identifier[self] . identifier[sentmessages] . identifier[maxlen] : identifier[oldest] = identifier[self] . identifier[sentmessages] [- literal[int] ] identifier[waittime] = identifier[self] . identifier[limitinterval] -( identifier[now] - identifier[oldest] ) keyword[if] identifier[waittime] > literal[int] : keyword[return] identifier[waittime] + literal[int] keyword[return] literal[int]
def get_waittime(self): """Return the appropriate time to wait, if we sent too many messages :returns: the time to wait in seconds :rtype: :class:`float` :raises: None """ now = time.time() self.sentmessages.appendleft(now) if len(self.sentmessages) == self.sentmessages.maxlen: # check if the oldes message is older than # limited by self.limitinterval oldest = self.sentmessages[-1] waittime = self.limitinterval - (now - oldest) if waittime > 0: return waittime + 1 # add a little buffer # depends on [control=['if'], data=['waittime']] # depends on [control=['if'], data=[]] return 0
def execute_command_by_uuid(self, tab_uuid, command):  # TODO DBUS_ONLY
    """Execute the `command' in the tab whose terminal has the `tab_uuid' uuid

    A trailing newline is appended when missing so the shell actually
    executes the command. An invalid uuid string, or a uuid matching
    zero or multiple tabs, is silently ignored (ValueError from either
    ``uuid.UUID`` or the single-item unpacking below).
    """
    # BUGFIX: the original indexed command[-1], which raises IndexError
    # for an empty command string; endswith handles that case safely.
    if not command.endswith('\n'):
        command += '\n'
    try:
        tab_uuid = uuid.UUID(tab_uuid)
        # Single-item unpacking: raises ValueError unless exactly one
        # terminal carries the requested uuid.
        page_index, = (
            index for index, t in enumerate(self.get_notebook().iter_terminals())
            if t.get_uuid() == tab_uuid
        )
    except ValueError:
        pass
    else:
        # Feed the command to every terminal on the matching page.
        terminals = self.get_notebook().get_terminals_for_page(page_index)
        for current_vte in terminals:
            current_vte.feed_child(command)
def function[execute_command_by_uuid, parameter[self, tab_uuid, command]]: constant[Execute the `command' in the tab whose terminal has the `tab_uuid' uuid ] if compare[call[name[command]][<ast.UnaryOp object at 0x7da18f00c250>] not_equal[!=] constant[ ]] begin[:] <ast.AugAssign object at 0x7da1b26afc40> <ast.Try object at 0x7da1b26af430>
keyword[def] identifier[execute_command_by_uuid] ( identifier[self] , identifier[tab_uuid] , identifier[command] ): literal[string] keyword[if] identifier[command] [- literal[int] ]!= literal[string] : identifier[command] += literal[string] keyword[try] : identifier[tab_uuid] = identifier[uuid] . identifier[UUID] ( identifier[tab_uuid] ) identifier[page_index] ,=( identifier[index] keyword[for] identifier[index] , identifier[t] keyword[in] identifier[enumerate] ( identifier[self] . identifier[get_notebook] (). identifier[iter_terminals] ()) keyword[if] identifier[t] . identifier[get_uuid] ()== identifier[tab_uuid] ) keyword[except] identifier[ValueError] : keyword[pass] keyword[else] : identifier[terminals] = identifier[self] . identifier[get_notebook] (). identifier[get_terminals_for_page] ( identifier[page_index] ) keyword[for] identifier[current_vte] keyword[in] identifier[terminals] : identifier[current_vte] . identifier[feed_child] ( identifier[command] )
def execute_command_by_uuid(self, tab_uuid, command): # TODO DBUS_ONLY "Execute the `command' in the tab whose terminal has the `tab_uuid' uuid\n " if command[-1] != '\n': command += '\n' # depends on [control=['if'], data=[]] try: tab_uuid = uuid.UUID(tab_uuid) (page_index,) = (index for (index, t) in enumerate(self.get_notebook().iter_terminals()) if t.get_uuid() == tab_uuid) # depends on [control=['try'], data=[]] except ValueError: pass # depends on [control=['except'], data=[]] else: terminals = self.get_notebook().get_terminals_for_page(page_index) for current_vte in terminals: current_vte.feed_child(command) # depends on [control=['for'], data=['current_vte']]
def compare_data_model_residuals(s, tile, data_vmin='calc', data_vmax='calc', res_vmin=-0.1, res_vmax=0.1, edgepts='calc', do_imshow=True, data_cmap=plt.cm.bone, res_cmap=plt.cm.RdBu): """ Compare the data, model, and residuals of a state. Makes an image of any 2D slice of a state that compares the data, model, and residuals. The upper left portion of the image is the raw data, the central portion the model, and the lower right portion the image. Either plots the image using plt.imshow() or returns a np.ndarray of the image pixels for later use. Parameters ---------- st : peri.ImageState object The state to plot. tile : peri.util.Tile object The slice of the image to plot. Can be any xy, xz, or yz projection, but it must return a valid 2D slice (the slice is squeezed internally). data_vmin : {Float, `calc`}, optional vmin for the imshow for the data and generative model (shared). Default is 'calc' = 0.5(data.min() + model.min()) data_vmax : {Float, `calc`}, optional vmax for the imshow for the data and generative model (shared). Default is 'calc' = 0.5(data.max() + model.max()) res_vmin : Float, optional vmin for the imshow for the residuals. Default is -0.1 Default is 'calc' = 0.5(data.min() + model.min()) res_vmax : Float, optional vmax for the imshow for the residuals. Default is +0.1 edgepts : {Nested list-like, Float, 'calc'}, optional. The vertices of the triangles which determine the splitting of the image. The vertices are at (image corner, (edge, y), and (x,edge), where edge is the appropriate edge of the image. edgepts[0] : (x,y) points for the upper edge edgepts[1] : (x,y) points for the lower edge where `x` is the coordinate along the image's 0th axis and `y` along the images 1st axis. Default is 'calc,' which calculates edge points by splitting the image into 3 regions of equal area. If edgepts is a float scalar, calculates the edge points based on a constant fraction of distance from the edge. 
do_imshow : Bool If True, imshow's and returns the returned handle. If False, returns the array as a [M,N,4] array. data_cmap : matplotlib colormap instance The colormap to use for the data and model. res_cmap : matplotlib colormap instance The colormap to use for the residuals. Returns ------- image : {matplotlib.pyplot.AxesImage, numpy.ndarray} If `do_imshow` == True, the returned handle from imshow. If `do_imshow` == False, an [M,N,4] np.ndarray of the image pixels. """ # This could be modified to alpha the borderline... or to embiggen # the image and slice it more finely residuals = s.residuals[tile.slicer].squeeze() data = s.data[tile.slicer].squeeze() model = s.model[tile.slicer].squeeze() if data.ndim != 2: raise ValueError('tile does not give a 2D slice') im = np.zeros([data.shape[0], data.shape[1], 4]) if data_vmin == 'calc': data_vmin = 0.5*(data.min() + model.min()) if data_vmax == 'calc': data_vmax = 0.5*(data.max() + model.max()) #1. Get masks: upper_mask, center_mask, lower_mask = trisect_image(im.shape, edgepts) #2. Get colorbar'd images gm = data_cmap(center_data(model, data_vmin, data_vmax)) dt = data_cmap(center_data(data, data_vmin, data_vmax)) rs = res_cmap(center_data(residuals, res_vmin, res_vmax)) for a in range(4): im[:,:,a][upper_mask] = rs[:,:,a][upper_mask] im[:,:,a][center_mask] = gm[:,:,a][center_mask] im[:,:,a][lower_mask] = dt[:,:,a][lower_mask] if do_imshow: return plt.imshow(im) else: return im
def function[compare_data_model_residuals, parameter[s, tile, data_vmin, data_vmax, res_vmin, res_vmax, edgepts, do_imshow, data_cmap, res_cmap]]: constant[ Compare the data, model, and residuals of a state. Makes an image of any 2D slice of a state that compares the data, model, and residuals. The upper left portion of the image is the raw data, the central portion the model, and the lower right portion the image. Either plots the image using plt.imshow() or returns a np.ndarray of the image pixels for later use. Parameters ---------- st : peri.ImageState object The state to plot. tile : peri.util.Tile object The slice of the image to plot. Can be any xy, xz, or yz projection, but it must return a valid 2D slice (the slice is squeezed internally). data_vmin : {Float, `calc`}, optional vmin for the imshow for the data and generative model (shared). Default is 'calc' = 0.5(data.min() + model.min()) data_vmax : {Float, `calc`}, optional vmax for the imshow for the data and generative model (shared). Default is 'calc' = 0.5(data.max() + model.max()) res_vmin : Float, optional vmin for the imshow for the residuals. Default is -0.1 Default is 'calc' = 0.5(data.min() + model.min()) res_vmax : Float, optional vmax for the imshow for the residuals. Default is +0.1 edgepts : {Nested list-like, Float, 'calc'}, optional. The vertices of the triangles which determine the splitting of the image. The vertices are at (image corner, (edge, y), and (x,edge), where edge is the appropriate edge of the image. edgepts[0] : (x,y) points for the upper edge edgepts[1] : (x,y) points for the lower edge where `x` is the coordinate along the image's 0th axis and `y` along the images 1st axis. Default is 'calc,' which calculates edge points by splitting the image into 3 regions of equal area. If edgepts is a float scalar, calculates the edge points based on a constant fraction of distance from the edge. do_imshow : Bool If True, imshow's and returns the returned handle. 
If False, returns the array as a [M,N,4] array. data_cmap : matplotlib colormap instance The colormap to use for the data and model. res_cmap : matplotlib colormap instance The colormap to use for the residuals. Returns ------- image : {matplotlib.pyplot.AxesImage, numpy.ndarray} If `do_imshow` == True, the returned handle from imshow. If `do_imshow` == False, an [M,N,4] np.ndarray of the image pixels. ] variable[residuals] assign[=] call[call[name[s].residuals][name[tile].slicer].squeeze, parameter[]] variable[data] assign[=] call[call[name[s].data][name[tile].slicer].squeeze, parameter[]] variable[model] assign[=] call[call[name[s].model][name[tile].slicer].squeeze, parameter[]] if compare[name[data].ndim not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da1b2347a30> variable[im] assign[=] call[name[np].zeros, parameter[list[[<ast.Subscript object at 0x7da1b2347b20>, <ast.Subscript object at 0x7da1b23460b0>, <ast.Constant object at 0x7da1b2346a70>]]]] if compare[name[data_vmin] equal[==] constant[calc]] begin[:] variable[data_vmin] assign[=] binary_operation[constant[0.5] * binary_operation[call[name[data].min, parameter[]] + call[name[model].min, parameter[]]]] if compare[name[data_vmax] equal[==] constant[calc]] begin[:] variable[data_vmax] assign[=] binary_operation[constant[0.5] * binary_operation[call[name[data].max, parameter[]] + call[name[model].max, parameter[]]]] <ast.Tuple object at 0x7da1b23472b0> assign[=] call[name[trisect_image], parameter[name[im].shape, name[edgepts]]] variable[gm] assign[=] call[name[data_cmap], parameter[call[name[center_data], parameter[name[model], name[data_vmin], name[data_vmax]]]]] variable[dt] assign[=] call[name[data_cmap], parameter[call[name[center_data], parameter[name[data], name[data_vmin], name[data_vmax]]]]] variable[rs] assign[=] call[name[res_cmap], parameter[call[name[center_data], parameter[name[residuals], name[res_vmin], name[res_vmax]]]]] for taget[name[a]] in starred[call[name[range], 
parameter[constant[4]]]] begin[:] call[call[name[im]][tuple[[<ast.Slice object at 0x7da1b2347940>, <ast.Slice object at 0x7da1b23452a0>, <ast.Name object at 0x7da1b2344c40>]]]][name[upper_mask]] assign[=] call[call[name[rs]][tuple[[<ast.Slice object at 0x7da1b2345870>, <ast.Slice object at 0x7da1b23454b0>, <ast.Name object at 0x7da1b2344b20>]]]][name[upper_mask]] call[call[name[im]][tuple[[<ast.Slice object at 0x7da1b2345fc0>, <ast.Slice object at 0x7da1b2345c30>, <ast.Name object at 0x7da1b2347be0>]]]][name[center_mask]] assign[=] call[call[name[gm]][tuple[[<ast.Slice object at 0x7da1b2345660>, <ast.Slice object at 0x7da1b2345480>, <ast.Name object at 0x7da1b2346da0>]]]][name[center_mask]] call[call[name[im]][tuple[[<ast.Slice object at 0x7da1b021ef50>, <ast.Slice object at 0x7da1b021ee30>, <ast.Name object at 0x7da1b021ca90>]]]][name[lower_mask]] assign[=] call[call[name[dt]][tuple[[<ast.Slice object at 0x7da1b021ccd0>, <ast.Slice object at 0x7da1b021e110>, <ast.Name object at 0x7da1b021f520>]]]][name[lower_mask]] if name[do_imshow] begin[:] return[call[name[plt].imshow, parameter[name[im]]]]
keyword[def] identifier[compare_data_model_residuals] ( identifier[s] , identifier[tile] , identifier[data_vmin] = literal[string] , identifier[data_vmax] = literal[string] , identifier[res_vmin] =- literal[int] , identifier[res_vmax] = literal[int] , identifier[edgepts] = literal[string] , identifier[do_imshow] = keyword[True] , identifier[data_cmap] = identifier[plt] . identifier[cm] . identifier[bone] , identifier[res_cmap] = identifier[plt] . identifier[cm] . identifier[RdBu] ): literal[string] identifier[residuals] = identifier[s] . identifier[residuals] [ identifier[tile] . identifier[slicer] ]. identifier[squeeze] () identifier[data] = identifier[s] . identifier[data] [ identifier[tile] . identifier[slicer] ]. identifier[squeeze] () identifier[model] = identifier[s] . identifier[model] [ identifier[tile] . identifier[slicer] ]. identifier[squeeze] () keyword[if] identifier[data] . identifier[ndim] != literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[im] = identifier[np] . identifier[zeros] ([ identifier[data] . identifier[shape] [ literal[int] ], identifier[data] . identifier[shape] [ literal[int] ], literal[int] ]) keyword[if] identifier[data_vmin] == literal[string] : identifier[data_vmin] = literal[int] *( identifier[data] . identifier[min] ()+ identifier[model] . identifier[min] ()) keyword[if] identifier[data_vmax] == literal[string] : identifier[data_vmax] = literal[int] *( identifier[data] . identifier[max] ()+ identifier[model] . identifier[max] ()) identifier[upper_mask] , identifier[center_mask] , identifier[lower_mask] = identifier[trisect_image] ( identifier[im] . 
identifier[shape] , identifier[edgepts] ) identifier[gm] = identifier[data_cmap] ( identifier[center_data] ( identifier[model] , identifier[data_vmin] , identifier[data_vmax] )) identifier[dt] = identifier[data_cmap] ( identifier[center_data] ( identifier[data] , identifier[data_vmin] , identifier[data_vmax] )) identifier[rs] = identifier[res_cmap] ( identifier[center_data] ( identifier[residuals] , identifier[res_vmin] , identifier[res_vmax] )) keyword[for] identifier[a] keyword[in] identifier[range] ( literal[int] ): identifier[im] [:,:, identifier[a] ][ identifier[upper_mask] ]= identifier[rs] [:,:, identifier[a] ][ identifier[upper_mask] ] identifier[im] [:,:, identifier[a] ][ identifier[center_mask] ]= identifier[gm] [:,:, identifier[a] ][ identifier[center_mask] ] identifier[im] [:,:, identifier[a] ][ identifier[lower_mask] ]= identifier[dt] [:,:, identifier[a] ][ identifier[lower_mask] ] keyword[if] identifier[do_imshow] : keyword[return] identifier[plt] . identifier[imshow] ( identifier[im] ) keyword[else] : keyword[return] identifier[im]
def compare_data_model_residuals(s, tile, data_vmin='calc', data_vmax='calc', res_vmin=-0.1, res_vmax=0.1, edgepts='calc', do_imshow=True, data_cmap=plt.cm.bone, res_cmap=plt.cm.RdBu): """ Compare the data, model, and residuals of a state. Makes an image of any 2D slice of a state that compares the data, model, and residuals. The upper left portion of the image is the raw data, the central portion the model, and the lower right portion the image. Either plots the image using plt.imshow() or returns a np.ndarray of the image pixels for later use. Parameters ---------- st : peri.ImageState object The state to plot. tile : peri.util.Tile object The slice of the image to plot. Can be any xy, xz, or yz projection, but it must return a valid 2D slice (the slice is squeezed internally). data_vmin : {Float, `calc`}, optional vmin for the imshow for the data and generative model (shared). Default is 'calc' = 0.5(data.min() + model.min()) data_vmax : {Float, `calc`}, optional vmax for the imshow for the data and generative model (shared). Default is 'calc' = 0.5(data.max() + model.max()) res_vmin : Float, optional vmin for the imshow for the residuals. Default is -0.1 Default is 'calc' = 0.5(data.min() + model.min()) res_vmax : Float, optional vmax for the imshow for the residuals. Default is +0.1 edgepts : {Nested list-like, Float, 'calc'}, optional. The vertices of the triangles which determine the splitting of the image. The vertices are at (image corner, (edge, y), and (x,edge), where edge is the appropriate edge of the image. edgepts[0] : (x,y) points for the upper edge edgepts[1] : (x,y) points for the lower edge where `x` is the coordinate along the image's 0th axis and `y` along the images 1st axis. Default is 'calc,' which calculates edge points by splitting the image into 3 regions of equal area. If edgepts is a float scalar, calculates the edge points based on a constant fraction of distance from the edge. 
do_imshow : Bool If True, imshow's and returns the returned handle. If False, returns the array as a [M,N,4] array. data_cmap : matplotlib colormap instance The colormap to use for the data and model. res_cmap : matplotlib colormap instance The colormap to use for the residuals. Returns ------- image : {matplotlib.pyplot.AxesImage, numpy.ndarray} If `do_imshow` == True, the returned handle from imshow. If `do_imshow` == False, an [M,N,4] np.ndarray of the image pixels. """ # This could be modified to alpha the borderline... or to embiggen # the image and slice it more finely residuals = s.residuals[tile.slicer].squeeze() data = s.data[tile.slicer].squeeze() model = s.model[tile.slicer].squeeze() if data.ndim != 2: raise ValueError('tile does not give a 2D slice') # depends on [control=['if'], data=[]] im = np.zeros([data.shape[0], data.shape[1], 4]) if data_vmin == 'calc': data_vmin = 0.5 * (data.min() + model.min()) # depends on [control=['if'], data=['data_vmin']] if data_vmax == 'calc': data_vmax = 0.5 * (data.max() + model.max()) # depends on [control=['if'], data=['data_vmax']] #1. Get masks: (upper_mask, center_mask, lower_mask) = trisect_image(im.shape, edgepts) #2. Get colorbar'd images gm = data_cmap(center_data(model, data_vmin, data_vmax)) dt = data_cmap(center_data(data, data_vmin, data_vmax)) rs = res_cmap(center_data(residuals, res_vmin, res_vmax)) for a in range(4): im[:, :, a][upper_mask] = rs[:, :, a][upper_mask] im[:, :, a][center_mask] = gm[:, :, a][center_mask] im[:, :, a][lower_mask] = dt[:, :, a][lower_mask] # depends on [control=['for'], data=['a']] if do_imshow: return plt.imshow(im) # depends on [control=['if'], data=[]] else: return im
def _get_mutagen_metadata(filepath): """Get mutagen metadata dict from a file.""" try: metadata = mutagen.File(filepath, easy=True) except mutagen.MutagenError: logger.warning("Can't load {} as music file.".format(filepath)) raise return metadata
def function[_get_mutagen_metadata, parameter[filepath]]: constant[Get mutagen metadata dict from a file.] <ast.Try object at 0x7da20c7cacb0> return[name[metadata]]
keyword[def] identifier[_get_mutagen_metadata] ( identifier[filepath] ): literal[string] keyword[try] : identifier[metadata] = identifier[mutagen] . identifier[File] ( identifier[filepath] , identifier[easy] = keyword[True] ) keyword[except] identifier[mutagen] . identifier[MutagenError] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[filepath] )) keyword[raise] keyword[return] identifier[metadata]
def _get_mutagen_metadata(filepath): """Get mutagen metadata dict from a file.""" try: metadata = mutagen.File(filepath, easy=True) # depends on [control=['try'], data=[]] except mutagen.MutagenError: logger.warning("Can't load {} as music file.".format(filepath)) raise # depends on [control=['except'], data=[]] return metadata
def apply_patch(patchfile,cwd=None,posix=False,level=0): """call 'patch -p[level] [--posix] < arg1' posix mode is sometimes necessary. It keeps empty files so that dpkg-source removes their contents. """ if not os.path.exists(patchfile): raise RuntimeError('patchfile "%s" does not exist'%patchfile) fd = open(patchfile,mode='r') level_str = '-p%d'%level args = ['/usr/bin/patch',level_str] if posix: args.append('--posix') log.info('PATCH COMMAND: %s < %s', ' '.join(args), patchfile) log.info(' PATCHING in dir: %s', cwd) # print >> sys.stderr, 'PATCH COMMAND:',' '.join(args),'<',patchfile # print >> sys.stderr, ' PATCHING in dir:',cwd res = subprocess.Popen( args, cwd=cwd, stdin=fd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True ) returncode=None while returncode is None: returncode = res.poll() ready = select.select( [res.stdout,res.stderr],[],[],0.1) # XXX figure out how to do this without reading byte-by-byte if res.stdout in ready[0]: sys.stdout.write(res.stdout.read(1)) sys.stdout.flush() if res.stderr in ready[0]: sys.stderr.write(res.stderr.read(1)) sys.stderr.flush() # finish outputting file sys.stdout.write(res.stdout.read()) sys.stdout.flush() sys.stderr.write(res.stderr.read()) sys.stderr.flush() if returncode: log.error('ERROR running: %s', ' '.join(args)) log.error('ERROR in %s', cwd) # print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),) # print >> sys.stderr, 'ERROR in',cwd raise RuntimeError('returncode %d'%returncode)
def function[apply_patch, parameter[patchfile, cwd, posix, level]]: constant[call 'patch -p[level] [--posix] < arg1' posix mode is sometimes necessary. It keeps empty files so that dpkg-source removes their contents. ] if <ast.UnaryOp object at 0x7da207f989d0> begin[:] <ast.Raise object at 0x7da207f98280> variable[fd] assign[=] call[name[open], parameter[name[patchfile]]] variable[level_str] assign[=] binary_operation[constant[-p%d] <ast.Mod object at 0x7da2590d6920> name[level]] variable[args] assign[=] list[[<ast.Constant object at 0x7da207f9a4d0>, <ast.Name object at 0x7da207f9a530>]] if name[posix] begin[:] call[name[args].append, parameter[constant[--posix]]] call[name[log].info, parameter[constant[PATCH COMMAND: %s < %s], call[constant[ ].join, parameter[name[args]]], name[patchfile]]] call[name[log].info, parameter[constant[ PATCHING in dir: %s], name[cwd]]] variable[res] assign[=] call[name[subprocess].Popen, parameter[name[args]]] variable[returncode] assign[=] constant[None] while compare[name[returncode] is constant[None]] begin[:] variable[returncode] assign[=] call[name[res].poll, parameter[]] variable[ready] assign[=] call[name[select].select, parameter[list[[<ast.Attribute object at 0x7da20e9559f0>, <ast.Attribute object at 0x7da20e954c70>]], list[[]], list[[]], constant[0.1]]] if compare[name[res].stdout in call[name[ready]][constant[0]]] begin[:] call[name[sys].stdout.write, parameter[call[name[res].stdout.read, parameter[constant[1]]]]] call[name[sys].stdout.flush, parameter[]] if compare[name[res].stderr in call[name[ready]][constant[0]]] begin[:] call[name[sys].stderr.write, parameter[call[name[res].stderr.read, parameter[constant[1]]]]] call[name[sys].stderr.flush, parameter[]] call[name[sys].stdout.write, parameter[call[name[res].stdout.read, parameter[]]]] call[name[sys].stdout.flush, parameter[]] call[name[sys].stderr.write, parameter[call[name[res].stderr.read, parameter[]]]] call[name[sys].stderr.flush, parameter[]] if name[returncode] 
begin[:] call[name[log].error, parameter[constant[ERROR running: %s], call[constant[ ].join, parameter[name[args]]]]] call[name[log].error, parameter[constant[ERROR in %s], name[cwd]]] <ast.Raise object at 0x7da18fe909d0>
keyword[def] identifier[apply_patch] ( identifier[patchfile] , identifier[cwd] = keyword[None] , identifier[posix] = keyword[False] , identifier[level] = literal[int] ): literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[patchfile] ): keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[patchfile] ) identifier[fd] = identifier[open] ( identifier[patchfile] , identifier[mode] = literal[string] ) identifier[level_str] = literal[string] % identifier[level] identifier[args] =[ literal[string] , identifier[level_str] ] keyword[if] identifier[posix] : identifier[args] . identifier[append] ( literal[string] ) identifier[log] . identifier[info] ( literal[string] , literal[string] . identifier[join] ( identifier[args] ), identifier[patchfile] ) identifier[log] . identifier[info] ( literal[string] , identifier[cwd] ) identifier[res] = identifier[subprocess] . identifier[Popen] ( identifier[args] , identifier[cwd] = identifier[cwd] , identifier[stdin] = identifier[fd] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] , identifier[universal_newlines] = keyword[True] ) identifier[returncode] = keyword[None] keyword[while] identifier[returncode] keyword[is] keyword[None] : identifier[returncode] = identifier[res] . identifier[poll] () identifier[ready] = identifier[select] . identifier[select] ([ identifier[res] . identifier[stdout] , identifier[res] . identifier[stderr] ],[],[], literal[int] ) keyword[if] identifier[res] . identifier[stdout] keyword[in] identifier[ready] [ literal[int] ]: identifier[sys] . identifier[stdout] . identifier[write] ( identifier[res] . identifier[stdout] . identifier[read] ( literal[int] )) identifier[sys] . identifier[stdout] . identifier[flush] () keyword[if] identifier[res] . identifier[stderr] keyword[in] identifier[ready] [ literal[int] ]: identifier[sys] . identifier[stderr] . 
identifier[write] ( identifier[res] . identifier[stderr] . identifier[read] ( literal[int] )) identifier[sys] . identifier[stderr] . identifier[flush] () identifier[sys] . identifier[stdout] . identifier[write] ( identifier[res] . identifier[stdout] . identifier[read] ()) identifier[sys] . identifier[stdout] . identifier[flush] () identifier[sys] . identifier[stderr] . identifier[write] ( identifier[res] . identifier[stderr] . identifier[read] ()) identifier[sys] . identifier[stderr] . identifier[flush] () keyword[if] identifier[returncode] : identifier[log] . identifier[error] ( literal[string] , literal[string] . identifier[join] ( identifier[args] )) identifier[log] . identifier[error] ( literal[string] , identifier[cwd] ) keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[returncode] )
def apply_patch(patchfile, cwd=None, posix=False, level=0): """call 'patch -p[level] [--posix] < arg1' posix mode is sometimes necessary. It keeps empty files so that dpkg-source removes their contents. """ if not os.path.exists(patchfile): raise RuntimeError('patchfile "%s" does not exist' % patchfile) # depends on [control=['if'], data=[]] fd = open(patchfile, mode='r') level_str = '-p%d' % level args = ['/usr/bin/patch', level_str] if posix: args.append('--posix') # depends on [control=['if'], data=[]] log.info('PATCH COMMAND: %s < %s', ' '.join(args), patchfile) log.info(' PATCHING in dir: %s', cwd) # print >> sys.stderr, 'PATCH COMMAND:',' '.join(args),'<',patchfile # print >> sys.stderr, ' PATCHING in dir:',cwd res = subprocess.Popen(args, cwd=cwd, stdin=fd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) returncode = None while returncode is None: returncode = res.poll() ready = select.select([res.stdout, res.stderr], [], [], 0.1) # XXX figure out how to do this without reading byte-by-byte if res.stdout in ready[0]: sys.stdout.write(res.stdout.read(1)) sys.stdout.flush() # depends on [control=['if'], data=[]] if res.stderr in ready[0]: sys.stderr.write(res.stderr.read(1)) sys.stderr.flush() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['returncode']] # finish outputting file sys.stdout.write(res.stdout.read()) sys.stdout.flush() sys.stderr.write(res.stderr.read()) sys.stderr.flush() if returncode: log.error('ERROR running: %s', ' '.join(args)) log.error('ERROR in %s', cwd) # print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),) # print >> sys.stderr, 'ERROR in',cwd raise RuntimeError('returncode %d' % returncode) # depends on [control=['if'], data=[]]
def get_event_display_settings(self, id, **data): """ GET /events/:id/display_settings/ Retrieves the display settings for an event. """ return self.get("/events/{0}/display_settings/".format(id), data=data)
def function[get_event_display_settings, parameter[self, id]]: constant[ GET /events/:id/display_settings/ Retrieves the display settings for an event. ] return[call[name[self].get, parameter[call[constant[/events/{0}/display_settings/].format, parameter[name[id]]]]]]
keyword[def] identifier[get_event_display_settings] ( identifier[self] , identifier[id] ,** identifier[data] ): literal[string] keyword[return] identifier[self] . identifier[get] ( literal[string] . identifier[format] ( identifier[id] ), identifier[data] = identifier[data] )
def get_event_display_settings(self, id, **data): """ GET /events/:id/display_settings/ Retrieves the display settings for an event. """ return self.get('/events/{0}/display_settings/'.format(id), data=data)
def send_email_validation(request): """Receiver for request-created signal to send email notification.""" token = EmailConfirmationSerializer().create_token( request.id, dict(email=request.sender_email) ) pid, record = get_record(request.recid) _send_notification( request.sender_email, _("Access request verification"), "zenodo_accessrequests/emails/validate_email.tpl", request=request, record=record, pid=pid, days=timedelta( seconds=current_app.config["ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN"] ).days, confirm_link=url_for( "invenio_records_ui.recid_access_request_email_confirm", pid_value=request.recid, token=token, _external=True, ) )
def function[send_email_validation, parameter[request]]: constant[Receiver for request-created signal to send email notification.] variable[token] assign[=] call[call[name[EmailConfirmationSerializer], parameter[]].create_token, parameter[name[request].id, call[name[dict], parameter[]]]] <ast.Tuple object at 0x7da18bc71a80> assign[=] call[name[get_record], parameter[name[request].recid]] call[name[_send_notification], parameter[name[request].sender_email, call[name[_], parameter[constant[Access request verification]]], constant[zenodo_accessrequests/emails/validate_email.tpl]]]
keyword[def] identifier[send_email_validation] ( identifier[request] ): literal[string] identifier[token] = identifier[EmailConfirmationSerializer] (). identifier[create_token] ( identifier[request] . identifier[id] , identifier[dict] ( identifier[email] = identifier[request] . identifier[sender_email] ) ) identifier[pid] , identifier[record] = identifier[get_record] ( identifier[request] . identifier[recid] ) identifier[_send_notification] ( identifier[request] . identifier[sender_email] , identifier[_] ( literal[string] ), literal[string] , identifier[request] = identifier[request] , identifier[record] = identifier[record] , identifier[pid] = identifier[pid] , identifier[days] = identifier[timedelta] ( identifier[seconds] = identifier[current_app] . identifier[config] [ literal[string] ] ). identifier[days] , identifier[confirm_link] = identifier[url_for] ( literal[string] , identifier[pid_value] = identifier[request] . identifier[recid] , identifier[token] = identifier[token] , identifier[_external] = keyword[True] , ) )
def send_email_validation(request): """Receiver for request-created signal to send email notification.""" token = EmailConfirmationSerializer().create_token(request.id, dict(email=request.sender_email)) (pid, record) = get_record(request.recid) _send_notification(request.sender_email, _('Access request verification'), 'zenodo_accessrequests/emails/validate_email.tpl', request=request, record=record, pid=pid, days=timedelta(seconds=current_app.config['ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN']).days, confirm_link=url_for('invenio_records_ui.recid_access_request_email_confirm', pid_value=request.recid, token=token, _external=True))
def _query(self, query_type, query_str, verbose=False): """Internal query method for the VirusTotal Service Args: query_type(str): The type of query (either 'file' or 'url') query_str (str): The file hash or domain/url to be queried """ # First check query cache cached = self.query_cache.get(query_str) if cached: if verbose: print('Returning Cached VT Query Results') return cached # Not in cache so make the actual query if query_type == 'file': response = requests.get('https://www.virustotal.com/vtapi/v2/file/report', params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1}) else: response = requests.post('https://www.virustotal.com/vtapi/v2/url/report', params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1}) # Make sure we got a json blob back try: vt_output = response.json() except ValueError: error_msg = 'VirusTotal no valid response, throttling and trying again...' if self.throttle: if verbose: print(error_msg) time.sleep(30) return self._query(query_type, query_str) return {'vt_error': error_msg} # Check for not-found if not vt_output or vt_output['response_code'] == 0: output = {'query': query_str, 'not_found': True} self.query_cache.set(query_str, output) return output # Exclude some fields (if summary=True) output = {field: vt_output[field] for field in vt_output.keys() if field not in self.exclude} # Put the file sha in the output output['query'] = query_str # Organize the scans fields scan_results = collections.Counter() for scan in vt_output['scans'].values(): if 'result' in scan: if scan['result']: scan_results[scan['result']] += 1 output['scan_results'] = scan_results.most_common(5) # Pull results in Cache self.query_cache.set(query_str, output) # Return results return output
def function[_query, parameter[self, query_type, query_str, verbose]]: constant[Internal query method for the VirusTotal Service Args: query_type(str): The type of query (either 'file' or 'url') query_str (str): The file hash or domain/url to be queried ] variable[cached] assign[=] call[name[self].query_cache.get, parameter[name[query_str]]] if name[cached] begin[:] if name[verbose] begin[:] call[name[print], parameter[constant[Returning Cached VT Query Results]]] return[name[cached]] if compare[name[query_type] equal[==] constant[file]] begin[:] variable[response] assign[=] call[name[requests].get, parameter[constant[https://www.virustotal.com/vtapi/v2/file/report]]] <ast.Try object at 0x7da18bcca2c0> if <ast.BoolOp object at 0x7da18bcc8310> begin[:] variable[output] assign[=] dictionary[[<ast.Constant object at 0x7da18bccacb0>, <ast.Constant object at 0x7da18bccbc40>], [<ast.Name object at 0x7da18bcc9cf0>, <ast.Constant object at 0x7da18bcc8e20>]] call[name[self].query_cache.set, parameter[name[query_str], name[output]]] return[name[output]] variable[output] assign[=] <ast.DictComp object at 0x7da18bcc8d90> call[name[output]][constant[query]] assign[=] name[query_str] variable[scan_results] assign[=] call[name[collections].Counter, parameter[]] for taget[name[scan]] in starred[call[call[name[vt_output]][constant[scans]].values, parameter[]]] begin[:] if compare[constant[result] in name[scan]] begin[:] if call[name[scan]][constant[result]] begin[:] <ast.AugAssign object at 0x7da18bcca3e0> call[name[output]][constant[scan_results]] assign[=] call[name[scan_results].most_common, parameter[constant[5]]] call[name[self].query_cache.set, parameter[name[query_str], name[output]]] return[name[output]]
keyword[def] identifier[_query] ( identifier[self] , identifier[query_type] , identifier[query_str] , identifier[verbose] = keyword[False] ): literal[string] identifier[cached] = identifier[self] . identifier[query_cache] . identifier[get] ( identifier[query_str] ) keyword[if] identifier[cached] : keyword[if] identifier[verbose] : identifier[print] ( literal[string] ) keyword[return] identifier[cached] keyword[if] identifier[query_type] == literal[string] : identifier[response] = identifier[requests] . identifier[get] ( literal[string] , identifier[params] ={ literal[string] : identifier[self] . identifier[apikey] , literal[string] : identifier[query_str] , literal[string] : literal[int] }) keyword[else] : identifier[response] = identifier[requests] . identifier[post] ( literal[string] , identifier[params] ={ literal[string] : identifier[self] . identifier[apikey] , literal[string] : identifier[query_str] , literal[string] : literal[int] }) keyword[try] : identifier[vt_output] = identifier[response] . identifier[json] () keyword[except] identifier[ValueError] : identifier[error_msg] = literal[string] keyword[if] identifier[self] . identifier[throttle] : keyword[if] identifier[verbose] : identifier[print] ( identifier[error_msg] ) identifier[time] . identifier[sleep] ( literal[int] ) keyword[return] identifier[self] . identifier[_query] ( identifier[query_type] , identifier[query_str] ) keyword[return] { literal[string] : identifier[error_msg] } keyword[if] keyword[not] identifier[vt_output] keyword[or] identifier[vt_output] [ literal[string] ]== literal[int] : identifier[output] ={ literal[string] : identifier[query_str] , literal[string] : keyword[True] } identifier[self] . identifier[query_cache] . identifier[set] ( identifier[query_str] , identifier[output] ) keyword[return] identifier[output] identifier[output] ={ identifier[field] : identifier[vt_output] [ identifier[field] ] keyword[for] identifier[field] keyword[in] identifier[vt_output] . 
identifier[keys] () keyword[if] identifier[field] keyword[not] keyword[in] identifier[self] . identifier[exclude] } identifier[output] [ literal[string] ]= identifier[query_str] identifier[scan_results] = identifier[collections] . identifier[Counter] () keyword[for] identifier[scan] keyword[in] identifier[vt_output] [ literal[string] ]. identifier[values] (): keyword[if] literal[string] keyword[in] identifier[scan] : keyword[if] identifier[scan] [ literal[string] ]: identifier[scan_results] [ identifier[scan] [ literal[string] ]]+= literal[int] identifier[output] [ literal[string] ]= identifier[scan_results] . identifier[most_common] ( literal[int] ) identifier[self] . identifier[query_cache] . identifier[set] ( identifier[query_str] , identifier[output] ) keyword[return] identifier[output]
def _query(self, query_type, query_str, verbose=False): """Internal query method for the VirusTotal Service Args: query_type(str): The type of query (either 'file' or 'url') query_str (str): The file hash or domain/url to be queried """ # First check query cache cached = self.query_cache.get(query_str) if cached: if verbose: print('Returning Cached VT Query Results') # depends on [control=['if'], data=[]] return cached # depends on [control=['if'], data=[]] # Not in cache so make the actual query if query_type == 'file': response = requests.get('https://www.virustotal.com/vtapi/v2/file/report', params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1}) # depends on [control=['if'], data=[]] else: response = requests.post('https://www.virustotal.com/vtapi/v2/url/report', params={'apikey': self.apikey, 'resource': query_str, 'allinfo': 1}) # Make sure we got a json blob back try: vt_output = response.json() # depends on [control=['try'], data=[]] except ValueError: error_msg = 'VirusTotal no valid response, throttling and trying again...' 
if self.throttle: if verbose: print(error_msg) # depends on [control=['if'], data=[]] time.sleep(30) return self._query(query_type, query_str) # depends on [control=['if'], data=[]] return {'vt_error': error_msg} # depends on [control=['except'], data=[]] # Check for not-found if not vt_output or vt_output['response_code'] == 0: output = {'query': query_str, 'not_found': True} self.query_cache.set(query_str, output) return output # depends on [control=['if'], data=[]] # Exclude some fields (if summary=True) output = {field: vt_output[field] for field in vt_output.keys() if field not in self.exclude} # Put the file sha in the output output['query'] = query_str # Organize the scans fields scan_results = collections.Counter() for scan in vt_output['scans'].values(): if 'result' in scan: if scan['result']: scan_results[scan['result']] += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['scan']] # depends on [control=['for'], data=['scan']] output['scan_results'] = scan_results.most_common(5) # Pull results in Cache self.query_cache.set(query_str, output) # Return results return output
def meta(self, meta): '''Extract model metadata for lua script stdnet/lib/lua/odm.lua''' data = meta.as_dict() data['namespace'] = self.basekey(meta) return data
def function[meta, parameter[self, meta]]: constant[Extract model metadata for lua script stdnet/lib/lua/odm.lua] variable[data] assign[=] call[name[meta].as_dict, parameter[]] call[name[data]][constant[namespace]] assign[=] call[name[self].basekey, parameter[name[meta]]] return[name[data]]
keyword[def] identifier[meta] ( identifier[self] , identifier[meta] ): literal[string] identifier[data] = identifier[meta] . identifier[as_dict] () identifier[data] [ literal[string] ]= identifier[self] . identifier[basekey] ( identifier[meta] ) keyword[return] identifier[data]
def meta(self, meta): """Extract model metadata for lua script stdnet/lib/lua/odm.lua""" data = meta.as_dict() data['namespace'] = self.basekey(meta) return data
def selectShapePoint(self, point): """Select the first shape created which contains this point.""" self.deSelectShape() if self.selectedVertex(): # A vertex is marked for selection. index, shape = self.hVertex, self.hShape shape.highlightVertex(index, shape.MOVE_VERTEX) self.selectShape(shape) return for shape in reversed(self.shapes): if self.isVisible(shape) and shape.containsPoint(point): self.selectShape(shape) self.calculateOffsets(shape, point) return
def function[selectShapePoint, parameter[self, point]]: constant[Select the first shape created which contains this point.] call[name[self].deSelectShape, parameter[]] if call[name[self].selectedVertex, parameter[]] begin[:] <ast.Tuple object at 0x7da1b2059cc0> assign[=] tuple[[<ast.Attribute object at 0x7da1b2059b40>, <ast.Attribute object at 0x7da1b2059ba0>]] call[name[shape].highlightVertex, parameter[name[index], name[shape].MOVE_VERTEX]] call[name[self].selectShape, parameter[name[shape]]] return[None] for taget[name[shape]] in starred[call[name[reversed], parameter[name[self].shapes]]] begin[:] if <ast.BoolOp object at 0x7da1b2058e50> begin[:] call[name[self].selectShape, parameter[name[shape]]] call[name[self].calculateOffsets, parameter[name[shape], name[point]]] return[None]
keyword[def] identifier[selectShapePoint] ( identifier[self] , identifier[point] ): literal[string] identifier[self] . identifier[deSelectShape] () keyword[if] identifier[self] . identifier[selectedVertex] (): identifier[index] , identifier[shape] = identifier[self] . identifier[hVertex] , identifier[self] . identifier[hShape] identifier[shape] . identifier[highlightVertex] ( identifier[index] , identifier[shape] . identifier[MOVE_VERTEX] ) identifier[self] . identifier[selectShape] ( identifier[shape] ) keyword[return] keyword[for] identifier[shape] keyword[in] identifier[reversed] ( identifier[self] . identifier[shapes] ): keyword[if] identifier[self] . identifier[isVisible] ( identifier[shape] ) keyword[and] identifier[shape] . identifier[containsPoint] ( identifier[point] ): identifier[self] . identifier[selectShape] ( identifier[shape] ) identifier[self] . identifier[calculateOffsets] ( identifier[shape] , identifier[point] ) keyword[return]
def selectShapePoint(self, point): """Select the first shape created which contains this point.""" self.deSelectShape() if self.selectedVertex(): # A vertex is marked for selection. (index, shape) = (self.hVertex, self.hShape) shape.highlightVertex(index, shape.MOVE_VERTEX) self.selectShape(shape) return # depends on [control=['if'], data=[]] for shape in reversed(self.shapes): if self.isVisible(shape) and shape.containsPoint(point): self.selectShape(shape) self.calculateOffsets(shape, point) return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['shape']]
def _customized_dumper(container, dumper=Dumper): """ Coutnerpart of :func:`_customized_loader` for dumpers. """ def container_representer(dumper, data, mapping_tag=_MAPPING_TAG): """Container representer. """ return dumper.represent_mapping(mapping_tag, data.items()) def ustr_representer(dumper, data): """Unicode string representer""" tag = "tag:yaml.org,2002:python/unicode" return dumper.represent_scalar(tag, data) try: dumper.add_representer(unicode, ustr_representer) except NameError: pass if type(container) != dict: dumper.add_representer(container, container_representer) return dumper
def function[_customized_dumper, parameter[container, dumper]]: constant[ Coutnerpart of :func:`_customized_loader` for dumpers. ] def function[container_representer, parameter[dumper, data, mapping_tag]]: constant[Container representer. ] return[call[name[dumper].represent_mapping, parameter[name[mapping_tag], call[name[data].items, parameter[]]]]] def function[ustr_representer, parameter[dumper, data]]: constant[Unicode string representer] variable[tag] assign[=] constant[tag:yaml.org,2002:python/unicode] return[call[name[dumper].represent_scalar, parameter[name[tag], name[data]]]] <ast.Try object at 0x7da18c4cfa00> if compare[call[name[type], parameter[name[container]]] not_equal[!=] name[dict]] begin[:] call[name[dumper].add_representer, parameter[name[container], name[container_representer]]] return[name[dumper]]
keyword[def] identifier[_customized_dumper] ( identifier[container] , identifier[dumper] = identifier[Dumper] ): literal[string] keyword[def] identifier[container_representer] ( identifier[dumper] , identifier[data] , identifier[mapping_tag] = identifier[_MAPPING_TAG] ): literal[string] keyword[return] identifier[dumper] . identifier[represent_mapping] ( identifier[mapping_tag] , identifier[data] . identifier[items] ()) keyword[def] identifier[ustr_representer] ( identifier[dumper] , identifier[data] ): literal[string] identifier[tag] = literal[string] keyword[return] identifier[dumper] . identifier[represent_scalar] ( identifier[tag] , identifier[data] ) keyword[try] : identifier[dumper] . identifier[add_representer] ( identifier[unicode] , identifier[ustr_representer] ) keyword[except] identifier[NameError] : keyword[pass] keyword[if] identifier[type] ( identifier[container] )!= identifier[dict] : identifier[dumper] . identifier[add_representer] ( identifier[container] , identifier[container_representer] ) keyword[return] identifier[dumper]
def _customized_dumper(container, dumper=Dumper): """ Coutnerpart of :func:`_customized_loader` for dumpers. """ def container_representer(dumper, data, mapping_tag=_MAPPING_TAG): """Container representer. """ return dumper.represent_mapping(mapping_tag, data.items()) def ustr_representer(dumper, data): """Unicode string representer""" tag = 'tag:yaml.org,2002:python/unicode' return dumper.represent_scalar(tag, data) try: dumper.add_representer(unicode, ustr_representer) # depends on [control=['try'], data=[]] except NameError: pass # depends on [control=['except'], data=[]] if type(container) != dict: dumper.add_representer(container, container_representer) # depends on [control=['if'], data=[]] return dumper
def sample(self, num_rows): """Sample new rows. Args: num_rows(int): Number of rows to sample Returns: pandas.DataFrame """ sampled_values = [] for i in range(num_rows): sampled_values.append(self._sample_row()) return pd.DataFrame(sampled_values, columns=self.columns)
def function[sample, parameter[self, num_rows]]: constant[Sample new rows. Args: num_rows(int): Number of rows to sample Returns: pandas.DataFrame ] variable[sampled_values] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[name[num_rows]]]] begin[:] call[name[sampled_values].append, parameter[call[name[self]._sample_row, parameter[]]]] return[call[name[pd].DataFrame, parameter[name[sampled_values]]]]
keyword[def] identifier[sample] ( identifier[self] , identifier[num_rows] ): literal[string] identifier[sampled_values] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_rows] ): identifier[sampled_values] . identifier[append] ( identifier[self] . identifier[_sample_row] ()) keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[sampled_values] , identifier[columns] = identifier[self] . identifier[columns] )
def sample(self, num_rows): """Sample new rows. Args: num_rows(int): Number of rows to sample Returns: pandas.DataFrame """ sampled_values = [] for i in range(num_rows): sampled_values.append(self._sample_row()) # depends on [control=['for'], data=[]] return pd.DataFrame(sampled_values, columns=self.columns)
def parse_url(url, extra_schemes={}): """ parse a munge url type:URL URL.type examples: file.yaml yaml:file.txt http://example.com/file.yaml yaml:http://example.com/file.txt mysql://user:password@localhost/database/table django:///home/user/project/settings_dir.settings/app_name/model """ if not url: raise ValueError('url cannot be empty') cls = None res = urlsplit(url) # check config first if res.scheme in extra_schemes: # TODO - nerge these with any existing and recurse addr = extra_schemes[res.scheme] if 'type' in addr: cls = find_cls(res.scheme, extra_schemes) if 'url' in addr: url = addr['url'] if cls: res = urlsplit(url) return MungeURL(cls, res) # TODO - nerge these with any existing and recurse return parse_url(url) if res.scheme: cls = find_cls(res.scheme, extra_schemes) # check file extension if not cls: (rest, sep, ext) = url.rpartition('.') cls = find_cls(ext, extra_schemes) if not cls: raise ValueError('unable to find codec for %s' % url) return MungeURL(cls, res)
def function[parse_url, parameter[url, extra_schemes]]: constant[ parse a munge url type:URL URL.type examples: file.yaml yaml:file.txt http://example.com/file.yaml yaml:http://example.com/file.txt mysql://user:password@localhost/database/table django:///home/user/project/settings_dir.settings/app_name/model ] if <ast.UnaryOp object at 0x7da20c6a8670> begin[:] <ast.Raise object at 0x7da20c6a83a0> variable[cls] assign[=] constant[None] variable[res] assign[=] call[name[urlsplit], parameter[name[url]]] if compare[name[res].scheme in name[extra_schemes]] begin[:] variable[addr] assign[=] call[name[extra_schemes]][name[res].scheme] if compare[constant[type] in name[addr]] begin[:] variable[cls] assign[=] call[name[find_cls], parameter[name[res].scheme, name[extra_schemes]]] if compare[constant[url] in name[addr]] begin[:] variable[url] assign[=] call[name[addr]][constant[url]] if name[cls] begin[:] variable[res] assign[=] call[name[urlsplit], parameter[name[url]]] return[call[name[MungeURL], parameter[name[cls], name[res]]]] return[call[name[parse_url], parameter[name[url]]]] if name[res].scheme begin[:] variable[cls] assign[=] call[name[find_cls], parameter[name[res].scheme, name[extra_schemes]]] if <ast.UnaryOp object at 0x7da20c993d00> begin[:] <ast.Tuple object at 0x7da20c993ac0> assign[=] call[name[url].rpartition, parameter[constant[.]]] variable[cls] assign[=] call[name[find_cls], parameter[name[ext], name[extra_schemes]]] if <ast.UnaryOp object at 0x7da20c991240> begin[:] <ast.Raise object at 0x7da20c993e80> return[call[name[MungeURL], parameter[name[cls], name[res]]]]
keyword[def] identifier[parse_url] ( identifier[url] , identifier[extra_schemes] ={}): literal[string] keyword[if] keyword[not] identifier[url] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[cls] = keyword[None] identifier[res] = identifier[urlsplit] ( identifier[url] ) keyword[if] identifier[res] . identifier[scheme] keyword[in] identifier[extra_schemes] : identifier[addr] = identifier[extra_schemes] [ identifier[res] . identifier[scheme] ] keyword[if] literal[string] keyword[in] identifier[addr] : identifier[cls] = identifier[find_cls] ( identifier[res] . identifier[scheme] , identifier[extra_schemes] ) keyword[if] literal[string] keyword[in] identifier[addr] : identifier[url] = identifier[addr] [ literal[string] ] keyword[if] identifier[cls] : identifier[res] = identifier[urlsplit] ( identifier[url] ) keyword[return] identifier[MungeURL] ( identifier[cls] , identifier[res] ) keyword[return] identifier[parse_url] ( identifier[url] ) keyword[if] identifier[res] . identifier[scheme] : identifier[cls] = identifier[find_cls] ( identifier[res] . identifier[scheme] , identifier[extra_schemes] ) keyword[if] keyword[not] identifier[cls] : ( identifier[rest] , identifier[sep] , identifier[ext] )= identifier[url] . identifier[rpartition] ( literal[string] ) identifier[cls] = identifier[find_cls] ( identifier[ext] , identifier[extra_schemes] ) keyword[if] keyword[not] identifier[cls] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[url] ) keyword[return] identifier[MungeURL] ( identifier[cls] , identifier[res] )
def parse_url(url, extra_schemes={}): """ parse a munge url type:URL URL.type examples: file.yaml yaml:file.txt http://example.com/file.yaml yaml:http://example.com/file.txt mysql://user:password@localhost/database/table django:///home/user/project/settings_dir.settings/app_name/model """ if not url: raise ValueError('url cannot be empty') # depends on [control=['if'], data=[]] cls = None res = urlsplit(url) # check config first if res.scheme in extra_schemes: # TODO - nerge these with any existing and recurse addr = extra_schemes[res.scheme] if 'type' in addr: cls = find_cls(res.scheme, extra_schemes) # depends on [control=['if'], data=[]] if 'url' in addr: url = addr['url'] if cls: res = urlsplit(url) return MungeURL(cls, res) # depends on [control=['if'], data=[]] # TODO - nerge these with any existing and recurse return parse_url(url) # depends on [control=['if'], data=['addr']] # depends on [control=['if'], data=['extra_schemes']] if res.scheme: cls = find_cls(res.scheme, extra_schemes) # depends on [control=['if'], data=[]] # check file extension if not cls: (rest, sep, ext) = url.rpartition('.') cls = find_cls(ext, extra_schemes) if not cls: raise ValueError('unable to find codec for %s' % url) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return MungeURL(cls, res)
def enable_secure_boot(self, secure_boot_enable): """Enable/Disable secure boot on the server. Caller needs to reset the server after issuing this command to bring this into effect. :param secure_boot_enable: True, if secure boot needs to be enabled for next boot, else False. :raises: InvalidInputError, if the validation of the input fails :raises: SushyError, on an error from iLO. """ if not isinstance(secure_boot_enable, bool): msg = ('The parameter "%(parameter)s" value "%(value)s" is ' 'invalid. Valid values are: True/False.' % {'parameter': 'secure_boot_enable', 'value': secure_boot_enable}) raise exception.InvalidInputError(msg) self._conn.patch(self.path, data={'SecureBootEnable': secure_boot_enable})
def function[enable_secure_boot, parameter[self, secure_boot_enable]]: constant[Enable/Disable secure boot on the server. Caller needs to reset the server after issuing this command to bring this into effect. :param secure_boot_enable: True, if secure boot needs to be enabled for next boot, else False. :raises: InvalidInputError, if the validation of the input fails :raises: SushyError, on an error from iLO. ] if <ast.UnaryOp object at 0x7da1b1a6d960> begin[:] variable[msg] assign[=] binary_operation[constant[The parameter "%(parameter)s" value "%(value)s" is invalid. Valid values are: True/False.] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b1a6dcc0>, <ast.Constant object at 0x7da1b1a6d030>], [<ast.Constant object at 0x7da1b1a6dd50>, <ast.Name object at 0x7da1b1a6d7e0>]]] <ast.Raise object at 0x7da1b1a6e0e0> call[name[self]._conn.patch, parameter[name[self].path]]
keyword[def] identifier[enable_secure_boot] ( identifier[self] , identifier[secure_boot_enable] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[secure_boot_enable] , identifier[bool] ): identifier[msg] =( literal[string] literal[string] % { literal[string] : literal[string] , literal[string] : identifier[secure_boot_enable] }) keyword[raise] identifier[exception] . identifier[InvalidInputError] ( identifier[msg] ) identifier[self] . identifier[_conn] . identifier[patch] ( identifier[self] . identifier[path] , identifier[data] ={ literal[string] : identifier[secure_boot_enable] })
def enable_secure_boot(self, secure_boot_enable): """Enable/Disable secure boot on the server. Caller needs to reset the server after issuing this command to bring this into effect. :param secure_boot_enable: True, if secure boot needs to be enabled for next boot, else False. :raises: InvalidInputError, if the validation of the input fails :raises: SushyError, on an error from iLO. """ if not isinstance(secure_boot_enable, bool): msg = 'The parameter "%(parameter)s" value "%(value)s" is invalid. Valid values are: True/False.' % {'parameter': 'secure_boot_enable', 'value': secure_boot_enable} raise exception.InvalidInputError(msg) # depends on [control=['if'], data=[]] self._conn.patch(self.path, data={'SecureBootEnable': secure_boot_enable})
def work_get(self, wallet, account): """ Retrieves work for **account** in **wallet** .. enable_control required .. version 8.0 required :param wallet: Wallet to get account work for :type wallet: str :param account: Account to get work for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.work_get( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... account="xrb_1111111111111111111111111111111111111111111111111111hifc8npp" ... ) "432e5cf728c90f4f" """ wallet = self._process_value(wallet, 'wallet') account = self._process_value(account, 'account') payload = {"wallet": wallet, "account": account} resp = self.call('work_get', payload) return resp['work']
def function[work_get, parameter[self, wallet, account]]: constant[ Retrieves work for **account** in **wallet** .. enable_control required .. version 8.0 required :param wallet: Wallet to get account work for :type wallet: str :param account: Account to get work for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.work_get( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... account="xrb_1111111111111111111111111111111111111111111111111111hifc8npp" ... ) "432e5cf728c90f4f" ] variable[wallet] assign[=] call[name[self]._process_value, parameter[name[wallet], constant[wallet]]] variable[account] assign[=] call[name[self]._process_value, parameter[name[account], constant[account]]] variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b2538e20>, <ast.Constant object at 0x7da1b2538fd0>], [<ast.Name object at 0x7da1b2538ca0>, <ast.Name object at 0x7da1b253b490>]] variable[resp] assign[=] call[name[self].call, parameter[constant[work_get], name[payload]]] return[call[name[resp]][constant[work]]]
keyword[def] identifier[work_get] ( identifier[self] , identifier[wallet] , identifier[account] ): literal[string] identifier[wallet] = identifier[self] . identifier[_process_value] ( identifier[wallet] , literal[string] ) identifier[account] = identifier[self] . identifier[_process_value] ( identifier[account] , literal[string] ) identifier[payload] ={ literal[string] : identifier[wallet] , literal[string] : identifier[account] } identifier[resp] = identifier[self] . identifier[call] ( literal[string] , identifier[payload] ) keyword[return] identifier[resp] [ literal[string] ]
def work_get(self, wallet, account): """ Retrieves work for **account** in **wallet** .. enable_control required .. version 8.0 required :param wallet: Wallet to get account work for :type wallet: str :param account: Account to get work for :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.work_get( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... account="xrb_1111111111111111111111111111111111111111111111111111hifc8npp" ... ) "432e5cf728c90f4f" """ wallet = self._process_value(wallet, 'wallet') account = self._process_value(account, 'account') payload = {'wallet': wallet, 'account': account} resp = self.call('work_get', payload) return resp['work']
def _direct_upload(file_obj, file_name, fields, session, samples_resource): """Uploads a single file-like object via our validating proxy. Maintains compatibility with direct upload to a user's S3 bucket as well in case we disable our validating proxy. Parameters ---------- file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the case of paired files, they will be interleaved and uploaded uncompressed. In the case of a single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'. file_name : `string` The file_name you wish to associate this fastx file with at One Codex. fields : `dict` Additional data fields to include as JSON in the POST. Must include 'sample_id' and 'upload_url' at a minimum. samples_resource : `onecodex.models.Samples` Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline. Raises ------ RetryableUploadException In cases where the proxy is temporarily down or we experience connectivity issues UploadException In other cases where the proxy determines the upload is invalid and should *not* be retried. 
""" # need an OrderedDict to preserve field order for S3, required for Python 2.7 multipart_fields = OrderedDict() for k, v in fields["additional_fields"].items(): multipart_fields[str(k)] = str(v) # this attribute is only in FASTXInterleave and FilePassthru mime_type = getattr(file_obj, "mime_type", "text/plain") multipart_fields["file"] = (file_name, file_obj, mime_type) encoder = MultipartEncoder(multipart_fields) upload_request = None try: upload_request = session.post( fields["upload_url"], data=encoder, headers={"Content-Type": encoder.content_type}, auth={}, ) except requests.exceptions.ConnectionError: pass # If we expect a status *always* try to check it, # waiting up to 4 hours for buffering to complete (~30-50GB file gzipped) if "status_url" in fields["additional_fields"]: now = time.time() while time.time() < (now + 60 * 60 * 4): try: resp = session.post( fields["additional_fields"]["status_url"], json={"sample_id": fields["sample_id"]}, ) resp.raise_for_status() except (ValueError, requests.exceptions.RequestException) as e: logging.debug("Retrying due to error: {}".format(e)) raise RetryableUploadException( "Unexpected failure of direct upload proxy. Retrying..." ) if resp.json() and resp.json().get("complete", True) is False: logging.debug("Blocking on waiting for proxy to complete (in progress)...") time.sleep(30) else: break # Return is successfully processed if resp.json().get("code") in [200, 201]: file_obj.close() return elif resp.json().get("code") == 500: logging.debug("Retrying due to 500 from proxy...") raise RetryableUploadException("Unexpected issue with direct upload proxy. 
Retrying...") else: raise_api_error(resp, state="upload") # Direct to S3 case else: file_obj.close() if upload_request.status_code not in [200, 201]: raise RetryableUploadException("Unknown connectivity issue with proxy upload.") # Issue a callback -- this only happens in the direct-to-S3 case try: if not fields["additional_fields"].get("callback_url"): samples_resource.confirm_upload( {"sample_id": fields["sample_id"], "upload_type": "standard"} ) except requests.exceptions.HTTPError as e: raise_api_error(e.response, state="callback") except requests.exceptions.ConnectionError: raise_connectivity_error()
def function[_direct_upload, parameter[file_obj, file_name, fields, session, samples_resource]]: constant[Uploads a single file-like object via our validating proxy. Maintains compatibility with direct upload to a user's S3 bucket as well in case we disable our validating proxy. Parameters ---------- file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the case of paired files, they will be interleaved and uploaded uncompressed. In the case of a single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'. file_name : `string` The file_name you wish to associate this fastx file with at One Codex. fields : `dict` Additional data fields to include as JSON in the POST. Must include 'sample_id' and 'upload_url' at a minimum. samples_resource : `onecodex.models.Samples` Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline. Raises ------ RetryableUploadException In cases where the proxy is temporarily down or we experience connectivity issues UploadException In other cases where the proxy determines the upload is invalid and should *not* be retried. 
] variable[multipart_fields] assign[=] call[name[OrderedDict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da204564e50>, <ast.Name object at 0x7da204566920>]]] in starred[call[call[name[fields]][constant[additional_fields]].items, parameter[]]] begin[:] call[name[multipart_fields]][call[name[str], parameter[name[k]]]] assign[=] call[name[str], parameter[name[v]]] variable[mime_type] assign[=] call[name[getattr], parameter[name[file_obj], constant[mime_type], constant[text/plain]]] call[name[multipart_fields]][constant[file]] assign[=] tuple[[<ast.Name object at 0x7da204567730>, <ast.Name object at 0x7da204564400>, <ast.Name object at 0x7da204564130>]] variable[encoder] assign[=] call[name[MultipartEncoder], parameter[name[multipart_fields]]] variable[upload_request] assign[=] constant[None] <ast.Try object at 0x7da204564940> if compare[constant[status_url] in call[name[fields]][constant[additional_fields]]] begin[:] variable[now] assign[=] call[name[time].time, parameter[]] while compare[call[name[time].time, parameter[]] less[<] binary_operation[name[now] + binary_operation[binary_operation[constant[60] * constant[60]] * constant[4]]]] begin[:] <ast.Try object at 0x7da18c4cca90> if <ast.BoolOp object at 0x7da18dc9a3b0> begin[:] call[name[logging].debug, parameter[constant[Blocking on waiting for proxy to complete (in progress)...]]] call[name[time].sleep, parameter[constant[30]]] if compare[call[call[name[resp].json, parameter[]].get, parameter[constant[code]]] in list[[<ast.Constant object at 0x7da2044c01f0>, <ast.Constant object at 0x7da2044c3ca0>]]] begin[:] call[name[file_obj].close, parameter[]] return[None]
keyword[def] identifier[_direct_upload] ( identifier[file_obj] , identifier[file_name] , identifier[fields] , identifier[session] , identifier[samples_resource] ): literal[string] identifier[multipart_fields] = identifier[OrderedDict] () keyword[for] identifier[k] , identifier[v] keyword[in] identifier[fields] [ literal[string] ]. identifier[items] (): identifier[multipart_fields] [ identifier[str] ( identifier[k] )]= identifier[str] ( identifier[v] ) identifier[mime_type] = identifier[getattr] ( identifier[file_obj] , literal[string] , literal[string] ) identifier[multipart_fields] [ literal[string] ]=( identifier[file_name] , identifier[file_obj] , identifier[mime_type] ) identifier[encoder] = identifier[MultipartEncoder] ( identifier[multipart_fields] ) identifier[upload_request] = keyword[None] keyword[try] : identifier[upload_request] = identifier[session] . identifier[post] ( identifier[fields] [ literal[string] ], identifier[data] = identifier[encoder] , identifier[headers] ={ literal[string] : identifier[encoder] . identifier[content_type] }, identifier[auth] ={}, ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] : keyword[pass] keyword[if] literal[string] keyword[in] identifier[fields] [ literal[string] ]: identifier[now] = identifier[time] . identifier[time] () keyword[while] identifier[time] . identifier[time] ()<( identifier[now] + literal[int] * literal[int] * literal[int] ): keyword[try] : identifier[resp] = identifier[session] . identifier[post] ( identifier[fields] [ literal[string] ][ literal[string] ], identifier[json] ={ literal[string] : identifier[fields] [ literal[string] ]}, ) identifier[resp] . identifier[raise_for_status] () keyword[except] ( identifier[ValueError] , identifier[requests] . identifier[exceptions] . identifier[RequestException] ) keyword[as] identifier[e] : identifier[logging] . identifier[debug] ( literal[string] . 
identifier[format] ( identifier[e] )) keyword[raise] identifier[RetryableUploadException] ( literal[string] ) keyword[if] identifier[resp] . identifier[json] () keyword[and] identifier[resp] . identifier[json] (). identifier[get] ( literal[string] , keyword[True] ) keyword[is] keyword[False] : identifier[logging] . identifier[debug] ( literal[string] ) identifier[time] . identifier[sleep] ( literal[int] ) keyword[else] : keyword[break] keyword[if] identifier[resp] . identifier[json] (). identifier[get] ( literal[string] ) keyword[in] [ literal[int] , literal[int] ]: identifier[file_obj] . identifier[close] () keyword[return] keyword[elif] identifier[resp] . identifier[json] (). identifier[get] ( literal[string] )== literal[int] : identifier[logging] . identifier[debug] ( literal[string] ) keyword[raise] identifier[RetryableUploadException] ( literal[string] ) keyword[else] : identifier[raise_api_error] ( identifier[resp] , identifier[state] = literal[string] ) keyword[else] : identifier[file_obj] . identifier[close] () keyword[if] identifier[upload_request] . identifier[status_code] keyword[not] keyword[in] [ literal[int] , literal[int] ]: keyword[raise] identifier[RetryableUploadException] ( literal[string] ) keyword[try] : keyword[if] keyword[not] identifier[fields] [ literal[string] ]. identifier[get] ( literal[string] ): identifier[samples_resource] . identifier[confirm_upload] ( { literal[string] : identifier[fields] [ literal[string] ], literal[string] : literal[string] } ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[HTTPError] keyword[as] identifier[e] : identifier[raise_api_error] ( identifier[e] . identifier[response] , identifier[state] = literal[string] ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[ConnectionError] : identifier[raise_connectivity_error] ()
def _direct_upload(file_obj, file_name, fields, session, samples_resource): """Uploads a single file-like object via our validating proxy. Maintains compatibility with direct upload to a user's S3 bucket as well in case we disable our validating proxy. Parameters ---------- file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the case of paired files, they will be interleaved and uploaded uncompressed. In the case of a single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'. file_name : `string` The file_name you wish to associate this fastx file with at One Codex. fields : `dict` Additional data fields to include as JSON in the POST. Must include 'sample_id' and 'upload_url' at a minimum. samples_resource : `onecodex.models.Samples` Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline. Raises ------ RetryableUploadException In cases where the proxy is temporarily down or we experience connectivity issues UploadException In other cases where the proxy determines the upload is invalid and should *not* be retried. 
""" # need an OrderedDict to preserve field order for S3, required for Python 2.7 multipart_fields = OrderedDict() for (k, v) in fields['additional_fields'].items(): multipart_fields[str(k)] = str(v) # depends on [control=['for'], data=[]] # this attribute is only in FASTXInterleave and FilePassthru mime_type = getattr(file_obj, 'mime_type', 'text/plain') multipart_fields['file'] = (file_name, file_obj, mime_type) encoder = MultipartEncoder(multipart_fields) upload_request = None try: upload_request = session.post(fields['upload_url'], data=encoder, headers={'Content-Type': encoder.content_type}, auth={}) # depends on [control=['try'], data=[]] except requests.exceptions.ConnectionError: pass # depends on [control=['except'], data=[]] # If we expect a status *always* try to check it, # waiting up to 4 hours for buffering to complete (~30-50GB file gzipped) if 'status_url' in fields['additional_fields']: now = time.time() while time.time() < now + 60 * 60 * 4: try: resp = session.post(fields['additional_fields']['status_url'], json={'sample_id': fields['sample_id']}) resp.raise_for_status() # depends on [control=['try'], data=[]] except (ValueError, requests.exceptions.RequestException) as e: logging.debug('Retrying due to error: {}'.format(e)) raise RetryableUploadException('Unexpected failure of direct upload proxy. Retrying...') # depends on [control=['except'], data=['e']] if resp.json() and resp.json().get('complete', True) is False: logging.debug('Blocking on waiting for proxy to complete (in progress)...') time.sleep(30) # depends on [control=['if'], data=[]] else: break # depends on [control=['while'], data=[]] # Return is successfully processed if resp.json().get('code') in [200, 201]: file_obj.close() return # depends on [control=['if'], data=[]] elif resp.json().get('code') == 500: logging.debug('Retrying due to 500 from proxy...') raise RetryableUploadException('Unexpected issue with direct upload proxy. 
Retrying...') # depends on [control=['if'], data=[]] else: raise_api_error(resp, state='upload') # depends on [control=['if'], data=[]] else: # Direct to S3 case file_obj.close() if upload_request.status_code not in [200, 201]: raise RetryableUploadException('Unknown connectivity issue with proxy upload.') # depends on [control=['if'], data=[]] # Issue a callback -- this only happens in the direct-to-S3 case try: if not fields['additional_fields'].get('callback_url'): samples_resource.confirm_upload({'sample_id': fields['sample_id'], 'upload_type': 'standard'}) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except requests.exceptions.HTTPError as e: raise_api_error(e.response, state='callback') # depends on [control=['except'], data=['e']] except requests.exceptions.ConnectionError: raise_connectivity_error() # depends on [control=['except'], data=[]]
def attr_case_name(self, name): """Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case """ lower_name = name.lower() for i in self.attrs(): if lower_name == i.lower(): return i # check if attribute present in higher order structures for key in self.keys_nD(): for i in self[key].children.attrs(): if lower_name == i.lower(): return i # nothing was found if still here # pass name back, free to be whatever return name
def function[attr_case_name, parameter[self, name]]: constant[Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case ] variable[lower_name] assign[=] call[name[name].lower, parameter[]] for taget[name[i]] in starred[call[name[self].attrs, parameter[]]] begin[:] if compare[name[lower_name] equal[==] call[name[i].lower, parameter[]]] begin[:] return[name[i]] for taget[name[key]] in starred[call[name[self].keys_nD, parameter[]]] begin[:] for taget[name[i]] in starred[call[call[name[self]][name[key]].children.attrs, parameter[]]] begin[:] if compare[name[lower_name] equal[==] call[name[i].lower, parameter[]]] begin[:] return[name[i]] return[name[name]]
keyword[def] identifier[attr_case_name] ( identifier[self] , identifier[name] ): literal[string] identifier[lower_name] = identifier[name] . identifier[lower] () keyword[for] identifier[i] keyword[in] identifier[self] . identifier[attrs] (): keyword[if] identifier[lower_name] == identifier[i] . identifier[lower] (): keyword[return] identifier[i] keyword[for] identifier[key] keyword[in] identifier[self] . identifier[keys_nD] (): keyword[for] identifier[i] keyword[in] identifier[self] [ identifier[key] ]. identifier[children] . identifier[attrs] (): keyword[if] identifier[lower_name] == identifier[i] . identifier[lower] (): keyword[return] identifier[i] keyword[return] identifier[name]
def attr_case_name(self, name): """Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case """ lower_name = name.lower() for i in self.attrs(): if lower_name == i.lower(): return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # check if attribute present in higher order structures for key in self.keys_nD(): for i in self[key].children.attrs(): if lower_name == i.lower(): return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['key']] # nothing was found if still here # pass name back, free to be whatever return name
def get_dxlink_ids(link): ''' :param link: A DNAnexus link :type link: dict :returns: (Object ID, Project ID) if the link is to a data object (or :const:`None` if no project specified in the link), or (Job ID, Field) if the link is a job-based object reference (JBOR). :rtype: tuple Get the object ID and detail from a link. There are three types of links: * Simple link of the form ``{"$dnanexus_link": "file-XXXX"}`` returns ``("file-XXXX", None)``. * Data object link of the form ``{"$dnanexus_link': {"id": "file-XXXX", "project": "project-XXXX"}}`` returns ``("file-XXXX", "project-XXXX")``. * Job-based object reference (JBOR) of the form ``{"$dnanexus_link": {"job": "job-XXXX", "field": "foo"}}`` returns ``("job-XXXX", "foo")``. ''' if not is_dxlink(link): raise DXError('Invalid link: %r' % link) if isinstance(link['$dnanexus_link'], basestring): return link['$dnanexus_link'], None elif 'id' in link['$dnanexus_link']: return link['$dnanexus_link']['id'], link['$dnanexus_link'].get('project') else: return link['$dnanexus_link']['job'], link['$dnanexus_link']['field']
def function[get_dxlink_ids, parameter[link]]: constant[ :param link: A DNAnexus link :type link: dict :returns: (Object ID, Project ID) if the link is to a data object (or :const:`None` if no project specified in the link), or (Job ID, Field) if the link is a job-based object reference (JBOR). :rtype: tuple Get the object ID and detail from a link. There are three types of links: * Simple link of the form ``{"$dnanexus_link": "file-XXXX"}`` returns ``("file-XXXX", None)``. * Data object link of the form ``{"$dnanexus_link': {"id": "file-XXXX", "project": "project-XXXX"}}`` returns ``("file-XXXX", "project-XXXX")``. * Job-based object reference (JBOR) of the form ``{"$dnanexus_link": {"job": "job-XXXX", "field": "foo"}}`` returns ``("job-XXXX", "foo")``. ] if <ast.UnaryOp object at 0x7da18dc986d0> begin[:] <ast.Raise object at 0x7da18dc9ba90> if call[name[isinstance], parameter[call[name[link]][constant[$dnanexus_link]], name[basestring]]] begin[:] return[tuple[[<ast.Subscript object at 0x7da18dc9a9b0>, <ast.Constant object at 0x7da18dc9b010>]]]
keyword[def] identifier[get_dxlink_ids] ( identifier[link] ): literal[string] keyword[if] keyword[not] identifier[is_dxlink] ( identifier[link] ): keyword[raise] identifier[DXError] ( literal[string] % identifier[link] ) keyword[if] identifier[isinstance] ( identifier[link] [ literal[string] ], identifier[basestring] ): keyword[return] identifier[link] [ literal[string] ], keyword[None] keyword[elif] literal[string] keyword[in] identifier[link] [ literal[string] ]: keyword[return] identifier[link] [ literal[string] ][ literal[string] ], identifier[link] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[else] : keyword[return] identifier[link] [ literal[string] ][ literal[string] ], identifier[link] [ literal[string] ][ literal[string] ]
def get_dxlink_ids(link): """ :param link: A DNAnexus link :type link: dict :returns: (Object ID, Project ID) if the link is to a data object (or :const:`None` if no project specified in the link), or (Job ID, Field) if the link is a job-based object reference (JBOR). :rtype: tuple Get the object ID and detail from a link. There are three types of links: * Simple link of the form ``{"$dnanexus_link": "file-XXXX"}`` returns ``("file-XXXX", None)``. * Data object link of the form ``{"$dnanexus_link': {"id": "file-XXXX", "project": "project-XXXX"}}`` returns ``("file-XXXX", "project-XXXX")``. * Job-based object reference (JBOR) of the form ``{"$dnanexus_link": {"job": "job-XXXX", "field": "foo"}}`` returns ``("job-XXXX", "foo")``. """ if not is_dxlink(link): raise DXError('Invalid link: %r' % link) # depends on [control=['if'], data=[]] if isinstance(link['$dnanexus_link'], basestring): return (link['$dnanexus_link'], None) # depends on [control=['if'], data=[]] elif 'id' in link['$dnanexus_link']: return (link['$dnanexus_link']['id'], link['$dnanexus_link'].get('project')) # depends on [control=['if'], data=[]] else: return (link['$dnanexus_link']['job'], link['$dnanexus_link']['field'])
def merge_variables(variables, **kwargs): ''' Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables. ''' var_dict = OrderedDict() for v in variables: if v.name not in var_dict: var_dict[v.name] = [] var_dict[v.name].append(v) return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]
def function[merge_variables, parameter[variables]]: constant[ Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables. ] variable[var_dict] assign[=] call[name[OrderedDict], parameter[]] for taget[name[v]] in starred[name[variables]] begin[:] if compare[name[v].name <ast.NotIn object at 0x7da2590d7190> name[var_dict]] begin[:] call[name[var_dict]][name[v].name] assign[=] list[[]] call[call[name[var_dict]][name[v].name].append, parameter[name[v]]] return[<ast.ListComp object at 0x7da1b10199c0>]
keyword[def] identifier[merge_variables] ( identifier[variables] ,** identifier[kwargs] ): literal[string] identifier[var_dict] = identifier[OrderedDict] () keyword[for] identifier[v] keyword[in] identifier[variables] : keyword[if] identifier[v] . identifier[name] keyword[not] keyword[in] identifier[var_dict] : identifier[var_dict] [ identifier[v] . identifier[name] ]=[] identifier[var_dict] [ identifier[v] . identifier[name] ]. identifier[append] ( identifier[v] ) keyword[return] [ identifier[merge_variables] ( identifier[vars_] ,** identifier[kwargs] ) keyword[for] identifier[vars_] keyword[in] identifier[list] ( identifier[var_dict] . identifier[values] ())]
def merge_variables(variables, **kwargs): """ Concatenates Variables along row axis. Args: variables (list): List of Variables to merge. Variables can have different names (and all Variables that share a name will be concatenated together). Returns: A list of Variables. """ var_dict = OrderedDict() for v in variables: if v.name not in var_dict: var_dict[v.name] = [] # depends on [control=['if'], data=['var_dict']] var_dict[v.name].append(v) # depends on [control=['for'], data=['v']] return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]
def linkify_hd_by_tp(self, timeperiods): """Replace dependency_period by a real object in host dependency :param timeperiods: list of timeperiod, used to look for a specific one :type timeperiods: alignak.objects.timeperiod.Timeperiods :return: None """ for hostdep in self: try: tp_name = hostdep.dependency_period timeperiod = timeperiods.find_by_name(tp_name) if timeperiod: hostdep.dependency_period = timeperiod.uuid else: hostdep.dependency_period = '' except AttributeError as exp: # pragma: no cover, simple protectionn logger.error("[hostdependency] fail to linkify by timeperiod: %s", exp)
def function[linkify_hd_by_tp, parameter[self, timeperiods]]: constant[Replace dependency_period by a real object in host dependency :param timeperiods: list of timeperiod, used to look for a specific one :type timeperiods: alignak.objects.timeperiod.Timeperiods :return: None ] for taget[name[hostdep]] in starred[name[self]] begin[:] <ast.Try object at 0x7da18bc70bb0>
keyword[def] identifier[linkify_hd_by_tp] ( identifier[self] , identifier[timeperiods] ): literal[string] keyword[for] identifier[hostdep] keyword[in] identifier[self] : keyword[try] : identifier[tp_name] = identifier[hostdep] . identifier[dependency_period] identifier[timeperiod] = identifier[timeperiods] . identifier[find_by_name] ( identifier[tp_name] ) keyword[if] identifier[timeperiod] : identifier[hostdep] . identifier[dependency_period] = identifier[timeperiod] . identifier[uuid] keyword[else] : identifier[hostdep] . identifier[dependency_period] = literal[string] keyword[except] identifier[AttributeError] keyword[as] identifier[exp] : identifier[logger] . identifier[error] ( literal[string] , identifier[exp] )
def linkify_hd_by_tp(self, timeperiods): """Replace dependency_period by a real object in host dependency :param timeperiods: list of timeperiod, used to look for a specific one :type timeperiods: alignak.objects.timeperiod.Timeperiods :return: None """ for hostdep in self: try: tp_name = hostdep.dependency_period timeperiod = timeperiods.find_by_name(tp_name) if timeperiod: hostdep.dependency_period = timeperiod.uuid # depends on [control=['if'], data=[]] else: hostdep.dependency_period = '' # depends on [control=['try'], data=[]] except AttributeError as exp: # pragma: no cover, simple protectionn logger.error('[hostdependency] fail to linkify by timeperiod: %s', exp) # depends on [control=['except'], data=['exp']] # depends on [control=['for'], data=['hostdep']]
def layout_asides(self, block, context, frag, view_name, aside_frag_fns): """ Execute and layout the aside_frags wrt the block's frag. Runtimes should feel free to override this method to control execution, place, and style the asides appropriately for their application This default method appends the aside_frags after frag. If you override this, you must call wrap_aside around each aside as per this function. Args: block (XBlock): the block being rendered frag (html): The result from rendering the block aside_frag_fns list((aside, aside_fn)): The asides and closures for rendering to call """ result = Fragment(frag.content) result.add_fragment_resources(frag) for aside, aside_fn in aside_frag_fns: aside_frag = self.wrap_aside(block, aside, view_name, aside_fn(block, context), context) aside.save() result.add_content(aside_frag.content) result.add_fragment_resources(aside_frag) return result
def function[layout_asides, parameter[self, block, context, frag, view_name, aside_frag_fns]]: constant[ Execute and layout the aside_frags wrt the block's frag. Runtimes should feel free to override this method to control execution, place, and style the asides appropriately for their application This default method appends the aside_frags after frag. If you override this, you must call wrap_aside around each aside as per this function. Args: block (XBlock): the block being rendered frag (html): The result from rendering the block aside_frag_fns list((aside, aside_fn)): The asides and closures for rendering to call ] variable[result] assign[=] call[name[Fragment], parameter[name[frag].content]] call[name[result].add_fragment_resources, parameter[name[frag]]] for taget[tuple[[<ast.Name object at 0x7da18fe92350>, <ast.Name object at 0x7da18fe93940>]]] in starred[name[aside_frag_fns]] begin[:] variable[aside_frag] assign[=] call[name[self].wrap_aside, parameter[name[block], name[aside], name[view_name], call[name[aside_fn], parameter[name[block], name[context]]], name[context]]] call[name[aside].save, parameter[]] call[name[result].add_content, parameter[name[aside_frag].content]] call[name[result].add_fragment_resources, parameter[name[aside_frag]]] return[name[result]]
keyword[def] identifier[layout_asides] ( identifier[self] , identifier[block] , identifier[context] , identifier[frag] , identifier[view_name] , identifier[aside_frag_fns] ): literal[string] identifier[result] = identifier[Fragment] ( identifier[frag] . identifier[content] ) identifier[result] . identifier[add_fragment_resources] ( identifier[frag] ) keyword[for] identifier[aside] , identifier[aside_fn] keyword[in] identifier[aside_frag_fns] : identifier[aside_frag] = identifier[self] . identifier[wrap_aside] ( identifier[block] , identifier[aside] , identifier[view_name] , identifier[aside_fn] ( identifier[block] , identifier[context] ), identifier[context] ) identifier[aside] . identifier[save] () identifier[result] . identifier[add_content] ( identifier[aside_frag] . identifier[content] ) identifier[result] . identifier[add_fragment_resources] ( identifier[aside_frag] ) keyword[return] identifier[result]
def layout_asides(self, block, context, frag, view_name, aside_frag_fns): """ Execute and layout the aside_frags wrt the block's frag. Runtimes should feel free to override this method to control execution, place, and style the asides appropriately for their application This default method appends the aside_frags after frag. If you override this, you must call wrap_aside around each aside as per this function. Args: block (XBlock): the block being rendered frag (html): The result from rendering the block aside_frag_fns list((aside, aside_fn)): The asides and closures for rendering to call """ result = Fragment(frag.content) result.add_fragment_resources(frag) for (aside, aside_fn) in aside_frag_fns: aside_frag = self.wrap_aside(block, aside, view_name, aside_fn(block, context), context) aside.save() result.add_content(aside_frag.content) result.add_fragment_resources(aside_frag) # depends on [control=['for'], data=[]] return result
def _has_sneaky_javascript(self, style): """ Depending on the browser, stuff like ``e x p r e s s i o n(...)`` can get interpreted, or ``expre/* stuff */ssion(...)``. This checks for attempt to do stuff like this. Typically the response will be to kill the entire style; if you have just a bit of Javascript in the style another rule will catch that and remove only the Javascript from the style; this catches more sneaky attempts. """ style = self._substitute_comments('', style) style = style.replace('\\', '') style = _substitute_whitespace('', style) style = style.lower() if 'javascript:' in style: return True if 'expression(' in style: return True return False
def function[_has_sneaky_javascript, parameter[self, style]]: constant[ Depending on the browser, stuff like ``e x p r e s s i o n(...)`` can get interpreted, or ``expre/* stuff */ssion(...)``. This checks for attempt to do stuff like this. Typically the response will be to kill the entire style; if you have just a bit of Javascript in the style another rule will catch that and remove only the Javascript from the style; this catches more sneaky attempts. ] variable[style] assign[=] call[name[self]._substitute_comments, parameter[constant[], name[style]]] variable[style] assign[=] call[name[style].replace, parameter[constant[\], constant[]]] variable[style] assign[=] call[name[_substitute_whitespace], parameter[constant[], name[style]]] variable[style] assign[=] call[name[style].lower, parameter[]] if compare[constant[javascript:] in name[style]] begin[:] return[constant[True]] if compare[constant[expression(] in name[style]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[_has_sneaky_javascript] ( identifier[self] , identifier[style] ): literal[string] identifier[style] = identifier[self] . identifier[_substitute_comments] ( literal[string] , identifier[style] ) identifier[style] = identifier[style] . identifier[replace] ( literal[string] , literal[string] ) identifier[style] = identifier[_substitute_whitespace] ( literal[string] , identifier[style] ) identifier[style] = identifier[style] . identifier[lower] () keyword[if] literal[string] keyword[in] identifier[style] : keyword[return] keyword[True] keyword[if] literal[string] keyword[in] identifier[style] : keyword[return] keyword[True] keyword[return] keyword[False]
def _has_sneaky_javascript(self, style): """ Depending on the browser, stuff like ``e x p r e s s i o n(...)`` can get interpreted, or ``expre/* stuff */ssion(...)``. This checks for attempt to do stuff like this. Typically the response will be to kill the entire style; if you have just a bit of Javascript in the style another rule will catch that and remove only the Javascript from the style; this catches more sneaky attempts. """ style = self._substitute_comments('', style) style = style.replace('\\', '') style = _substitute_whitespace('', style) style = style.lower() if 'javascript:' in style: return True # depends on [control=['if'], data=[]] if 'expression(' in style: return True # depends on [control=['if'], data=[]] return False
def _entryChanged(self, entry): """This is called when a log entry is changed""" # resave the log self.purrer.save() # redo entry item if entry.tw_item: number = entry.tw_item._ientry entry.tw_item = None self.etw.takeTopLevelItem(number) if number: after = self.etw.topLevelItem(number - 1) else: after = None self._addEntryItem(entry, number, after) # log will have changed, so update the viewer self._updateViewer()
def function[_entryChanged, parameter[self, entry]]: constant[This is called when a log entry is changed] call[name[self].purrer.save, parameter[]] if name[entry].tw_item begin[:] variable[number] assign[=] name[entry].tw_item._ientry name[entry].tw_item assign[=] constant[None] call[name[self].etw.takeTopLevelItem, parameter[name[number]]] if name[number] begin[:] variable[after] assign[=] call[name[self].etw.topLevelItem, parameter[binary_operation[name[number] - constant[1]]]] call[name[self]._addEntryItem, parameter[name[entry], name[number], name[after]]] call[name[self]._updateViewer, parameter[]]
keyword[def] identifier[_entryChanged] ( identifier[self] , identifier[entry] ): literal[string] identifier[self] . identifier[purrer] . identifier[save] () keyword[if] identifier[entry] . identifier[tw_item] : identifier[number] = identifier[entry] . identifier[tw_item] . identifier[_ientry] identifier[entry] . identifier[tw_item] = keyword[None] identifier[self] . identifier[etw] . identifier[takeTopLevelItem] ( identifier[number] ) keyword[if] identifier[number] : identifier[after] = identifier[self] . identifier[etw] . identifier[topLevelItem] ( identifier[number] - literal[int] ) keyword[else] : identifier[after] = keyword[None] identifier[self] . identifier[_addEntryItem] ( identifier[entry] , identifier[number] , identifier[after] ) identifier[self] . identifier[_updateViewer] ()
def _entryChanged(self, entry): """This is called when a log entry is changed""" # resave the log self.purrer.save() # redo entry item if entry.tw_item: number = entry.tw_item._ientry entry.tw_item = None self.etw.takeTopLevelItem(number) if number: after = self.etw.topLevelItem(number - 1) # depends on [control=['if'], data=[]] else: after = None self._addEntryItem(entry, number, after) # depends on [control=['if'], data=[]] # log will have changed, so update the viewer self._updateViewer()
def autostack(self): """Rewrite graph to combine similarly-shaped variables (faster startup).""" num_slices = 0 for v in self.graph.all_variables: num_slices += self.mesh_to_impl[v.mesh].size if num_slices >= 2 ** 16: # Startup times are slow with lots of variable slices. # Perform more aggressive stacking max_combined_slice_size = 2 ** 27 else: # Stacking hurts memory utilization - only stack small variables. max_combined_slice_size = 2 ** 16 self.graph.rewrite_stack_variables( mesh_to_impl=self.mesh_to_impl, max_combined_slice_size=max_combined_slice_size)
def function[autostack, parameter[self]]: constant[Rewrite graph to combine similarly-shaped variables (faster startup).] variable[num_slices] assign[=] constant[0] for taget[name[v]] in starred[name[self].graph.all_variables] begin[:] <ast.AugAssign object at 0x7da2054a6440> if compare[name[num_slices] greater_or_equal[>=] binary_operation[constant[2] ** constant[16]]] begin[:] variable[max_combined_slice_size] assign[=] binary_operation[constant[2] ** constant[27]] call[name[self].graph.rewrite_stack_variables, parameter[]]
keyword[def] identifier[autostack] ( identifier[self] ): literal[string] identifier[num_slices] = literal[int] keyword[for] identifier[v] keyword[in] identifier[self] . identifier[graph] . identifier[all_variables] : identifier[num_slices] += identifier[self] . identifier[mesh_to_impl] [ identifier[v] . identifier[mesh] ]. identifier[size] keyword[if] identifier[num_slices] >= literal[int] ** literal[int] : identifier[max_combined_slice_size] = literal[int] ** literal[int] keyword[else] : identifier[max_combined_slice_size] = literal[int] ** literal[int] identifier[self] . identifier[graph] . identifier[rewrite_stack_variables] ( identifier[mesh_to_impl] = identifier[self] . identifier[mesh_to_impl] , identifier[max_combined_slice_size] = identifier[max_combined_slice_size] )
def autostack(self): """Rewrite graph to combine similarly-shaped variables (faster startup).""" num_slices = 0 for v in self.graph.all_variables: num_slices += self.mesh_to_impl[v.mesh].size # depends on [control=['for'], data=['v']] if num_slices >= 2 ** 16: # Startup times are slow with lots of variable slices. # Perform more aggressive stacking max_combined_slice_size = 2 ** 27 # depends on [control=['if'], data=[]] else: # Stacking hurts memory utilization - only stack small variables. max_combined_slice_size = 2 ** 16 self.graph.rewrite_stack_variables(mesh_to_impl=self.mesh_to_impl, max_combined_slice_size=max_combined_slice_size)
def equivalent(first: T, second: T) -> bool: """Compare two objects for equivalence (identity or equality), using array_equiv if either object is an ndarray """ # TODO: refactor to avoid circular import from . import duck_array_ops if isinstance(first, np.ndarray) or isinstance(second, np.ndarray): return duck_array_ops.array_equiv(first, second) else: return ((first is second) or (first == second) or (pd.isnull(first) and pd.isnull(second)))
def function[equivalent, parameter[first, second]]: constant[Compare two objects for equivalence (identity or equality), using array_equiv if either object is an ndarray ] from relative_module[None] import module[duck_array_ops] if <ast.BoolOp object at 0x7da20e955450> begin[:] return[call[name[duck_array_ops].array_equiv, parameter[name[first], name[second]]]]
keyword[def] identifier[equivalent] ( identifier[first] : identifier[T] , identifier[second] : identifier[T] )-> identifier[bool] : literal[string] keyword[from] . keyword[import] identifier[duck_array_ops] keyword[if] identifier[isinstance] ( identifier[first] , identifier[np] . identifier[ndarray] ) keyword[or] identifier[isinstance] ( identifier[second] , identifier[np] . identifier[ndarray] ): keyword[return] identifier[duck_array_ops] . identifier[array_equiv] ( identifier[first] , identifier[second] ) keyword[else] : keyword[return] (( identifier[first] keyword[is] identifier[second] ) keyword[or] ( identifier[first] == identifier[second] ) keyword[or] ( identifier[pd] . identifier[isnull] ( identifier[first] ) keyword[and] identifier[pd] . identifier[isnull] ( identifier[second] )))
def equivalent(first: T, second: T) -> bool: """Compare two objects for equivalence (identity or equality), using array_equiv if either object is an ndarray """ # TODO: refactor to avoid circular import from . import duck_array_ops if isinstance(first, np.ndarray) or isinstance(second, np.ndarray): return duck_array_ops.array_equiv(first, second) # depends on [control=['if'], data=[]] else: return first is second or first == second or (pd.isnull(first) and pd.isnull(second))
def revision(self, message): """ Create a new revision file :param message: """ alembic.command.revision(self.alembic_config(), message=message)
def function[revision, parameter[self, message]]: constant[ Create a new revision file :param message: ] call[name[alembic].command.revision, parameter[call[name[self].alembic_config, parameter[]]]]
keyword[def] identifier[revision] ( identifier[self] , identifier[message] ): literal[string] identifier[alembic] . identifier[command] . identifier[revision] ( identifier[self] . identifier[alembic_config] (), identifier[message] = identifier[message] )
def revision(self, message): """ Create a new revision file :param message: """ alembic.command.revision(self.alembic_config(), message=message)
def nurbs_to_bspline(obj, **kwargs): """ Extracts the non-rational components from rational parametric shapes, if possible. The possibility of converting a rational shape to a non-rational one depends on the weights vector. :param obj: NURBS shape :type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume :return: B-Spline shape :rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume :raises: TypeError """ if not obj.rational: raise TypeError("The input must be a rational shape") # Get keyword arguments tol = kwargs.get('tol', 10e-8) # Test for non-rational component extraction for w in obj.weights: if abs(w - 1.0) > tol: print("Cannot extract non-rational components") return obj # NURBS -> B-Spline if isinstance(obj, NURBS.Curve): return _convert.convert_curve(obj, BSpline) elif isinstance(obj, NURBS.Surface): return _convert.convert_surface(obj, BSpline) elif isinstance(obj, NURBS.Volume): return _convert.convert_volume(obj, BSpline) else: raise TypeError("Input must be an instance of NURBS curve, surface or volume")
def function[nurbs_to_bspline, parameter[obj]]: constant[ Extracts the non-rational components from rational parametric shapes, if possible. The possibility of converting a rational shape to a non-rational one depends on the weights vector. :param obj: NURBS shape :type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume :return: B-Spline shape :rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume :raises: TypeError ] if <ast.UnaryOp object at 0x7da1b17b6830> begin[:] <ast.Raise object at 0x7da1b17b7610> variable[tol] assign[=] call[name[kwargs].get, parameter[constant[tol], constant[1e-07]]] for taget[name[w]] in starred[name[obj].weights] begin[:] if compare[call[name[abs], parameter[binary_operation[name[w] - constant[1.0]]]] greater[>] name[tol]] begin[:] call[name[print], parameter[constant[Cannot extract non-rational components]]] return[name[obj]] if call[name[isinstance], parameter[name[obj], name[NURBS].Curve]] begin[:] return[call[name[_convert].convert_curve, parameter[name[obj], name[BSpline]]]]
keyword[def] identifier[nurbs_to_bspline] ( identifier[obj] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[obj] . identifier[rational] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[tol] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] ) keyword[for] identifier[w] keyword[in] identifier[obj] . identifier[weights] : keyword[if] identifier[abs] ( identifier[w] - literal[int] )> identifier[tol] : identifier[print] ( literal[string] ) keyword[return] identifier[obj] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[NURBS] . identifier[Curve] ): keyword[return] identifier[_convert] . identifier[convert_curve] ( identifier[obj] , identifier[BSpline] ) keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[NURBS] . identifier[Surface] ): keyword[return] identifier[_convert] . identifier[convert_surface] ( identifier[obj] , identifier[BSpline] ) keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[NURBS] . identifier[Volume] ): keyword[return] identifier[_convert] . identifier[convert_volume] ( identifier[obj] , identifier[BSpline] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] )
def nurbs_to_bspline(obj, **kwargs): """ Extracts the non-rational components from rational parametric shapes, if possible. The possibility of converting a rational shape to a non-rational one depends on the weights vector. :param obj: NURBS shape :type obj: NURBS.Curve, NURBS.Surface or NURBS.Volume :return: B-Spline shape :rtype: BSpline.Curve, BSpline.Surface or BSpline.Volume :raises: TypeError """ if not obj.rational: raise TypeError('The input must be a rational shape') # depends on [control=['if'], data=[]] # Get keyword arguments tol = kwargs.get('tol', 1e-07) # Test for non-rational component extraction for w in obj.weights: if abs(w - 1.0) > tol: print('Cannot extract non-rational components') return obj # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']] # NURBS -> B-Spline if isinstance(obj, NURBS.Curve): return _convert.convert_curve(obj, BSpline) # depends on [control=['if'], data=[]] elif isinstance(obj, NURBS.Surface): return _convert.convert_surface(obj, BSpline) # depends on [control=['if'], data=[]] elif isinstance(obj, NURBS.Volume): return _convert.convert_volume(obj, BSpline) # depends on [control=['if'], data=[]] else: raise TypeError('Input must be an instance of NURBS curve, surface or volume')
async def cluster_delslots(self, *slots): """ Set hash slots as unbound in the cluster. It determines by it self what node the slot is in and sends it there Returns a list of the results for each processed slot. """ cluster_nodes = self._nodes_slots_to_slots_nodes(await self.cluster_nodes()) res = list() for slot in slots: res.append(await self.execute_command('CLUSTER DELSLOTS', slot, node_id=cluster_nodes[slot])) return res
<ast.AsyncFunctionDef object at 0x7da1b079b670>
keyword[async] keyword[def] identifier[cluster_delslots] ( identifier[self] ,* identifier[slots] ): literal[string] identifier[cluster_nodes] = identifier[self] . identifier[_nodes_slots_to_slots_nodes] ( keyword[await] identifier[self] . identifier[cluster_nodes] ()) identifier[res] = identifier[list] () keyword[for] identifier[slot] keyword[in] identifier[slots] : identifier[res] . identifier[append] ( keyword[await] identifier[self] . identifier[execute_command] ( literal[string] , identifier[slot] , identifier[node_id] = identifier[cluster_nodes] [ identifier[slot] ])) keyword[return] identifier[res]
async def cluster_delslots(self, *slots): """ Set hash slots as unbound in the cluster. It determines by it self what node the slot is in and sends it there Returns a list of the results for each processed slot. """ cluster_nodes = self._nodes_slots_to_slots_nodes(await self.cluster_nodes()) res = list() for slot in slots: res.append(await self.execute_command('CLUSTER DELSLOTS', slot, node_id=cluster_nodes[slot])) # depends on [control=['for'], data=['slot']] return res
def calendar_data(self, request): """ Return event data in JSON format for AJAX requests, or a calendar page to be loaded in an iframe. """ # mutable copy request.GET = request.GET.copy() if 'timezone' in request.GET: tz = djtz.get(request.GET.pop('timezone')) else: tz = get_current_timezone() if 'start' in request.GET: start_dt = self._parse_dt_from_request(request, 'start') if start_dt: start = djtz.localize(start_dt, tz) else: start = None else: start = None if 'end' in request.GET: end_dt = self._parse_dt_from_request(request, 'end') if end_dt: end = djtz.localize(end_dt, tz) else: end = None else: end = None # filter the qs like the changelist filters cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter, self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self) filtered_event_ids = cl.get_queryset(request).values_list('id', flat=True) all_occurrences = models.Occurrence.objects.filter(event__id__in=filtered_event_ids).overlapping(start, end) data = [] for occurrence in all_occurrences.all(): data.append(self._calendar_json_for_occurrence(occurrence)) data = json.dumps(data, cls=DjangoJSONEncoder) return HttpResponse(content=data, content_type='application/json')
def function[calendar_data, parameter[self, request]]: constant[ Return event data in JSON format for AJAX requests, or a calendar page to be loaded in an iframe. ] name[request].GET assign[=] call[name[request].GET.copy, parameter[]] if compare[constant[timezone] in name[request].GET] begin[:] variable[tz] assign[=] call[name[djtz].get, parameter[call[name[request].GET.pop, parameter[constant[timezone]]]]] if compare[constant[start] in name[request].GET] begin[:] variable[start_dt] assign[=] call[name[self]._parse_dt_from_request, parameter[name[request], constant[start]]] if name[start_dt] begin[:] variable[start] assign[=] call[name[djtz].localize, parameter[name[start_dt], name[tz]]] if compare[constant[end] in name[request].GET] begin[:] variable[end_dt] assign[=] call[name[self]._parse_dt_from_request, parameter[name[request], constant[end]]] if name[end_dt] begin[:] variable[end] assign[=] call[name[djtz].localize, parameter[name[end_dt], name[tz]]] variable[cl] assign[=] call[name[ChangeList], parameter[name[request], name[self].model, name[self].list_display, name[self].list_display_links, name[self].list_filter, name[self].date_hierarchy, name[self].search_fields, name[self].list_select_related, name[self].list_per_page, name[self].list_max_show_all, name[self].list_editable, name[self]]] variable[filtered_event_ids] assign[=] call[call[name[cl].get_queryset, parameter[name[request]]].values_list, parameter[constant[id]]] variable[all_occurrences] assign[=] call[call[name[models].Occurrence.objects.filter, parameter[]].overlapping, parameter[name[start], name[end]]] variable[data] assign[=] list[[]] for taget[name[occurrence]] in starred[call[name[all_occurrences].all, parameter[]]] begin[:] call[name[data].append, parameter[call[name[self]._calendar_json_for_occurrence, parameter[name[occurrence]]]]] variable[data] assign[=] call[name[json].dumps, parameter[name[data]]] return[call[name[HttpResponse], parameter[]]]
keyword[def] identifier[calendar_data] ( identifier[self] , identifier[request] ): literal[string] identifier[request] . identifier[GET] = identifier[request] . identifier[GET] . identifier[copy] () keyword[if] literal[string] keyword[in] identifier[request] . identifier[GET] : identifier[tz] = identifier[djtz] . identifier[get] ( identifier[request] . identifier[GET] . identifier[pop] ( literal[string] )) keyword[else] : identifier[tz] = identifier[get_current_timezone] () keyword[if] literal[string] keyword[in] identifier[request] . identifier[GET] : identifier[start_dt] = identifier[self] . identifier[_parse_dt_from_request] ( identifier[request] , literal[string] ) keyword[if] identifier[start_dt] : identifier[start] = identifier[djtz] . identifier[localize] ( identifier[start_dt] , identifier[tz] ) keyword[else] : identifier[start] = keyword[None] keyword[else] : identifier[start] = keyword[None] keyword[if] literal[string] keyword[in] identifier[request] . identifier[GET] : identifier[end_dt] = identifier[self] . identifier[_parse_dt_from_request] ( identifier[request] , literal[string] ) keyword[if] identifier[end_dt] : identifier[end] = identifier[djtz] . identifier[localize] ( identifier[end_dt] , identifier[tz] ) keyword[else] : identifier[end] = keyword[None] keyword[else] : identifier[end] = keyword[None] identifier[cl] = identifier[ChangeList] ( identifier[request] , identifier[self] . identifier[model] , identifier[self] . identifier[list_display] , identifier[self] . identifier[list_display_links] , identifier[self] . identifier[list_filter] , identifier[self] . identifier[date_hierarchy] , identifier[self] . identifier[search_fields] , identifier[self] . identifier[list_select_related] , identifier[self] . identifier[list_per_page] , identifier[self] . identifier[list_max_show_all] , identifier[self] . identifier[list_editable] , identifier[self] ) identifier[filtered_event_ids] = identifier[cl] . identifier[get_queryset] ( identifier[request] ). 
identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ) identifier[all_occurrences] = identifier[models] . identifier[Occurrence] . identifier[objects] . identifier[filter] ( identifier[event__id__in] = identifier[filtered_event_ids] ). identifier[overlapping] ( identifier[start] , identifier[end] ) identifier[data] =[] keyword[for] identifier[occurrence] keyword[in] identifier[all_occurrences] . identifier[all] (): identifier[data] . identifier[append] ( identifier[self] . identifier[_calendar_json_for_occurrence] ( identifier[occurrence] )) identifier[data] = identifier[json] . identifier[dumps] ( identifier[data] , identifier[cls] = identifier[DjangoJSONEncoder] ) keyword[return] identifier[HttpResponse] ( identifier[content] = identifier[data] , identifier[content_type] = literal[string] )
def calendar_data(self, request): """ Return event data in JSON format for AJAX requests, or a calendar page to be loaded in an iframe. """ # mutable copy request.GET = request.GET.copy() if 'timezone' in request.GET: tz = djtz.get(request.GET.pop('timezone')) # depends on [control=['if'], data=[]] else: tz = get_current_timezone() if 'start' in request.GET: start_dt = self._parse_dt_from_request(request, 'start') if start_dt: start = djtz.localize(start_dt, tz) # depends on [control=['if'], data=[]] else: start = None # depends on [control=['if'], data=[]] else: start = None if 'end' in request.GET: end_dt = self._parse_dt_from_request(request, 'end') if end_dt: end = djtz.localize(end_dt, tz) # depends on [control=['if'], data=[]] else: end = None # depends on [control=['if'], data=[]] else: end = None # filter the qs like the changelist filters cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter, self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self) filtered_event_ids = cl.get_queryset(request).values_list('id', flat=True) all_occurrences = models.Occurrence.objects.filter(event__id__in=filtered_event_ids).overlapping(start, end) data = [] for occurrence in all_occurrences.all(): data.append(self._calendar_json_for_occurrence(occurrence)) # depends on [control=['for'], data=['occurrence']] data = json.dumps(data, cls=DjangoJSONEncoder) return HttpResponse(content=data, content_type='application/json')
def get_current_service_state(self, service_id: str): """Get the state of a SDP service.""" state = self._get_service_state(service_id) return state.current_state
def function[get_current_service_state, parameter[self, service_id]]: constant[Get the state of a SDP service.] variable[state] assign[=] call[name[self]._get_service_state, parameter[name[service_id]]] return[name[state].current_state]
keyword[def] identifier[get_current_service_state] ( identifier[self] , identifier[service_id] : identifier[str] ): literal[string] identifier[state] = identifier[self] . identifier[_get_service_state] ( identifier[service_id] ) keyword[return] identifier[state] . identifier[current_state]
def get_current_service_state(self, service_id: str): """Get the state of a SDP service.""" state = self._get_service_state(service_id) return state.current_state
def reset(self) -> None: """Restores the starting position.""" self.turn = WHITE self.castling_rights = BB_CORNERS self.ep_square = None self.halfmove_clock = 0 self.fullmove_number = 1 self.reset_board()
def function[reset, parameter[self]]: constant[Restores the starting position.] name[self].turn assign[=] name[WHITE] name[self].castling_rights assign[=] name[BB_CORNERS] name[self].ep_square assign[=] constant[None] name[self].halfmove_clock assign[=] constant[0] name[self].fullmove_number assign[=] constant[1] call[name[self].reset_board, parameter[]]
keyword[def] identifier[reset] ( identifier[self] )-> keyword[None] : literal[string] identifier[self] . identifier[turn] = identifier[WHITE] identifier[self] . identifier[castling_rights] = identifier[BB_CORNERS] identifier[self] . identifier[ep_square] = keyword[None] identifier[self] . identifier[halfmove_clock] = literal[int] identifier[self] . identifier[fullmove_number] = literal[int] identifier[self] . identifier[reset_board] ()
def reset(self) -> None: """Restores the starting position.""" self.turn = WHITE self.castling_rights = BB_CORNERS self.ep_square = None self.halfmove_clock = 0 self.fullmove_number = 1 self.reset_board()
def interval(best,lo=np.nan,hi=np.nan): """ Pythonized interval for easy output to yaml """ return [float(best),[float(lo),float(hi)]]
def function[interval, parameter[best, lo, hi]]: constant[ Pythonized interval for easy output to yaml ] return[list[[<ast.Call object at 0x7da2054a63e0>, <ast.List object at 0x7da2054a45e0>]]]
keyword[def] identifier[interval] ( identifier[best] , identifier[lo] = identifier[np] . identifier[nan] , identifier[hi] = identifier[np] . identifier[nan] ): literal[string] keyword[return] [ identifier[float] ( identifier[best] ),[ identifier[float] ( identifier[lo] ), identifier[float] ( identifier[hi] )]]
def interval(best, lo=np.nan, hi=np.nan): """ Pythonized interval for easy output to yaml """ return [float(best), [float(lo), float(hi)]]
def lastChild(self): ''' lastChild - property, Get the last child block, text or tag @return <str/AdvancedTag/None> - The last child block, or None if no child blocks ''' blocks = object.__getattribute__(self, 'blocks') # First block is empty string for indent, but don't hardcode incase that changes if blocks[0] == '': firstIdx = 1 else: firstIdx = 0 if len(blocks) <= firstIdx: return None return blocks[-1]
def function[lastChild, parameter[self]]: constant[ lastChild - property, Get the last child block, text or tag @return <str/AdvancedTag/None> - The last child block, or None if no child blocks ] variable[blocks] assign[=] call[name[object].__getattribute__, parameter[name[self], constant[blocks]]] if compare[call[name[blocks]][constant[0]] equal[==] constant[]] begin[:] variable[firstIdx] assign[=] constant[1] if compare[call[name[len], parameter[name[blocks]]] less_or_equal[<=] name[firstIdx]] begin[:] return[constant[None]] return[call[name[blocks]][<ast.UnaryOp object at 0x7da1b10c0f10>]]
keyword[def] identifier[lastChild] ( identifier[self] ): literal[string] identifier[blocks] = identifier[object] . identifier[__getattribute__] ( identifier[self] , literal[string] ) keyword[if] identifier[blocks] [ literal[int] ]== literal[string] : identifier[firstIdx] = literal[int] keyword[else] : identifier[firstIdx] = literal[int] keyword[if] identifier[len] ( identifier[blocks] )<= identifier[firstIdx] : keyword[return] keyword[None] keyword[return] identifier[blocks] [- literal[int] ]
def lastChild(self): """ lastChild - property, Get the last child block, text or tag @return <str/AdvancedTag/None> - The last child block, or None if no child blocks """ blocks = object.__getattribute__(self, 'blocks') # First block is empty string for indent, but don't hardcode incase that changes if blocks[0] == '': firstIdx = 1 # depends on [control=['if'], data=[]] else: firstIdx = 0 if len(blocks) <= firstIdx: return None # depends on [control=['if'], data=[]] return blocks[-1]
def snapshots(self): """ Get all Volumes of type Snapshot. Updates every time - no caching. :return: a `list` of all the `ScaleIO_Volume` that have a are of type Snapshot. :rtype: list """ self.connection._check_login() response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/Volume/instances")).json() all_volumes_snapshot = [] for volume in response: if volume['volumeType'] == 'Snapshot': all_volumes_snapshot.append( Volume.from_dict(volume) ) return all_volumes_snapshot
def function[snapshots, parameter[self]]: constant[ Get all Volumes of type Snapshot. Updates every time - no caching. :return: a `list` of all the `ScaleIO_Volume` that have a are of type Snapshot. :rtype: list ] call[name[self].connection._check_login, parameter[]] variable[response] assign[=] call[call[name[self].connection._do_get, parameter[call[constant[{}/{}].format, parameter[name[self].connection._api_url, constant[types/Volume/instances]]]]].json, parameter[]] variable[all_volumes_snapshot] assign[=] list[[]] for taget[name[volume]] in starred[name[response]] begin[:] if compare[call[name[volume]][constant[volumeType]] equal[==] constant[Snapshot]] begin[:] call[name[all_volumes_snapshot].append, parameter[call[name[Volume].from_dict, parameter[name[volume]]]]] return[name[all_volumes_snapshot]]
keyword[def] identifier[snapshots] ( identifier[self] ): literal[string] identifier[self] . identifier[connection] . identifier[_check_login] () identifier[response] = identifier[self] . identifier[connection] . identifier[_do_get] ( literal[string] . identifier[format] ( identifier[self] . identifier[connection] . identifier[_api_url] , literal[string] )). identifier[json] () identifier[all_volumes_snapshot] =[] keyword[for] identifier[volume] keyword[in] identifier[response] : keyword[if] identifier[volume] [ literal[string] ]== literal[string] : identifier[all_volumes_snapshot] . identifier[append] ( identifier[Volume] . identifier[from_dict] ( identifier[volume] ) ) keyword[return] identifier[all_volumes_snapshot]
def snapshots(self): """ Get all Volumes of type Snapshot. Updates every time - no caching. :return: a `list` of all the `ScaleIO_Volume` that have a are of type Snapshot. :rtype: list """ self.connection._check_login() response = self.connection._do_get('{}/{}'.format(self.connection._api_url, 'types/Volume/instances')).json() all_volumes_snapshot = [] for volume in response: if volume['volumeType'] == 'Snapshot': all_volumes_snapshot.append(Volume.from_dict(volume)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['volume']] return all_volumes_snapshot
def image_list(list_aliases=False, remote_addr=None, cert=None, key=None, verify_cert=True): ''' Lists all images from the LXD. list_aliases : Return a dict with the fingerprint as key and a list of aliases as value instead. remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.image_list true --out=json $ salt '*' lxd.image_list --out=json ''' client = pylxd_client_get(remote_addr, cert, key, verify_cert) images = client.images.all() if list_aliases: return {i.fingerprint: [a['name'] for a in i.aliases] for i in images} return map(_pylxd_model_to_dict, images)
def function[image_list, parameter[list_aliases, remote_addr, cert, key, verify_cert]]: constant[ Lists all images from the LXD. list_aliases : Return a dict with the fingerprint as key and a list of aliases as value instead. remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.image_list true --out=json $ salt '*' lxd.image_list --out=json ] variable[client] assign[=] call[name[pylxd_client_get], parameter[name[remote_addr], name[cert], name[key], name[verify_cert]]] variable[images] assign[=] call[name[client].images.all, parameter[]] if name[list_aliases] begin[:] return[<ast.DictComp object at 0x7da207f00700>] return[call[name[map], parameter[name[_pylxd_model_to_dict], name[images]]]]
keyword[def] identifier[image_list] ( identifier[list_aliases] = keyword[False] , identifier[remote_addr] = keyword[None] , identifier[cert] = keyword[None] , identifier[key] = keyword[None] , identifier[verify_cert] = keyword[True] ): literal[string] identifier[client] = identifier[pylxd_client_get] ( identifier[remote_addr] , identifier[cert] , identifier[key] , identifier[verify_cert] ) identifier[images] = identifier[client] . identifier[images] . identifier[all] () keyword[if] identifier[list_aliases] : keyword[return] { identifier[i] . identifier[fingerprint] :[ identifier[a] [ literal[string] ] keyword[for] identifier[a] keyword[in] identifier[i] . identifier[aliases] ] keyword[for] identifier[i] keyword[in] identifier[images] } keyword[return] identifier[map] ( identifier[_pylxd_model_to_dict] , identifier[images] )
def image_list(list_aliases=False, remote_addr=None, cert=None, key=None, verify_cert=True): """ Lists all images from the LXD. list_aliases : Return a dict with the fingerprint as key and a list of aliases as value instead. remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.image_list true --out=json $ salt '*' lxd.image_list --out=json """ client = pylxd_client_get(remote_addr, cert, key, verify_cert) images = client.images.all() if list_aliases: return {i.fingerprint: [a['name'] for a in i.aliases] for i in images} # depends on [control=['if'], data=[]] return map(_pylxd_model_to_dict, images)
def get_constraint_slack(self, name): """Get the value of the slack variable of a constraint.""" if self._slacks is None: return None else: index = self._get_constraint_index(name) return self._slacks[index]
def function[get_constraint_slack, parameter[self, name]]: constant[Get the value of the slack variable of a constraint.] if compare[name[self]._slacks is constant[None]] begin[:] return[constant[None]]
keyword[def] identifier[get_constraint_slack] ( identifier[self] , identifier[name] ): literal[string] keyword[if] identifier[self] . identifier[_slacks] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[else] : identifier[index] = identifier[self] . identifier[_get_constraint_index] ( identifier[name] ) keyword[return] identifier[self] . identifier[_slacks] [ identifier[index] ]
def get_constraint_slack(self, name): """Get the value of the slack variable of a constraint.""" if self._slacks is None: return None # depends on [control=['if'], data=[]] else: index = self._get_constraint_index(name) return self._slacks[index]
def check_job_status(self, key=JobDetails.topkey, fail_running=False, fail_pending=False, force_check=False): """Check the status of a particular job By default this checks the status of the top-level job, but can by made to drill into the sub-jobs. Parameters ---------- key : str Key associated to the job in question fail_running : `bool` If True, consider running jobs as failed fail_pending : `bool` If True, consider pending jobs as failed force_check : `bool` Drill into status of individual jobs` instead of using top level job only Returns ------- status : `JobStatus` Job status flag """ if key in self.jobs: status = self.jobs[key].status if status in [JobStatus.unknown, JobStatus.ready, JobStatus.pending, JobStatus.running] or force_check: status = self._interface.check_job(self.jobs[key]) if status == JobStatus.running and fail_running: status = JobStatus.failed if status == JobStatus.pending and fail_pending: status = JobStatus.failed self.jobs[key].status = status if self._job_archive: self._job_archive.register_job(self.jobs[key]) else: status = JobStatus.no_job return status
def function[check_job_status, parameter[self, key, fail_running, fail_pending, force_check]]: constant[Check the status of a particular job By default this checks the status of the top-level job, but can by made to drill into the sub-jobs. Parameters ---------- key : str Key associated to the job in question fail_running : `bool` If True, consider running jobs as failed fail_pending : `bool` If True, consider pending jobs as failed force_check : `bool` Drill into status of individual jobs` instead of using top level job only Returns ------- status : `JobStatus` Job status flag ] if compare[name[key] in name[self].jobs] begin[:] variable[status] assign[=] call[name[self].jobs][name[key]].status if <ast.BoolOp object at 0x7da18f58c580> begin[:] variable[status] assign[=] call[name[self]._interface.check_job, parameter[call[name[self].jobs][name[key]]]] if <ast.BoolOp object at 0x7da18f58ff10> begin[:] variable[status] assign[=] name[JobStatus].failed if <ast.BoolOp object at 0x7da18f58d330> begin[:] variable[status] assign[=] name[JobStatus].failed call[name[self].jobs][name[key]].status assign[=] name[status] if name[self]._job_archive begin[:] call[name[self]._job_archive.register_job, parameter[call[name[self].jobs][name[key]]]] return[name[status]]
keyword[def] identifier[check_job_status] ( identifier[self] , identifier[key] = identifier[JobDetails] . identifier[topkey] , identifier[fail_running] = keyword[False] , identifier[fail_pending] = keyword[False] , identifier[force_check] = keyword[False] ): literal[string] keyword[if] identifier[key] keyword[in] identifier[self] . identifier[jobs] : identifier[status] = identifier[self] . identifier[jobs] [ identifier[key] ]. identifier[status] keyword[if] identifier[status] keyword[in] [ identifier[JobStatus] . identifier[unknown] , identifier[JobStatus] . identifier[ready] , identifier[JobStatus] . identifier[pending] , identifier[JobStatus] . identifier[running] ] keyword[or] identifier[force_check] : identifier[status] = identifier[self] . identifier[_interface] . identifier[check_job] ( identifier[self] . identifier[jobs] [ identifier[key] ]) keyword[if] identifier[status] == identifier[JobStatus] . identifier[running] keyword[and] identifier[fail_running] : identifier[status] = identifier[JobStatus] . identifier[failed] keyword[if] identifier[status] == identifier[JobStatus] . identifier[pending] keyword[and] identifier[fail_pending] : identifier[status] = identifier[JobStatus] . identifier[failed] identifier[self] . identifier[jobs] [ identifier[key] ]. identifier[status] = identifier[status] keyword[if] identifier[self] . identifier[_job_archive] : identifier[self] . identifier[_job_archive] . identifier[register_job] ( identifier[self] . identifier[jobs] [ identifier[key] ]) keyword[else] : identifier[status] = identifier[JobStatus] . identifier[no_job] keyword[return] identifier[status]
def check_job_status(self, key=JobDetails.topkey, fail_running=False, fail_pending=False, force_check=False): """Check the status of a particular job By default this checks the status of the top-level job, but can by made to drill into the sub-jobs. Parameters ---------- key : str Key associated to the job in question fail_running : `bool` If True, consider running jobs as failed fail_pending : `bool` If True, consider pending jobs as failed force_check : `bool` Drill into status of individual jobs` instead of using top level job only Returns ------- status : `JobStatus` Job status flag """ if key in self.jobs: status = self.jobs[key].status if status in [JobStatus.unknown, JobStatus.ready, JobStatus.pending, JobStatus.running] or force_check: status = self._interface.check_job(self.jobs[key]) # depends on [control=['if'], data=[]] if status == JobStatus.running and fail_running: status = JobStatus.failed # depends on [control=['if'], data=[]] if status == JobStatus.pending and fail_pending: status = JobStatus.failed # depends on [control=['if'], data=[]] self.jobs[key].status = status if self._job_archive: self._job_archive.register_job(self.jobs[key]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['key']] else: status = JobStatus.no_job return status
def _hex_to_rgb(color: str) -> Tuple[int, ...]: """Convert hex color to RGB format. :param color: Hex color. :return: RGB tuple. """ if color.startswith('#'): color = color.lstrip('#') return tuple(int(color[i:i + 2], 16) for i in (0, 2, 4))
def function[_hex_to_rgb, parameter[color]]: constant[Convert hex color to RGB format. :param color: Hex color. :return: RGB tuple. ] if call[name[color].startswith, parameter[constant[#]]] begin[:] variable[color] assign[=] call[name[color].lstrip, parameter[constant[#]]] return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20c6c4790>]]]
keyword[def] identifier[_hex_to_rgb] ( identifier[color] : identifier[str] )-> identifier[Tuple] [ identifier[int] ,...]: literal[string] keyword[if] identifier[color] . identifier[startswith] ( literal[string] ): identifier[color] = identifier[color] . identifier[lstrip] ( literal[string] ) keyword[return] identifier[tuple] ( identifier[int] ( identifier[color] [ identifier[i] : identifier[i] + literal[int] ], literal[int] ) keyword[for] identifier[i] keyword[in] ( literal[int] , literal[int] , literal[int] ))
def _hex_to_rgb(color: str) -> Tuple[int, ...]: """Convert hex color to RGB format. :param color: Hex color. :return: RGB tuple. """ if color.startswith('#'): color = color.lstrip('#') # depends on [control=['if'], data=[]] return tuple((int(color[i:i + 2], 16) for i in (0, 2, 4)))
def get_aggs(self): """ Compute the values for single valued aggregations :returns: the single aggregation value """ res = self.fetch_aggregation_results() if 'aggregations' in res and 'values' in res['aggregations'][str(self.parent_agg_counter - 1)]: try: agg = res['aggregations'][str(self.parent_agg_counter - 1)]['values']["50.0"] if agg == 'NaN': # ES returns NaN. Convert to None for matplotlib graph agg = None except Exception as e: raise RuntimeError("Multivalue aggregation result not supported") elif 'aggregations' in res and 'value' in res['aggregations'][str(self.parent_agg_counter - 1)]: agg = res['aggregations'][str(self.parent_agg_counter - 1)]['value'] else: agg = res['hits']['total'] return agg
def function[get_aggs, parameter[self]]: constant[ Compute the values for single valued aggregations :returns: the single aggregation value ] variable[res] assign[=] call[name[self].fetch_aggregation_results, parameter[]] if <ast.BoolOp object at 0x7da1b265ee60> begin[:] <ast.Try object at 0x7da1b265fc70> return[name[agg]]
keyword[def] identifier[get_aggs] ( identifier[self] ): literal[string] identifier[res] = identifier[self] . identifier[fetch_aggregation_results] () keyword[if] literal[string] keyword[in] identifier[res] keyword[and] literal[string] keyword[in] identifier[res] [ literal[string] ][ identifier[str] ( identifier[self] . identifier[parent_agg_counter] - literal[int] )]: keyword[try] : identifier[agg] = identifier[res] [ literal[string] ][ identifier[str] ( identifier[self] . identifier[parent_agg_counter] - literal[int] )][ literal[string] ][ literal[string] ] keyword[if] identifier[agg] == literal[string] : identifier[agg] = keyword[None] keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[elif] literal[string] keyword[in] identifier[res] keyword[and] literal[string] keyword[in] identifier[res] [ literal[string] ][ identifier[str] ( identifier[self] . identifier[parent_agg_counter] - literal[int] )]: identifier[agg] = identifier[res] [ literal[string] ][ identifier[str] ( identifier[self] . identifier[parent_agg_counter] - literal[int] )][ literal[string] ] keyword[else] : identifier[agg] = identifier[res] [ literal[string] ][ literal[string] ] keyword[return] identifier[agg]
def get_aggs(self): """ Compute the values for single valued aggregations :returns: the single aggregation value """ res = self.fetch_aggregation_results() if 'aggregations' in res and 'values' in res['aggregations'][str(self.parent_agg_counter - 1)]: try: agg = res['aggregations'][str(self.parent_agg_counter - 1)]['values']['50.0'] if agg == 'NaN': # ES returns NaN. Convert to None for matplotlib graph agg = None # depends on [control=['if'], data=['agg']] # depends on [control=['try'], data=[]] except Exception as e: raise RuntimeError('Multivalue aggregation result not supported') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif 'aggregations' in res and 'value' in res['aggregations'][str(self.parent_agg_counter - 1)]: agg = res['aggregations'][str(self.parent_agg_counter - 1)]['value'] # depends on [control=['if'], data=[]] else: agg = res['hits']['total'] return agg
def get_request(self, uuid, raw=False, multiple=False, connection_adapter=None): """Get a RPC request. :param str uuid: Rpc Identifier :param bool raw: If enabled return the frame as is, else return result as a dictionary. :param bool multiple: Are we expecting multiple frames. :param obj connection_adapter: Provide custom connection adapter. :return: """ if uuid not in self._response: return self._wait_for_request( uuid, connection_adapter or self._default_connection_adapter ) frame = self._get_response_frame(uuid) if not multiple: self.remove(uuid) result = None if raw: result = frame elif frame is not None: result = dict(frame) return result
def function[get_request, parameter[self, uuid, raw, multiple, connection_adapter]]: constant[Get a RPC request. :param str uuid: Rpc Identifier :param bool raw: If enabled return the frame as is, else return result as a dictionary. :param bool multiple: Are we expecting multiple frames. :param obj connection_adapter: Provide custom connection adapter. :return: ] if compare[name[uuid] <ast.NotIn object at 0x7da2590d7190> name[self]._response] begin[:] return[None] call[name[self]._wait_for_request, parameter[name[uuid], <ast.BoolOp object at 0x7da20c6a8b20>]] variable[frame] assign[=] call[name[self]._get_response_frame, parameter[name[uuid]]] if <ast.UnaryOp object at 0x7da20c6a9cc0> begin[:] call[name[self].remove, parameter[name[uuid]]] variable[result] assign[=] constant[None] if name[raw] begin[:] variable[result] assign[=] name[frame] return[name[result]]
keyword[def] identifier[get_request] ( identifier[self] , identifier[uuid] , identifier[raw] = keyword[False] , identifier[multiple] = keyword[False] , identifier[connection_adapter] = keyword[None] ): literal[string] keyword[if] identifier[uuid] keyword[not] keyword[in] identifier[self] . identifier[_response] : keyword[return] identifier[self] . identifier[_wait_for_request] ( identifier[uuid] , identifier[connection_adapter] keyword[or] identifier[self] . identifier[_default_connection_adapter] ) identifier[frame] = identifier[self] . identifier[_get_response_frame] ( identifier[uuid] ) keyword[if] keyword[not] identifier[multiple] : identifier[self] . identifier[remove] ( identifier[uuid] ) identifier[result] = keyword[None] keyword[if] identifier[raw] : identifier[result] = identifier[frame] keyword[elif] identifier[frame] keyword[is] keyword[not] keyword[None] : identifier[result] = identifier[dict] ( identifier[frame] ) keyword[return] identifier[result]
def get_request(self, uuid, raw=False, multiple=False, connection_adapter=None): """Get a RPC request. :param str uuid: Rpc Identifier :param bool raw: If enabled return the frame as is, else return result as a dictionary. :param bool multiple: Are we expecting multiple frames. :param obj connection_adapter: Provide custom connection adapter. :return: """ if uuid not in self._response: return # depends on [control=['if'], data=[]] self._wait_for_request(uuid, connection_adapter or self._default_connection_adapter) frame = self._get_response_frame(uuid) if not multiple: self.remove(uuid) # depends on [control=['if'], data=[]] result = None if raw: result = frame # depends on [control=['if'], data=[]] elif frame is not None: result = dict(frame) # depends on [control=['if'], data=['frame']] return result
def os_version(self, value): """The os_version property. Args: value (string). the property value. """ if value == self._defaults['ai.device.osVersion'] and 'ai.device.osVersion' in self._values: del self._values['ai.device.osVersion'] else: self._values['ai.device.osVersion'] = value
def function[os_version, parameter[self, value]]: constant[The os_version property. Args: value (string). the property value. ] if <ast.BoolOp object at 0x7da1b10694e0> begin[:] <ast.Delete object at 0x7da1b1073d30>
keyword[def] identifier[os_version] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[value] == identifier[self] . identifier[_defaults] [ literal[string] ] keyword[and] literal[string] keyword[in] identifier[self] . identifier[_values] : keyword[del] identifier[self] . identifier[_values] [ literal[string] ] keyword[else] : identifier[self] . identifier[_values] [ literal[string] ]= identifier[value]
def os_version(self, value): """The os_version property. Args: value (string). the property value. """ if value == self._defaults['ai.device.osVersion'] and 'ai.device.osVersion' in self._values: del self._values['ai.device.osVersion'] # depends on [control=['if'], data=[]] else: self._values['ai.device.osVersion'] = value
def replace_namespaced_event(self, name, namespace, body, **kwargs): # noqa: E501 """replace_namespaced_event # noqa: E501 replace the specified Event # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_event(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Event (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1Event body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1Event If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.replace_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
def function[replace_namespaced_event, parameter[self, name, namespace, body]]: constant[replace_namespaced_event # noqa: E501 replace the specified Event # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_event(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Event (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1beta1Event body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1beta1Event If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].replace_namespaced_event_with_http_info, parameter[name[name], name[namespace], name[body]]]]
keyword[def] identifier[replace_namespaced_event] ( identifier[self] , identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[replace_namespaced_event_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[replace_namespaced_event_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ) keyword[return] identifier[data]
def replace_namespaced_event(self, name, namespace, body, **kwargs): # noqa: E501 "replace_namespaced_event # noqa: E501\n\n replace the specified Event # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.replace_namespaced_event(name, namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the Event (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param V1beta1Event body: (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1beta1Event\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.replace_namespaced_event_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
def _set_ldp_protocol_errors_instance_since_clear(self, v, load=False): """ Setter method for ldp_protocol_errors_instance_since_clear, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_errors_instance_since_clear (container) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_protocol_errors_instance_since_clear is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_protocol_errors_instance_since_clear() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=ldp_protocol_errors_instance_since_clear.ldp_protocol_errors_instance_since_clear, is_container='container', presence=False, yang_name="ldp-protocol-errors-instance-since-clear", rest_name="ldp-protocol-errors-instance-since-clear", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-errors-instance-ldp-protocol-errors-instance-since-clear-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ldp_protocol_errors_instance_since_clear must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ldp_protocol_errors_instance_since_clear.ldp_protocol_errors_instance_since_clear, is_container='container', presence=False, yang_name="ldp-protocol-errors-instance-since-clear", rest_name="ldp-protocol-errors-instance-since-clear", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-errors-instance-ldp-protocol-errors-instance-since-clear-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', 
yang_type='container', is_config=False)""", }) self.__ldp_protocol_errors_instance_since_clear = t if hasattr(self, '_set'): self._set()
def function[_set_ldp_protocol_errors_instance_since_clear, parameter[self, v, load]]: constant[ Setter method for ldp_protocol_errors_instance_since_clear, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_errors_instance_since_clear (container) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_protocol_errors_instance_since_clear is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_protocol_errors_instance_since_clear() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18fe92320> name[self].__ldp_protocol_errors_instance_since_clear assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_ldp_protocol_errors_instance_since_clear] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[ldp_protocol_errors_instance_since_clear] . identifier[ldp_protocol_errors_instance_since_clear] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[False] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__ldp_protocol_errors_instance_since_clear] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_ldp_protocol_errors_instance_since_clear(self, v, load=False): """ Setter method for ldp_protocol_errors_instance_since_clear, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_errors_instance_since_clear (container) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_protocol_errors_instance_since_clear is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_protocol_errors_instance_since_clear() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=ldp_protocol_errors_instance_since_clear.ldp_protocol_errors_instance_since_clear, is_container='container', presence=False, yang_name='ldp-protocol-errors-instance-since-clear', rest_name='ldp-protocol-errors-instance-since-clear', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-protocol-errors-instance-ldp-protocol-errors-instance-since-clear-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'ldp_protocol_errors_instance_since_clear must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=ldp_protocol_errors_instance_since_clear.ldp_protocol_errors_instance_since_clear, is_container=\'container\', presence=False, yang_name="ldp-protocol-errors-instance-since-clear", rest_name="ldp-protocol-errors-instance-since-clear", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'mpls-ldp-protocol-errors-instance-ldp-protocol-errors-instance-since-clear-1\'}}, 
namespace=\'urn:brocade.com:mgmt:brocade-mpls-operational\', defining_module=\'brocade-mpls-operational\', yang_type=\'container\', is_config=False)'}) # depends on [control=['except'], data=[]] self.__ldp_protocol_errors_instance_since_clear = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def leave(self): """Leave the room. Returns: boolean: Leaving the room was successful. """ try: self.client.api.leave_room(self.room_id) del self.client.rooms[self.room_id] return True except MatrixRequestError: return False
def function[leave, parameter[self]]: constant[Leave the room. Returns: boolean: Leaving the room was successful. ] <ast.Try object at 0x7da1b16c1150>
keyword[def] identifier[leave] ( identifier[self] ): literal[string] keyword[try] : identifier[self] . identifier[client] . identifier[api] . identifier[leave_room] ( identifier[self] . identifier[room_id] ) keyword[del] identifier[self] . identifier[client] . identifier[rooms] [ identifier[self] . identifier[room_id] ] keyword[return] keyword[True] keyword[except] identifier[MatrixRequestError] : keyword[return] keyword[False]
def leave(self): """Leave the room. Returns: boolean: Leaving the room was successful. """ try: self.client.api.leave_room(self.room_id) del self.client.rooms[self.room_id] return True # depends on [control=['try'], data=[]] except MatrixRequestError: return False # depends on [control=['except'], data=[]]
def getheader (self, name, default=None): """Get decoded header value. @return: decoded header value or default of not found @rtype: unicode or type of default """ value = self.headers.get(name) if value is None: return default return unicode_safe(value, encoding=HEADER_ENCODING)
def function[getheader, parameter[self, name, default]]: constant[Get decoded header value. @return: decoded header value or default of not found @rtype: unicode or type of default ] variable[value] assign[=] call[name[self].headers.get, parameter[name[name]]] if compare[name[value] is constant[None]] begin[:] return[name[default]] return[call[name[unicode_safe], parameter[name[value]]]]
keyword[def] identifier[getheader] ( identifier[self] , identifier[name] , identifier[default] = keyword[None] ): literal[string] identifier[value] = identifier[self] . identifier[headers] . identifier[get] ( identifier[name] ) keyword[if] identifier[value] keyword[is] keyword[None] : keyword[return] identifier[default] keyword[return] identifier[unicode_safe] ( identifier[value] , identifier[encoding] = identifier[HEADER_ENCODING] )
def getheader(self, name, default=None): """Get decoded header value. @return: decoded header value or default of not found @rtype: unicode or type of default """ value = self.headers.get(name) if value is None: return default # depends on [control=['if'], data=[]] return unicode_safe(value, encoding=HEADER_ENCODING)
def unit_overlap(evaluated_model, reference_model): """ Computes unit overlap of two text documents. Documents has to be represented as TF models of non-empty document. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same. """ if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)): raise ValueError( "Arguments has to be instances of 'sumy.models.TfDocumentModel'") terms1 = frozenset(evaluated_model.terms) terms2 = frozenset(reference_model.terms) if not terms1 and not terms2: raise ValueError( "Documents can't be empty. Please pass the valid documents.") common_terms_count = len(terms1 & terms2) return common_terms_count / (len(terms1) + len(terms2) - common_terms_count)
def function[unit_overlap, parameter[evaluated_model, reference_model]]: constant[ Computes unit overlap of two text documents. Documents has to be represented as TF models of non-empty document. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same. ] if <ast.UnaryOp object at 0x7da18fe92c20> begin[:] <ast.Raise object at 0x7da18fe90e50> variable[terms1] assign[=] call[name[frozenset], parameter[name[evaluated_model].terms]] variable[terms2] assign[=] call[name[frozenset], parameter[name[reference_model].terms]] if <ast.BoolOp object at 0x7da18fe93fa0> begin[:] <ast.Raise object at 0x7da18fe91fc0> variable[common_terms_count] assign[=] call[name[len], parameter[binary_operation[name[terms1] <ast.BitAnd object at 0x7da2590d6b60> name[terms2]]]] return[binary_operation[name[common_terms_count] / binary_operation[binary_operation[call[name[len], parameter[name[terms1]]] + call[name[len], parameter[name[terms2]]]] - name[common_terms_count]]]]
keyword[def] identifier[unit_overlap] ( identifier[evaluated_model] , identifier[reference_model] ): literal[string] keyword[if] keyword[not] ( identifier[isinstance] ( identifier[evaluated_model] , identifier[TfModel] ) keyword[and] identifier[isinstance] ( identifier[reference_model] , identifier[TfModel] )): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[terms1] = identifier[frozenset] ( identifier[evaluated_model] . identifier[terms] ) identifier[terms2] = identifier[frozenset] ( identifier[reference_model] . identifier[terms] ) keyword[if] keyword[not] identifier[terms1] keyword[and] keyword[not] identifier[terms2] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[common_terms_count] = identifier[len] ( identifier[terms1] & identifier[terms2] ) keyword[return] identifier[common_terms_count] /( identifier[len] ( identifier[terms1] )+ identifier[len] ( identifier[terms2] )- identifier[common_terms_count] )
def unit_overlap(evaluated_model, reference_model): """ Computes unit overlap of two text documents. Documents has to be represented as TF models of non-empty document. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same. """ if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)): raise ValueError("Arguments has to be instances of 'sumy.models.TfDocumentModel'") # depends on [control=['if'], data=[]] terms1 = frozenset(evaluated_model.terms) terms2 = frozenset(reference_model.terms) if not terms1 and (not terms2): raise ValueError("Documents can't be empty. Please pass the valid documents.") # depends on [control=['if'], data=[]] common_terms_count = len(terms1 & terms2) return common_terms_count / (len(terms1) + len(terms2) - common_terms_count)
def spit(path, txt, encoding='UTF-8', append=False): """ Write a unicode string `txt` to file `path`. By default encoded as UTF-8 and truncates the file prior to writing Parameters ---------- path : str File path to file on disk txt : unicode Text content to write to file encoding : str, default `UTF-8`, optional Encoding of the file append : Boolean, default False Append to file instead of truncating before writing Returns ------- The txt written to the file as a unicode string """ mode = 'a' if append else 'w' with io.open(path, mode, encoding=encoding) as f: f.write(txt) return txt
def function[spit, parameter[path, txt, encoding, append]]: constant[ Write a unicode string `txt` to file `path`. By default encoded as UTF-8 and truncates the file prior to writing Parameters ---------- path : str File path to file on disk txt : unicode Text content to write to file encoding : str, default `UTF-8`, optional Encoding of the file append : Boolean, default False Append to file instead of truncating before writing Returns ------- The txt written to the file as a unicode string ] variable[mode] assign[=] <ast.IfExp object at 0x7da18eb55330> with call[name[io].open, parameter[name[path], name[mode]]] begin[:] call[name[f].write, parameter[name[txt]]] return[name[txt]]
keyword[def] identifier[spit] ( identifier[path] , identifier[txt] , identifier[encoding] = literal[string] , identifier[append] = keyword[False] ): literal[string] identifier[mode] = literal[string] keyword[if] identifier[append] keyword[else] literal[string] keyword[with] identifier[io] . identifier[open] ( identifier[path] , identifier[mode] , identifier[encoding] = identifier[encoding] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[txt] ) keyword[return] identifier[txt]
def spit(path, txt, encoding='UTF-8', append=False): """ Write a unicode string `txt` to file `path`. By default encoded as UTF-8 and truncates the file prior to writing Parameters ---------- path : str File path to file on disk txt : unicode Text content to write to file encoding : str, default `UTF-8`, optional Encoding of the file append : Boolean, default False Append to file instead of truncating before writing Returns ------- The txt written to the file as a unicode string """ mode = 'a' if append else 'w' with io.open(path, mode, encoding=encoding) as f: f.write(txt) return txt # depends on [control=['with'], data=['f']]
def plot_groups_unplaced(self, fout_dir=".", **kws_pltargs): """Plot GO DAGs for groups of user GOs which are not in a section.""" hdrgos = self.grprobj.get_hdrgos_unplaced() pltargs = PltGroupedGosArgs(self.grprobj, fout_dir=fout_dir, **kws_pltargs) return self._plot_groups_hdrgos(hdrgos, pltargs)
def function[plot_groups_unplaced, parameter[self, fout_dir]]: constant[Plot GO DAGs for groups of user GOs which are not in a section.] variable[hdrgos] assign[=] call[name[self].grprobj.get_hdrgos_unplaced, parameter[]] variable[pltargs] assign[=] call[name[PltGroupedGosArgs], parameter[name[self].grprobj]] return[call[name[self]._plot_groups_hdrgos, parameter[name[hdrgos], name[pltargs]]]]
keyword[def] identifier[plot_groups_unplaced] ( identifier[self] , identifier[fout_dir] = literal[string] ,** identifier[kws_pltargs] ): literal[string] identifier[hdrgos] = identifier[self] . identifier[grprobj] . identifier[get_hdrgos_unplaced] () identifier[pltargs] = identifier[PltGroupedGosArgs] ( identifier[self] . identifier[grprobj] , identifier[fout_dir] = identifier[fout_dir] ,** identifier[kws_pltargs] ) keyword[return] identifier[self] . identifier[_plot_groups_hdrgos] ( identifier[hdrgos] , identifier[pltargs] )
def plot_groups_unplaced(self, fout_dir='.', **kws_pltargs): """Plot GO DAGs for groups of user GOs which are not in a section.""" hdrgos = self.grprobj.get_hdrgos_unplaced() pltargs = PltGroupedGosArgs(self.grprobj, fout_dir=fout_dir, **kws_pltargs) return self._plot_groups_hdrgos(hdrgos, pltargs)
def copyChar(len, out, val): """append the char value in the array """ ret = libxml2mod.xmlCopyChar(len, out, val) return ret
def function[copyChar, parameter[len, out, val]]: constant[append the char value in the array ] variable[ret] assign[=] call[name[libxml2mod].xmlCopyChar, parameter[name[len], name[out], name[val]]] return[name[ret]]
keyword[def] identifier[copyChar] ( identifier[len] , identifier[out] , identifier[val] ): literal[string] identifier[ret] = identifier[libxml2mod] . identifier[xmlCopyChar] ( identifier[len] , identifier[out] , identifier[val] ) keyword[return] identifier[ret]
def copyChar(len, out, val): """append the char value in the array """ ret = libxml2mod.xmlCopyChar(len, out, val) return ret
async def _keepalive(self): ''' Keep our connect to server alive forever, with some pointless traffic. ''' while self.protocol: vers = await self.RPC('server.version') logger.debug("Server version: " + repr(vers)) # Five minutes isn't really enough anymore; looks like # servers are killing 2-minute old idle connections now. # But decreasing interval this seems rude. await asyncio.sleep(600)
<ast.AsyncFunctionDef object at 0x7da1b1394520>
keyword[async] keyword[def] identifier[_keepalive] ( identifier[self] ): literal[string] keyword[while] identifier[self] . identifier[protocol] : identifier[vers] = keyword[await] identifier[self] . identifier[RPC] ( literal[string] ) identifier[logger] . identifier[debug] ( literal[string] + identifier[repr] ( identifier[vers] )) keyword[await] identifier[asyncio] . identifier[sleep] ( literal[int] )
async def _keepalive(self): """ Keep our connect to server alive forever, with some pointless traffic. """ while self.protocol: vers = await self.RPC('server.version') logger.debug('Server version: ' + repr(vers)) # Five minutes isn't really enough anymore; looks like # servers are killing 2-minute old idle connections now. # But decreasing interval this seems rude. await asyncio.sleep(600) # depends on [control=['while'], data=[]]
def saved_searches_factory_helper(splunk_connection_info): """Return a valid splunklib.client.SavedSearches object kwargs: - see splunklib.client.connect() """ if not ISplunkConnectionInfo.providedBy(splunk_connection_info): DoesNotImplement('argument did not provide expected interface') service = connect(**splunk_connection_info) saved_searches = service.saved_searches for s in saved_searches: logger.debug("Found Splunk saved search with name %s" % s.name) return saved_searches
def function[saved_searches_factory_helper, parameter[splunk_connection_info]]: constant[Return a valid splunklib.client.SavedSearches object kwargs: - see splunklib.client.connect() ] if <ast.UnaryOp object at 0x7da207f99090> begin[:] call[name[DoesNotImplement], parameter[constant[argument did not provide expected interface]]] variable[service] assign[=] call[name[connect], parameter[]] variable[saved_searches] assign[=] name[service].saved_searches for taget[name[s]] in starred[name[saved_searches]] begin[:] call[name[logger].debug, parameter[binary_operation[constant[Found Splunk saved search with name %s] <ast.Mod object at 0x7da2590d6920> name[s].name]]] return[name[saved_searches]]
keyword[def] identifier[saved_searches_factory_helper] ( identifier[splunk_connection_info] ): literal[string] keyword[if] keyword[not] identifier[ISplunkConnectionInfo] . identifier[providedBy] ( identifier[splunk_connection_info] ): identifier[DoesNotImplement] ( literal[string] ) identifier[service] = identifier[connect] (** identifier[splunk_connection_info] ) identifier[saved_searches] = identifier[service] . identifier[saved_searches] keyword[for] identifier[s] keyword[in] identifier[saved_searches] : identifier[logger] . identifier[debug] ( literal[string] % identifier[s] . identifier[name] ) keyword[return] identifier[saved_searches]
def saved_searches_factory_helper(splunk_connection_info): """Return a valid splunklib.client.SavedSearches object kwargs: - see splunklib.client.connect() """ if not ISplunkConnectionInfo.providedBy(splunk_connection_info): DoesNotImplement('argument did not provide expected interface') # depends on [control=['if'], data=[]] service = connect(**splunk_connection_info) saved_searches = service.saved_searches for s in saved_searches: logger.debug('Found Splunk saved search with name %s' % s.name) # depends on [control=['for'], data=['s']] return saved_searches
def addend_ids(self): """tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded. """ return tuple( arg for arg in self._subtotal_dict.get("args", []) if arg in self.valid_elements.element_ids )
def function[addend_ids, parameter[self]]: constant[tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded. ] return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b1b7c160>]]]
keyword[def] identifier[addend_ids] ( identifier[self] ): literal[string] keyword[return] identifier[tuple] ( identifier[arg] keyword[for] identifier[arg] keyword[in] identifier[self] . identifier[_subtotal_dict] . identifier[get] ( literal[string] ,[]) keyword[if] identifier[arg] keyword[in] identifier[self] . identifier[valid_elements] . identifier[element_ids] )
def addend_ids(self): """tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded. """ return tuple((arg for arg in self._subtotal_dict.get('args', []) if arg in self.valid_elements.element_ids))
def formatdate(timeval=None, localtime=False, usegmt=False): """Returns a date string as specified by RFC 2822, e.g.: Fri, 09 Nov 2001 01:08:47 -0000 Optional timeval if given is a floating point time value as accepted by gmtime() and localtime(), otherwise the current time is used. Optional localtime is a flag that when True, interprets timeval, and returns a date relative to the local timezone instead of UTC, properly taking daylight savings time into account. Optional argument usegmt means that the timezone is written out as an ascii string, not numeric one (so "GMT" instead of "+0000"). This is needed for HTTP, and is only used when localtime==False. """ # Note: we cannot use strftime() because that honors the locale and RFC # 2822 requires that day and month names be the English abbreviations. if timeval is None: timeval = time.time() if localtime: now = time.localtime(timeval) # Calculate timezone offset, based on whether the local zone has # daylight savings time, and whether DST is in effect. if time.daylight and now[-1]: offset = time.altzone else: offset = time.timezone hours, minutes = divmod(abs(offset), 3600) # Remember offset is in seconds west of UTC, but the timezone is in # minutes east of UTC, so the signs differ. if offset > 0: sign = '-' else: sign = '+' zone = '%s%02d%02d' % (sign, hours, minutes // 60) else: now = time.gmtime(timeval) # Timezone offset is always -0000 if usegmt: zone = 'GMT' else: zone = '-0000' return _format_timetuple_and_zone(now, zone)
def function[formatdate, parameter[timeval, localtime, usegmt]]: constant[Returns a date string as specified by RFC 2822, e.g.: Fri, 09 Nov 2001 01:08:47 -0000 Optional timeval if given is a floating point time value as accepted by gmtime() and localtime(), otherwise the current time is used. Optional localtime is a flag that when True, interprets timeval, and returns a date relative to the local timezone instead of UTC, properly taking daylight savings time into account. Optional argument usegmt means that the timezone is written out as an ascii string, not numeric one (so "GMT" instead of "+0000"). This is needed for HTTP, and is only used when localtime==False. ] if compare[name[timeval] is constant[None]] begin[:] variable[timeval] assign[=] call[name[time].time, parameter[]] if name[localtime] begin[:] variable[now] assign[=] call[name[time].localtime, parameter[name[timeval]]] if <ast.BoolOp object at 0x7da18dc99840> begin[:] variable[offset] assign[=] name[time].altzone <ast.Tuple object at 0x7da18dc99a20> assign[=] call[name[divmod], parameter[call[name[abs], parameter[name[offset]]], constant[3600]]] if compare[name[offset] greater[>] constant[0]] begin[:] variable[sign] assign[=] constant[-] variable[zone] assign[=] binary_operation[constant[%s%02d%02d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc98af0>, <ast.Name object at 0x7da18dc99d50>, <ast.BinOp object at 0x7da18dc9add0>]]] return[call[name[_format_timetuple_and_zone], parameter[name[now], name[zone]]]]
keyword[def] identifier[formatdate] ( identifier[timeval] = keyword[None] , identifier[localtime] = keyword[False] , identifier[usegmt] = keyword[False] ): literal[string] keyword[if] identifier[timeval] keyword[is] keyword[None] : identifier[timeval] = identifier[time] . identifier[time] () keyword[if] identifier[localtime] : identifier[now] = identifier[time] . identifier[localtime] ( identifier[timeval] ) keyword[if] identifier[time] . identifier[daylight] keyword[and] identifier[now] [- literal[int] ]: identifier[offset] = identifier[time] . identifier[altzone] keyword[else] : identifier[offset] = identifier[time] . identifier[timezone] identifier[hours] , identifier[minutes] = identifier[divmod] ( identifier[abs] ( identifier[offset] ), literal[int] ) keyword[if] identifier[offset] > literal[int] : identifier[sign] = literal[string] keyword[else] : identifier[sign] = literal[string] identifier[zone] = literal[string] %( identifier[sign] , identifier[hours] , identifier[minutes] // literal[int] ) keyword[else] : identifier[now] = identifier[time] . identifier[gmtime] ( identifier[timeval] ) keyword[if] identifier[usegmt] : identifier[zone] = literal[string] keyword[else] : identifier[zone] = literal[string] keyword[return] identifier[_format_timetuple_and_zone] ( identifier[now] , identifier[zone] )
def formatdate(timeval=None, localtime=False, usegmt=False): """Returns a date string as specified by RFC 2822, e.g.: Fri, 09 Nov 2001 01:08:47 -0000 Optional timeval if given is a floating point time value as accepted by gmtime() and localtime(), otherwise the current time is used. Optional localtime is a flag that when True, interprets timeval, and returns a date relative to the local timezone instead of UTC, properly taking daylight savings time into account. Optional argument usegmt means that the timezone is written out as an ascii string, not numeric one (so "GMT" instead of "+0000"). This is needed for HTTP, and is only used when localtime==False. """ # Note: we cannot use strftime() because that honors the locale and RFC # 2822 requires that day and month names be the English abbreviations. if timeval is None: timeval = time.time() # depends on [control=['if'], data=['timeval']] if localtime: now = time.localtime(timeval) # Calculate timezone offset, based on whether the local zone has # daylight savings time, and whether DST is in effect. if time.daylight and now[-1]: offset = time.altzone # depends on [control=['if'], data=[]] else: offset = time.timezone (hours, minutes) = divmod(abs(offset), 3600) # Remember offset is in seconds west of UTC, but the timezone is in # minutes east of UTC, so the signs differ. if offset > 0: sign = '-' # depends on [control=['if'], data=[]] else: sign = '+' zone = '%s%02d%02d' % (sign, hours, minutes // 60) # depends on [control=['if'], data=[]] else: now = time.gmtime(timeval) # Timezone offset is always -0000 if usegmt: zone = 'GMT' # depends on [control=['if'], data=[]] else: zone = '-0000' return _format_timetuple_and_zone(now, zone)
async def parse_line(self): """ :py:func:`asyncio.coroutine` Parsing server response line. :return: (code, line) :rtype: (:py:class:`aioftp.Code`, :py:class:`str`) :raises ConnectionResetError: if received data is empty (this means, that connection is closed) :raises asyncio.TimeoutError: if there where no data for `timeout` period """ line = await self.stream.readline() if not line: self.stream.close() raise ConnectionResetError s = line.decode(encoding=self.encoding).rstrip() logger.info(s) return Code(s[:3]), s[3:]
<ast.AsyncFunctionDef object at 0x7da1b00b6650>
keyword[async] keyword[def] identifier[parse_line] ( identifier[self] ): literal[string] identifier[line] = keyword[await] identifier[self] . identifier[stream] . identifier[readline] () keyword[if] keyword[not] identifier[line] : identifier[self] . identifier[stream] . identifier[close] () keyword[raise] identifier[ConnectionResetError] identifier[s] = identifier[line] . identifier[decode] ( identifier[encoding] = identifier[self] . identifier[encoding] ). identifier[rstrip] () identifier[logger] . identifier[info] ( identifier[s] ) keyword[return] identifier[Code] ( identifier[s] [: literal[int] ]), identifier[s] [ literal[int] :]
async def parse_line(self): """ :py:func:`asyncio.coroutine` Parsing server response line. :return: (code, line) :rtype: (:py:class:`aioftp.Code`, :py:class:`str`) :raises ConnectionResetError: if received data is empty (this means, that connection is closed) :raises asyncio.TimeoutError: if there where no data for `timeout` period """ line = await self.stream.readline() if not line: self.stream.close() raise ConnectionResetError # depends on [control=['if'], data=[]] s = line.decode(encoding=self.encoding).rstrip() logger.info(s) return (Code(s[:3]), s[3:])
def parse_psqs(psqs_results_file): """Parse a PSQS result file and returns a Pandas DataFrame of the results Args: psqs_results_file: Path to psqs results file Returns: Pandas DataFrame: Summary of PSQS results """ # TODO: generalize column names for all results, save as dict instead psqs_results = pd.read_csv(psqs_results_file, sep='\t', header=None) psqs_results['pdb_file'] = psqs_results[0].apply(lambda x: str(x).strip('./').strip('.pdb')) psqs_results = psqs_results.rename(columns = {1:'psqs_local', 2:'psqs_burial', 3:'psqs_contact', 4:'psqs_total'}).drop(0, axis=1) psqs_results['u_pdb'] = psqs_results['pdb_file'].apply(lambda x: x.upper() if len(x)==4 else np.nan) psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply(lambda x: x.split('_model1')[0] if len(x)>4 else np.nan) psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)] return psqs_results
def function[parse_psqs, parameter[psqs_results_file]]: constant[Parse a PSQS result file and returns a Pandas DataFrame of the results Args: psqs_results_file: Path to psqs results file Returns: Pandas DataFrame: Summary of PSQS results ] variable[psqs_results] assign[=] call[name[pd].read_csv, parameter[name[psqs_results_file]]] call[name[psqs_results]][constant[pdb_file]] assign[=] call[call[name[psqs_results]][constant[0]].apply, parameter[<ast.Lambda object at 0x7da1b0e6cee0>]] variable[psqs_results] assign[=] call[call[name[psqs_results].rename, parameter[]].drop, parameter[constant[0]]] call[name[psqs_results]][constant[u_pdb]] assign[=] call[call[name[psqs_results]][constant[pdb_file]].apply, parameter[<ast.Lambda object at 0x7da1b0e6d6c0>]] call[name[psqs_results]][constant[i_entry_name]] assign[=] call[call[name[psqs_results]][constant[pdb_file]].apply, parameter[<ast.Lambda object at 0x7da20c6c4820>]] variable[psqs_results] assign[=] call[name[psqs_results]][call[name[pd].notnull, parameter[name[psqs_results].psqs_total]]] return[name[psqs_results]]
keyword[def] identifier[parse_psqs] ( identifier[psqs_results_file] ): literal[string] identifier[psqs_results] = identifier[pd] . identifier[read_csv] ( identifier[psqs_results_file] , identifier[sep] = literal[string] , identifier[header] = keyword[None] ) identifier[psqs_results] [ literal[string] ]= identifier[psqs_results] [ literal[int] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[str] ( identifier[x] ). identifier[strip] ( literal[string] ). identifier[strip] ( literal[string] )) identifier[psqs_results] = identifier[psqs_results] . identifier[rename] ( identifier[columns] ={ literal[int] : literal[string] , literal[int] : literal[string] , literal[int] : literal[string] , literal[int] : literal[string] }). identifier[drop] ( literal[int] , identifier[axis] = literal[int] ) identifier[psqs_results] [ literal[string] ]= identifier[psqs_results] [ literal[string] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[upper] () keyword[if] identifier[len] ( identifier[x] )== literal[int] keyword[else] identifier[np] . identifier[nan] ) identifier[psqs_results] [ literal[string] ]= identifier[psqs_results] [ literal[string] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[split] ( literal[string] )[ literal[int] ] keyword[if] identifier[len] ( identifier[x] )> literal[int] keyword[else] identifier[np] . identifier[nan] ) identifier[psqs_results] = identifier[psqs_results] [ identifier[pd] . identifier[notnull] ( identifier[psqs_results] . identifier[psqs_total] )] keyword[return] identifier[psqs_results]
def parse_psqs(psqs_results_file): """Parse a PSQS result file and returns a Pandas DataFrame of the results Args: psqs_results_file: Path to psqs results file Returns: Pandas DataFrame: Summary of PSQS results """ # TODO: generalize column names for all results, save as dict instead psqs_results = pd.read_csv(psqs_results_file, sep='\t', header=None) psqs_results['pdb_file'] = psqs_results[0].apply(lambda x: str(x).strip('./').strip('.pdb')) psqs_results = psqs_results.rename(columns={1: 'psqs_local', 2: 'psqs_burial', 3: 'psqs_contact', 4: 'psqs_total'}).drop(0, axis=1) psqs_results['u_pdb'] = psqs_results['pdb_file'].apply(lambda x: x.upper() if len(x) == 4 else np.nan) psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply(lambda x: x.split('_model1')[0] if len(x) > 4 else np.nan) psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)] return psqs_results
def store_policy(self, pol_id, policy): """Store the policy. Policy is maintained as a dictionary of pol ID. """ if pol_id not in self.policies: self.policies[pol_id] = policy self.policy_cnt += 1
def function[store_policy, parameter[self, pol_id, policy]]: constant[Store the policy. Policy is maintained as a dictionary of pol ID. ] if compare[name[pol_id] <ast.NotIn object at 0x7da2590d7190> name[self].policies] begin[:] call[name[self].policies][name[pol_id]] assign[=] name[policy] <ast.AugAssign object at 0x7da1b1ba9540>
keyword[def] identifier[store_policy] ( identifier[self] , identifier[pol_id] , identifier[policy] ): literal[string] keyword[if] identifier[pol_id] keyword[not] keyword[in] identifier[self] . identifier[policies] : identifier[self] . identifier[policies] [ identifier[pol_id] ]= identifier[policy] identifier[self] . identifier[policy_cnt] += literal[int]
def store_policy(self, pol_id, policy): """Store the policy. Policy is maintained as a dictionary of pol ID. """ if pol_id not in self.policies: self.policies[pol_id] = policy self.policy_cnt += 1 # depends on [control=['if'], data=['pol_id']]
def get_quadrant_from_class(node_class): """Return the ID of the segment of the plane corresponding to a class.""" up, edge_type, _ = node_class if up == 0: return 0 if random.random() < 0.5 else 7 mappings = {(-1, 'modification'): 1, (-1, 'amount'): 2, (-1, 'activity'): 3, (1, 'activity'): 4, (1, 'amount'): 5, (1, 'modification'): 6} return mappings[(up, edge_type)]
def function[get_quadrant_from_class, parameter[node_class]]: constant[Return the ID of the segment of the plane corresponding to a class.] <ast.Tuple object at 0x7da207f00b50> assign[=] name[node_class] if compare[name[up] equal[==] constant[0]] begin[:] return[<ast.IfExp object at 0x7da207f00eb0>] variable[mappings] assign[=] dictionary[[<ast.Tuple object at 0x7da18fe91e70>, <ast.Tuple object at 0x7da18fe92fb0>, <ast.Tuple object at 0x7da18fe92320>, <ast.Tuple object at 0x7da18fe93d90>, <ast.Tuple object at 0x7da18fe92b00>, <ast.Tuple object at 0x7da18fe90eb0>], [<ast.Constant object at 0x7da18fe90370>, <ast.Constant object at 0x7da18fe93730>, <ast.Constant object at 0x7da18fe903d0>, <ast.Constant object at 0x7da18fe91750>, <ast.Constant object at 0x7da18fe91060>, <ast.Constant object at 0x7da18fe93250>]] return[call[name[mappings]][tuple[[<ast.Name object at 0x7da18fe92380>, <ast.Name object at 0x7da18fe90970>]]]]
keyword[def] identifier[get_quadrant_from_class] ( identifier[node_class] ): literal[string] identifier[up] , identifier[edge_type] , identifier[_] = identifier[node_class] keyword[if] identifier[up] == literal[int] : keyword[return] literal[int] keyword[if] identifier[random] . identifier[random] ()< literal[int] keyword[else] literal[int] identifier[mappings] ={(- literal[int] , literal[string] ): literal[int] , (- literal[int] , literal[string] ): literal[int] , (- literal[int] , literal[string] ): literal[int] , ( literal[int] , literal[string] ): literal[int] , ( literal[int] , literal[string] ): literal[int] , ( literal[int] , literal[string] ): literal[int] } keyword[return] identifier[mappings] [( identifier[up] , identifier[edge_type] )]
def get_quadrant_from_class(node_class): """Return the ID of the segment of the plane corresponding to a class.""" (up, edge_type, _) = node_class if up == 0: return 0 if random.random() < 0.5 else 7 # depends on [control=['if'], data=[]] mappings = {(-1, 'modification'): 1, (-1, 'amount'): 2, (-1, 'activity'): 3, (1, 'activity'): 4, (1, 'amount'): 5, (1, 'modification'): 6} return mappings[up, edge_type]
def relevel(self, y): """ Reorder levels of an H2O factor for one single column of a H2O frame The levels of a factor are reordered such that the reference level is at level 0, all remaining levels are moved down as needed. :param str y: The reference level :returns: New reordered factor column """ return H2OFrame._expr(expr=ExprNode("relevel", self, quote(y)))
def function[relevel, parameter[self, y]]: constant[ Reorder levels of an H2O factor for one single column of a H2O frame The levels of a factor are reordered such that the reference level is at level 0, all remaining levels are moved down as needed. :param str y: The reference level :returns: New reordered factor column ] return[call[name[H2OFrame]._expr, parameter[]]]
keyword[def] identifier[relevel] ( identifier[self] , identifier[y] ): literal[string] keyword[return] identifier[H2OFrame] . identifier[_expr] ( identifier[expr] = identifier[ExprNode] ( literal[string] , identifier[self] , identifier[quote] ( identifier[y] )))
def relevel(self, y): """ Reorder levels of an H2O factor for one single column of a H2O frame The levels of a factor are reordered such that the reference level is at level 0, all remaining levels are moved down as needed. :param str y: The reference level :returns: New reordered factor column """ return H2OFrame._expr(expr=ExprNode('relevel', self, quote(y)))
def interpolate_nearest(self, lons, lats, data): """ Interpolate using nearest-neighbour approximation Returns the same as interpolate(lons,lats,data,order=0) """ return self.interpolate(lons, lats, data, order=0)
def function[interpolate_nearest, parameter[self, lons, lats, data]]: constant[ Interpolate using nearest-neighbour approximation Returns the same as interpolate(lons,lats,data,order=0) ] return[call[name[self].interpolate, parameter[name[lons], name[lats], name[data]]]]
keyword[def] identifier[interpolate_nearest] ( identifier[self] , identifier[lons] , identifier[lats] , identifier[data] ): literal[string] keyword[return] identifier[self] . identifier[interpolate] ( identifier[lons] , identifier[lats] , identifier[data] , identifier[order] = literal[int] )
def interpolate_nearest(self, lons, lats, data): """ Interpolate using nearest-neighbour approximation Returns the same as interpolate(lons,lats,data,order=0) """ return self.interpolate(lons, lats, data, order=0)
def guess_language(file_name, local_file): """Guess lexer and language for a file. Returns a tuple of (language_str, lexer_obj). """ lexer = None language = get_language_from_extension(file_name) if language: lexer = get_lexer(language) else: lexer = smart_guess_lexer(file_name, local_file) if lexer: language = u(lexer.name) return language, lexer
def function[guess_language, parameter[file_name, local_file]]: constant[Guess lexer and language for a file. Returns a tuple of (language_str, lexer_obj). ] variable[lexer] assign[=] constant[None] variable[language] assign[=] call[name[get_language_from_extension], parameter[name[file_name]]] if name[language] begin[:] variable[lexer] assign[=] call[name[get_lexer], parameter[name[language]]] return[tuple[[<ast.Name object at 0x7da204346200>, <ast.Name object at 0x7da204347340>]]]
keyword[def] identifier[guess_language] ( identifier[file_name] , identifier[local_file] ): literal[string] identifier[lexer] = keyword[None] identifier[language] = identifier[get_language_from_extension] ( identifier[file_name] ) keyword[if] identifier[language] : identifier[lexer] = identifier[get_lexer] ( identifier[language] ) keyword[else] : identifier[lexer] = identifier[smart_guess_lexer] ( identifier[file_name] , identifier[local_file] ) keyword[if] identifier[lexer] : identifier[language] = identifier[u] ( identifier[lexer] . identifier[name] ) keyword[return] identifier[language] , identifier[lexer]
def guess_language(file_name, local_file): """Guess lexer and language for a file. Returns a tuple of (language_str, lexer_obj). """ lexer = None language = get_language_from_extension(file_name) if language: lexer = get_lexer(language) # depends on [control=['if'], data=[]] else: lexer = smart_guess_lexer(file_name, local_file) if lexer: language = u(lexer.name) # depends on [control=['if'], data=[]] return (language, lexer)
def editor(self): """ Returns the editor to invoke. It returns a list with the command in the first position and its arguments in the remainder. """ result = 'vi' if 'TOPYDO_EDITOR' in os.environ and os.environ['TOPYDO_EDITOR']: result = os.environ['TOPYDO_EDITOR'] else: try: result = str(self.cp.get('edit', 'editor')) except configparser.NoOptionError: if 'EDITOR' in os.environ and os.environ['EDITOR']: result = os.environ['EDITOR'] return shlex.split(result)
def function[editor, parameter[self]]: constant[ Returns the editor to invoke. It returns a list with the command in the first position and its arguments in the remainder. ] variable[result] assign[=] constant[vi] if <ast.BoolOp object at 0x7da20e963580> begin[:] variable[result] assign[=] call[name[os].environ][constant[TOPYDO_EDITOR]] return[call[name[shlex].split, parameter[name[result]]]]
keyword[def] identifier[editor] ( identifier[self] ): literal[string] identifier[result] = literal[string] keyword[if] literal[string] keyword[in] identifier[os] . identifier[environ] keyword[and] identifier[os] . identifier[environ] [ literal[string] ]: identifier[result] = identifier[os] . identifier[environ] [ literal[string] ] keyword[else] : keyword[try] : identifier[result] = identifier[str] ( identifier[self] . identifier[cp] . identifier[get] ( literal[string] , literal[string] )) keyword[except] identifier[configparser] . identifier[NoOptionError] : keyword[if] literal[string] keyword[in] identifier[os] . identifier[environ] keyword[and] identifier[os] . identifier[environ] [ literal[string] ]: identifier[result] = identifier[os] . identifier[environ] [ literal[string] ] keyword[return] identifier[shlex] . identifier[split] ( identifier[result] )
def editor(self): """ Returns the editor to invoke. It returns a list with the command in the first position and its arguments in the remainder. """ result = 'vi' if 'TOPYDO_EDITOR' in os.environ and os.environ['TOPYDO_EDITOR']: result = os.environ['TOPYDO_EDITOR'] # depends on [control=['if'], data=[]] else: try: result = str(self.cp.get('edit', 'editor')) # depends on [control=['try'], data=[]] except configparser.NoOptionError: if 'EDITOR' in os.environ and os.environ['EDITOR']: result = os.environ['EDITOR'] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] return shlex.split(result)
def show_shakemap_importer(self): """Show the converter dialog.""" # import here only so that it is AFTER i18n set up from safe.gui.tools.shake_grid.shakemap_converter_dialog import ( ShakemapConverterDialog) dialog = ShakemapConverterDialog( self.iface.mainWindow(), self.iface, self.dock_widget) dialog.exec_()
def function[show_shakemap_importer, parameter[self]]: constant[Show the converter dialog.] from relative_module[safe.gui.tools.shake_grid.shakemap_converter_dialog] import module[ShakemapConverterDialog] variable[dialog] assign[=] call[name[ShakemapConverterDialog], parameter[call[name[self].iface.mainWindow, parameter[]], name[self].iface, name[self].dock_widget]] call[name[dialog].exec_, parameter[]]
keyword[def] identifier[show_shakemap_importer] ( identifier[self] ): literal[string] keyword[from] identifier[safe] . identifier[gui] . identifier[tools] . identifier[shake_grid] . identifier[shakemap_converter_dialog] keyword[import] ( identifier[ShakemapConverterDialog] ) identifier[dialog] = identifier[ShakemapConverterDialog] ( identifier[self] . identifier[iface] . identifier[mainWindow] (), identifier[self] . identifier[iface] , identifier[self] . identifier[dock_widget] ) identifier[dialog] . identifier[exec_] ()
def show_shakemap_importer(self): """Show the converter dialog.""" # import here only so that it is AFTER i18n set up from safe.gui.tools.shake_grid.shakemap_converter_dialog import ShakemapConverterDialog dialog = ShakemapConverterDialog(self.iface.mainWindow(), self.iface, self.dock_widget) dialog.exec_()
def _parse_attr(self, line): """Parse a single attribute type/value pair.""" colon_pos = line.index(b':') attr_type = line[0:colon_pos].decode('ascii') if line[colon_pos:].startswith(b'::'): attr_value = base64.decodestring(line[colon_pos + 2:]) elif line[colon_pos:].startswith(b':<'): url = line[colon_pos + 2:].strip() attr_value = b'' if self._process_url_schemes: u = urlparse(url) if u[0] in self._process_url_schemes: attr_value = urlopen(url.decode('ascii')).read() else: attr_value = line[colon_pos + 1:].strip() return self._decode_value(attr_type, attr_value)
def function[_parse_attr, parameter[self, line]]: constant[Parse a single attribute type/value pair.] variable[colon_pos] assign[=] call[name[line].index, parameter[constant[b':']]] variable[attr_type] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b26ac6a0>].decode, parameter[constant[ascii]]] if call[call[name[line]][<ast.Slice object at 0x7da1b26ae1d0>].startswith, parameter[constant[b'::']]] begin[:] variable[attr_value] assign[=] call[name[base64].decodestring, parameter[call[name[line]][<ast.Slice object at 0x7da1b26ac610>]]] return[call[name[self]._decode_value, parameter[name[attr_type], name[attr_value]]]]
keyword[def] identifier[_parse_attr] ( identifier[self] , identifier[line] ): literal[string] identifier[colon_pos] = identifier[line] . identifier[index] ( literal[string] ) identifier[attr_type] = identifier[line] [ literal[int] : identifier[colon_pos] ]. identifier[decode] ( literal[string] ) keyword[if] identifier[line] [ identifier[colon_pos] :]. identifier[startswith] ( literal[string] ): identifier[attr_value] = identifier[base64] . identifier[decodestring] ( identifier[line] [ identifier[colon_pos] + literal[int] :]) keyword[elif] identifier[line] [ identifier[colon_pos] :]. identifier[startswith] ( literal[string] ): identifier[url] = identifier[line] [ identifier[colon_pos] + literal[int] :]. identifier[strip] () identifier[attr_value] = literal[string] keyword[if] identifier[self] . identifier[_process_url_schemes] : identifier[u] = identifier[urlparse] ( identifier[url] ) keyword[if] identifier[u] [ literal[int] ] keyword[in] identifier[self] . identifier[_process_url_schemes] : identifier[attr_value] = identifier[urlopen] ( identifier[url] . identifier[decode] ( literal[string] )). identifier[read] () keyword[else] : identifier[attr_value] = identifier[line] [ identifier[colon_pos] + literal[int] :]. identifier[strip] () keyword[return] identifier[self] . identifier[_decode_value] ( identifier[attr_type] , identifier[attr_value] )
def _parse_attr(self, line): """Parse a single attribute type/value pair.""" colon_pos = line.index(b':') attr_type = line[0:colon_pos].decode('ascii') if line[colon_pos:].startswith(b'::'): attr_value = base64.decodestring(line[colon_pos + 2:]) # depends on [control=['if'], data=[]] elif line[colon_pos:].startswith(b':<'): url = line[colon_pos + 2:].strip() attr_value = b'' if self._process_url_schemes: u = urlparse(url) if u[0] in self._process_url_schemes: attr_value = urlopen(url.decode('ascii')).read() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: attr_value = line[colon_pos + 1:].strip() return self._decode_value(attr_type, attr_value)
def data32_send(self, type, len, data, force_mavlink1=False): ''' Data packet, size 32 type : data type (uint8_t) len : data length (uint8_t) data : raw data (uint8_t) ''' return self.send(self.data32_encode(type, len, data), force_mavlink1=force_mavlink1)
def function[data32_send, parameter[self, type, len, data, force_mavlink1]]: constant[ Data packet, size 32 type : data type (uint8_t) len : data length (uint8_t) data : raw data (uint8_t) ] return[call[name[self].send, parameter[call[name[self].data32_encode, parameter[name[type], name[len], name[data]]]]]]
keyword[def] identifier[data32_send] ( identifier[self] , identifier[type] , identifier[len] , identifier[data] , identifier[force_mavlink1] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[data32_encode] ( identifier[type] , identifier[len] , identifier[data] ), identifier[force_mavlink1] = identifier[force_mavlink1] )
def data32_send(self, type, len, data, force_mavlink1=False): """ Data packet, size 32 type : data type (uint8_t) len : data length (uint8_t) data : raw data (uint8_t) """ return self.send(self.data32_encode(type, len, data), force_mavlink1=force_mavlink1)
def format_modes(modes, full_modes=False, current_mode=None): """ Creates a nice readily printable Table for a list of modes. Used in `displays list' and the candidates list in `displays set'. """ t = table.Table((( '*' if mode == current_mode else '', # 0 str(Q.CGDisplayModeGetWidth(mode)), # 1 str(Q.CGDisplayModeGetHeight(mode)), # 2 '@'+shorter_float_str(Q.CGDisplayModeGetRefreshRate(mode)), # 3 format_pixelEncoding( Q.CGDisplayModeCopyPixelEncoding(mode))) # 4 for mode in modes)) t.set_key(2, 'height') t.set_key(3, 'rate') t.set_key(4, 'depth') t.set_alignment('height', 'l') t.set_alignment('rate', 'l') t.set_separator('height', ' x ') created_flags_col = False if full_modes: t.append_col(tuple((' '.join(get_flags_of_mode(mode)) for mode in modes)), key='flags') created_flags_col = True else: # Remove refresh rate and bit depth if they are all the same if len(frozenset(t.get_col('rate'))) == 1: t.del_col('rate') if len(frozenset(t.get_col('depth'))) == 1: t.del_col('depth') # Show distinct IO flags when several modes appear the same lut = {} for i, row in enumerate(t): row = tuple(row) if row not in lut: lut[row] = [] elif not created_flags_col: t.append_col(('',) * len(modes), key='flags') lut[row].append(i) for rw, indices in lut.iteritems(): if len(indices) == 1: continue flags = {} for i in indices: flags[i] = get_flags_of_mode(modes[i]) common_flags = reduce(lambda x, y: x.intersection(y), map(frozenset, flags.itervalues())) for i in indices: t[i, 'flags'] = ' '.join(frozenset(flags[i]) - common_flags) if created_flags_col: t.set_alignment('flags', 'l') return t
def function[format_modes, parameter[modes, full_modes, current_mode]]: constant[ Creates a nice readily printable Table for a list of modes. Used in `displays list' and the candidates list in `displays set'. ] variable[t] assign[=] call[name[table].Table, parameter[<ast.GeneratorExp object at 0x7da1b0818340>]] call[name[t].set_key, parameter[constant[2], constant[height]]] call[name[t].set_key, parameter[constant[3], constant[rate]]] call[name[t].set_key, parameter[constant[4], constant[depth]]] call[name[t].set_alignment, parameter[constant[height], constant[l]]] call[name[t].set_alignment, parameter[constant[rate], constant[l]]] call[name[t].set_separator, parameter[constant[height], constant[ x ]]] variable[created_flags_col] assign[=] constant[False] if name[full_modes] begin[:] call[name[t].append_col, parameter[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b0819390>]]]] variable[created_flags_col] assign[=] constant[True] if name[created_flags_col] begin[:] call[name[t].set_alignment, parameter[constant[flags], constant[l]]] return[name[t]]
keyword[def] identifier[format_modes] ( identifier[modes] , identifier[full_modes] = keyword[False] , identifier[current_mode] = keyword[None] ): literal[string] identifier[t] = identifier[table] . identifier[Table] ((( literal[string] keyword[if] identifier[mode] == identifier[current_mode] keyword[else] literal[string] , identifier[str] ( identifier[Q] . identifier[CGDisplayModeGetWidth] ( identifier[mode] )), identifier[str] ( identifier[Q] . identifier[CGDisplayModeGetHeight] ( identifier[mode] )), literal[string] + identifier[shorter_float_str] ( identifier[Q] . identifier[CGDisplayModeGetRefreshRate] ( identifier[mode] )), identifier[format_pixelEncoding] ( identifier[Q] . identifier[CGDisplayModeCopyPixelEncoding] ( identifier[mode] ))) keyword[for] identifier[mode] keyword[in] identifier[modes] )) identifier[t] . identifier[set_key] ( literal[int] , literal[string] ) identifier[t] . identifier[set_key] ( literal[int] , literal[string] ) identifier[t] . identifier[set_key] ( literal[int] , literal[string] ) identifier[t] . identifier[set_alignment] ( literal[string] , literal[string] ) identifier[t] . identifier[set_alignment] ( literal[string] , literal[string] ) identifier[t] . identifier[set_separator] ( literal[string] , literal[string] ) identifier[created_flags_col] = keyword[False] keyword[if] identifier[full_modes] : identifier[t] . identifier[append_col] ( identifier[tuple] (( literal[string] . identifier[join] ( identifier[get_flags_of_mode] ( identifier[mode] )) keyword[for] identifier[mode] keyword[in] identifier[modes] )), identifier[key] = literal[string] ) identifier[created_flags_col] = keyword[True] keyword[else] : keyword[if] identifier[len] ( identifier[frozenset] ( identifier[t] . identifier[get_col] ( literal[string] )))== literal[int] : identifier[t] . identifier[del_col] ( literal[string] ) keyword[if] identifier[len] ( identifier[frozenset] ( identifier[t] . identifier[get_col] ( literal[string] )))== literal[int] : identifier[t] . 
identifier[del_col] ( literal[string] ) identifier[lut] ={} keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[t] ): identifier[row] = identifier[tuple] ( identifier[row] ) keyword[if] identifier[row] keyword[not] keyword[in] identifier[lut] : identifier[lut] [ identifier[row] ]=[] keyword[elif] keyword[not] identifier[created_flags_col] : identifier[t] . identifier[append_col] (( literal[string] ,)* identifier[len] ( identifier[modes] ), identifier[key] = literal[string] ) identifier[lut] [ identifier[row] ]. identifier[append] ( identifier[i] ) keyword[for] identifier[rw] , identifier[indices] keyword[in] identifier[lut] . identifier[iteritems] (): keyword[if] identifier[len] ( identifier[indices] )== literal[int] : keyword[continue] identifier[flags] ={} keyword[for] identifier[i] keyword[in] identifier[indices] : identifier[flags] [ identifier[i] ]= identifier[get_flags_of_mode] ( identifier[modes] [ identifier[i] ]) identifier[common_flags] = identifier[reduce] ( keyword[lambda] identifier[x] , identifier[y] : identifier[x] . identifier[intersection] ( identifier[y] ), identifier[map] ( identifier[frozenset] , identifier[flags] . identifier[itervalues] ())) keyword[for] identifier[i] keyword[in] identifier[indices] : identifier[t] [ identifier[i] , literal[string] ]= literal[string] . identifier[join] ( identifier[frozenset] ( identifier[flags] [ identifier[i] ]) - identifier[common_flags] ) keyword[if] identifier[created_flags_col] : identifier[t] . identifier[set_alignment] ( literal[string] , literal[string] ) keyword[return] identifier[t]
def format_modes(modes, full_modes=False, current_mode=None): """ Creates a nice readily printable Table for a list of modes. Used in `displays list' and the candidates list in `displays set'. """ # 0 # 1 # 2 # 3 # 4 t = table.Table((('*' if mode == current_mode else '', str(Q.CGDisplayModeGetWidth(mode)), str(Q.CGDisplayModeGetHeight(mode)), '@' + shorter_float_str(Q.CGDisplayModeGetRefreshRate(mode)), format_pixelEncoding(Q.CGDisplayModeCopyPixelEncoding(mode))) for mode in modes)) t.set_key(2, 'height') t.set_key(3, 'rate') t.set_key(4, 'depth') t.set_alignment('height', 'l') t.set_alignment('rate', 'l') t.set_separator('height', ' x ') created_flags_col = False if full_modes: t.append_col(tuple((' '.join(get_flags_of_mode(mode)) for mode in modes)), key='flags') created_flags_col = True # depends on [control=['if'], data=[]] else: # Remove refresh rate and bit depth if they are all the same if len(frozenset(t.get_col('rate'))) == 1: t.del_col('rate') # depends on [control=['if'], data=[]] if len(frozenset(t.get_col('depth'))) == 1: t.del_col('depth') # depends on [control=['if'], data=[]] # Show distinct IO flags when several modes appear the same lut = {} for (i, row) in enumerate(t): row = tuple(row) if row not in lut: lut[row] = [] # depends on [control=['if'], data=['row', 'lut']] elif not created_flags_col: t.append_col(('',) * len(modes), key='flags') # depends on [control=['if'], data=[]] lut[row].append(i) # depends on [control=['for'], data=[]] for (rw, indices) in lut.iteritems(): if len(indices) == 1: continue # depends on [control=['if'], data=[]] flags = {} for i in indices: flags[i] = get_flags_of_mode(modes[i]) # depends on [control=['for'], data=['i']] common_flags = reduce(lambda x, y: x.intersection(y), map(frozenset, flags.itervalues())) for i in indices: t[i, 'flags'] = ' '.join(frozenset(flags[i]) - common_flags) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]] if created_flags_col: t.set_alignment('flags', 
'l') # depends on [control=['if'], data=[]] return t
def scroll_event(self, widget, event): """ Called when a mouse is turned in the widget (and maybe for finger scrolling in the trackpad). Adjust method signature as appropriate for callback. """ x, y = event.x, event.y num_degrees = 0 direction = 0 # x, y = coordinates of mouse self.last_win_x, self.last_win_y = x, y # calculate number of degrees of scroll and direction of scroll # both floats in the 0-359.999 range # num_degrees = # direction = self.logger.debug("scroll deg=%f direction=%f" % ( num_degrees, direction)) data_x, data_y = self.check_cursor_location() return self.make_ui_callback('scroll', direction, num_degrees, data_x, data_y)
def function[scroll_event, parameter[self, widget, event]]: constant[ Called when a mouse is turned in the widget (and maybe for finger scrolling in the trackpad). Adjust method signature as appropriate for callback. ] <ast.Tuple object at 0x7da1b0c26dd0> assign[=] tuple[[<ast.Attribute object at 0x7da1b0c24250>, <ast.Attribute object at 0x7da1b0c27d30>]] variable[num_degrees] assign[=] constant[0] variable[direction] assign[=] constant[0] <ast.Tuple object at 0x7da1b0c25b70> assign[=] tuple[[<ast.Name object at 0x7da1b0c25120>, <ast.Name object at 0x7da1b0c27dc0>]] call[name[self].logger.debug, parameter[binary_operation[constant[scroll deg=%f direction=%f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0c27fa0>, <ast.Name object at 0x7da1b0c24220>]]]]] <ast.Tuple object at 0x7da1b0c25270> assign[=] call[name[self].check_cursor_location, parameter[]] return[call[name[self].make_ui_callback, parameter[constant[scroll], name[direction], name[num_degrees], name[data_x], name[data_y]]]]
keyword[def] identifier[scroll_event] ( identifier[self] , identifier[widget] , identifier[event] ): literal[string] identifier[x] , identifier[y] = identifier[event] . identifier[x] , identifier[event] . identifier[y] identifier[num_degrees] = literal[int] identifier[direction] = literal[int] identifier[self] . identifier[last_win_x] , identifier[self] . identifier[last_win_y] = identifier[x] , identifier[y] identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[num_degrees] , identifier[direction] )) identifier[data_x] , identifier[data_y] = identifier[self] . identifier[check_cursor_location] () keyword[return] identifier[self] . identifier[make_ui_callback] ( literal[string] , identifier[direction] , identifier[num_degrees] , identifier[data_x] , identifier[data_y] )
def scroll_event(self, widget, event): """ Called when a mouse is turned in the widget (and maybe for finger scrolling in the trackpad). Adjust method signature as appropriate for callback. """ (x, y) = (event.x, event.y) num_degrees = 0 direction = 0 # x, y = coordinates of mouse (self.last_win_x, self.last_win_y) = (x, y) # calculate number of degrees of scroll and direction of scroll # both floats in the 0-359.999 range # num_degrees = # direction = self.logger.debug('scroll deg=%f direction=%f' % (num_degrees, direction)) (data_x, data_y) = self.check_cursor_location() return self.make_ui_callback('scroll', direction, num_degrees, data_x, data_y)
def make_iv(self, pkt): """generate an IV for the packet""" if self.xpn_en: tmp_pn = (self.pn & 0xFFFFFFFF00000000) | (pkt[MACsec].pn & 0xFFFFFFFF) # noqa: E501 tmp_iv = self.ssci + struct.pack('!Q', tmp_pn) return bytes(bytearray([a ^ b for a, b in zip(bytearray(tmp_iv), bytearray(self.salt))])) # noqa: E501 else: return self.sci + struct.pack('!I', pkt[MACsec].pn)
def function[make_iv, parameter[self, pkt]]: constant[generate an IV for the packet] if name[self].xpn_en begin[:] variable[tmp_pn] assign[=] binary_operation[binary_operation[name[self].pn <ast.BitAnd object at 0x7da2590d6b60> constant[18446744069414584320]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[pkt]][name[MACsec]].pn <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]] variable[tmp_iv] assign[=] binary_operation[name[self].ssci + call[name[struct].pack, parameter[constant[!Q], name[tmp_pn]]]] return[call[name[bytes], parameter[call[name[bytearray], parameter[<ast.ListComp object at 0x7da1b2098040>]]]]]
keyword[def] identifier[make_iv] ( identifier[self] , identifier[pkt] ): literal[string] keyword[if] identifier[self] . identifier[xpn_en] : identifier[tmp_pn] =( identifier[self] . identifier[pn] & literal[int] )|( identifier[pkt] [ identifier[MACsec] ]. identifier[pn] & literal[int] ) identifier[tmp_iv] = identifier[self] . identifier[ssci] + identifier[struct] . identifier[pack] ( literal[string] , identifier[tmp_pn] ) keyword[return] identifier[bytes] ( identifier[bytearray] ([ identifier[a] ^ identifier[b] keyword[for] identifier[a] , identifier[b] keyword[in] identifier[zip] ( identifier[bytearray] ( identifier[tmp_iv] ), identifier[bytearray] ( identifier[self] . identifier[salt] ))])) keyword[else] : keyword[return] identifier[self] . identifier[sci] + identifier[struct] . identifier[pack] ( literal[string] , identifier[pkt] [ identifier[MACsec] ]. identifier[pn] )
def make_iv(self, pkt): """generate an IV for the packet""" if self.xpn_en: tmp_pn = self.pn & 18446744069414584320 | pkt[MACsec].pn & 4294967295 # noqa: E501 tmp_iv = self.ssci + struct.pack('!Q', tmp_pn) return bytes(bytearray([a ^ b for (a, b) in zip(bytearray(tmp_iv), bytearray(self.salt))])) # noqa: E501 # depends on [control=['if'], data=[]] else: return self.sci + struct.pack('!I', pkt[MACsec].pn)
def empty(self): """ Clear out the buffer and return all data that was in it. :return: any data that was in the buffer prior to clearing it out, as a `str` """ self._lock.acquire() try: out = self._buffer_tobytes() del self._buffer[:] if (self._event is not None) and not self._closed: self._event.clear() return out finally: self._lock.release()
def function[empty, parameter[self]]: constant[ Clear out the buffer and return all data that was in it. :return: any data that was in the buffer prior to clearing it out, as a `str` ] call[name[self]._lock.acquire, parameter[]] <ast.Try object at 0x7da1b21982e0>
keyword[def] identifier[empty] ( identifier[self] ): literal[string] identifier[self] . identifier[_lock] . identifier[acquire] () keyword[try] : identifier[out] = identifier[self] . identifier[_buffer_tobytes] () keyword[del] identifier[self] . identifier[_buffer] [:] keyword[if] ( identifier[self] . identifier[_event] keyword[is] keyword[not] keyword[None] ) keyword[and] keyword[not] identifier[self] . identifier[_closed] : identifier[self] . identifier[_event] . identifier[clear] () keyword[return] identifier[out] keyword[finally] : identifier[self] . identifier[_lock] . identifier[release] ()
def empty(self): """ Clear out the buffer and return all data that was in it. :return: any data that was in the buffer prior to clearing it out, as a `str` """ self._lock.acquire() try: out = self._buffer_tobytes() del self._buffer[:] if self._event is not None and (not self._closed): self._event.clear() # depends on [control=['if'], data=[]] return out # depends on [control=['try'], data=[]] finally: self._lock.release()
def _match(self, regex): """Find the first line matching regex and return the match object""" cregex = re.compile(regex) for line in self.content.splitlines(): match = cregex.match(line) if match: return match raise Exception('No "{0}" line in {1}.cpp'.format( regex_to_error_msg(regex), self.name ))
def function[_match, parameter[self, regex]]: constant[Find the first line matching regex and return the match object] variable[cregex] assign[=] call[name[re].compile, parameter[name[regex]]] for taget[name[line]] in starred[call[name[self].content.splitlines, parameter[]]] begin[:] variable[match] assign[=] call[name[cregex].match, parameter[name[line]]] if name[match] begin[:] return[name[match]] <ast.Raise object at 0x7da1b204b340>
keyword[def] identifier[_match] ( identifier[self] , identifier[regex] ): literal[string] identifier[cregex] = identifier[re] . identifier[compile] ( identifier[regex] ) keyword[for] identifier[line] keyword[in] identifier[self] . identifier[content] . identifier[splitlines] (): identifier[match] = identifier[cregex] . identifier[match] ( identifier[line] ) keyword[if] identifier[match] : keyword[return] identifier[match] keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[regex_to_error_msg] ( identifier[regex] ), identifier[self] . identifier[name] ))
def _match(self, regex): """Find the first line matching regex and return the match object""" cregex = re.compile(regex) for line in self.content.splitlines(): match = cregex.match(line) if match: return match # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] raise Exception('No "{0}" line in {1}.cpp'.format(regex_to_error_msg(regex), self.name))
def get_results(self): ''' :return: result from running the task ''' self._event.wait() if self._exception is not None: # # Well... rethrownig the exception caught in execute # but on the caller thread # raise self._exception # pylint: disable=E0702 return self._result
def function[get_results, parameter[self]]: constant[ :return: result from running the task ] call[name[self]._event.wait, parameter[]] if compare[name[self]._exception is_not constant[None]] begin[:] <ast.Raise object at 0x7da20c76d660> return[name[self]._result]
keyword[def] identifier[get_results] ( identifier[self] ): literal[string] identifier[self] . identifier[_event] . identifier[wait] () keyword[if] identifier[self] . identifier[_exception] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[self] . identifier[_exception] keyword[return] identifier[self] . identifier[_result]
def get_results(self): """ :return: result from running the task """ self._event.wait() if self._exception is not None: # # Well... rethrownig the exception caught in execute # but on the caller thread # raise self._exception # pylint: disable=E0702 # depends on [control=['if'], data=[]] return self._result
def Load(file): """ Loads a model from specified file """ with open(file, 'rb') as file: model = dill.load(file) return model
def function[Load, parameter[file]]: constant[ Loads a model from specified file ] with call[name[open], parameter[name[file], constant[rb]]] begin[:] variable[model] assign[=] call[name[dill].load, parameter[name[file]]] return[name[model]]
keyword[def] identifier[Load] ( identifier[file] ): literal[string] keyword[with] identifier[open] ( identifier[file] , literal[string] ) keyword[as] identifier[file] : identifier[model] = identifier[dill] . identifier[load] ( identifier[file] ) keyword[return] identifier[model]
def Load(file): """ Loads a model from specified file """ with open(file, 'rb') as file: model = dill.load(file) return model # depends on [control=['with'], data=['file']]
def set_attribute(self, aid, attrib, val, idx='*'): """ Set the value of an xml attribute marked with the matching aid attribute. """ if aid in self.__attrib_ids: elems = self.__attrib_ids[aid] if idx == '*': for elem in elems: self.__set_attribute(elem, attrib, val) elif idx < len(elems): elem = elems[idx] self.__set_attribute(elem, attrib, val)
def function[set_attribute, parameter[self, aid, attrib, val, idx]]: constant[ Set the value of an xml attribute marked with the matching aid attribute. ] if compare[name[aid] in name[self].__attrib_ids] begin[:] variable[elems] assign[=] call[name[self].__attrib_ids][name[aid]] if compare[name[idx] equal[==] constant[*]] begin[:] for taget[name[elem]] in starred[name[elems]] begin[:] call[name[self].__set_attribute, parameter[name[elem], name[attrib], name[val]]]
keyword[def] identifier[set_attribute] ( identifier[self] , identifier[aid] , identifier[attrib] , identifier[val] , identifier[idx] = literal[string] ): literal[string] keyword[if] identifier[aid] keyword[in] identifier[self] . identifier[__attrib_ids] : identifier[elems] = identifier[self] . identifier[__attrib_ids] [ identifier[aid] ] keyword[if] identifier[idx] == literal[string] : keyword[for] identifier[elem] keyword[in] identifier[elems] : identifier[self] . identifier[__set_attribute] ( identifier[elem] , identifier[attrib] , identifier[val] ) keyword[elif] identifier[idx] < identifier[len] ( identifier[elems] ): identifier[elem] = identifier[elems] [ identifier[idx] ] identifier[self] . identifier[__set_attribute] ( identifier[elem] , identifier[attrib] , identifier[val] )
def set_attribute(self, aid, attrib, val, idx='*'): """ Set the value of an xml attribute marked with the matching aid attribute. """ if aid in self.__attrib_ids: elems = self.__attrib_ids[aid] if idx == '*': for elem in elems: self.__set_attribute(elem, attrib, val) # depends on [control=['for'], data=['elem']] # depends on [control=['if'], data=[]] elif idx < len(elems): elem = elems[idx] self.__set_attribute(elem, attrib, val) # depends on [control=['if'], data=['idx']] # depends on [control=['if'], data=['aid']]
def init_send(self): """ Generates the first (IKE_INIT) packet for Initiator :return: bytes() containing a valid IKE_INIT packet """ packet = Packet() self.packets.append(packet) packet.add_payload(payloads.SA()) packet.add_payload(payloads.KE(diffie_hellman=self.diffie_hellman)) packet.add_payload(payloads.Nonce(nonce=self.Ni)) packet.iSPI = self.iSPI = packet.payloads[0].spi self.state = State.INIT return bytes(packet)
def function[init_send, parameter[self]]: constant[ Generates the first (IKE_INIT) packet for Initiator :return: bytes() containing a valid IKE_INIT packet ] variable[packet] assign[=] call[name[Packet], parameter[]] call[name[self].packets.append, parameter[name[packet]]] call[name[packet].add_payload, parameter[call[name[payloads].SA, parameter[]]]] call[name[packet].add_payload, parameter[call[name[payloads].KE, parameter[]]]] call[name[packet].add_payload, parameter[call[name[payloads].Nonce, parameter[]]]] name[packet].iSPI assign[=] call[name[packet].payloads][constant[0]].spi name[self].state assign[=] name[State].INIT return[call[name[bytes], parameter[name[packet]]]]
keyword[def] identifier[init_send] ( identifier[self] ): literal[string] identifier[packet] = identifier[Packet] () identifier[self] . identifier[packets] . identifier[append] ( identifier[packet] ) identifier[packet] . identifier[add_payload] ( identifier[payloads] . identifier[SA] ()) identifier[packet] . identifier[add_payload] ( identifier[payloads] . identifier[KE] ( identifier[diffie_hellman] = identifier[self] . identifier[diffie_hellman] )) identifier[packet] . identifier[add_payload] ( identifier[payloads] . identifier[Nonce] ( identifier[nonce] = identifier[self] . identifier[Ni] )) identifier[packet] . identifier[iSPI] = identifier[self] . identifier[iSPI] = identifier[packet] . identifier[payloads] [ literal[int] ]. identifier[spi] identifier[self] . identifier[state] = identifier[State] . identifier[INIT] keyword[return] identifier[bytes] ( identifier[packet] )
def init_send(self): """ Generates the first (IKE_INIT) packet for Initiator :return: bytes() containing a valid IKE_INIT packet """ packet = Packet() self.packets.append(packet) packet.add_payload(payloads.SA()) packet.add_payload(payloads.KE(diffie_hellman=self.diffie_hellman)) packet.add_payload(payloads.Nonce(nonce=self.Ni)) packet.iSPI = self.iSPI = packet.payloads[0].spi self.state = State.INIT return bytes(packet)
def _padding_model_number(number, max_num): ''' This method returns a zero-front padded string It makes out of str(45) -> '0045' if 999 < max_num < 10000. This is meant to work for reasonable integers (maybe less than 10^6). Parameters ---------- number : integer number that the string should represent. max_num : integer max number of cycle list, implies how many 0s have be padded ''' cnum = str(number) clen = len(cnum) cmax = int(log10(max_num)) + 1 return (cmax - clen)*'0' + cnum
def function[_padding_model_number, parameter[number, max_num]]: constant[ This method returns a zero-front padded string It makes out of str(45) -> '0045' if 999 < max_num < 10000. This is meant to work for reasonable integers (maybe less than 10^6). Parameters ---------- number : integer number that the string should represent. max_num : integer max number of cycle list, implies how many 0s have be padded ] variable[cnum] assign[=] call[name[str], parameter[name[number]]] variable[clen] assign[=] call[name[len], parameter[name[cnum]]] variable[cmax] assign[=] binary_operation[call[name[int], parameter[call[name[log10], parameter[name[max_num]]]]] + constant[1]] return[binary_operation[binary_operation[binary_operation[name[cmax] - name[clen]] * constant[0]] + name[cnum]]]
keyword[def] identifier[_padding_model_number] ( identifier[number] , identifier[max_num] ): literal[string] identifier[cnum] = identifier[str] ( identifier[number] ) identifier[clen] = identifier[len] ( identifier[cnum] ) identifier[cmax] = identifier[int] ( identifier[log10] ( identifier[max_num] ))+ literal[int] keyword[return] ( identifier[cmax] - identifier[clen] )* literal[string] + identifier[cnum]
def _padding_model_number(number, max_num): """ This method returns a zero-front padded string It makes out of str(45) -> '0045' if 999 < max_num < 10000. This is meant to work for reasonable integers (maybe less than 10^6). Parameters ---------- number : integer number that the string should represent. max_num : integer max number of cycle list, implies how many 0s have be padded """ cnum = str(number) clen = len(cnum) cmax = int(log10(max_num)) + 1 return (cmax - clen) * '0' + cnum
def init_app(self, app, **kwargs): """Flask application initialization.""" self.init_config(app) state = _AppState(app=app, cache=kwargs.get('cache')) app.extensions['invenio-collections'] = state return state
def function[init_app, parameter[self, app]]: constant[Flask application initialization.] call[name[self].init_config, parameter[name[app]]] variable[state] assign[=] call[name[_AppState], parameter[]] call[name[app].extensions][constant[invenio-collections]] assign[=] name[state] return[name[state]]
keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[init_config] ( identifier[app] ) identifier[state] = identifier[_AppState] ( identifier[app] = identifier[app] , identifier[cache] = identifier[kwargs] . identifier[get] ( literal[string] )) identifier[app] . identifier[extensions] [ literal[string] ]= identifier[state] keyword[return] identifier[state]
def init_app(self, app, **kwargs): """Flask application initialization.""" self.init_config(app) state = _AppState(app=app, cache=kwargs.get('cache')) app.extensions['invenio-collections'] = state return state
def pivot(table, f1, f2, f3, aggfun, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True): """ Construct a pivot table. E.g.:: >>> import petl as etl >>> table1 = [['region', 'gender', 'style', 'units'], ... ['east', 'boy', 'tee', 12], ... ['east', 'boy', 'golf', 14], ... ['east', 'boy', 'fancy', 7], ... ['east', 'girl', 'tee', 3], ... ['east', 'girl', 'golf', 8], ... ['east', 'girl', 'fancy', 18], ... ['west', 'boy', 'tee', 12], ... ['west', 'boy', 'golf', 15], ... ['west', 'boy', 'fancy', 8], ... ['west', 'girl', 'tee', 6], ... ['west', 'girl', 'golf', 16], ... ['west', 'girl', 'fancy', 1]] >>> table2 = etl.pivot(table1, 'region', 'gender', 'units', sum) >>> table2 +--------+-----+------+ | region | boy | girl | +========+=====+======+ | 'east' | 33 | 29 | +--------+-----+------+ | 'west' | 35 | 23 | +--------+-----+------+ >>> table3 = etl.pivot(table1, 'region', 'style', 'units', sum) >>> table3 +--------+-------+------+-----+ | region | fancy | golf | tee | +========+=======+======+=====+ | 'east' | 25 | 22 | 15 | +--------+-------+------+-----+ | 'west' | 9 | 31 | 18 | +--------+-------+------+-----+ >>> table4 = etl.pivot(table1, 'gender', 'style', 'units', sum) >>> table4 +--------+-------+------+-----+ | gender | fancy | golf | tee | +========+=======+======+=====+ | 'boy' | 15 | 29 | 24 | +--------+-------+------+-----+ | 'girl' | 19 | 24 | 9 | +--------+-------+------+-----+ See also :func:`petl.transform.reshape.recast`. """ return PivotView(table, f1, f2, f3, aggfun, missing=missing, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def function[pivot, parameter[table, f1, f2, f3, aggfun, missing, presorted, buffersize, tempdir, cache]]: constant[ Construct a pivot table. E.g.:: >>> import petl as etl >>> table1 = [['region', 'gender', 'style', 'units'], ... ['east', 'boy', 'tee', 12], ... ['east', 'boy', 'golf', 14], ... ['east', 'boy', 'fancy', 7], ... ['east', 'girl', 'tee', 3], ... ['east', 'girl', 'golf', 8], ... ['east', 'girl', 'fancy', 18], ... ['west', 'boy', 'tee', 12], ... ['west', 'boy', 'golf', 15], ... ['west', 'boy', 'fancy', 8], ... ['west', 'girl', 'tee', 6], ... ['west', 'girl', 'golf', 16], ... ['west', 'girl', 'fancy', 1]] >>> table2 = etl.pivot(table1, 'region', 'gender', 'units', sum) >>> table2 +--------+-----+------+ | region | boy | girl | +========+=====+======+ | 'east' | 33 | 29 | +--------+-----+------+ | 'west' | 35 | 23 | +--------+-----+------+ >>> table3 = etl.pivot(table1, 'region', 'style', 'units', sum) >>> table3 +--------+-------+------+-----+ | region | fancy | golf | tee | +========+=======+======+=====+ | 'east' | 25 | 22 | 15 | +--------+-------+------+-----+ | 'west' | 9 | 31 | 18 | +--------+-------+------+-----+ >>> table4 = etl.pivot(table1, 'gender', 'style', 'units', sum) >>> table4 +--------+-------+------+-----+ | gender | fancy | golf | tee | +========+=======+======+=====+ | 'boy' | 15 | 29 | 24 | +--------+-------+------+-----+ | 'girl' | 19 | 24 | 9 | +--------+-------+------+-----+ See also :func:`petl.transform.reshape.recast`. ] return[call[name[PivotView], parameter[name[table], name[f1], name[f2], name[f3], name[aggfun]]]]
keyword[def] identifier[pivot] ( identifier[table] , identifier[f1] , identifier[f2] , identifier[f3] , identifier[aggfun] , identifier[missing] = keyword[None] , identifier[presorted] = keyword[False] , identifier[buffersize] = keyword[None] , identifier[tempdir] = keyword[None] , identifier[cache] = keyword[True] ): literal[string] keyword[return] identifier[PivotView] ( identifier[table] , identifier[f1] , identifier[f2] , identifier[f3] , identifier[aggfun] , identifier[missing] = identifier[missing] , identifier[presorted] = identifier[presorted] , identifier[buffersize] = identifier[buffersize] , identifier[tempdir] = identifier[tempdir] , identifier[cache] = identifier[cache] )
def pivot(table, f1, f2, f3, aggfun, missing=None, presorted=False, buffersize=None, tempdir=None, cache=True): """ Construct a pivot table. E.g.:: >>> import petl as etl >>> table1 = [['region', 'gender', 'style', 'units'], ... ['east', 'boy', 'tee', 12], ... ['east', 'boy', 'golf', 14], ... ['east', 'boy', 'fancy', 7], ... ['east', 'girl', 'tee', 3], ... ['east', 'girl', 'golf', 8], ... ['east', 'girl', 'fancy', 18], ... ['west', 'boy', 'tee', 12], ... ['west', 'boy', 'golf', 15], ... ['west', 'boy', 'fancy', 8], ... ['west', 'girl', 'tee', 6], ... ['west', 'girl', 'golf', 16], ... ['west', 'girl', 'fancy', 1]] >>> table2 = etl.pivot(table1, 'region', 'gender', 'units', sum) >>> table2 +--------+-----+------+ | region | boy | girl | +========+=====+======+ | 'east' | 33 | 29 | +--------+-----+------+ | 'west' | 35 | 23 | +--------+-----+------+ >>> table3 = etl.pivot(table1, 'region', 'style', 'units', sum) >>> table3 +--------+-------+------+-----+ | region | fancy | golf | tee | +========+=======+======+=====+ | 'east' | 25 | 22 | 15 | +--------+-------+------+-----+ | 'west' | 9 | 31 | 18 | +--------+-------+------+-----+ >>> table4 = etl.pivot(table1, 'gender', 'style', 'units', sum) >>> table4 +--------+-------+------+-----+ | gender | fancy | golf | tee | +========+=======+======+=====+ | 'boy' | 15 | 29 | 24 | +--------+-------+------+-----+ | 'girl' | 19 | 24 | 9 | +--------+-------+------+-----+ See also :func:`petl.transform.reshape.recast`. """ return PivotView(table, f1, f2, f3, aggfun, missing=missing, presorted=presorted, buffersize=buffersize, tempdir=tempdir, cache=cache)
def get_row_failures(df, value_cols, type_cols, verbose=False, outfile=None): """ Input: already validated DataFrame, value & type column names, and output options. Get details on each detected issue, row by row. Output: DataFrame with type & value validation columns, plus an "issues" column with a dictionary of every problem for that row. """ # set temporary numeric index df["num"] = list(range(len(df))) # get column names for value & type validations names = value_cols.union(type_cols) # drop all non validation columns value_problems = df[names.union(["num"])] # drop validation columns that contain no problems failing_items = value_problems.dropna(how="all", subset=names) if not len(failing_items): if verbose: print("No problems") return [] failing_items = failing_items.dropna(how="all", axis=1) # get names of the failing items bad_items = list(failing_items.index) # get index numbers of the failing items bad_indices = list(failing_items["num"]) failing_items['issues'] = failing_items.drop("num", axis=1).apply(make_row_dict, axis=1).values # take output and print/write to file print_row_failures(failing_items, verbose, outfile) return failing_items
def function[get_row_failures, parameter[df, value_cols, type_cols, verbose, outfile]]: constant[ Input: already validated DataFrame, value & type column names, and output options. Get details on each detected issue, row by row. Output: DataFrame with type & value validation columns, plus an "issues" column with a dictionary of every problem for that row. ] call[name[df]][constant[num]] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[df]]]]]]] variable[names] assign[=] call[name[value_cols].union, parameter[name[type_cols]]] variable[value_problems] assign[=] call[name[df]][call[name[names].union, parameter[list[[<ast.Constant object at 0x7da1b042e8f0>]]]]] variable[failing_items] assign[=] call[name[value_problems].dropna, parameter[]] if <ast.UnaryOp object at 0x7da1b042f520> begin[:] if name[verbose] begin[:] call[name[print], parameter[constant[No problems]]] return[list[[]]] variable[failing_items] assign[=] call[name[failing_items].dropna, parameter[]] variable[bad_items] assign[=] call[name[list], parameter[name[failing_items].index]] variable[bad_indices] assign[=] call[name[list], parameter[call[name[failing_items]][constant[num]]]] call[name[failing_items]][constant[issues]] assign[=] call[call[name[failing_items].drop, parameter[constant[num]]].apply, parameter[name[make_row_dict]]].values call[name[print_row_failures], parameter[name[failing_items], name[verbose], name[outfile]]] return[name[failing_items]]
keyword[def] identifier[get_row_failures] ( identifier[df] , identifier[value_cols] , identifier[type_cols] , identifier[verbose] = keyword[False] , identifier[outfile] = keyword[None] ): literal[string] identifier[df] [ literal[string] ]= identifier[list] ( identifier[range] ( identifier[len] ( identifier[df] ))) identifier[names] = identifier[value_cols] . identifier[union] ( identifier[type_cols] ) identifier[value_problems] = identifier[df] [ identifier[names] . identifier[union] ([ literal[string] ])] identifier[failing_items] = identifier[value_problems] . identifier[dropna] ( identifier[how] = literal[string] , identifier[subset] = identifier[names] ) keyword[if] keyword[not] identifier[len] ( identifier[failing_items] ): keyword[if] identifier[verbose] : identifier[print] ( literal[string] ) keyword[return] [] identifier[failing_items] = identifier[failing_items] . identifier[dropna] ( identifier[how] = literal[string] , identifier[axis] = literal[int] ) identifier[bad_items] = identifier[list] ( identifier[failing_items] . identifier[index] ) identifier[bad_indices] = identifier[list] ( identifier[failing_items] [ literal[string] ]) identifier[failing_items] [ literal[string] ]= identifier[failing_items] . identifier[drop] ( literal[string] , identifier[axis] = literal[int] ). identifier[apply] ( identifier[make_row_dict] , identifier[axis] = literal[int] ). identifier[values] identifier[print_row_failures] ( identifier[failing_items] , identifier[verbose] , identifier[outfile] ) keyword[return] identifier[failing_items]
def get_row_failures(df, value_cols, type_cols, verbose=False, outfile=None): """ Input: already validated DataFrame, value & type column names, and output options. Get details on each detected issue, row by row. Output: DataFrame with type & value validation columns, plus an "issues" column with a dictionary of every problem for that row. """ # set temporary numeric index df['num'] = list(range(len(df))) # get column names for value & type validations names = value_cols.union(type_cols) # drop all non validation columns value_problems = df[names.union(['num'])] # drop validation columns that contain no problems failing_items = value_problems.dropna(how='all', subset=names) if not len(failing_items): if verbose: print('No problems') # depends on [control=['if'], data=[]] return [] # depends on [control=['if'], data=[]] failing_items = failing_items.dropna(how='all', axis=1) # get names of the failing items bad_items = list(failing_items.index) # get index numbers of the failing items bad_indices = list(failing_items['num']) failing_items['issues'] = failing_items.drop('num', axis=1).apply(make_row_dict, axis=1).values # take output and print/write to file print_row_failures(failing_items, verbose, outfile) return failing_items
def incident(self, name, **kwargs): """Add Incident data to Batch object. Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. event_date (str, kwargs): The event datetime expression for this Group. status (str, kwargs): The status for this Group. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Incident. """ group_obj = Incident(name, **kwargs) return self._group(group_obj)
def function[incident, parameter[self, name]]: constant[Add Incident data to Batch object. Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. event_date (str, kwargs): The event datetime expression for this Group. status (str, kwargs): The status for this Group. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Incident. ] variable[group_obj] assign[=] call[name[Incident], parameter[name[name]]] return[call[name[self]._group, parameter[name[group_obj]]]]
keyword[def] identifier[incident] ( identifier[self] , identifier[name] ,** identifier[kwargs] ): literal[string] identifier[group_obj] = identifier[Incident] ( identifier[name] ,** identifier[kwargs] ) keyword[return] identifier[self] . identifier[_group] ( identifier[group_obj] )
def incident(self, name, **kwargs): """Add Incident data to Batch object. Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. event_date (str, kwargs): The event datetime expression for this Group. status (str, kwargs): The status for this Group. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Incident. """ group_obj = Incident(name, **kwargs) return self._group(group_obj)
def command2str(num): """ Turn command number into name """ for attr in SLOT.__dict__.keys(): if not attr.startswith('_') and attr == attr.upper(): if getattr(SLOT, attr) == num: return 'SLOT_%s' % attr return "0x%02x" % (num)
def function[command2str, parameter[num]]: constant[ Turn command number into name ] for taget[name[attr]] in starred[call[name[SLOT].__dict__.keys, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b083c790> begin[:] if compare[call[name[getattr], parameter[name[SLOT], name[attr]]] equal[==] name[num]] begin[:] return[binary_operation[constant[SLOT_%s] <ast.Mod object at 0x7da2590d6920> name[attr]]] return[binary_operation[constant[0x%02x] <ast.Mod object at 0x7da2590d6920> name[num]]]
keyword[def] identifier[command2str] ( identifier[num] ): literal[string] keyword[for] identifier[attr] keyword[in] identifier[SLOT] . identifier[__dict__] . identifier[keys] (): keyword[if] keyword[not] identifier[attr] . identifier[startswith] ( literal[string] ) keyword[and] identifier[attr] == identifier[attr] . identifier[upper] (): keyword[if] identifier[getattr] ( identifier[SLOT] , identifier[attr] )== identifier[num] : keyword[return] literal[string] % identifier[attr] keyword[return] literal[string] %( identifier[num] )
def command2str(num): """ Turn command number into name """ for attr in SLOT.__dict__.keys(): if not attr.startswith('_') and attr == attr.upper(): if getattr(SLOT, attr) == num: return 'SLOT_%s' % attr # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']] return '0x%02x' % num
def pexpect(self): """ Run command and return pexpect process object. NOTE: Requires you to pip install 'pexpect' or will fail. """ import pexpect assert not self._ignore_errors _check_directory(self.directory) arguments = self.arguments return pexpect.spawn( arguments[0], args=arguments[1:], env=self.env, cwd=self.directory )
def function[pexpect, parameter[self]]: constant[ Run command and return pexpect process object. NOTE: Requires you to pip install 'pexpect' or will fail. ] import module[pexpect] assert[<ast.UnaryOp object at 0x7da1b25877c0>] call[name[_check_directory], parameter[name[self].directory]] variable[arguments] assign[=] name[self].arguments return[call[name[pexpect].spawn, parameter[call[name[arguments]][constant[0]]]]]
keyword[def] identifier[pexpect] ( identifier[self] ): literal[string] keyword[import] identifier[pexpect] keyword[assert] keyword[not] identifier[self] . identifier[_ignore_errors] identifier[_check_directory] ( identifier[self] . identifier[directory] ) identifier[arguments] = identifier[self] . identifier[arguments] keyword[return] identifier[pexpect] . identifier[spawn] ( identifier[arguments] [ literal[int] ], identifier[args] = identifier[arguments] [ literal[int] :], identifier[env] = identifier[self] . identifier[env] , identifier[cwd] = identifier[self] . identifier[directory] )
def pexpect(self): """ Run command and return pexpect process object. NOTE: Requires you to pip install 'pexpect' or will fail. """ import pexpect assert not self._ignore_errors _check_directory(self.directory) arguments = self.arguments return pexpect.spawn(arguments[0], args=arguments[1:], env=self.env, cwd=self.directory)
def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt): """method used in filter_stmts""" if self is mystmt: return _stmts, True if self.statement() is mystmt: # original node's statement is the assignment, only keep # current node (gen exp, list comp) return [node], True return _stmts, False
def function[_get_filtered_stmts, parameter[self, lookup_node, node, _stmts, mystmt]]: constant[method used in filter_stmts] if compare[name[self] is name[mystmt]] begin[:] return[tuple[[<ast.Name object at 0x7da1b1d39750>, <ast.Constant object at 0x7da1b1d3a230>]]] if compare[call[name[self].statement, parameter[]] is name[mystmt]] begin[:] return[tuple[[<ast.List object at 0x7da1b1d39000>, <ast.Constant object at 0x7da1b1d39390>]]] return[tuple[[<ast.Name object at 0x7da1b1d38ac0>, <ast.Constant object at 0x7da1b1d38e20>]]]
keyword[def] identifier[_get_filtered_stmts] ( identifier[self] , identifier[lookup_node] , identifier[node] , identifier[_stmts] , identifier[mystmt] ): literal[string] keyword[if] identifier[self] keyword[is] identifier[mystmt] : keyword[return] identifier[_stmts] , keyword[True] keyword[if] identifier[self] . identifier[statement] () keyword[is] identifier[mystmt] : keyword[return] [ identifier[node] ], keyword[True] keyword[return] identifier[_stmts] , keyword[False]
def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt): """method used in filter_stmts""" if self is mystmt: return (_stmts, True) # depends on [control=['if'], data=[]] if self.statement() is mystmt: # original node's statement is the assignment, only keep # current node (gen exp, list comp) return ([node], True) # depends on [control=['if'], data=[]] return (_stmts, False)
def _apply_gradients(self, grads, x, optim_state): """Refer to parent class documentation.""" new_x = [None] * len(x) new_optim_state = { "t": optim_state["t"] + 1., "m": [None] * len(x), "u": [None] * len(x) } t = new_optim_state["t"] for i in xrange(len(x)): g = grads[i] m_old = optim_state["m"][i] u_old = optim_state["u"][i] new_optim_state["m"][i] = ( self._beta1 * m_old + (1. - self._beta1) * g) new_optim_state["u"][i] = ( self._beta2 * u_old + (1. - self._beta2) * g * g) m_hat = new_optim_state["m"][i] / (1. - tf.pow(self._beta1, t)) u_hat = new_optim_state["u"][i] / (1. - tf.pow(self._beta2, t)) new_x[i] = ( x[i] - self._lr * m_hat / (tf.sqrt(u_hat) + self._epsilon)) return new_x, new_optim_state
def function[_apply_gradients, parameter[self, grads, x, optim_state]]: constant[Refer to parent class documentation.] variable[new_x] assign[=] binary_operation[list[[<ast.Constant object at 0x7da2044c0c70>]] * call[name[len], parameter[name[x]]]] variable[new_optim_state] assign[=] dictionary[[<ast.Constant object at 0x7da2044c2680>, <ast.Constant object at 0x7da2044c0b80>, <ast.Constant object at 0x7da2044c1a20>], [<ast.BinOp object at 0x7da2044c33d0>, <ast.BinOp object at 0x7da2044c0c10>, <ast.BinOp object at 0x7da2044c22f0>]] variable[t] assign[=] call[name[new_optim_state]][constant[t]] for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[x]]]]]] begin[:] variable[g] assign[=] call[name[grads]][name[i]] variable[m_old] assign[=] call[call[name[optim_state]][constant[m]]][name[i]] variable[u_old] assign[=] call[call[name[optim_state]][constant[u]]][name[i]] call[call[name[new_optim_state]][constant[m]]][name[i]] assign[=] binary_operation[binary_operation[name[self]._beta1 * name[m_old]] + binary_operation[binary_operation[constant[1.0] - name[self]._beta1] * name[g]]] call[call[name[new_optim_state]][constant[u]]][name[i]] assign[=] binary_operation[binary_operation[name[self]._beta2 * name[u_old]] + binary_operation[binary_operation[binary_operation[constant[1.0] - name[self]._beta2] * name[g]] * name[g]]] variable[m_hat] assign[=] binary_operation[call[call[name[new_optim_state]][constant[m]]][name[i]] / binary_operation[constant[1.0] - call[name[tf].pow, parameter[name[self]._beta1, name[t]]]]] variable[u_hat] assign[=] binary_operation[call[call[name[new_optim_state]][constant[u]]][name[i]] / binary_operation[constant[1.0] - call[name[tf].pow, parameter[name[self]._beta2, name[t]]]]] call[name[new_x]][name[i]] assign[=] binary_operation[call[name[x]][name[i]] - binary_operation[binary_operation[name[self]._lr * name[m_hat]] / binary_operation[call[name[tf].sqrt, parameter[name[u_hat]]] + name[self]._epsilon]]] 
return[tuple[[<ast.Name object at 0x7da2044c03a0>, <ast.Name object at 0x7da2044c2890>]]]
keyword[def] identifier[_apply_gradients] ( identifier[self] , identifier[grads] , identifier[x] , identifier[optim_state] ): literal[string] identifier[new_x] =[ keyword[None] ]* identifier[len] ( identifier[x] ) identifier[new_optim_state] ={ literal[string] : identifier[optim_state] [ literal[string] ]+ literal[int] , literal[string] :[ keyword[None] ]* identifier[len] ( identifier[x] ), literal[string] :[ keyword[None] ]* identifier[len] ( identifier[x] ) } identifier[t] = identifier[new_optim_state] [ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[x] )): identifier[g] = identifier[grads] [ identifier[i] ] identifier[m_old] = identifier[optim_state] [ literal[string] ][ identifier[i] ] identifier[u_old] = identifier[optim_state] [ literal[string] ][ identifier[i] ] identifier[new_optim_state] [ literal[string] ][ identifier[i] ]=( identifier[self] . identifier[_beta1] * identifier[m_old] +( literal[int] - identifier[self] . identifier[_beta1] )* identifier[g] ) identifier[new_optim_state] [ literal[string] ][ identifier[i] ]=( identifier[self] . identifier[_beta2] * identifier[u_old] +( literal[int] - identifier[self] . identifier[_beta2] )* identifier[g] * identifier[g] ) identifier[m_hat] = identifier[new_optim_state] [ literal[string] ][ identifier[i] ]/( literal[int] - identifier[tf] . identifier[pow] ( identifier[self] . identifier[_beta1] , identifier[t] )) identifier[u_hat] = identifier[new_optim_state] [ literal[string] ][ identifier[i] ]/( literal[int] - identifier[tf] . identifier[pow] ( identifier[self] . identifier[_beta2] , identifier[t] )) identifier[new_x] [ identifier[i] ]=( identifier[x] [ identifier[i] ]- identifier[self] . identifier[_lr] * identifier[m_hat] /( identifier[tf] . identifier[sqrt] ( identifier[u_hat] )+ identifier[self] . identifier[_epsilon] )) keyword[return] identifier[new_x] , identifier[new_optim_state]
def _apply_gradients(self, grads, x, optim_state): """Refer to parent class documentation.""" new_x = [None] * len(x) new_optim_state = {'t': optim_state['t'] + 1.0, 'm': [None] * len(x), 'u': [None] * len(x)} t = new_optim_state['t'] for i in xrange(len(x)): g = grads[i] m_old = optim_state['m'][i] u_old = optim_state['u'][i] new_optim_state['m'][i] = self._beta1 * m_old + (1.0 - self._beta1) * g new_optim_state['u'][i] = self._beta2 * u_old + (1.0 - self._beta2) * g * g m_hat = new_optim_state['m'][i] / (1.0 - tf.pow(self._beta1, t)) u_hat = new_optim_state['u'][i] / (1.0 - tf.pow(self._beta2, t)) new_x[i] = x[i] - self._lr * m_hat / (tf.sqrt(u_hat) + self._epsilon) # depends on [control=['for'], data=['i']] return (new_x, new_optim_state)
def add_new_reset_method(cls): """ Replace existing cls.reset() method with a new one which also calls reset() on any clones. """ orig_reset = cls.reset def new_reset(self, seed=None): logger.debug(f"Calling reset() on {self} (seed={seed})") orig_reset(self, seed) for c in self._dependent_generators: c.reset_dependent_generator(seed) return self cls.reset = new_reset
def function[add_new_reset_method, parameter[cls]]: constant[ Replace existing cls.reset() method with a new one which also calls reset() on any clones. ] variable[orig_reset] assign[=] name[cls].reset def function[new_reset, parameter[self, seed]]: call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da1b10c1510>]] call[name[orig_reset], parameter[name[self], name[seed]]] for taget[name[c]] in starred[name[self]._dependent_generators] begin[:] call[name[c].reset_dependent_generator, parameter[name[seed]]] return[name[self]] name[cls].reset assign[=] name[new_reset]
keyword[def] identifier[add_new_reset_method] ( identifier[cls] ): literal[string] identifier[orig_reset] = identifier[cls] . identifier[reset] keyword[def] identifier[new_reset] ( identifier[self] , identifier[seed] = keyword[None] ): identifier[logger] . identifier[debug] ( literal[string] ) identifier[orig_reset] ( identifier[self] , identifier[seed] ) keyword[for] identifier[c] keyword[in] identifier[self] . identifier[_dependent_generators] : identifier[c] . identifier[reset_dependent_generator] ( identifier[seed] ) keyword[return] identifier[self] identifier[cls] . identifier[reset] = identifier[new_reset]
def add_new_reset_method(cls): """ Replace existing cls.reset() method with a new one which also calls reset() on any clones. """ orig_reset = cls.reset def new_reset(self, seed=None): logger.debug(f'Calling reset() on {self} (seed={seed})') orig_reset(self, seed) for c in self._dependent_generators: c.reset_dependent_generator(seed) # depends on [control=['for'], data=['c']] return self cls.reset = new_reset
def transform_cb(self, setting, value): """Handle callback related to changes in transformations.""" self.make_callback('transform') # whence=0 because need to calculate new extents for proper # cutout for rotation (TODO: always make extents consider # room for rotation) whence = 0 self.redraw(whence=whence)
def function[transform_cb, parameter[self, setting, value]]: constant[Handle callback related to changes in transformations.] call[name[self].make_callback, parameter[constant[transform]]] variable[whence] assign[=] constant[0] call[name[self].redraw, parameter[]]
keyword[def] identifier[transform_cb] ( identifier[self] , identifier[setting] , identifier[value] ): literal[string] identifier[self] . identifier[make_callback] ( literal[string] ) identifier[whence] = literal[int] identifier[self] . identifier[redraw] ( identifier[whence] = identifier[whence] )
def transform_cb(self, setting, value): """Handle callback related to changes in transformations.""" self.make_callback('transform') # whence=0 because need to calculate new extents for proper # cutout for rotation (TODO: always make extents consider # room for rotation) whence = 0 self.redraw(whence=whence)
def acquire_read(self): """ Acquire a read lock. Several threads can hold this typeof lock. It is exclusive with write locks. """ self.monitor.acquire() while self.rwlock < 0 or self.writers_waiting: self.readers_ok.wait() self.rwlock += 1 self.monitor.release()
def function[acquire_read, parameter[self]]: constant[ Acquire a read lock. Several threads can hold this typeof lock. It is exclusive with write locks. ] call[name[self].monitor.acquire, parameter[]] while <ast.BoolOp object at 0x7da1b18826e0> begin[:] call[name[self].readers_ok.wait, parameter[]] <ast.AugAssign object at 0x7da1b1882a40> call[name[self].monitor.release, parameter[]]
keyword[def] identifier[acquire_read] ( identifier[self] ): literal[string] identifier[self] . identifier[monitor] . identifier[acquire] () keyword[while] identifier[self] . identifier[rwlock] < literal[int] keyword[or] identifier[self] . identifier[writers_waiting] : identifier[self] . identifier[readers_ok] . identifier[wait] () identifier[self] . identifier[rwlock] += literal[int] identifier[self] . identifier[monitor] . identifier[release] ()
def acquire_read(self): """ Acquire a read lock. Several threads can hold this typeof lock. It is exclusive with write locks. """ self.monitor.acquire() while self.rwlock < 0 or self.writers_waiting: self.readers_ok.wait() # depends on [control=['while'], data=[]] self.rwlock += 1 self.monitor.release()
def fmt_sentence(text): """English sentence formatter. First letter is always upper case. Example: "Do you want to build a snow man?" **中文文档** 句子格式。每句话的第一个单词第一个字母大写。 """ text = text.strip() if len(text) == 0: # if empty string, return it return text else: text = text.lower() # lower all char # delete redundant empty space chunks = [chunk for chunk in text.split(" ") if len(chunk) >= 1] chunks[0] = chunks[0][0].upper() + chunks[0][1:] return " ".join(chunks)
def function[fmt_sentence, parameter[text]]: constant[English sentence formatter. First letter is always upper case. Example: "Do you want to build a snow man?" **中文文档** 句子格式。每句话的第一个单词第一个字母大写。 ] variable[text] assign[=] call[name[text].strip, parameter[]] if compare[call[name[len], parameter[name[text]]] equal[==] constant[0]] begin[:] return[name[text]]
keyword[def] identifier[fmt_sentence] ( identifier[text] ): literal[string] identifier[text] = identifier[text] . identifier[strip] () keyword[if] identifier[len] ( identifier[text] )== literal[int] : keyword[return] identifier[text] keyword[else] : identifier[text] = identifier[text] . identifier[lower] () identifier[chunks] =[ identifier[chunk] keyword[for] identifier[chunk] keyword[in] identifier[text] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[chunk] )>= literal[int] ] identifier[chunks] [ literal[int] ]= identifier[chunks] [ literal[int] ][ literal[int] ]. identifier[upper] ()+ identifier[chunks] [ literal[int] ][ literal[int] :] keyword[return] literal[string] . identifier[join] ( identifier[chunks] )
def fmt_sentence(text): """English sentence formatter. First letter is always upper case. Example: "Do you want to build a snow man?" **中文文档** 句子格式。每句话的第一个单词第一个字母大写。 """ text = text.strip() if len(text) == 0: # if empty string, return it return text # depends on [control=['if'], data=[]] else: text = text.lower() # lower all char # delete redundant empty space chunks = [chunk for chunk in text.split(' ') if len(chunk) >= 1] chunks[0] = chunks[0][0].upper() + chunks[0][1:] return ' '.join(chunks)
def record(self, tags, measurement_map, timestamp, attachments=None): """records stats with a set of tags""" assert all(vv >= 0 for vv in measurement_map.values()) for measure, value in measurement_map.items(): if measure != self._registered_measures.get(measure.name): return view_datas = [] for measure_name, view_data_list \ in self._measure_to_view_data_list_map.items(): if measure_name == measure.name: view_datas.extend(view_data_list) for view_data in view_datas: view_data.record( context=tags, value=value, timestamp=timestamp, attachments=attachments) self.export(view_datas)
def function[record, parameter[self, tags, measurement_map, timestamp, attachments]]: constant[records stats with a set of tags] assert[call[name[all], parameter[<ast.GeneratorExp object at 0x7da2047e9810>]]] for taget[tuple[[<ast.Name object at 0x7da2047e8fd0>, <ast.Name object at 0x7da2047e87f0>]]] in starred[call[name[measurement_map].items, parameter[]]] begin[:] if compare[name[measure] not_equal[!=] call[name[self]._registered_measures.get, parameter[name[measure].name]]] begin[:] return[None] variable[view_datas] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2047eb3a0>, <ast.Name object at 0x7da2047e9150>]]] in starred[call[name[self]._measure_to_view_data_list_map.items, parameter[]]] begin[:] if compare[name[measure_name] equal[==] name[measure].name] begin[:] call[name[view_datas].extend, parameter[name[view_data_list]]] for taget[name[view_data]] in starred[name[view_datas]] begin[:] call[name[view_data].record, parameter[]] call[name[self].export, parameter[name[view_datas]]]
keyword[def] identifier[record] ( identifier[self] , identifier[tags] , identifier[measurement_map] , identifier[timestamp] , identifier[attachments] = keyword[None] ): literal[string] keyword[assert] identifier[all] ( identifier[vv] >= literal[int] keyword[for] identifier[vv] keyword[in] identifier[measurement_map] . identifier[values] ()) keyword[for] identifier[measure] , identifier[value] keyword[in] identifier[measurement_map] . identifier[items] (): keyword[if] identifier[measure] != identifier[self] . identifier[_registered_measures] . identifier[get] ( identifier[measure] . identifier[name] ): keyword[return] identifier[view_datas] =[] keyword[for] identifier[measure_name] , identifier[view_data_list] keyword[in] identifier[self] . identifier[_measure_to_view_data_list_map] . identifier[items] (): keyword[if] identifier[measure_name] == identifier[measure] . identifier[name] : identifier[view_datas] . identifier[extend] ( identifier[view_data_list] ) keyword[for] identifier[view_data] keyword[in] identifier[view_datas] : identifier[view_data] . identifier[record] ( identifier[context] = identifier[tags] , identifier[value] = identifier[value] , identifier[timestamp] = identifier[timestamp] , identifier[attachments] = identifier[attachments] ) identifier[self] . identifier[export] ( identifier[view_datas] )
def record(self, tags, measurement_map, timestamp, attachments=None): """records stats with a set of tags""" assert all((vv >= 0 for vv in measurement_map.values())) for (measure, value) in measurement_map.items(): if measure != self._registered_measures.get(measure.name): return # depends on [control=['if'], data=[]] view_datas = [] for (measure_name, view_data_list) in self._measure_to_view_data_list_map.items(): if measure_name == measure.name: view_datas.extend(view_data_list) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for view_data in view_datas: view_data.record(context=tags, value=value, timestamp=timestamp, attachments=attachments) # depends on [control=['for'], data=['view_data']] self.export(view_datas) # depends on [control=['for'], data=[]]
def serial_adapters(self, serial_adapters): """ Sets the number of Serial adapters for this IOU VM. :param serial_adapters: number of adapters """ self._serial_adapters.clear() for _ in range(0, serial_adapters): self._serial_adapters.append(SerialAdapter(interfaces=4)) log.info('IOU "{name}" [{id}]: number of Serial adapters changed to {adapters}'.format(name=self._name, id=self._id, adapters=len(self._serial_adapters))) self._adapters = self._ethernet_adapters + self._serial_adapters
def function[serial_adapters, parameter[self, serial_adapters]]: constant[ Sets the number of Serial adapters for this IOU VM. :param serial_adapters: number of adapters ] call[name[self]._serial_adapters.clear, parameter[]] for taget[name[_]] in starred[call[name[range], parameter[constant[0], name[serial_adapters]]]] begin[:] call[name[self]._serial_adapters.append, parameter[call[name[SerialAdapter], parameter[]]]] call[name[log].info, parameter[call[constant[IOU "{name}" [{id}]: number of Serial adapters changed to {adapters}].format, parameter[]]]] name[self]._adapters assign[=] binary_operation[name[self]._ethernet_adapters + name[self]._serial_adapters]
keyword[def] identifier[serial_adapters] ( identifier[self] , identifier[serial_adapters] ): literal[string] identifier[self] . identifier[_serial_adapters] . identifier[clear] () keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[serial_adapters] ): identifier[self] . identifier[_serial_adapters] . identifier[append] ( identifier[SerialAdapter] ( identifier[interfaces] = literal[int] )) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[id] = identifier[self] . identifier[_id] , identifier[adapters] = identifier[len] ( identifier[self] . identifier[_serial_adapters] ))) identifier[self] . identifier[_adapters] = identifier[self] . identifier[_ethernet_adapters] + identifier[self] . identifier[_serial_adapters]
def serial_adapters(self, serial_adapters): """ Sets the number of Serial adapters for this IOU VM. :param serial_adapters: number of adapters """ self._serial_adapters.clear() for _ in range(0, serial_adapters): self._serial_adapters.append(SerialAdapter(interfaces=4)) # depends on [control=['for'], data=[]] log.info('IOU "{name}" [{id}]: number of Serial adapters changed to {adapters}'.format(name=self._name, id=self._id, adapters=len(self._serial_adapters))) self._adapters = self._ethernet_adapters + self._serial_adapters
def add_input(cmd, immediate=False): '''add some command input to be processed''' if immediate: process_stdin(cmd) else: mpstate.input_queue.put(cmd)
def function[add_input, parameter[cmd, immediate]]: constant[add some command input to be processed] if name[immediate] begin[:] call[name[process_stdin], parameter[name[cmd]]]
keyword[def] identifier[add_input] ( identifier[cmd] , identifier[immediate] = keyword[False] ): literal[string] keyword[if] identifier[immediate] : identifier[process_stdin] ( identifier[cmd] ) keyword[else] : identifier[mpstate] . identifier[input_queue] . identifier[put] ( identifier[cmd] )
def add_input(cmd, immediate=False): """add some command input to be processed""" if immediate: process_stdin(cmd) # depends on [control=['if'], data=[]] else: mpstate.input_queue.put(cmd)
def show_profiles_from_aws_credentials_file(credentials_files = [aws_credentials_file, aws_config_file]): """ Show profile names from ~/.aws/credentials :param credentials_files: :return: """ profiles = get_profiles_from_aws_credentials_file(credentials_files) for profile in set(profiles): printInfo(' * %s' % profile)
def function[show_profiles_from_aws_credentials_file, parameter[credentials_files]]: constant[ Show profile names from ~/.aws/credentials :param credentials_files: :return: ] variable[profiles] assign[=] call[name[get_profiles_from_aws_credentials_file], parameter[name[credentials_files]]] for taget[name[profile]] in starred[call[name[set], parameter[name[profiles]]]] begin[:] call[name[printInfo], parameter[binary_operation[constant[ * %s] <ast.Mod object at 0x7da2590d6920> name[profile]]]]
keyword[def] identifier[show_profiles_from_aws_credentials_file] ( identifier[credentials_files] =[ identifier[aws_credentials_file] , identifier[aws_config_file] ]): literal[string] identifier[profiles] = identifier[get_profiles_from_aws_credentials_file] ( identifier[credentials_files] ) keyword[for] identifier[profile] keyword[in] identifier[set] ( identifier[profiles] ): identifier[printInfo] ( literal[string] % identifier[profile] )
def show_profiles_from_aws_credentials_file(credentials_files=[aws_credentials_file, aws_config_file]): """ Show profile names from ~/.aws/credentials :param credentials_files: :return: """ profiles = get_profiles_from_aws_credentials_file(credentials_files) for profile in set(profiles): printInfo(' * %s' % profile) # depends on [control=['for'], data=['profile']]