code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def recursive_replace(list_, target, repl=-1):
    """Recursively replace every occurrence of *target* with *repl*.

    Walks *list_* (and any nested lists or numpy arrays) and builds a new
    list in which each element equal to *target* is substituted by *repl*;
    all other elements are kept as-is. The input is never mutated.
    """
    replaced = []
    for element in list_:
        if isinstance(element, (list, np.ndarray)):
            # Recurse into nested containers so replacement reaches any depth.
            replaced.append(recursive_replace(element, target, repl))
        elif element == target:
            replaced.append(repl)
        else:
            replaced.append(element)
    return replaced
def function[recursive_replace, parameter[list_, target, repl]]: constant[ Recursively removes target in all lists and sublists and replaces them with the repl variable ] variable[repl_list] assign[=] <ast.ListComp object at 0x7da1b24ad450> return[name[repl_list]]
keyword[def] identifier[recursive_replace] ( identifier[list_] , identifier[target] , identifier[repl] =- literal[int] ): literal[string] identifier[repl_list] =[ identifier[recursive_replace] ( identifier[item] , identifier[target] , identifier[repl] ) keyword[if] identifier[isinstance] ( identifier[item] ,( identifier[list] , identifier[np] . identifier[ndarray] )) keyword[else] ( identifier[repl] keyword[if] identifier[item] == identifier[target] keyword[else] identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[list_] ] keyword[return] identifier[repl_list]
def recursive_replace(list_, target, repl=-1): """ Recursively removes target in all lists and sublists and replaces them with the repl variable """ repl_list = [recursive_replace(item, target, repl) if isinstance(item, (list, np.ndarray)) else repl if item == target else item for item in list_] return repl_list
def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
    """
    Plot the band structure and the DOS.

    Args:
        dos_pos: Index of the task from which the DOS should be obtained.
                 None is all DOSes should be displayed. Accepts integer or list of integers.
        method: String defining the method for the computation of the DOS.
        step: Energy step (eV) of the linear mesh.
        width: Standard deviation (eV) of the gaussian.
        kwargs: Keyword arguments passed to `plot` method to customize the plot.

    Returns:
        `matplotlib` figure.
    """
    # Normalize a scalar index into a one-element list; None means "all".
    if dos_pos is not None and not isinstance(dos_pos, (list, tuple)):
        dos_pos = [dos_pos]

    from abipy.electrons.ebands import ElectronDosPlotter
    plotter = ElectronDosPlotter()

    for idx, task in enumerate(self.dos_tasks):
        wanted = dos_pos is None or idx in dos_pos
        if not wanted:
            continue
        # Each task's GSR file provides the bands from which the DOS is built.
        with task.open_gsr() as gsr:
            edos = gsr.ebands.get_edos(method=method, step=step, width=width)
            ngkpt = task.get_inpvar("ngkpt")
            plotter.add_edos("ngkpt %s" % str(ngkpt), edos)

    return plotter.combiplot(**kwargs)
def function[plot_edoses, parameter[self, dos_pos, method, step, width]]: constant[ Plot the band structure and the DOS. Args: dos_pos: Index of the task from which the DOS should be obtained. None is all DOSes should be displayed. Accepts integer or list of integers. method: String defining the method for the computation of the DOS. step: Energy step (eV) of the linear mesh. width: Standard deviation (eV) of the gaussian. kwargs: Keyword arguments passed to `plot` method to customize the plot. Returns: `matplotlib` figure. ] if <ast.BoolOp object at 0x7da1b21860e0> begin[:] variable[dos_pos] assign[=] list[[<ast.Name object at 0x7da1b21852a0>]] from relative_module[abipy.electrons.ebands] import module[ElectronDosPlotter] variable[plotter] assign[=] call[name[ElectronDosPlotter], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b2186fe0>, <ast.Name object at 0x7da1b2186620>]]] in starred[call[name[enumerate], parameter[name[self].dos_tasks]]] begin[:] if <ast.BoolOp object at 0x7da1b2185630> begin[:] continue with call[name[task].open_gsr, parameter[]] begin[:] variable[edos] assign[=] call[name[gsr].ebands.get_edos, parameter[]] variable[ngkpt] assign[=] call[name[task].get_inpvar, parameter[constant[ngkpt]]] call[name[plotter].add_edos, parameter[binary_operation[constant[ngkpt %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[ngkpt]]]], name[edos]]] return[call[name[plotter].combiplot, parameter[]]]
keyword[def] identifier[plot_edoses] ( identifier[self] , identifier[dos_pos] = keyword[None] , identifier[method] = literal[string] , identifier[step] = literal[int] , identifier[width] = literal[int] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[dos_pos] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[dos_pos] ,( identifier[list] , identifier[tuple] )): identifier[dos_pos] =[ identifier[dos_pos] ] keyword[from] identifier[abipy] . identifier[electrons] . identifier[ebands] keyword[import] identifier[ElectronDosPlotter] identifier[plotter] = identifier[ElectronDosPlotter] () keyword[for] identifier[i] , identifier[task] keyword[in] identifier[enumerate] ( identifier[self] . identifier[dos_tasks] ): keyword[if] identifier[dos_pos] keyword[is] keyword[not] keyword[None] keyword[and] identifier[i] keyword[not] keyword[in] identifier[dos_pos] : keyword[continue] keyword[with] identifier[task] . identifier[open_gsr] () keyword[as] identifier[gsr] : identifier[edos] = identifier[gsr] . identifier[ebands] . identifier[get_edos] ( identifier[method] = identifier[method] , identifier[step] = identifier[step] , identifier[width] = identifier[width] ) identifier[ngkpt] = identifier[task] . identifier[get_inpvar] ( literal[string] ) identifier[plotter] . identifier[add_edos] ( literal[string] % identifier[str] ( identifier[ngkpt] ), identifier[edos] ) keyword[return] identifier[plotter] . identifier[combiplot] (** identifier[kwargs] )
def plot_edoses(self, dos_pos=None, method='gaussian', step=0.01, width=0.1, **kwargs): """ Plot the band structure and the DOS. Args: dos_pos: Index of the task from which the DOS should be obtained. None is all DOSes should be displayed. Accepts integer or list of integers. method: String defining the method for the computation of the DOS. step: Energy step (eV) of the linear mesh. width: Standard deviation (eV) of the gaussian. kwargs: Keyword arguments passed to `plot` method to customize the plot. Returns: `matplotlib` figure. """ if dos_pos is not None and (not isinstance(dos_pos, (list, tuple))): dos_pos = [dos_pos] # depends on [control=['if'], data=[]] from abipy.electrons.ebands import ElectronDosPlotter plotter = ElectronDosPlotter() for (i, task) in enumerate(self.dos_tasks): if dos_pos is not None and i not in dos_pos: continue # depends on [control=['if'], data=[]] with task.open_gsr() as gsr: edos = gsr.ebands.get_edos(method=method, step=step, width=width) ngkpt = task.get_inpvar('ngkpt') plotter.add_edos('ngkpt %s' % str(ngkpt), edos) # depends on [control=['with'], data=['gsr']] # depends on [control=['for'], data=[]] return plotter.combiplot(**kwargs)
def join(self, formatted_texts):
    """:type formatted_texts: list[FormattedText]"""
    # Materialize once so a generator argument is not exhausted by the
    # compatibility pass before the actual join.
    texts = list(formatted_texts)
    for other in texts:
        assert self._is_compatible(other), "Cannot join text with different modes"
    self.text = self.text.join(other.text for other in texts)
    return self
def function[join, parameter[self, formatted_texts]]: constant[:type formatted_texts: list[FormattedText]] variable[formatted_texts] assign[=] call[name[list], parameter[name[formatted_texts]]] for taget[name[formatted_text]] in starred[name[formatted_texts]] begin[:] assert[call[name[self]._is_compatible, parameter[name[formatted_text]]]] name[self].text assign[=] call[name[self].text.join, parameter[<ast.GeneratorExp object at 0x7da18dc99c60>]] return[name[self]]
keyword[def] identifier[join] ( identifier[self] , identifier[formatted_texts] ): literal[string] identifier[formatted_texts] = identifier[list] ( identifier[formatted_texts] ) keyword[for] identifier[formatted_text] keyword[in] identifier[formatted_texts] : keyword[assert] identifier[self] . identifier[_is_compatible] ( identifier[formatted_text] ), literal[string] identifier[self] . identifier[text] = identifier[self] . identifier[text] . identifier[join] (( identifier[formatted_text] . identifier[text] keyword[for] identifier[formatted_text] keyword[in] identifier[formatted_texts] )) keyword[return] identifier[self]
def join(self, formatted_texts): """:type formatted_texts: list[FormattedText]""" formatted_texts = list(formatted_texts) # so that after the first iteration elements are not lost if generator for formatted_text in formatted_texts: assert self._is_compatible(formatted_text), 'Cannot join text with different modes' # depends on [control=['for'], data=['formatted_text']] self.text = self.text.join((formatted_text.text for formatted_text in formatted_texts)) return self
def save(html, fname=None, launch=False):
    """wrap HTML in a top and bottom (with css) and save to disk."""
    # Wrap the fragment in the shared page template and stamp the timestamp.
    page = html_top + html + html_bot
    page = page.replace("~GENAT~", swhlab.common.datetimeToString())
    if fname is None:
        # No target given: write to a temp file and force a browser launch.
        fname = tempfile.gettempdir() + "/temp.html"
        launch = True
    fname = os.path.abspath(fname)
    with open(fname, 'w') as f:
        f.write(page)
    global stylesheetSaved
    stylesheetPath = os.path.join(os.path.dirname(fname), "style.css")
    # Write the stylesheet at most once per process unless it is missing.
    if not os.path.exists(stylesheetPath) or stylesheetSaved is False:
        with open(stylesheetPath, 'w') as f:
            f.write(stylesheet)
        stylesheetSaved = True
    if launch:
        webbrowser.open(fname)
def function[save, parameter[html, fname, launch]]: constant[wrap HTML in a top and bottom (with css) and save to disk.] variable[html] assign[=] binary_operation[binary_operation[name[html_top] + name[html]] + name[html_bot]] variable[html] assign[=] call[name[html].replace, parameter[constant[~GENAT~], call[name[swhlab].common.datetimeToString, parameter[]]]] if compare[name[fname] is constant[None]] begin[:] variable[fname] assign[=] binary_operation[call[name[tempfile].gettempdir, parameter[]] + constant[/temp.html]] variable[launch] assign[=] constant[True] variable[fname] assign[=] call[name[os].path.abspath, parameter[name[fname]]] with call[name[open], parameter[name[fname], constant[w]]] begin[:] call[name[f].write, parameter[name[html]]] <ast.Global object at 0x7da1afe58d60> variable[stylesheetPath] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[fname]]], constant[style.css]]] if <ast.BoolOp object at 0x7da1afe59030> begin[:] with call[name[open], parameter[name[stylesheetPath], constant[w]]] begin[:] call[name[f].write, parameter[name[stylesheet]]] variable[stylesheetSaved] assign[=] constant[True] if name[launch] begin[:] call[name[webbrowser].open, parameter[name[fname]]]
keyword[def] identifier[save] ( identifier[html] , identifier[fname] = keyword[None] , identifier[launch] = keyword[False] ): literal[string] identifier[html] = identifier[html_top] + identifier[html] + identifier[html_bot] identifier[html] = identifier[html] . identifier[replace] ( literal[string] , identifier[swhlab] . identifier[common] . identifier[datetimeToString] ()) keyword[if] identifier[fname] keyword[is] keyword[None] : identifier[fname] = identifier[tempfile] . identifier[gettempdir] ()+ literal[string] identifier[launch] = keyword[True] identifier[fname] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[fname] ) keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[html] ) keyword[global] identifier[stylesheetSaved] identifier[stylesheetPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[fname] ), literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[stylesheetPath] ) keyword[or] identifier[stylesheetSaved] keyword[is] keyword[False] : keyword[with] identifier[open] ( identifier[stylesheetPath] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[stylesheet] ) identifier[stylesheetSaved] = keyword[True] keyword[if] identifier[launch] : identifier[webbrowser] . identifier[open] ( identifier[fname] )
def save(html, fname=None, launch=False): """wrap HTML in a top and bottom (with css) and save to disk.""" html = html_top + html + html_bot html = html.replace('~GENAT~', swhlab.common.datetimeToString()) if fname is None: fname = tempfile.gettempdir() + '/temp.html' launch = True # depends on [control=['if'], data=['fname']] fname = os.path.abspath(fname) with open(fname, 'w') as f: f.write(html) # depends on [control=['with'], data=['f']] global stylesheetSaved stylesheetPath = os.path.join(os.path.dirname(fname), 'style.css') if not os.path.exists(stylesheetPath) or stylesheetSaved is False: with open(stylesheetPath, 'w') as f: f.write(stylesheet) stylesheetSaved = True # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] if launch: webbrowser.open(fname) # depends on [control=['if'], data=[]]
def CopyTextToLabel(cls, text, prefix=''):
    """Copies a string to a label.

    A label only supports a limited set of characters therefore
    unsupported characters are replaced with an underscore.

    Args:
      text (str): label text.
      prefix (Optional[str]): label prefix.

    Returns:
      str: label.
    """
    # Prepend the prefix, then sanitize every disallowed character to '_'.
    return cls._INVALID_LABEL_CHARACTERS_REGEX.sub(
        '_', '{0:s}{1:s}'.format(prefix, text))
def function[CopyTextToLabel, parameter[cls, text, prefix]]: constant[Copies a string to a label. A label only supports a limited set of characters therefore unsupported characters are replaced with an underscore. Args: text (str): label text. prefix (Optional[str]): label prefix. Returns: str: label. ] variable[text] assign[=] call[constant[{0:s}{1:s}].format, parameter[name[prefix], name[text]]] return[call[name[cls]._INVALID_LABEL_CHARACTERS_REGEX.sub, parameter[constant[_], name[text]]]]
keyword[def] identifier[CopyTextToLabel] ( identifier[cls] , identifier[text] , identifier[prefix] = literal[string] ): literal[string] identifier[text] = literal[string] . identifier[format] ( identifier[prefix] , identifier[text] ) keyword[return] identifier[cls] . identifier[_INVALID_LABEL_CHARACTERS_REGEX] . identifier[sub] ( literal[string] , identifier[text] )
def CopyTextToLabel(cls, text, prefix=''): """Copies a string to a label. A label only supports a limited set of characters therefore unsupported characters are replaced with an underscore. Args: text (str): label text. prefix (Optional[str]): label prefix. Returns: str: label. """ text = '{0:s}{1:s}'.format(prefix, text) return cls._INVALID_LABEL_CHARACTERS_REGEX.sub('_', text)
def single_path_generator(pathname):
    """
    emits name,chunkgen pairs for the given file at pathname. If
    pathname is a directory, will act recursively and will emit for each
    file in the directory tree chunkgen is a generator that can be iterated
    over to obtain the contents of the file in multiple parts
    """
    if isdir(pathname):
        # Length of the prefix to strip from emitted names; include the
        # trailing separator when pathname does not already end with one.
        trim = len(pathname)
        if pathname[-1] != sep:
            trim += 1
        for entry in directory_generator(pathname, trim):
            yield entry
    else:
        # FIX: the original called zf.close() only after a full iteration,
        # leaking the file handle if the consumer abandoned the generator
        # or an error occurred mid-iteration. The context manager closes
        # the archive in every case (including GeneratorExit).
        with ZipFile(pathname) as zf:
            for name in zf.namelist():
                # Entries ending in '/' are directories; skip them.
                if name[-1] != '/':
                    yield name, zipentry_chunk(zf, name)
def function[single_path_generator, parameter[pathname]]: constant[ emits name,chunkgen pairs for the given file at pathname. If pathname is a directory, will act recursively and will emit for each file in the directory tree chunkgen is a generator that can be iterated over to obtain the contents of the file in multiple parts ] if call[name[isdir], parameter[name[pathname]]] begin[:] variable[trim] assign[=] call[name[len], parameter[name[pathname]]] if compare[call[name[pathname]][<ast.UnaryOp object at 0x7da1b0b19a20>] not_equal[!=] name[sep]] begin[:] <ast.AugAssign object at 0x7da1b0b1a500> for taget[name[entry]] in starred[call[name[directory_generator], parameter[name[pathname], name[trim]]]] begin[:] <ast.Yield object at 0x7da1b0b18e50>
keyword[def] identifier[single_path_generator] ( identifier[pathname] ): literal[string] keyword[if] identifier[isdir] ( identifier[pathname] ): identifier[trim] = identifier[len] ( identifier[pathname] ) keyword[if] identifier[pathname] [- literal[int] ]!= identifier[sep] : identifier[trim] += literal[int] keyword[for] identifier[entry] keyword[in] identifier[directory_generator] ( identifier[pathname] , identifier[trim] ): keyword[yield] identifier[entry] keyword[else] : identifier[zf] = identifier[ZipFile] ( identifier[pathname] ) keyword[for] identifier[f] keyword[in] identifier[zf] . identifier[namelist] (): keyword[if] identifier[f] [- literal[int] ]!= literal[string] : keyword[yield] identifier[f] , identifier[zipentry_chunk] ( identifier[zf] , identifier[f] ) identifier[zf] . identifier[close] ()
def single_path_generator(pathname): """ emits name,chunkgen pairs for the given file at pathname. If pathname is a directory, will act recursively and will emit for each file in the directory tree chunkgen is a generator that can be iterated over to obtain the contents of the file in multiple parts """ if isdir(pathname): trim = len(pathname) if pathname[-1] != sep: trim += 1 # depends on [control=['if'], data=[]] for entry in directory_generator(pathname, trim): yield entry # depends on [control=['for'], data=['entry']] # depends on [control=['if'], data=[]] else: zf = ZipFile(pathname) for f in zf.namelist(): if f[-1] != '/': yield (f, zipentry_chunk(zf, f)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] zf.close()
def is_iter(y, ignore=six.string_types):
    '''
    Test if an object is iterable, but not a string type.

    Test if an object is an iterator or is iterable itself. By default
    this does not return True for string objects.

    The `ignore` argument defaults to a list of string types that are
    not considered iterable. This can be used to also exclude things
    like dictionaries or named tuples.

    Based on https://bitbucket.org/petershinners/yter
    '''
    # Excluded types are reported as non-iterable regardless of protocol.
    if ignore and isinstance(y, ignore):
        return False
    # EAFP: anything iter() accepts is iterable.
    try:
        iter(y)
    except TypeError:
        return False
    return True
def function[is_iter, parameter[y, ignore]]: constant[ Test if an object is iterable, but not a string type. Test if an object is an iterator or is iterable itself. By default this does not return True for string objects. The `ignore` argument defaults to a list of string types that are not considered iterable. This can be used to also exclude things like dictionaries or named tuples. Based on https://bitbucket.org/petershinners/yter ] if <ast.BoolOp object at 0x7da18f00ed10> begin[:] return[constant[False]] <ast.Try object at 0x7da1b1f8f010>
keyword[def] identifier[is_iter] ( identifier[y] , identifier[ignore] = identifier[six] . identifier[string_types] ): literal[string] keyword[if] identifier[ignore] keyword[and] identifier[isinstance] ( identifier[y] , identifier[ignore] ): keyword[return] keyword[False] keyword[try] : identifier[iter] ( identifier[y] ) keyword[return] keyword[True] keyword[except] identifier[TypeError] : keyword[return] keyword[False]
def is_iter(y, ignore=six.string_types): """ Test if an object is iterable, but not a string type. Test if an object is an iterator or is iterable itself. By default this does not return True for string objects. The `ignore` argument defaults to a list of string types that are not considered iterable. This can be used to also exclude things like dictionaries or named tuples. Based on https://bitbucket.org/petershinners/yter """ if ignore and isinstance(y, ignore): return False # depends on [control=['if'], data=[]] try: iter(y) return True # depends on [control=['try'], data=[]] except TypeError: return False # depends on [control=['except'], data=[]]
def set_permission(permission, value, app):
    """Set a permission for the specified app

    Value should be 'deny' or 'allow'
    """
    # The object created to wrap PermissionSettingsModule is to work around
    # an intermittent bug where it will sometimes be undefined.
    script = """
    const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
    var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")};
    return a.b.PermissionSettingsModule.addPermission({
      type: '%s',
      origin: '%s',
      manifestURL: '%s/manifest.webapp',
      value: '%s',
      browserFlag: false
    });
    """
    # The app's origin doubles as the manifest URL base.
    origin = 'app://' + app
    run_marionette_script(script % (permission, origin, origin, value), True)
def function[set_permission, parameter[permission, value, app]]: constant[Set a permission for the specified app Value should be 'deny' or 'allow' ] variable[script] assign[=] constant[ const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components; var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")}; return a.b.PermissionSettingsModule.addPermission({ type: '%s', origin: '%s', manifestURL: '%s/manifest.webapp', value: '%s', browserFlag: false }); ] variable[app_url] assign[=] binary_operation[constant[app://] + name[app]] call[name[run_marionette_script], parameter[binary_operation[name[script] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c795cc0>, <ast.Name object at 0x7da20c795180>, <ast.Name object at 0x7da20c796ad0>, <ast.Name object at 0x7da20c795330>]]], constant[True]]]
keyword[def] identifier[set_permission] ( identifier[permission] , identifier[value] , identifier[app] ): literal[string] identifier[script] = literal[string] identifier[app_url] = literal[string] + identifier[app] identifier[run_marionette_script] ( identifier[script] %( identifier[permission] , identifier[app_url] , identifier[app_url] , identifier[value] ), keyword[True] )
def set_permission(permission, value, app): """Set a permission for the specified app Value should be 'deny' or 'allow' """ # The object created to wrap PermissionSettingsModule is to work around # an intermittent bug where it will sometimes be undefined. script = '\n const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;\n var a = {b: Cu.import("resource://gre/modules/PermissionSettings.jsm")};\n return a.b.PermissionSettingsModule.addPermission({\n type: \'%s\',\n origin: \'%s\',\n manifestURL: \'%s/manifest.webapp\',\n value: \'%s\',\n browserFlag: false\n });\n ' app_url = 'app://' + app run_marionette_script(script % (permission, app_url, app_url, value), True)
def jackknife_stats(theta_subs, theta_full, N=None, d=1):
    """Compute Jackknife Estimates, SE, Bias, t-scores, p-values

    Parameters:
    -----------
    theta_subs : ndarray
        The metrics, estimates, parameters, etc. of the model (see "func")
        for each subsample. It is a <C x M> matrix, i.e. C=binocoeff(N,d)
        subsamples, and M parameters that are returned by the model.

    theta_full : ndarray
        The metrics, estimates, parameters, etc. of the model (see "func")
        for the full sample. It is a <1 x M> vector with the M parameters
        that are returned by the model.

    N : int
        The number of observations in the full sample. Is required for
        Delete-d Jackknife, i.e. d>1. (Default is N=None)

    d : int
        The number of observations to leave out for each Jackknife
        subsample, i.e. the subsample size is N-d. (The default is d=1 for
        the "Delete-1 Jackknife" procedure.)

    Returns:
    --------
    pvalues : ndarray
        The two-sided P-values of the t-Score for each Jackknife estimate.

    tscores : ndarray
        The t-Score for each Jackknife estimate.

    theta_jack : ndarray
        The bias-corrected Jackknife Estimates (model parameter, metric,
        coefficient, etc.). Use the parameters for prediction.

    se_jack : ndarray
        The Jackknife Standard Error

    theta_biased : ndarray
        The biased Jackknife Estimate.
    """
    import numpy as np
    import scipy.stats

    # The biased Jackknife Estimate: mean over all subsample estimates.
    theta_biased = np.mean(theta_subs, axis=0)

    # Inflation Factor for the Jackknife Standard Error.
    # FIX: the original used "if d is 1", an identity comparison with an
    # int literal that only works via CPython's small-int cache and emits
    # a SyntaxWarning on Python >= 3.8; equality is the correct test.
    if d == 1:
        if N is None:
            N = theta_subs.shape[0]
        inflation = (N - 1) / N
    elif d > 1:
        if N is None:
            raise Exception((
                "If d>1 then you must provide N (number of "
                "observations in the full sample)"))
        C = theta_subs.shape[0]
        inflation = ((N - d) / d) / C
    else:
        # FIX: previously d < 1 fell through and left 'inflation' undefined,
        # producing a confusing UnboundLocalError further down.
        raise ValueError("d must be a positive integer (d >= 1)")

    # The Jackknife Standard Error
    se_jack = np.sqrt(
        inflation * np.sum((theta_subs - theta_biased)**2, axis=0))

    # The bias-corrected Jackknife Estimate
    theta_jack = N * theta_full - (N - 1) * theta_biased

    # The Jackknife t-Statistics
    tscores = theta_jack / se_jack

    # Two-sided P-values from Student's t with N - Q - d degrees of freedom,
    # where Q is the number of model parameters (incl. intercept).
    Q = theta_subs.shape[1]
    pvalues = scipy.stats.t.sf(np.abs(tscores), N - Q - d) * 2

    return pvalues, tscores, theta_jack, se_jack, theta_biased
def function[jackknife_stats, parameter[theta_subs, theta_full, N, d]]: constant[Compute Jackknife Estimates, SE, Bias, t-scores, p-values Parameters: ----------- theta_subs : ndarray The metrics, estimates, parameters, etc. of the model (see "func") for each subsample. It is a <C x M> matrix, i.e. C=binocoeff(N,d) subsamples, and M parameters that are returned by the model. theta_full : ndarray The metrics, estimates, parameters, etc. of the model (see "func") for the full sample. It is a <1 x M> vecotr with the M parameters that are returned by the model. N : int The number of observations in the full sample. Is required for Delete-d Jackknife, i.e. d>1. (Default is N=None) d : int The number of observations to leave out for each Jackknife subsample, i.e. the subsample size is N-d. (The default is d=1 for the "Delete-1 Jackknife" procedure.) Returns: -------- pvalues : ndarray The two-sided P-values of the t-Score for each Jackknife estimate. (In Social Sciences pval<0.05 is referred as acceptable but it is usually better to look for p-values way closer to Zero. Just remove or replace a variable/feature with high pval>=pcritical and run the Jackknife again.) tscores : ndarray The t-Score for each Jackknife estimate. (As rule of thumb a value abs(tscore)>2 indicates a bad model parameter but jsut check the p-value.) theta_jack : ndarray The bias-corrected Jackknife Estimates (model parameter, metric, coefficient, etc.). Use the parameters for prediction. se_jack : ndarray The Jackknife Standard Error theta_biased : ndarray The biased Jackknife Estimate. Other Variables: ---------------- These variables occur in the source code as intermediate results. Q : int The Number of independent variables of a model (incl. intercept). C : int The number of Jackknife subsamples if d>1. There are C=binocoeff(N,d) combinations. 
] import module[numpy] as alias[np] variable[theta_biased] assign[=] call[name[np].mean, parameter[name[theta_subs]]] if compare[name[d] is constant[1]] begin[:] if compare[name[N] is constant[None]] begin[:] variable[N] assign[=] call[name[theta_subs].shape][constant[0]] variable[inflation] assign[=] binary_operation[binary_operation[name[N] - constant[1]] / name[N]] variable[se_jack] assign[=] call[name[np].sqrt, parameter[binary_operation[name[inflation] * call[name[np].sum, parameter[binary_operation[binary_operation[name[theta_subs] - name[theta_biased]] ** constant[2]]]]]]] variable[theta_jack] assign[=] binary_operation[binary_operation[name[N] * name[theta_full]] - binary_operation[binary_operation[name[N] - constant[1]] * name[theta_biased]]] variable[tscores] assign[=] binary_operation[name[theta_jack] / name[se_jack]] import module[scipy.stats] variable[Q] assign[=] call[name[theta_subs].shape][constant[1]] variable[pvalues] assign[=] binary_operation[call[name[scipy].stats.t.sf, parameter[call[name[np].abs, parameter[name[tscores]]], binary_operation[binary_operation[name[N] - name[Q]] - name[d]]]] * constant[2]] return[tuple[[<ast.Name object at 0x7da1b13781f0>, <ast.Name object at 0x7da1b1379f90>, <ast.Name object at 0x7da1b137a8f0>, <ast.Name object at 0x7da1b1379480>, <ast.Name object at 0x7da1b1379d80>]]]
keyword[def] identifier[jackknife_stats] ( identifier[theta_subs] , identifier[theta_full] , identifier[N] = keyword[None] , identifier[d] = literal[int] ): literal[string] keyword[import] identifier[numpy] keyword[as] identifier[np] identifier[theta_biased] = identifier[np] . identifier[mean] ( identifier[theta_subs] , identifier[axis] = literal[int] ) keyword[if] identifier[d] keyword[is] literal[int] : keyword[if] identifier[N] keyword[is] keyword[None] : identifier[N] = identifier[theta_subs] . identifier[shape] [ literal[int] ] identifier[inflation] =( identifier[N] - literal[int] )/ identifier[N] keyword[elif] identifier[d] > literal[int] : keyword[if] identifier[N] keyword[is] keyword[None] : keyword[raise] identifier[Exception] (( literal[string] literal[string] )) identifier[C] = identifier[theta_subs] . identifier[shape] [ literal[int] ] identifier[inflation] =(( identifier[N] - identifier[d] )/ identifier[d] )/ identifier[C] identifier[se_jack] = identifier[np] . identifier[sqrt] ( identifier[inflation] * identifier[np] . identifier[sum] (( identifier[theta_subs] - identifier[theta_biased] )** literal[int] , identifier[axis] = literal[int] )) identifier[theta_jack] = identifier[N] * identifier[theta_full] -( identifier[N] - literal[int] )* identifier[theta_biased] identifier[tscores] = identifier[theta_jack] / identifier[se_jack] keyword[import] identifier[scipy] . identifier[stats] identifier[Q] = identifier[theta_subs] . identifier[shape] [ literal[int] ] identifier[pvalues] = identifier[scipy] . identifier[stats] . identifier[t] . identifier[sf] ( identifier[np] . identifier[abs] ( identifier[tscores] ), identifier[N] - identifier[Q] - identifier[d] )* literal[int] keyword[return] identifier[pvalues] , identifier[tscores] , identifier[theta_jack] , identifier[se_jack] , identifier[theta_biased]
def jackknife_stats(theta_subs, theta_full, N=None, d=1): """Compute Jackknife Estimates, SE, Bias, t-scores, p-values Parameters: ----------- theta_subs : ndarray The metrics, estimates, parameters, etc. of the model (see "func") for each subsample. It is a <C x M> matrix, i.e. C=binocoeff(N,d) subsamples, and M parameters that are returned by the model. theta_full : ndarray The metrics, estimates, parameters, etc. of the model (see "func") for the full sample. It is a <1 x M> vecotr with the M parameters that are returned by the model. N : int The number of observations in the full sample. Is required for Delete-d Jackknife, i.e. d>1. (Default is N=None) d : int The number of observations to leave out for each Jackknife subsample, i.e. the subsample size is N-d. (The default is d=1 for the "Delete-1 Jackknife" procedure.) Returns: -------- pvalues : ndarray The two-sided P-values of the t-Score for each Jackknife estimate. (In Social Sciences pval<0.05 is referred as acceptable but it is usually better to look for p-values way closer to Zero. Just remove or replace a variable/feature with high pval>=pcritical and run the Jackknife again.) tscores : ndarray The t-Score for each Jackknife estimate. (As rule of thumb a value abs(tscore)>2 indicates a bad model parameter but jsut check the p-value.) theta_jack : ndarray The bias-corrected Jackknife Estimates (model parameter, metric, coefficient, etc.). Use the parameters for prediction. se_jack : ndarray The Jackknife Standard Error theta_biased : ndarray The biased Jackknife Estimate. Other Variables: ---------------- These variables occur in the source code as intermediate results. Q : int The Number of independent variables of a model (incl. intercept). C : int The number of Jackknife subsamples if d>1. There are C=binocoeff(N,d) combinations. 
""" # The biased Jackknife Estimate import numpy as np theta_biased = np.mean(theta_subs, axis=0) # Inflation Factor for the Jackknife Standard Error if d is 1: if N is None: N = theta_subs.shape[0] # depends on [control=['if'], data=['N']] inflation = (N - 1) / N # depends on [control=['if'], data=[]] elif d > 1: if N is None: raise Exception('If d>1 then you must provide N (number of observations in the full sample)') # depends on [control=['if'], data=[]] C = theta_subs.shape[0] inflation = (N - d) / d / C # depends on [control=['if'], data=['d']] # The Jackknife Standard Error se_jack = np.sqrt(inflation * np.sum((theta_subs - theta_biased) ** 2, axis=0)) # The bias-corrected Jackknife Estimate theta_jack = N * theta_full - (N - 1) * theta_biased # The Jackknife t-Statistics tscores = theta_jack / se_jack # Two-sided P-values import scipy.stats Q = theta_subs.shape[1] pvalues = scipy.stats.t.sf(np.abs(tscores), N - Q - d) * 2 # done return (pvalues, tscores, theta_jack, se_jack, theta_biased)
def _get_with_criteria(self, criteria, offset=None, limit=None): ''' returns items selected by criteria ''' SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit) self._cursor.execute(SQL) for item in self._cursor.fetchall(): yield self._make_item(item)
def function[_get_with_criteria, parameter[self, criteria, offset, limit]]: constant[ returns items selected by criteria ] variable[SQL] assign[=] call[call[name[SQLBuilder], parameter[name[self]._table, name[criteria]]].select, parameter[]] call[name[self]._cursor.execute, parameter[name[SQL]]] for taget[name[item]] in starred[call[name[self]._cursor.fetchall, parameter[]]] begin[:] <ast.Yield object at 0x7da2041d90c0>
keyword[def] identifier[_get_with_criteria] ( identifier[self] , identifier[criteria] , identifier[offset] = keyword[None] , identifier[limit] = keyword[None] ): literal[string] identifier[SQL] = identifier[SQLBuilder] ( identifier[self] . identifier[_table] , identifier[criteria] ). identifier[select] ( identifier[offset] = identifier[offset] , identifier[limit] = identifier[limit] ) identifier[self] . identifier[_cursor] . identifier[execute] ( identifier[SQL] ) keyword[for] identifier[item] keyword[in] identifier[self] . identifier[_cursor] . identifier[fetchall] (): keyword[yield] identifier[self] . identifier[_make_item] ( identifier[item] )
def _get_with_criteria(self, criteria, offset=None, limit=None): """ returns items selected by criteria """ SQL = SQLBuilder(self._table, criteria).select(offset=offset, limit=limit) self._cursor.execute(SQL) for item in self._cursor.fetchall(): yield self._make_item(item) # depends on [control=['for'], data=['item']]
def functions(self): """ A list of functions declared or defined in this module. """ return [v for v in self.globals.values() if isinstance(v, values.Function)]
def function[functions, parameter[self]]: constant[ A list of functions declared or defined in this module. ] return[<ast.ListComp object at 0x7da1b194c250>]
keyword[def] identifier[functions] ( identifier[self] ): literal[string] keyword[return] [ identifier[v] keyword[for] identifier[v] keyword[in] identifier[self] . identifier[globals] . identifier[values] () keyword[if] identifier[isinstance] ( identifier[v] , identifier[values] . identifier[Function] )]
def functions(self): """ A list of functions declared or defined in this module. """ return [v for v in self.globals.values() if isinstance(v, values.Function)]
def xs(self, key, axis=1): """ Return slice of panel along selected axis. Parameters ---------- key : object Label axis : {'items', 'major', 'minor}, default 1/'major' Returns ------- y : ndim(self)-1 Notes ----- xs is only for getting, not setting values. MultiIndex Slicers is a generic way to get/set values on any level or levels and is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>` """ axis = self._get_axis_number(axis) if axis == 0: return self[key] self._consolidate_inplace() axis_number = self._get_axis_number(axis) new_data = self._data.xs(key, axis=axis_number, copy=False) result = self._construct_return_type(new_data) copy = new_data.is_mixed_type result._set_is_copy(self, copy=copy) return result
def function[xs, parameter[self, key, axis]]: constant[ Return slice of panel along selected axis. Parameters ---------- key : object Label axis : {'items', 'major', 'minor}, default 1/'major' Returns ------- y : ndim(self)-1 Notes ----- xs is only for getting, not setting values. MultiIndex Slicers is a generic way to get/set values on any level or levels and is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>` ] variable[axis] assign[=] call[name[self]._get_axis_number, parameter[name[axis]]] if compare[name[axis] equal[==] constant[0]] begin[:] return[call[name[self]][name[key]]] call[name[self]._consolidate_inplace, parameter[]] variable[axis_number] assign[=] call[name[self]._get_axis_number, parameter[name[axis]]] variable[new_data] assign[=] call[name[self]._data.xs, parameter[name[key]]] variable[result] assign[=] call[name[self]._construct_return_type, parameter[name[new_data]]] variable[copy] assign[=] name[new_data].is_mixed_type call[name[result]._set_is_copy, parameter[name[self]]] return[name[result]]
keyword[def] identifier[xs] ( identifier[self] , identifier[key] , identifier[axis] = literal[int] ): literal[string] identifier[axis] = identifier[self] . identifier[_get_axis_number] ( identifier[axis] ) keyword[if] identifier[axis] == literal[int] : keyword[return] identifier[self] [ identifier[key] ] identifier[self] . identifier[_consolidate_inplace] () identifier[axis_number] = identifier[self] . identifier[_get_axis_number] ( identifier[axis] ) identifier[new_data] = identifier[self] . identifier[_data] . identifier[xs] ( identifier[key] , identifier[axis] = identifier[axis_number] , identifier[copy] = keyword[False] ) identifier[result] = identifier[self] . identifier[_construct_return_type] ( identifier[new_data] ) identifier[copy] = identifier[new_data] . identifier[is_mixed_type] identifier[result] . identifier[_set_is_copy] ( identifier[self] , identifier[copy] = identifier[copy] ) keyword[return] identifier[result]
def xs(self, key, axis=1): """ Return slice of panel along selected axis. Parameters ---------- key : object Label axis : {'items', 'major', 'minor}, default 1/'major' Returns ------- y : ndim(self)-1 Notes ----- xs is only for getting, not setting values. MultiIndex Slicers is a generic way to get/set values on any level or levels and is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>` """ axis = self._get_axis_number(axis) if axis == 0: return self[key] # depends on [control=['if'], data=[]] self._consolidate_inplace() axis_number = self._get_axis_number(axis) new_data = self._data.xs(key, axis=axis_number, copy=False) result = self._construct_return_type(new_data) copy = new_data.is_mixed_type result._set_is_copy(self, copy=copy) return result
def get(self, CachableItem): """Returns current ICachedItem for ICachableItem Args: CachableItem: ICachableItem, used as a reference to find a cached version Returns: ICachedItem or None, if CachableItem has not been cached """ return self.session.\ query(self.mapper.factory().__class__).\ filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\ first()
def function[get, parameter[self, CachableItem]]: constant[Returns current ICachedItem for ICachableItem Args: CachableItem: ICachableItem, used as a reference to find a cached version Returns: ICachedItem or None, if CachableItem has not been cached ] return[call[call[call[name[self].session.query, parameter[call[name[self].mapper.factory, parameter[]].__class__]].filter, parameter[compare[call[call[name[self].mapper.factory, parameter[]].__class__.__dict__][call[name[self].mapper.key, parameter[]]] equal[==] call[name[CachableItem].getId, parameter[]]]]].first, parameter[]]]
keyword[def] identifier[get] ( identifier[self] , identifier[CachableItem] ): literal[string] keyword[return] identifier[self] . identifier[session] . identifier[query] ( identifier[self] . identifier[mapper] . identifier[factory] (). identifier[__class__] ). identifier[filter] ( identifier[self] . identifier[mapper] . identifier[factory] (). identifier[__class__] . identifier[__dict__] [ identifier[self] . identifier[mapper] . identifier[key] ()]== identifier[CachableItem] . identifier[getId] ()). identifier[first] ()
def get(self, CachableItem): """Returns current ICachedItem for ICachableItem Args: CachableItem: ICachableItem, used as a reference to find a cached version Returns: ICachedItem or None, if CachableItem has not been cached """ return self.session.query(self.mapper.factory().__class__).filter(self.mapper.factory().__class__.__dict__[self.mapper.key()] == CachableItem.getId()).first()
def load_dataset(*args, **kwargs): """ `load_dataset` will be removed a future version of xarray. The current behavior of this function can be achived by using `tutorial.open_dataset(...).load()`. See Also -------- open_dataset """ warnings.warn( "load_dataset` will be removed in a future version of xarray. The " "current behavior of this function can be achived by using " "`tutorial.open_dataset(...).load()`.", DeprecationWarning, stacklevel=2) return open_dataset(*args, **kwargs).load()
def function[load_dataset, parameter[]]: constant[ `load_dataset` will be removed a future version of xarray. The current behavior of this function can be achived by using `tutorial.open_dataset(...).load()`. See Also -------- open_dataset ] call[name[warnings].warn, parameter[constant[load_dataset` will be removed in a future version of xarray. The current behavior of this function can be achived by using `tutorial.open_dataset(...).load()`.], name[DeprecationWarning]]] return[call[call[name[open_dataset], parameter[<ast.Starred object at 0x7da20c6c51b0>]].load, parameter[]]]
keyword[def] identifier[load_dataset] (* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] literal[string] literal[string] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] ) keyword[return] identifier[open_dataset] (* identifier[args] ,** identifier[kwargs] ). identifier[load] ()
def load_dataset(*args, **kwargs): """ `load_dataset` will be removed a future version of xarray. The current behavior of this function can be achived by using `tutorial.open_dataset(...).load()`. See Also -------- open_dataset """ warnings.warn('load_dataset` will be removed in a future version of xarray. The current behavior of this function can be achived by using `tutorial.open_dataset(...).load()`.', DeprecationWarning, stacklevel=2) return open_dataset(*args, **kwargs).load()
def add_ch_grp_to_interface( self, nexus_host, if_type, port, ch_grp): """Applies channel-group n to ethernet interface.""" if if_type != "ethernet": LOG.error("Unexpected interface type %(iftype)s when " "adding change group", {'iftype': if_type}) return starttime = time.time() path_snip = snipp.PATH_ALL path_interface = "phys-[eth" + port + "]" body_snip = snipp.BODY_ADD_CH_GRP % (ch_grp, ch_grp, path_interface) self.send_edit_string(nexus_host, path_snip, body_snip) self.capture_and_print_timeshot( starttime, "add_ch_group", switch=nexus_host)
def function[add_ch_grp_to_interface, parameter[self, nexus_host, if_type, port, ch_grp]]: constant[Applies channel-group n to ethernet interface.] if compare[name[if_type] not_equal[!=] constant[ethernet]] begin[:] call[name[LOG].error, parameter[constant[Unexpected interface type %(iftype)s when adding change group], dictionary[[<ast.Constant object at 0x7da1b1c60d30>], [<ast.Name object at 0x7da1b1c633d0>]]]] return[None] variable[starttime] assign[=] call[name[time].time, parameter[]] variable[path_snip] assign[=] name[snipp].PATH_ALL variable[path_interface] assign[=] binary_operation[binary_operation[constant[phys-[eth] + name[port]] + constant[]]] variable[body_snip] assign[=] binary_operation[name[snipp].BODY_ADD_CH_GRP <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bc70130>, <ast.Name object at 0x7da18bc70b50>, <ast.Name object at 0x7da18bc72200>]]] call[name[self].send_edit_string, parameter[name[nexus_host], name[path_snip], name[body_snip]]] call[name[self].capture_and_print_timeshot, parameter[name[starttime], constant[add_ch_group]]]
keyword[def] identifier[add_ch_grp_to_interface] ( identifier[self] , identifier[nexus_host] , identifier[if_type] , identifier[port] , identifier[ch_grp] ): literal[string] keyword[if] identifier[if_type] != literal[string] : identifier[LOG] . identifier[error] ( literal[string] literal[string] ,{ literal[string] : identifier[if_type] }) keyword[return] identifier[starttime] = identifier[time] . identifier[time] () identifier[path_snip] = identifier[snipp] . identifier[PATH_ALL] identifier[path_interface] = literal[string] + identifier[port] + literal[string] identifier[body_snip] = identifier[snipp] . identifier[BODY_ADD_CH_GRP] %( identifier[ch_grp] , identifier[ch_grp] , identifier[path_interface] ) identifier[self] . identifier[send_edit_string] ( identifier[nexus_host] , identifier[path_snip] , identifier[body_snip] ) identifier[self] . identifier[capture_and_print_timeshot] ( identifier[starttime] , literal[string] , identifier[switch] = identifier[nexus_host] )
def add_ch_grp_to_interface(self, nexus_host, if_type, port, ch_grp): """Applies channel-group n to ethernet interface.""" if if_type != 'ethernet': LOG.error('Unexpected interface type %(iftype)s when adding change group', {'iftype': if_type}) return # depends on [control=['if'], data=['if_type']] starttime = time.time() path_snip = snipp.PATH_ALL path_interface = 'phys-[eth' + port + ']' body_snip = snipp.BODY_ADD_CH_GRP % (ch_grp, ch_grp, path_interface) self.send_edit_string(nexus_host, path_snip, body_snip) self.capture_and_print_timeshot(starttime, 'add_ch_group', switch=nexus_host)
def holidays(self, start=None, end=None, return_name=False): """ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception('Holiday Calendar {name} does not have any ' 'rules specified'.format(name=self.name)) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if (self._cache is None or start < self._cache[0] or end > self._cache[1]): for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays else: holidays = holidays.append(rule_holidays) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index
def function[holidays, parameter[self, start, end, return_name]]: constant[ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays ] if compare[name[self].rules is constant[None]] begin[:] <ast.Raise object at 0x7da1b206b9a0> if compare[name[start] is constant[None]] begin[:] variable[start] assign[=] name[AbstractHolidayCalendar].start_date if compare[name[end] is constant[None]] begin[:] variable[end] assign[=] name[AbstractHolidayCalendar].end_date variable[start] assign[=] call[name[Timestamp], parameter[name[start]]] variable[end] assign[=] call[name[Timestamp], parameter[name[end]]] variable[holidays] assign[=] constant[None] if <ast.BoolOp object at 0x7da1b2068520> begin[:] for taget[name[rule]] in starred[name[self].rules] begin[:] variable[rule_holidays] assign[=] call[name[rule].dates, parameter[name[start], name[end]]] if compare[name[holidays] is constant[None]] begin[:] variable[holidays] assign[=] name[rule_holidays] name[self]._cache assign[=] tuple[[<ast.Name object at 0x7da1b2344c40>, <ast.Name object at 0x7da1b2347b20>, <ast.Call object at 0x7da1b2345f90>]] variable[holidays] assign[=] call[name[self]._cache][constant[2]] variable[holidays] assign[=] call[name[holidays]][<ast.Slice object at 0x7da1b2344a90>] if name[return_name] begin[:] return[name[holidays]]
keyword[def] identifier[holidays] ( identifier[self] , identifier[start] = keyword[None] , identifier[end] = keyword[None] , identifier[return_name] = keyword[False] ): literal[string] keyword[if] identifier[self] . identifier[rules] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[name] )) keyword[if] identifier[start] keyword[is] keyword[None] : identifier[start] = identifier[AbstractHolidayCalendar] . identifier[start_date] keyword[if] identifier[end] keyword[is] keyword[None] : identifier[end] = identifier[AbstractHolidayCalendar] . identifier[end_date] identifier[start] = identifier[Timestamp] ( identifier[start] ) identifier[end] = identifier[Timestamp] ( identifier[end] ) identifier[holidays] = keyword[None] keyword[if] ( identifier[self] . identifier[_cache] keyword[is] keyword[None] keyword[or] identifier[start] < identifier[self] . identifier[_cache] [ literal[int] ] keyword[or] identifier[end] > identifier[self] . identifier[_cache] [ literal[int] ]): keyword[for] identifier[rule] keyword[in] identifier[self] . identifier[rules] : identifier[rule_holidays] = identifier[rule] . identifier[dates] ( identifier[start] , identifier[end] , identifier[return_name] = keyword[True] ) keyword[if] identifier[holidays] keyword[is] keyword[None] : identifier[holidays] = identifier[rule_holidays] keyword[else] : identifier[holidays] = identifier[holidays] . identifier[append] ( identifier[rule_holidays] ) identifier[self] . identifier[_cache] =( identifier[start] , identifier[end] , identifier[holidays] . identifier[sort_index] ()) identifier[holidays] = identifier[self] . identifier[_cache] [ literal[int] ] identifier[holidays] = identifier[holidays] [ identifier[start] : identifier[end] ] keyword[if] identifier[return_name] : keyword[return] identifier[holidays] keyword[else] : keyword[return] identifier[holidays] . identifier[index]
def holidays(self, start=None, end=None, return_name=False): """ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception('Holiday Calendar {name} does not have any rules specified'.format(name=self.name)) # depends on [control=['if'], data=[]] if start is None: start = AbstractHolidayCalendar.start_date # depends on [control=['if'], data=['start']] if end is None: end = AbstractHolidayCalendar.end_date # depends on [control=['if'], data=['end']] start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if self._cache is None or start < self._cache[0] or end > self._cache[1]: for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays # depends on [control=['if'], data=['holidays']] else: holidays = holidays.append(rule_holidays) # depends on [control=['for'], data=['rule']] self._cache = (start, end, holidays.sort_index()) # depends on [control=['if'], data=[]] holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays # depends on [control=['if'], data=[]] else: return holidays.index
def get_digital_channels(channel_list): """Goes through channel list and returns digital channels with ids Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.""" dig_ids = digital_channel_ids() dig_channels = [] for ln in dig_ids: for ch in channel_list: if ch.dct['id'] == ln: dig_channels.append(ch) break return dig_channels
def function[get_digital_channels, parameter[channel_list]]: constant[Goes through channel list and returns digital channels with ids Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.] variable[dig_ids] assign[=] call[name[digital_channel_ids], parameter[]] variable[dig_channels] assign[=] list[[]] for taget[name[ln]] in starred[name[dig_ids]] begin[:] for taget[name[ch]] in starred[name[channel_list]] begin[:] if compare[call[name[ch].dct][constant[id]] equal[==] name[ln]] begin[:] call[name[dig_channels].append, parameter[name[ch]]] break return[name[dig_channels]]
keyword[def] identifier[get_digital_channels] ( identifier[channel_list] ): literal[string] identifier[dig_ids] = identifier[digital_channel_ids] () identifier[dig_channels] =[] keyword[for] identifier[ln] keyword[in] identifier[dig_ids] : keyword[for] identifier[ch] keyword[in] identifier[channel_list] : keyword[if] identifier[ch] . identifier[dct] [ literal[string] ]== identifier[ln] : identifier[dig_channels] . identifier[append] ( identifier[ch] ) keyword[break] keyword[return] identifier[dig_channels]
def get_digital_channels(channel_list): """Goes through channel list and returns digital channels with ids Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.""" dig_ids = digital_channel_ids() dig_channels = [] for ln in dig_ids: for ch in channel_list: if ch.dct['id'] == ln: dig_channels.append(ch) break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ch']] # depends on [control=['for'], data=['ln']] return dig_channels
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", predictionCol="prediction", numPartitions=None): """ setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \ predictionCol="prediction", numPartitions=None) """ kwargs = self._input_kwargs return self._set(**kwargs)
def function[setParams, parameter[self, minSupport, minConfidence, itemsCol, predictionCol, numPartitions]]: constant[ setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", predictionCol="prediction", numPartitions=None) ] variable[kwargs] assign[=] name[self]._input_kwargs return[call[name[self]._set, parameter[]]]
keyword[def] identifier[setParams] ( identifier[self] , identifier[minSupport] = literal[int] , identifier[minConfidence] = literal[int] , identifier[itemsCol] = literal[string] , identifier[predictionCol] = literal[string] , identifier[numPartitions] = keyword[None] ): literal[string] identifier[kwargs] = identifier[self] . identifier[_input_kwargs] keyword[return] identifier[self] . identifier[_set] (** identifier[kwargs] )
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol='items', predictionCol='prediction', numPartitions=None): """ setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", predictionCol="prediction", numPartitions=None) """ kwargs = self._input_kwargs return self._set(**kwargs)
def authenticate_user(token): """ Add the content curation Authorizatino `token` header to `config.SESSION`. """ config.SESSION.headers.update({"Authorization": "Token {0}".format(token)}) try: response = config.SESSION.post(config.authentication_url()) response.raise_for_status() user = json.loads(response._content.decode("utf-8")) config.LOGGER.info("Logged in with username {0}".format(user['username'])) return user['username'], token except HTTPError: config.LOGGER.error("Invalid token: Credentials not found") sys.exit()
def function[authenticate_user, parameter[token]]: constant[ Add the content curation Authorizatino `token` header to `config.SESSION`. ] call[name[config].SESSION.headers.update, parameter[dictionary[[<ast.Constant object at 0x7da18f09d930>], [<ast.Call object at 0x7da18f09e6e0>]]]] <ast.Try object at 0x7da18f09f070>
keyword[def] identifier[authenticate_user] ( identifier[token] ): literal[string] identifier[config] . identifier[SESSION] . identifier[headers] . identifier[update] ({ literal[string] : literal[string] . identifier[format] ( identifier[token] )}) keyword[try] : identifier[response] = identifier[config] . identifier[SESSION] . identifier[post] ( identifier[config] . identifier[authentication_url] ()) identifier[response] . identifier[raise_for_status] () identifier[user] = identifier[json] . identifier[loads] ( identifier[response] . identifier[_content] . identifier[decode] ( literal[string] )) identifier[config] . identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[user] [ literal[string] ])) keyword[return] identifier[user] [ literal[string] ], identifier[token] keyword[except] identifier[HTTPError] : identifier[config] . identifier[LOGGER] . identifier[error] ( literal[string] ) identifier[sys] . identifier[exit] ()
def authenticate_user(token): """ Add the content curation Authorizatino `token` header to `config.SESSION`. """ config.SESSION.headers.update({'Authorization': 'Token {0}'.format(token)}) try: response = config.SESSION.post(config.authentication_url()) response.raise_for_status() user = json.loads(response._content.decode('utf-8')) config.LOGGER.info('Logged in with username {0}'.format(user['username'])) return (user['username'], token) # depends on [control=['try'], data=[]] except HTTPError: config.LOGGER.error('Invalid token: Credentials not found') sys.exit() # depends on [control=['except'], data=[]]
def expand_families(stmts_in, **kwargs): """Expand FamPlex Agents to individual genes. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to expand. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of expanded statements. """ from indra.tools.expand_families import Expander logger.info('Expanding families on %d statements...' % len(stmts_in)) expander = Expander(hierarchies) stmts_out = expander.expand_families(stmts_in) logger.info('%d statements after expanding families...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
def function[expand_families, parameter[stmts_in]]: constant[Expand FamPlex Agents to individual genes. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to expand. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of expanded statements. ] from relative_module[indra.tools.expand_families] import module[Expander] call[name[logger].info, parameter[binary_operation[constant[Expanding families on %d statements...] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[stmts_in]]]]]] variable[expander] assign[=] call[name[Expander], parameter[name[hierarchies]]] variable[stmts_out] assign[=] call[name[expander].expand_families, parameter[name[stmts_in]]] call[name[logger].info, parameter[binary_operation[constant[%d statements after expanding families...] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[stmts_out]]]]]] variable[dump_pkl] assign[=] call[name[kwargs].get, parameter[constant[save]]] if name[dump_pkl] begin[:] call[name[dump_statements], parameter[name[stmts_out], name[dump_pkl]]] return[name[stmts_out]]
keyword[def] identifier[expand_families] ( identifier[stmts_in] ,** identifier[kwargs] ): literal[string] keyword[from] identifier[indra] . identifier[tools] . identifier[expand_families] keyword[import] identifier[Expander] identifier[logger] . identifier[info] ( literal[string] % identifier[len] ( identifier[stmts_in] )) identifier[expander] = identifier[Expander] ( identifier[hierarchies] ) identifier[stmts_out] = identifier[expander] . identifier[expand_families] ( identifier[stmts_in] ) identifier[logger] . identifier[info] ( literal[string] % identifier[len] ( identifier[stmts_out] )) identifier[dump_pkl] = identifier[kwargs] . identifier[get] ( literal[string] ) keyword[if] identifier[dump_pkl] : identifier[dump_statements] ( identifier[stmts_out] , identifier[dump_pkl] ) keyword[return] identifier[stmts_out]
def expand_families(stmts_in, **kwargs): """Expand FamPlex Agents to individual genes. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to expand. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of expanded statements. """ from indra.tools.expand_families import Expander logger.info('Expanding families on %d statements...' % len(stmts_in)) expander = Expander(hierarchies) stmts_out = expander.expand_families(stmts_in) logger.info('%d statements after expanding families...' % len(stmts_out)) dump_pkl = kwargs.get('save') if dump_pkl: dump_statements(stmts_out, dump_pkl) # depends on [control=['if'], data=[]] return stmts_out
def _construct(self, strings_collection): """ Naive generalized suffix tree construction algorithm, with quadratic [O(n_1^2 + ... + n_m^2)] worst-case time complexity, where m is the number of strings in collection. """ # 0. Add a unique character to each string in the collection, # to preserve simplicity while building the tree strings_collection = utils.make_unique_endings(strings_collection) root = ast.AnnotatedSuffixTree.Node() root.strings_collection = strings_collection # For each string in the collection... for string_ind in xrange(len(strings_collection)): string = strings_collection[string_ind] # For each suffix of that string... # (do not handle unique last characters as suffixes) for suffix_start in xrange(len(string)-1): suffix = string[suffix_start:] # ... first try to find maximal matching path node = root child_node = node.chose_arc(suffix) while child_node: (str_ind, substr_start, substr_end) = child_node.arc() match = utils.match_strings( suffix, strings_collection[str_ind][substr_start:substr_end]) if match == substr_end-substr_start: # matched the arc, proceed with child node suffix = suffix[match:] suffix_start += match node = child_node node.weight += 1 child_node = node.chose_arc(suffix) else: # ... then, where the matching path ends; # create new inner node # (that's the only possible alternative # since we have unique string endings) node.remove_child(child_node) new_node = node.add_new_child(string_ind, suffix_start, suffix_start+match) new_leaf = new_node.add_new_child(string_ind, suffix_start+match, len(string)) (osi, oss, ose) = child_node._arc child_node._arc = (osi, oss+match, ose) new_node.add_child(child_node) new_leaf.weight = 1 new_node.weight = 1 + child_node.weight suffix = '' break # ... 
or create new leaf if there was no appropriate arc to proceed if suffix: new_leaf = node.add_new_child(string_ind, suffix_start, len(string)) new_leaf.weight = 1 # Root will also be annotated by the weight of its children, # to preserve simplicity while calculating string matching for k in root.children: root.weight += root.children[k].weight return root
def function[_construct, parameter[self, strings_collection]]: constant[ Naive generalized suffix tree construction algorithm, with quadratic [O(n_1^2 + ... + n_m^2)] worst-case time complexity, where m is the number of strings in collection. ] variable[strings_collection] assign[=] call[name[utils].make_unique_endings, parameter[name[strings_collection]]] variable[root] assign[=] call[name[ast].AnnotatedSuffixTree.Node, parameter[]] name[root].strings_collection assign[=] name[strings_collection] for taget[name[string_ind]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[strings_collection]]]]]] begin[:] variable[string] assign[=] call[name[strings_collection]][name[string_ind]] for taget[name[suffix_start]] in starred[call[name[xrange], parameter[binary_operation[call[name[len], parameter[name[string]]] - constant[1]]]]] begin[:] variable[suffix] assign[=] call[name[string]][<ast.Slice object at 0x7da20cabf190>] variable[node] assign[=] name[root] variable[child_node] assign[=] call[name[node].chose_arc, parameter[name[suffix]]] while name[child_node] begin[:] <ast.Tuple object at 0x7da20cabe8f0> assign[=] call[name[child_node].arc, parameter[]] variable[match] assign[=] call[name[utils].match_strings, parameter[name[suffix], call[call[name[strings_collection]][name[str_ind]]][<ast.Slice object at 0x7da20cabe410>]]] if compare[name[match] equal[==] binary_operation[name[substr_end] - name[substr_start]]] begin[:] variable[suffix] assign[=] call[name[suffix]][<ast.Slice object at 0x7da20cabecb0>] <ast.AugAssign object at 0x7da20cabe4d0> variable[node] assign[=] name[child_node] <ast.AugAssign object at 0x7da1b2468d30> variable[child_node] assign[=] call[name[node].chose_arc, parameter[name[suffix]]] if name[suffix] begin[:] variable[new_leaf] assign[=] call[name[node].add_new_child, parameter[name[string_ind], name[suffix_start], call[name[len], parameter[name[string]]]]] name[new_leaf].weight assign[=] constant[1] for taget[name[k]] in 
starred[name[root].children] begin[:] <ast.AugAssign object at 0x7da1b2424490> return[name[root]]
keyword[def] identifier[_construct] ( identifier[self] , identifier[strings_collection] ): literal[string] identifier[strings_collection] = identifier[utils] . identifier[make_unique_endings] ( identifier[strings_collection] ) identifier[root] = identifier[ast] . identifier[AnnotatedSuffixTree] . identifier[Node] () identifier[root] . identifier[strings_collection] = identifier[strings_collection] keyword[for] identifier[string_ind] keyword[in] identifier[xrange] ( identifier[len] ( identifier[strings_collection] )): identifier[string] = identifier[strings_collection] [ identifier[string_ind] ] keyword[for] identifier[suffix_start] keyword[in] identifier[xrange] ( identifier[len] ( identifier[string] )- literal[int] ): identifier[suffix] = identifier[string] [ identifier[suffix_start] :] identifier[node] = identifier[root] identifier[child_node] = identifier[node] . identifier[chose_arc] ( identifier[suffix] ) keyword[while] identifier[child_node] : ( identifier[str_ind] , identifier[substr_start] , identifier[substr_end] )= identifier[child_node] . identifier[arc] () identifier[match] = identifier[utils] . identifier[match_strings] ( identifier[suffix] , identifier[strings_collection] [ identifier[str_ind] ][ identifier[substr_start] : identifier[substr_end] ]) keyword[if] identifier[match] == identifier[substr_end] - identifier[substr_start] : identifier[suffix] = identifier[suffix] [ identifier[match] :] identifier[suffix_start] += identifier[match] identifier[node] = identifier[child_node] identifier[node] . identifier[weight] += literal[int] identifier[child_node] = identifier[node] . identifier[chose_arc] ( identifier[suffix] ) keyword[else] : identifier[node] . identifier[remove_child] ( identifier[child_node] ) identifier[new_node] = identifier[node] . identifier[add_new_child] ( identifier[string_ind] , identifier[suffix_start] , identifier[suffix_start] + identifier[match] ) identifier[new_leaf] = identifier[new_node] . 
identifier[add_new_child] ( identifier[string_ind] , identifier[suffix_start] + identifier[match] , identifier[len] ( identifier[string] )) ( identifier[osi] , identifier[oss] , identifier[ose] )= identifier[child_node] . identifier[_arc] identifier[child_node] . identifier[_arc] =( identifier[osi] , identifier[oss] + identifier[match] , identifier[ose] ) identifier[new_node] . identifier[add_child] ( identifier[child_node] ) identifier[new_leaf] . identifier[weight] = literal[int] identifier[new_node] . identifier[weight] = literal[int] + identifier[child_node] . identifier[weight] identifier[suffix] = literal[string] keyword[break] keyword[if] identifier[suffix] : identifier[new_leaf] = identifier[node] . identifier[add_new_child] ( identifier[string_ind] , identifier[suffix_start] , identifier[len] ( identifier[string] )) identifier[new_leaf] . identifier[weight] = literal[int] keyword[for] identifier[k] keyword[in] identifier[root] . identifier[children] : identifier[root] . identifier[weight] += identifier[root] . identifier[children] [ identifier[k] ]. identifier[weight] keyword[return] identifier[root]
def _construct(self, strings_collection): """ Naive generalized suffix tree construction algorithm, with quadratic [O(n_1^2 + ... + n_m^2)] worst-case time complexity, where m is the number of strings in collection. """ # 0. Add a unique character to each string in the collection, # to preserve simplicity while building the tree strings_collection = utils.make_unique_endings(strings_collection) root = ast.AnnotatedSuffixTree.Node() root.strings_collection = strings_collection # For each string in the collection... for string_ind in xrange(len(strings_collection)): string = strings_collection[string_ind] # For each suffix of that string... # (do not handle unique last characters as suffixes) for suffix_start in xrange(len(string) - 1): suffix = string[suffix_start:] # ... first try to find maximal matching path node = root child_node = node.chose_arc(suffix) while child_node: (str_ind, substr_start, substr_end) = child_node.arc() match = utils.match_strings(suffix, strings_collection[str_ind][substr_start:substr_end]) if match == substr_end - substr_start: # matched the arc, proceed with child node suffix = suffix[match:] suffix_start += match node = child_node node.weight += 1 child_node = node.chose_arc(suffix) # depends on [control=['if'], data=['match']] else: # ... then, where the matching path ends; # create new inner node # (that's the only possible alternative # since we have unique string endings) node.remove_child(child_node) new_node = node.add_new_child(string_ind, suffix_start, suffix_start + match) new_leaf = new_node.add_new_child(string_ind, suffix_start + match, len(string)) (osi, oss, ose) = child_node._arc child_node._arc = (osi, oss + match, ose) new_node.add_child(child_node) new_leaf.weight = 1 new_node.weight = 1 + child_node.weight suffix = '' break # depends on [control=['while'], data=[]] # ... 
or create new leaf if there was no appropriate arc to proceed if suffix: new_leaf = node.add_new_child(string_ind, suffix_start, len(string)) new_leaf.weight = 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['suffix_start']] # depends on [control=['for'], data=['string_ind']] # Root will also be annotated by the weight of its children, # to preserve simplicity while calculating string matching for k in root.children: root.weight += root.children[k].weight # depends on [control=['for'], data=['k']] return root
def meta_model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type="LinearSVR"): """ Trains meta-labeler for predicting number of labels for each user. Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April). Large scale multi-label classification via metalabeler. In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM. """ if regressor_type == "LinearSVR": if X_train.shape[0] > X_train.shape[1]: dual = False else: dual = True model = LinearSVR(C=svm_hardness, random_state=0, dual=dual, fit_intercept=fit_intercept) y_train_meta = y_train.sum(axis=1) model.fit(X_train, y_train_meta) else: print("Invalid regressor type.") raise RuntimeError return model
def function[meta_model_fit, parameter[X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type]]: constant[ Trains meta-labeler for predicting number of labels for each user. Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April). Large scale multi-label classification via metalabeler. In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM. ] if compare[name[regressor_type] equal[==] constant[LinearSVR]] begin[:] if compare[call[name[X_train].shape][constant[0]] greater[>] call[name[X_train].shape][constant[1]]] begin[:] variable[dual] assign[=] constant[False] variable[model] assign[=] call[name[LinearSVR], parameter[]] variable[y_train_meta] assign[=] call[name[y_train].sum, parameter[]] call[name[model].fit, parameter[name[X_train], name[y_train_meta]]] return[name[model]]
keyword[def] identifier[meta_model_fit] ( identifier[X_train] , identifier[y_train] , identifier[svm_hardness] , identifier[fit_intercept] , identifier[number_of_threads] , identifier[regressor_type] = literal[string] ): literal[string] keyword[if] identifier[regressor_type] == literal[string] : keyword[if] identifier[X_train] . identifier[shape] [ literal[int] ]> identifier[X_train] . identifier[shape] [ literal[int] ]: identifier[dual] = keyword[False] keyword[else] : identifier[dual] = keyword[True] identifier[model] = identifier[LinearSVR] ( identifier[C] = identifier[svm_hardness] , identifier[random_state] = literal[int] , identifier[dual] = identifier[dual] , identifier[fit_intercept] = identifier[fit_intercept] ) identifier[y_train_meta] = identifier[y_train] . identifier[sum] ( identifier[axis] = literal[int] ) identifier[model] . identifier[fit] ( identifier[X_train] , identifier[y_train_meta] ) keyword[else] : identifier[print] ( literal[string] ) keyword[raise] identifier[RuntimeError] keyword[return] identifier[model]
def meta_model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type='LinearSVR'): """ Trains meta-labeler for predicting number of labels for each user. Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April). Large scale multi-label classification via metalabeler. In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM. """ if regressor_type == 'LinearSVR': if X_train.shape[0] > X_train.shape[1]: dual = False # depends on [control=['if'], data=[]] else: dual = True model = LinearSVR(C=svm_hardness, random_state=0, dual=dual, fit_intercept=fit_intercept) y_train_meta = y_train.sum(axis=1) model.fit(X_train, y_train_meta) # depends on [control=['if'], data=[]] else: print('Invalid regressor type.') raise RuntimeError return model
def disable_dao_fork(chain_class: Type[BaseChain]) -> Type[BaseChain]: """ Set the ``support_dao_fork`` flag to ``False`` on the :class:`~eth.vm.forks.homestead.HomesteadVM`. Requires that presence of the :class:`~eth.vm.forks.homestead.HomesteadVM` in the ``vm_configuration`` """ homstead_vms_found = any( _is_homestead(vm_class) for _, vm_class in chain_class.vm_configuration ) if not homstead_vms_found: raise ValidationError("No HomesteadVM found in vm_configuration.") vm_configuration = _set_vm_dao_support_false(chain_class.vm_configuration) return chain_class.configure(vm_configuration=vm_configuration)
def function[disable_dao_fork, parameter[chain_class]]: constant[ Set the ``support_dao_fork`` flag to ``False`` on the :class:`~eth.vm.forks.homestead.HomesteadVM`. Requires that presence of the :class:`~eth.vm.forks.homestead.HomesteadVM` in the ``vm_configuration`` ] variable[homstead_vms_found] assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b180d090>]] if <ast.UnaryOp object at 0x7da1b1720c40> begin[:] <ast.Raise object at 0x7da1b1720130> variable[vm_configuration] assign[=] call[name[_set_vm_dao_support_false], parameter[name[chain_class].vm_configuration]] return[call[name[chain_class].configure, parameter[]]]
keyword[def] identifier[disable_dao_fork] ( identifier[chain_class] : identifier[Type] [ identifier[BaseChain] ])-> identifier[Type] [ identifier[BaseChain] ]: literal[string] identifier[homstead_vms_found] = identifier[any] ( identifier[_is_homestead] ( identifier[vm_class] ) keyword[for] identifier[_] , identifier[vm_class] keyword[in] identifier[chain_class] . identifier[vm_configuration] ) keyword[if] keyword[not] identifier[homstead_vms_found] : keyword[raise] identifier[ValidationError] ( literal[string] ) identifier[vm_configuration] = identifier[_set_vm_dao_support_false] ( identifier[chain_class] . identifier[vm_configuration] ) keyword[return] identifier[chain_class] . identifier[configure] ( identifier[vm_configuration] = identifier[vm_configuration] )
def disable_dao_fork(chain_class: Type[BaseChain]) -> Type[BaseChain]: """ Set the ``support_dao_fork`` flag to ``False`` on the :class:`~eth.vm.forks.homestead.HomesteadVM`. Requires that presence of the :class:`~eth.vm.forks.homestead.HomesteadVM` in the ``vm_configuration`` """ homstead_vms_found = any((_is_homestead(vm_class) for (_, vm_class) in chain_class.vm_configuration)) if not homstead_vms_found: raise ValidationError('No HomesteadVM found in vm_configuration.') # depends on [control=['if'], data=[]] vm_configuration = _set_vm_dao_support_false(chain_class.vm_configuration) return chain_class.configure(vm_configuration=vm_configuration)
def dmtoind(dm, f_min, f_max, nchan0, inttime, it): """ Given FDMT state, return indices to slice partial FDMT solution and sump to a given DM """ # maxDT = dmtodt(dm) # need to write if it>0: correction = dF/2. else: correction = 0 shift = [] nchan = nchan0/2**(iteration_num) for i_F in range(nchan): f_start = (f_max - f_min)/float(nchan) * (i_F) + f_min f_end = (f_max - f_min)/float(nchan) *(i_F+1) + f_min f_middle = (f_end - f_start)/2. + f_start - correction f_middle_larger = (f_end - f_start)/2 + f_start + correction dT_middle = int(round(i_dT * (1./f_middle**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2))) dT_middle_larger = int(round(i_dT * (1./f_middle_larger**2 - 1./f_start**2)/(1./f_end**2 - 1./f_start**2))) shift.append( (-dT_middle_larger, i_F) )
def function[dmtoind, parameter[dm, f_min, f_max, nchan0, inttime, it]]: constant[ Given FDMT state, return indices to slice partial FDMT solution and sump to a given DM ] if compare[name[it] greater[>] constant[0]] begin[:] variable[correction] assign[=] binary_operation[name[dF] / constant[2.0]] variable[shift] assign[=] list[[]] variable[nchan] assign[=] binary_operation[name[nchan0] / binary_operation[constant[2] ** name[iteration_num]]] for taget[name[i_F]] in starred[call[name[range], parameter[name[nchan]]]] begin[:] variable[f_start] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[f_max] - name[f_min]] / call[name[float], parameter[name[nchan]]]] * name[i_F]] + name[f_min]] variable[f_end] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[f_max] - name[f_min]] / call[name[float], parameter[name[nchan]]]] * binary_operation[name[i_F] + constant[1]]] + name[f_min]] variable[f_middle] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[f_end] - name[f_start]] / constant[2.0]] + name[f_start]] - name[correction]] variable[f_middle_larger] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[f_end] - name[f_start]] / constant[2]] + name[f_start]] + name[correction]] variable[dT_middle] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[binary_operation[name[i_dT] * binary_operation[binary_operation[constant[1.0] / binary_operation[name[f_middle] ** constant[2]]] - binary_operation[constant[1.0] / binary_operation[name[f_start] ** constant[2]]]]] / binary_operation[binary_operation[constant[1.0] / binary_operation[name[f_end] ** constant[2]]] - binary_operation[constant[1.0] / binary_operation[name[f_start] ** constant[2]]]]]]]]] variable[dT_middle_larger] assign[=] call[name[int], parameter[call[name[round], parameter[binary_operation[binary_operation[name[i_dT] * 
binary_operation[binary_operation[constant[1.0] / binary_operation[name[f_middle_larger] ** constant[2]]] - binary_operation[constant[1.0] / binary_operation[name[f_start] ** constant[2]]]]] / binary_operation[binary_operation[constant[1.0] / binary_operation[name[f_end] ** constant[2]]] - binary_operation[constant[1.0] / binary_operation[name[f_start] ** constant[2]]]]]]]]] call[name[shift].append, parameter[tuple[[<ast.UnaryOp object at 0x7da1b2443ee0>, <ast.Name object at 0x7da1b2443f10>]]]]
keyword[def] identifier[dmtoind] ( identifier[dm] , identifier[f_min] , identifier[f_max] , identifier[nchan0] , identifier[inttime] , identifier[it] ): literal[string] keyword[if] identifier[it] > literal[int] : identifier[correction] = identifier[dF] / literal[int] keyword[else] : identifier[correction] = literal[int] identifier[shift] =[] identifier[nchan] = identifier[nchan0] / literal[int] **( identifier[iteration_num] ) keyword[for] identifier[i_F] keyword[in] identifier[range] ( identifier[nchan] ): identifier[f_start] =( identifier[f_max] - identifier[f_min] )/ identifier[float] ( identifier[nchan] )*( identifier[i_F] )+ identifier[f_min] identifier[f_end] =( identifier[f_max] - identifier[f_min] )/ identifier[float] ( identifier[nchan] )*( identifier[i_F] + literal[int] )+ identifier[f_min] identifier[f_middle] =( identifier[f_end] - identifier[f_start] )/ literal[int] + identifier[f_start] - identifier[correction] identifier[f_middle_larger] =( identifier[f_end] - identifier[f_start] )/ literal[int] + identifier[f_start] + identifier[correction] identifier[dT_middle] = identifier[int] ( identifier[round] ( identifier[i_dT] *( literal[int] / identifier[f_middle] ** literal[int] - literal[int] / identifier[f_start] ** literal[int] )/( literal[int] / identifier[f_end] ** literal[int] - literal[int] / identifier[f_start] ** literal[int] ))) identifier[dT_middle_larger] = identifier[int] ( identifier[round] ( identifier[i_dT] *( literal[int] / identifier[f_middle_larger] ** literal[int] - literal[int] / identifier[f_start] ** literal[int] )/( literal[int] / identifier[f_end] ** literal[int] - literal[int] / identifier[f_start] ** literal[int] ))) identifier[shift] . identifier[append] ((- identifier[dT_middle_larger] , identifier[i_F] ))
def dmtoind(dm, f_min, f_max, nchan0, inttime, it): """ Given FDMT state, return indices to slice partial FDMT solution and sump to a given DM """ # maxDT = dmtodt(dm) # need to write if it > 0: correction = dF / 2.0 # depends on [control=['if'], data=[]] else: correction = 0 shift = [] nchan = nchan0 / 2 ** iteration_num for i_F in range(nchan): f_start = (f_max - f_min) / float(nchan) * i_F + f_min f_end = (f_max - f_min) / float(nchan) * (i_F + 1) + f_min f_middle = (f_end - f_start) / 2.0 + f_start - correction f_middle_larger = (f_end - f_start) / 2 + f_start + correction dT_middle = int(round(i_dT * (1.0 / f_middle ** 2 - 1.0 / f_start ** 2) / (1.0 / f_end ** 2 - 1.0 / f_start ** 2))) dT_middle_larger = int(round(i_dT * (1.0 / f_middle_larger ** 2 - 1.0 / f_start ** 2) / (1.0 / f_end ** 2 - 1.0 / f_start ** 2))) shift.append((-dT_middle_larger, i_F)) # depends on [control=['for'], data=['i_F']]
def get_clusters_representation(chromosome, count_clusters=None): """ Convert chromosome to cluster representation: chromosome : [0, 1, 1, 0, 2, 3, 3] clusters: [[0, 3], [1, 2], [4], [5, 6]] """ if count_clusters is None: count_clusters = ga_math.calc_count_centers(chromosome) # Initialize empty clusters clusters = [[] for _ in range(count_clusters)] # Fill clusters with index of data for _idx_data in range(len(chromosome)): clusters[chromosome[_idx_data]].append(_idx_data) return clusters
def function[get_clusters_representation, parameter[chromosome, count_clusters]]: constant[ Convert chromosome to cluster representation: chromosome : [0, 1, 1, 0, 2, 3, 3] clusters: [[0, 3], [1, 2], [4], [5, 6]] ] if compare[name[count_clusters] is constant[None]] begin[:] variable[count_clusters] assign[=] call[name[ga_math].calc_count_centers, parameter[name[chromosome]]] variable[clusters] assign[=] <ast.ListComp object at 0x7da1b0191480> for taget[name[_idx_data]] in starred[call[name[range], parameter[call[name[len], parameter[name[chromosome]]]]]] begin[:] call[call[name[clusters]][call[name[chromosome]][name[_idx_data]]].append, parameter[name[_idx_data]]] return[name[clusters]]
keyword[def] identifier[get_clusters_representation] ( identifier[chromosome] , identifier[count_clusters] = keyword[None] ): literal[string] keyword[if] identifier[count_clusters] keyword[is] keyword[None] : identifier[count_clusters] = identifier[ga_math] . identifier[calc_count_centers] ( identifier[chromosome] ) identifier[clusters] =[[] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[count_clusters] )] keyword[for] identifier[_idx_data] keyword[in] identifier[range] ( identifier[len] ( identifier[chromosome] )): identifier[clusters] [ identifier[chromosome] [ identifier[_idx_data] ]]. identifier[append] ( identifier[_idx_data] ) keyword[return] identifier[clusters]
def get_clusters_representation(chromosome, count_clusters=None): """ Convert chromosome to cluster representation: chromosome : [0, 1, 1, 0, 2, 3, 3] clusters: [[0, 3], [1, 2], [4], [5, 6]] """ if count_clusters is None: count_clusters = ga_math.calc_count_centers(chromosome) # depends on [control=['if'], data=['count_clusters']] # Initialize empty clusters clusters = [[] for _ in range(count_clusters)] # Fill clusters with index of data for _idx_data in range(len(chromosome)): clusters[chromosome[_idx_data]].append(_idx_data) # depends on [control=['for'], data=['_idx_data']] return clusters
def _lclist_parallel_worker(task): '''This is a parallel worker for makelclist. Parameters ---------- task : tuple This is a tuple containing the following items: task[0] = lcf task[1] = columns task[2] = lcformat task[3] = lcformatdir task[4] = lcndetkey Returns ------- dict or None This contains all of the info for the object processed in this LC read operation. If this fails, returns None ''' lcf, columns, lcformat, lcformatdir, lcndetkey = task # get the bits needed for lcformat handling # NOTE: we re-import things in this worker function because sometimes # functions can't be pickled correctly for passing them to worker functions # in a processing pool try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return None except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # we store the full path of the light curve lcobjdict = {'lcfname':os.path.abspath(lcf)} try: # read the light curve in lcdict = readerfunc(lcf) # this should handle lists/tuples being returned by readerfunc # we assume that the first element is the actual lcdict # FIXME: figure out how to not need this assumption if ( (isinstance(lcdict, (list, tuple))) and (isinstance(lcdict[0], dict)) ): lcdict = lcdict[0] # insert all of the columns for colkey in columns: if '.' in colkey: getkey = colkey.split('.') else: getkey = [colkey] try: thiscolval = _dict_get(lcdict, getkey) except Exception as e: LOGWARNING('column %s does not exist for %s' % (colkey, lcf)) thiscolval = np.nan # update the lcobjdict with this value lcobjdict[getkey[-1]] = thiscolval except Exception as e: LOGEXCEPTION('could not figure out columns for %s' % lcf) # insert all of the columns as nans for colkey in columns: if '.' 
in colkey: getkey = colkey.split('.') else: getkey = [colkey] thiscolval = np.nan # update the lclistdict with this value lcobjdict[getkey[-1]] = thiscolval # now get the actual ndets; this excludes nans and infs for dk in lcndetkey: try: if '.' in dk: getdk = dk.split('.') else: getdk = [dk] ndetcol = _dict_get(lcdict, getdk) actualndets = ndetcol[np.isfinite(ndetcol)].size lcobjdict['%s.ndet' % getdk[-1]] = actualndets except Exception as e: lcobjdict['%s.ndet' % getdk[-1]] = np.nan return lcobjdict
def function[_lclist_parallel_worker, parameter[task]]: constant[This is a parallel worker for makelclist. Parameters ---------- task : tuple This is a tuple containing the following items: task[0] = lcf task[1] = columns task[2] = lcformat task[3] = lcformatdir task[4] = lcndetkey Returns ------- dict or None This contains all of the info for the object processed in this LC read operation. If this fails, returns None ] <ast.Tuple object at 0x7da1affea2c0> assign[=] name[task] <ast.Try object at 0x7da1affeae60> variable[lcobjdict] assign[=] dictionary[[<ast.Constant object at 0x7da1afff5d80>], [<ast.Call object at 0x7da1afff59c0>]] <ast.Try object at 0x7da1afff5b70> for taget[name[dk]] in starred[name[lcndetkey]] begin[:] <ast.Try object at 0x7da1afe05bd0> return[name[lcobjdict]]
keyword[def] identifier[_lclist_parallel_worker] ( identifier[task] ): literal[string] identifier[lcf] , identifier[columns] , identifier[lcformat] , identifier[lcformatdir] , identifier[lcndetkey] = identifier[task] keyword[try] : identifier[formatinfo] = identifier[get_lcformat] ( identifier[lcformat] , identifier[use_lcformat_dir] = identifier[lcformatdir] ) keyword[if] identifier[formatinfo] : ( identifier[dfileglob] , identifier[readerfunc] , identifier[dtimecols] , identifier[dmagcols] , identifier[derrcols] , identifier[magsarefluxes] , identifier[normfunc] )= identifier[formatinfo] keyword[else] : identifier[LOGERROR] ( literal[string] ) keyword[return] keyword[None] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[LOGEXCEPTION] ( literal[string] ) keyword[return] keyword[None] identifier[lcobjdict] ={ literal[string] : identifier[os] . identifier[path] . identifier[abspath] ( identifier[lcf] )} keyword[try] : identifier[lcdict] = identifier[readerfunc] ( identifier[lcf] ) keyword[if] (( identifier[isinstance] ( identifier[lcdict] ,( identifier[list] , identifier[tuple] ))) keyword[and] ( identifier[isinstance] ( identifier[lcdict] [ literal[int] ], identifier[dict] ))): identifier[lcdict] = identifier[lcdict] [ literal[int] ] keyword[for] identifier[colkey] keyword[in] identifier[columns] : keyword[if] literal[string] keyword[in] identifier[colkey] : identifier[getkey] = identifier[colkey] . identifier[split] ( literal[string] ) keyword[else] : identifier[getkey] =[ identifier[colkey] ] keyword[try] : identifier[thiscolval] = identifier[_dict_get] ( identifier[lcdict] , identifier[getkey] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[LOGWARNING] ( literal[string] % ( identifier[colkey] , identifier[lcf] )) identifier[thiscolval] = identifier[np] . 
identifier[nan] identifier[lcobjdict] [ identifier[getkey] [- literal[int] ]]= identifier[thiscolval] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[LOGEXCEPTION] ( literal[string] % identifier[lcf] ) keyword[for] identifier[colkey] keyword[in] identifier[columns] : keyword[if] literal[string] keyword[in] identifier[colkey] : identifier[getkey] = identifier[colkey] . identifier[split] ( literal[string] ) keyword[else] : identifier[getkey] =[ identifier[colkey] ] identifier[thiscolval] = identifier[np] . identifier[nan] identifier[lcobjdict] [ identifier[getkey] [- literal[int] ]]= identifier[thiscolval] keyword[for] identifier[dk] keyword[in] identifier[lcndetkey] : keyword[try] : keyword[if] literal[string] keyword[in] identifier[dk] : identifier[getdk] = identifier[dk] . identifier[split] ( literal[string] ) keyword[else] : identifier[getdk] =[ identifier[dk] ] identifier[ndetcol] = identifier[_dict_get] ( identifier[lcdict] , identifier[getdk] ) identifier[actualndets] = identifier[ndetcol] [ identifier[np] . identifier[isfinite] ( identifier[ndetcol] )]. identifier[size] identifier[lcobjdict] [ literal[string] % identifier[getdk] [- literal[int] ]]= identifier[actualndets] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[lcobjdict] [ literal[string] % identifier[getdk] [- literal[int] ]]= identifier[np] . identifier[nan] keyword[return] identifier[lcobjdict]
def _lclist_parallel_worker(task): """This is a parallel worker for makelclist. Parameters ---------- task : tuple This is a tuple containing the following items: task[0] = lcf task[1] = columns task[2] = lcformat task[3] = lcformatdir task[4] = lcndetkey Returns ------- dict or None This contains all of the info for the object processed in this LC read operation. If this fails, returns None """ (lcf, columns, lcformat, lcformatdir, lcndetkey) = task # get the bits needed for lcformat handling # NOTE: we re-import things in this worker function because sometimes # functions can't be pickled correctly for passing them to worker functions # in a processing pool try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo # depends on [control=['if'], data=[]] else: LOGERROR("can't figure out the light curve format") return None # depends on [control=['try'], data=[]] except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # depends on [control=['except'], data=[]] # we store the full path of the light curve lcobjdict = {'lcfname': os.path.abspath(lcf)} try: # read the light curve in lcdict = readerfunc(lcf) # this should handle lists/tuples being returned by readerfunc # we assume that the first element is the actual lcdict # FIXME: figure out how to not need this assumption if isinstance(lcdict, (list, tuple)) and isinstance(lcdict[0], dict): lcdict = lcdict[0] # depends on [control=['if'], data=[]] # insert all of the columns for colkey in columns: if '.' 
in colkey: getkey = colkey.split('.') # depends on [control=['if'], data=['colkey']] else: getkey = [colkey] try: thiscolval = _dict_get(lcdict, getkey) # depends on [control=['try'], data=[]] except Exception as e: LOGWARNING('column %s does not exist for %s' % (colkey, lcf)) thiscolval = np.nan # depends on [control=['except'], data=[]] # update the lcobjdict with this value lcobjdict[getkey[-1]] = thiscolval # depends on [control=['for'], data=['colkey']] # depends on [control=['try'], data=[]] except Exception as e: LOGEXCEPTION('could not figure out columns for %s' % lcf) # insert all of the columns as nans for colkey in columns: if '.' in colkey: getkey = colkey.split('.') # depends on [control=['if'], data=['colkey']] else: getkey = [colkey] thiscolval = np.nan # update the lclistdict with this value lcobjdict[getkey[-1]] = thiscolval # depends on [control=['for'], data=['colkey']] # depends on [control=['except'], data=[]] # now get the actual ndets; this excludes nans and infs for dk in lcndetkey: try: if '.' in dk: getdk = dk.split('.') # depends on [control=['if'], data=['dk']] else: getdk = [dk] ndetcol = _dict_get(lcdict, getdk) actualndets = ndetcol[np.isfinite(ndetcol)].size lcobjdict['%s.ndet' % getdk[-1]] = actualndets # depends on [control=['try'], data=[]] except Exception as e: lcobjdict['%s.ndet' % getdk[-1]] = np.nan # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['dk']] return lcobjdict
def on_state_changed(self, state): """ Connects/disconnects slots to/from signals when the mode state changed. """ super(GoToDefinitionMode, self).on_state_changed(state) if state: self.editor.mouse_moved.connect(self._on_mouse_moved) self.editor.mouse_released.connect(self._on_mouse_released) self.editor.add_action(self.action_goto, sub_menu='COBOL') self.editor.mouse_double_clicked.connect( self._timer.cancel_requests) else: self.editor.mouse_moved.disconnect(self._on_mouse_moved) self.editor.mouse_released.disconnect(self._on_mouse_released) self.editor.remove_action(self.action_goto, sub_menu='Python') self.editor.mouse_double_clicked.disconnect( self._timer.cancel_requests)
def function[on_state_changed, parameter[self, state]]: constant[ Connects/disconnects slots to/from signals when the mode state changed. ] call[call[name[super], parameter[name[GoToDefinitionMode], name[self]]].on_state_changed, parameter[name[state]]] if name[state] begin[:] call[name[self].editor.mouse_moved.connect, parameter[name[self]._on_mouse_moved]] call[name[self].editor.mouse_released.connect, parameter[name[self]._on_mouse_released]] call[name[self].editor.add_action, parameter[name[self].action_goto]] call[name[self].editor.mouse_double_clicked.connect, parameter[name[self]._timer.cancel_requests]]
keyword[def] identifier[on_state_changed] ( identifier[self] , identifier[state] ): literal[string] identifier[super] ( identifier[GoToDefinitionMode] , identifier[self] ). identifier[on_state_changed] ( identifier[state] ) keyword[if] identifier[state] : identifier[self] . identifier[editor] . identifier[mouse_moved] . identifier[connect] ( identifier[self] . identifier[_on_mouse_moved] ) identifier[self] . identifier[editor] . identifier[mouse_released] . identifier[connect] ( identifier[self] . identifier[_on_mouse_released] ) identifier[self] . identifier[editor] . identifier[add_action] ( identifier[self] . identifier[action_goto] , identifier[sub_menu] = literal[string] ) identifier[self] . identifier[editor] . identifier[mouse_double_clicked] . identifier[connect] ( identifier[self] . identifier[_timer] . identifier[cancel_requests] ) keyword[else] : identifier[self] . identifier[editor] . identifier[mouse_moved] . identifier[disconnect] ( identifier[self] . identifier[_on_mouse_moved] ) identifier[self] . identifier[editor] . identifier[mouse_released] . identifier[disconnect] ( identifier[self] . identifier[_on_mouse_released] ) identifier[self] . identifier[editor] . identifier[remove_action] ( identifier[self] . identifier[action_goto] , identifier[sub_menu] = literal[string] ) identifier[self] . identifier[editor] . identifier[mouse_double_clicked] . identifier[disconnect] ( identifier[self] . identifier[_timer] . identifier[cancel_requests] )
def on_state_changed(self, state): """ Connects/disconnects slots to/from signals when the mode state changed. """ super(GoToDefinitionMode, self).on_state_changed(state) if state: self.editor.mouse_moved.connect(self._on_mouse_moved) self.editor.mouse_released.connect(self._on_mouse_released) self.editor.add_action(self.action_goto, sub_menu='COBOL') self.editor.mouse_double_clicked.connect(self._timer.cancel_requests) # depends on [control=['if'], data=[]] else: self.editor.mouse_moved.disconnect(self._on_mouse_moved) self.editor.mouse_released.disconnect(self._on_mouse_released) self.editor.remove_action(self.action_goto, sub_menu='Python') self.editor.mouse_double_clicked.disconnect(self._timer.cancel_requests)
def plot_factor_contribution_to_perf(
        perf_attrib_data,
        ax=None,
        title='Cumulative common returns attribution',
):
    """
    Plot the cumulative return contributed by each individual factor.

    Parameters
    ----------
    perf_attrib_data : pd.DataFrame
        Factor returns (plus, optionally, ``total_returns`` and
        ``common_returns`` columns, which are excluded from the plot),
        indexed by datetime.

    ax : matplotlib.axes.Axes, optional
        Axes to draw on; defaults to the current axes.

    title : str, optional
        Title of the plot.

    Returns
    -------
    ax : matplotlib.axes.Axes
    """
    if ax is None:
        ax = plt.gca()

    # Only the per-factor columns are plotted; the aggregate columns are
    # dropped (silently, if absent).
    factor_returns = perf_attrib_data.drop(
        ['total_returns', 'common_returns'], axis='columns', errors='ignore'
    )

    cumulative = pd.DataFrame(
        {name: ep.cum_returns(factor_returns[name]) for name in factor_returns}
    )

    for name in cumulative:
        ax.plot(cumulative[name])

    ax.axhline(0, color='k')
    configure_legend(ax, change_colors=True)
    ax.set_ylabel('Cumulative returns by factor')
    ax.set_title(title)

    return ax
def function[plot_factor_contribution_to_perf, parameter[perf_attrib_data, ax, title]]: constant[ Plot each factor's contribution to performance. Parameters ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, and datetimes as index - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used title : str, optional title of plot Returns ------- ax : matplotlib.axes.Axes ] if compare[name[ax] is constant[None]] begin[:] variable[ax] assign[=] call[name[plt].gca, parameter[]] variable[factors_to_plot] assign[=] call[name[perf_attrib_data].drop, parameter[list[[<ast.Constant object at 0x7da1b00085b0>, <ast.Constant object at 0x7da1b0009ff0>]]]] variable[factors_cumulative] assign[=] call[name[pd].DataFrame, parameter[]] for taget[name[factor]] in starred[name[factors_to_plot]] begin[:] call[name[factors_cumulative]][name[factor]] assign[=] call[name[ep].cum_returns, parameter[call[name[factors_to_plot]][name[factor]]]] for taget[name[col]] in starred[name[factors_cumulative]] begin[:] call[name[ax].plot, parameter[call[name[factors_cumulative]][name[col]]]] call[name[ax].axhline, parameter[constant[0]]] call[name[configure_legend], parameter[name[ax]]] call[name[ax].set_ylabel, parameter[constant[Cumulative returns by factor]]] call[name[ax].set_title, parameter[name[title]]] return[name[ax]]
keyword[def] identifier[plot_factor_contribution_to_perf] ( identifier[perf_attrib_data] , identifier[ax] = keyword[None] , identifier[title] = literal[string] , ): literal[string] keyword[if] identifier[ax] keyword[is] keyword[None] : identifier[ax] = identifier[plt] . identifier[gca] () identifier[factors_to_plot] = identifier[perf_attrib_data] . identifier[drop] ( [ literal[string] , literal[string] ], identifier[axis] = literal[string] , identifier[errors] = literal[string] ) identifier[factors_cumulative] = identifier[pd] . identifier[DataFrame] () keyword[for] identifier[factor] keyword[in] identifier[factors_to_plot] : identifier[factors_cumulative] [ identifier[factor] ]= identifier[ep] . identifier[cum_returns] ( identifier[factors_to_plot] [ identifier[factor] ]) keyword[for] identifier[col] keyword[in] identifier[factors_cumulative] : identifier[ax] . identifier[plot] ( identifier[factors_cumulative] [ identifier[col] ]) identifier[ax] . identifier[axhline] ( literal[int] , identifier[color] = literal[string] ) identifier[configure_legend] ( identifier[ax] , identifier[change_colors] = keyword[True] ) identifier[ax] . identifier[set_ylabel] ( literal[string] ) identifier[ax] . identifier[set_title] ( identifier[title] ) keyword[return] identifier[ax]
def plot_factor_contribution_to_perf(perf_attrib_data, ax=None, title='Cumulative common returns attribution'): """ Plot each factor's contribution to performance. Parameters ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, and datetimes as index - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used title : str, optional title of plot Returns ------- ax : matplotlib.axes.Axes """ if ax is None: ax = plt.gca() # depends on [control=['if'], data=['ax']] factors_to_plot = perf_attrib_data.drop(['total_returns', 'common_returns'], axis='columns', errors='ignore') factors_cumulative = pd.DataFrame() for factor in factors_to_plot: factors_cumulative[factor] = ep.cum_returns(factors_to_plot[factor]) # depends on [control=['for'], data=['factor']] for col in factors_cumulative: ax.plot(factors_cumulative[col]) # depends on [control=['for'], data=['col']] ax.axhline(0, color='k') configure_legend(ax, change_colors=True) ax.set_ylabel('Cumulative returns by factor') ax.set_title(title) return ax
def connect(self, protocol=None):
    """Initialize DAP IO pins for JTAG or SWD"""
    # Map the requested protocol onto a DAPAccess port enum; None selects
    # the probe's default port.
    if protocol is None:
        port = DAPAccess.PORT.DEFAULT
    else:
        port = self.PORT_MAP[protocol]

    try:
        self._link.connect(port)
    except DAPAccess.Error as exc:
        six.raise_from(self._convert_exception(exc), exc)

    # Ask the probe which mode it actually ended up in and record it.
    active_mode = self._link.get_swj_mode()
    self._protocol = self.PORT_MAP[active_mode]

    self._invalidate_cached_registers()
def function[connect, parameter[self, protocol]]: constant[Initialize DAP IO pins for JTAG or SWD] if compare[name[protocol] is_not constant[None]] begin[:] variable[port] assign[=] call[name[self].PORT_MAP][name[protocol]] <ast.Try object at 0x7da1b18a2500> variable[actualMode] assign[=] call[name[self]._link.get_swj_mode, parameter[]] name[self]._protocol assign[=] call[name[self].PORT_MAP][name[actualMode]] call[name[self]._invalidate_cached_registers, parameter[]]
keyword[def] identifier[connect] ( identifier[self] , identifier[protocol] = keyword[None] ): literal[string] keyword[if] identifier[protocol] keyword[is] keyword[not] keyword[None] : identifier[port] = identifier[self] . identifier[PORT_MAP] [ identifier[protocol] ] keyword[else] : identifier[port] = identifier[DAPAccess] . identifier[PORT] . identifier[DEFAULT] keyword[try] : identifier[self] . identifier[_link] . identifier[connect] ( identifier[port] ) keyword[except] identifier[DAPAccess] . identifier[Error] keyword[as] identifier[exc] : identifier[six] . identifier[raise_from] ( identifier[self] . identifier[_convert_exception] ( identifier[exc] ), identifier[exc] ) identifier[actualMode] = identifier[self] . identifier[_link] . identifier[get_swj_mode] () identifier[self] . identifier[_protocol] = identifier[self] . identifier[PORT_MAP] [ identifier[actualMode] ] identifier[self] . identifier[_invalidate_cached_registers] ()
def connect(self, protocol=None): """Initialize DAP IO pins for JTAG or SWD""" # Convert protocol to port enum. if protocol is not None: port = self.PORT_MAP[protocol] # depends on [control=['if'], data=['protocol']] else: port = DAPAccess.PORT.DEFAULT try: self._link.connect(port) # depends on [control=['try'], data=[]] except DAPAccess.Error as exc: six.raise_from(self._convert_exception(exc), exc) # depends on [control=['except'], data=['exc']] # Read the current mode and save it. actualMode = self._link.get_swj_mode() self._protocol = self.PORT_MAP[actualMode] self._invalidate_cached_registers()
def uavionix_adsb_transceiver_health_report_send(self, rfHealth, force_mavlink1=False):
    '''
    Transceiver heartbeat with health report (updated every 10s)

    rfHealth                  : ADS-B transponder messages (uint8_t)

    '''
    # Encode the health report into a MAVLink message, then hand it to the
    # generic send path.
    msg = self.uavionix_adsb_transceiver_health_report_encode(rfHealth)
    return self.send(msg, force_mavlink1=force_mavlink1)
def function[uavionix_adsb_transceiver_health_report_send, parameter[self, rfHealth, force_mavlink1]]: constant[ Transceiver heartbeat with health report (updated every 10s) rfHealth : ADS-B transponder messages (uint8_t) ] return[call[name[self].send, parameter[call[name[self].uavionix_adsb_transceiver_health_report_encode, parameter[name[rfHealth]]]]]]
keyword[def] identifier[uavionix_adsb_transceiver_health_report_send] ( identifier[self] , identifier[rfHealth] , identifier[force_mavlink1] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[uavionix_adsb_transceiver_health_report_encode] ( identifier[rfHealth] ), identifier[force_mavlink1] = identifier[force_mavlink1] )
def uavionix_adsb_transceiver_health_report_send(self, rfHealth, force_mavlink1=False): """ Transceiver heartbeat with health report (updated every 10s) rfHealth : ADS-B transponder messages (uint8_t) """ return self.send(self.uavionix_adsb_transceiver_health_report_encode(rfHealth), force_mavlink1=force_mavlink1)
def _get_ansible_playbook(self, playbook, **kwargs):
    """
    Build and return an ``AnsiblePlaybook`` instance for the given playbook.

    :param playbook: A string containing an absolute path to a
     provisioner's playbook.
    :param kwargs: An optional keyword arguments.
    :return: object
    """
    playbook_cls = ansible_playbook.AnsiblePlaybook
    return playbook_cls(playbook, self._config, **kwargs)
def function[_get_ansible_playbook, parameter[self, playbook]]: constant[ Get an instance of AnsiblePlaybook and returns it. :param playbook: A string containing an absolute path to a provisioner's playbook. :param kwargs: An optional keyword arguments. :return: object ] return[call[name[ansible_playbook].AnsiblePlaybook, parameter[name[playbook], name[self]._config]]]
keyword[def] identifier[_get_ansible_playbook] ( identifier[self] , identifier[playbook] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[ansible_playbook] . identifier[AnsiblePlaybook] ( identifier[playbook] , identifier[self] . identifier[_config] , ** identifier[kwargs] )
def _get_ansible_playbook(self, playbook, **kwargs): """ Get an instance of AnsiblePlaybook and returns it. :param playbook: A string containing an absolute path to a provisioner's playbook. :param kwargs: An optional keyword arguments. :return: object """ return ansible_playbook.AnsiblePlaybook(playbook, self._config, **kwargs)
def load(cls, sc, path):
    """Load an IsotonicRegressionModel."""
    # Delegate deserialization to the JVM-side model class, then pull the
    # boundary/prediction vectors back into Python as plain arrays.
    jvm_model_cls = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel
    java_model = jvm_model_cls.load(sc._jsc.sc(), path)
    boundaries = _java2py(sc, java_model.boundaryVector()).toArray()
    predictions = _java2py(sc, java_model.predictionVector()).toArray()
    return IsotonicRegressionModel(boundaries, predictions, java_model.isotonic)
def function[load, parameter[cls, sc, path]]: constant[Load an IsotonicRegressionModel.] variable[java_model] assign[=] call[name[sc]._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load, parameter[call[name[sc]._jsc.sc, parameter[]], name[path]]] variable[py_boundaries] assign[=] call[call[name[_java2py], parameter[name[sc], call[name[java_model].boundaryVector, parameter[]]]].toArray, parameter[]] variable[py_predictions] assign[=] call[call[name[_java2py], parameter[name[sc], call[name[java_model].predictionVector, parameter[]]]].toArray, parameter[]] return[call[name[IsotonicRegressionModel], parameter[name[py_boundaries], name[py_predictions], name[java_model].isotonic]]]
keyword[def] identifier[load] ( identifier[cls] , identifier[sc] , identifier[path] ): literal[string] identifier[java_model] = identifier[sc] . identifier[_jvm] . identifier[org] . identifier[apache] . identifier[spark] . identifier[mllib] . identifier[regression] . identifier[IsotonicRegressionModel] . identifier[load] ( identifier[sc] . identifier[_jsc] . identifier[sc] (), identifier[path] ) identifier[py_boundaries] = identifier[_java2py] ( identifier[sc] , identifier[java_model] . identifier[boundaryVector] ()). identifier[toArray] () identifier[py_predictions] = identifier[_java2py] ( identifier[sc] , identifier[java_model] . identifier[predictionVector] ()). identifier[toArray] () keyword[return] identifier[IsotonicRegressionModel] ( identifier[py_boundaries] , identifier[py_predictions] , identifier[java_model] . identifier[isotonic] )
def load(cls, sc, path): """Load an IsotonicRegressionModel.""" java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(sc._jsc.sc(), path) py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray() py_predictions = _java2py(sc, java_model.predictionVector()).toArray() return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic)
def visit_Assignment(self, node):
    """Visitor for `Assignment` AST node."""
    # Reject writes to names declared immutable before descending into the
    # assignment's children.
    var_name = node.left.identifier.name
    symbol = self.table[var_name]
    if not (symbol is None or symbol.is_mutable):
        raise SementicError(f"Re-assignment of immutable variable `{var_name}`.")
    self.visit(node.left)
    self.visit(node.right)
def function[visit_Assignment, parameter[self, node]]: constant[Visitor for `Assignment` AST node.] variable[var_name] assign[=] name[node].left.identifier.name variable[var_symbol] assign[=] call[name[self].table][name[var_name]] if <ast.BoolOp object at 0x7da1b08051b0> begin[:] <ast.Raise object at 0x7da1b0805ff0> call[name[self].visit, parameter[name[node].left]] call[name[self].visit, parameter[name[node].right]]
keyword[def] identifier[visit_Assignment] ( identifier[self] , identifier[node] ): literal[string] identifier[var_name] = identifier[node] . identifier[left] . identifier[identifier] . identifier[name] identifier[var_symbol] = identifier[self] . identifier[table] [ identifier[var_name] ] keyword[if] identifier[var_symbol] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[var_symbol] . identifier[is_mutable] : keyword[raise] identifier[SementicError] ( literal[string] ) identifier[self] . identifier[visit] ( identifier[node] . identifier[left] ) identifier[self] . identifier[visit] ( identifier[node] . identifier[right] )
def visit_Assignment(self, node): """Visitor for `Assignment` AST node.""" var_name = node.left.identifier.name var_symbol = self.table[var_name] if var_symbol is not None and (not var_symbol.is_mutable): raise SementicError(f'Re-assignment of immutable variable `{var_name}`.') # depends on [control=['if'], data=[]] self.visit(node.left) self.visit(node.right)
def _maximization(X, posterior, force_weights=None): """Estimate new centers, weights, and concentrations from Parameters ---------- posterior : array, [n_centers, n_examples] The posterior matrix from the expectation step. force_weights : None or array, [n_centers, ] If None is passed, will estimate weights. If an array is passed, will use instead of estimating. Returns ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] """ n_examples, n_features = X.shape n_clusters, n_examples = posterior.shape concentrations = np.zeros((n_clusters,)) centers = np.zeros((n_clusters, n_features)) if force_weights is None: weights = np.zeros((n_clusters,)) for cc in range(n_clusters): # update weights (alpha) if force_weights is None: weights[cc] = np.mean(posterior[cc, :]) else: weights = force_weights # update centers (mu) X_scaled = X.copy() if sp.issparse(X): X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr)) else: for ee in range(n_examples): X_scaled[ee, :] *= posterior[cc, ee] centers[cc, :] = X_scaled.sum(axis=0) # normalize centers center_norm = np.linalg.norm(centers[cc, :]) if center_norm > 1e-8: centers[cc, :] = centers[cc, :] / center_norm # update concentration (kappa) [TODO: add other kappa approximations] rbar = center_norm / (n_examples * weights[cc]) concentrations[cc] = rbar * n_features - np.power(rbar, 3.) if np.abs(rbar - 1.0) < 1e-10: concentrations[cc] = MAX_CONTENTRATION else: concentrations[cc] /= 1. - np.power(rbar, 2.) # let python know we can free this (good for large dense X) del X_scaled return centers, weights, concentrations
def function[_maximization, parameter[X, posterior, force_weights]]: constant[Estimate new centers, weights, and concentrations from Parameters ---------- posterior : array, [n_centers, n_examples] The posterior matrix from the expectation step. force_weights : None or array, [n_centers, ] If None is passed, will estimate weights. If an array is passed, will use instead of estimating. Returns ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] ] <ast.Tuple object at 0x7da1b1179e70> assign[=] name[X].shape <ast.Tuple object at 0x7da1b117aa40> assign[=] name[posterior].shape variable[concentrations] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b1178a30>]]]] variable[centers] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b1178e50>, <ast.Name object at 0x7da1b1178730>]]]] if compare[name[force_weights] is constant[None]] begin[:] variable[weights] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b12f4ca0>]]]] for taget[name[cc]] in starred[call[name[range], parameter[name[n_clusters]]]] begin[:] if compare[name[force_weights] is constant[None]] begin[:] call[name[weights]][name[cc]] assign[=] call[name[np].mean, parameter[call[name[posterior]][tuple[[<ast.Name object at 0x7da1b12f47c0>, <ast.Slice object at 0x7da1b12f4eb0>]]]]] variable[X_scaled] assign[=] call[name[X].copy, parameter[]] if call[name[sp].issparse, parameter[name[X]]] begin[:] <ast.AugAssign object at 0x7da1b12f4b50> call[name[centers]][tuple[[<ast.Name object at 0x7da1b12f49a0>, <ast.Slice object at 0x7da1b12f4be0>]]] assign[=] call[name[X_scaled].sum, parameter[]] variable[center_norm] assign[=] call[name[np].linalg.norm, parameter[call[name[centers]][tuple[[<ast.Name object at 0x7da1b117b1c0>, <ast.Slice object at 0x7da1b117a110>]]]]] if compare[name[center_norm] greater[>] constant[1e-08]] begin[:] 
call[name[centers]][tuple[[<ast.Name object at 0x7da1b117aa10>, <ast.Slice object at 0x7da1b117ac20>]]] assign[=] binary_operation[call[name[centers]][tuple[[<ast.Name object at 0x7da1b11797b0>, <ast.Slice object at 0x7da1b117aad0>]]] / name[center_norm]] variable[rbar] assign[=] binary_operation[name[center_norm] / binary_operation[name[n_examples] * call[name[weights]][name[cc]]]] call[name[concentrations]][name[cc]] assign[=] binary_operation[binary_operation[name[rbar] * name[n_features]] - call[name[np].power, parameter[name[rbar], constant[3.0]]]] if compare[call[name[np].abs, parameter[binary_operation[name[rbar] - constant[1.0]]]] less[<] constant[1e-10]] begin[:] call[name[concentrations]][name[cc]] assign[=] name[MAX_CONTENTRATION] <ast.Delete object at 0x7da1b127a230> return[tuple[[<ast.Name object at 0x7da1b1279540>, <ast.Name object at 0x7da1b1278d00>, <ast.Name object at 0x7da1b1279c30>]]]
keyword[def] identifier[_maximization] ( identifier[X] , identifier[posterior] , identifier[force_weights] = keyword[None] ): literal[string] identifier[n_examples] , identifier[n_features] = identifier[X] . identifier[shape] identifier[n_clusters] , identifier[n_examples] = identifier[posterior] . identifier[shape] identifier[concentrations] = identifier[np] . identifier[zeros] (( identifier[n_clusters] ,)) identifier[centers] = identifier[np] . identifier[zeros] (( identifier[n_clusters] , identifier[n_features] )) keyword[if] identifier[force_weights] keyword[is] keyword[None] : identifier[weights] = identifier[np] . identifier[zeros] (( identifier[n_clusters] ,)) keyword[for] identifier[cc] keyword[in] identifier[range] ( identifier[n_clusters] ): keyword[if] identifier[force_weights] keyword[is] keyword[None] : identifier[weights] [ identifier[cc] ]= identifier[np] . identifier[mean] ( identifier[posterior] [ identifier[cc] ,:]) keyword[else] : identifier[weights] = identifier[force_weights] identifier[X_scaled] = identifier[X] . identifier[copy] () keyword[if] identifier[sp] . identifier[issparse] ( identifier[X] ): identifier[X_scaled] . identifier[data] *= identifier[posterior] [ identifier[cc] ,:]. identifier[repeat] ( identifier[np] . identifier[diff] ( identifier[X_scaled] . identifier[indptr] )) keyword[else] : keyword[for] identifier[ee] keyword[in] identifier[range] ( identifier[n_examples] ): identifier[X_scaled] [ identifier[ee] ,:]*= identifier[posterior] [ identifier[cc] , identifier[ee] ] identifier[centers] [ identifier[cc] ,:]= identifier[X_scaled] . identifier[sum] ( identifier[axis] = literal[int] ) identifier[center_norm] = identifier[np] . identifier[linalg] . 
identifier[norm] ( identifier[centers] [ identifier[cc] ,:]) keyword[if] identifier[center_norm] > literal[int] : identifier[centers] [ identifier[cc] ,:]= identifier[centers] [ identifier[cc] ,:]/ identifier[center_norm] identifier[rbar] = identifier[center_norm] /( identifier[n_examples] * identifier[weights] [ identifier[cc] ]) identifier[concentrations] [ identifier[cc] ]= identifier[rbar] * identifier[n_features] - identifier[np] . identifier[power] ( identifier[rbar] , literal[int] ) keyword[if] identifier[np] . identifier[abs] ( identifier[rbar] - literal[int] )< literal[int] : identifier[concentrations] [ identifier[cc] ]= identifier[MAX_CONTENTRATION] keyword[else] : identifier[concentrations] [ identifier[cc] ]/= literal[int] - identifier[np] . identifier[power] ( identifier[rbar] , literal[int] ) keyword[del] identifier[X_scaled] keyword[return] identifier[centers] , identifier[weights] , identifier[concentrations]
def _maximization(X, posterior, force_weights=None): """Estimate new centers, weights, and concentrations from Parameters ---------- posterior : array, [n_centers, n_examples] The posterior matrix from the expectation step. force_weights : None or array, [n_centers, ] If None is passed, will estimate weights. If an array is passed, will use instead of estimating. Returns ---------- centers (mu) : array, [n_centers x n_features] weights (alpha) : array, [n_centers, ] (alpha) concentrations (kappa) : array, [n_centers, ] """ (n_examples, n_features) = X.shape (n_clusters, n_examples) = posterior.shape concentrations = np.zeros((n_clusters,)) centers = np.zeros((n_clusters, n_features)) if force_weights is None: weights = np.zeros((n_clusters,)) # depends on [control=['if'], data=[]] for cc in range(n_clusters): # update weights (alpha) if force_weights is None: weights[cc] = np.mean(posterior[cc, :]) # depends on [control=['if'], data=[]] else: weights = force_weights # update centers (mu) X_scaled = X.copy() if sp.issparse(X): X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr)) # depends on [control=['if'], data=[]] else: for ee in range(n_examples): X_scaled[ee, :] *= posterior[cc, ee] # depends on [control=['for'], data=['ee']] centers[cc, :] = X_scaled.sum(axis=0) # normalize centers center_norm = np.linalg.norm(centers[cc, :]) if center_norm > 1e-08: centers[cc, :] = centers[cc, :] / center_norm # depends on [control=['if'], data=['center_norm']] # update concentration (kappa) [TODO: add other kappa approximations] rbar = center_norm / (n_examples * weights[cc]) concentrations[cc] = rbar * n_features - np.power(rbar, 3.0) if np.abs(rbar - 1.0) < 1e-10: concentrations[cc] = MAX_CONTENTRATION # depends on [control=['if'], data=[]] else: concentrations[cc] /= 1.0 - np.power(rbar, 2.0) # let python know we can free this (good for large dense X) del X_scaled # depends on [control=['for'], data=['cc']] return (centers, weights, concentrations)
def viterbi_binary(prob, transition, p_state=None, p_init=None, return_logp=False):
    '''Viterbi decoding from binary (multi-label), discriminative state predictions.

    Each of the ``n_states`` labels is decoded independently as its own
    two-state (inactive/active) Viterbi problem: the observation likelihoods
    for label ``s`` are ``1 - prob[s]`` (inactive) and ``prob[s]`` (active),
    and its 2x2 transition matrix is ``transition[s]``.  Each binary problem
    is solved with `viterbi_discriminative`.

    Parameters
    ----------
    prob : np.ndarray [shape=(n_steps,) or (n_states, n_steps)], non-negative
        ``prob[s, t]`` is the probability of state ``s`` being active,
        conditional on the observation at time ``t``.  Values must lie in
        ``[0, 1]``.  A 1-dimensional input is promoted to ``(1, n_steps)``.

    transition : np.ndarray [shape=(2, 2) or (n_states, 2, 2)], non-negative
        Binary transition matrix, shared across states (2-d) or one per
        state (3-d).  ``transition[0, i]`` / ``transition[1, i]`` give the
        probability of moving from inactive / active to activity ``i``.
        Each row must sum to 1.

    p_state : np.ndarray [shape=(n_states,)]
        Optional marginal probability of each state being active,
        in ``[0, 1]``.  Defaults to 0.5 for every state.

    p_init : np.ndarray [shape=(n_states,)]
        Optional initial activation probability of each state.
        Defaults to 0.5 for every state.

    return_logp : bool
        If `True`, also return the log-likelihood of each decoded
        activation sequence.

    Returns
    -------
    states : np.ndarray [shape=(n_states, n_steps)]
        Binary matrix: ``states[s, t]`` indicates whether state ``s`` is
        active at time ``t`` in the most likely sequence.

    logp : np.ndarray [shape=(n_states,)]
        Only when ``return_logp=True``: the log probability of each state's
        activation sequence.

    Raises
    ------
    ParameterError
        If ``prob``, ``transition``, ``p_state``, or ``p_init`` has an
        invalid shape or contains out-of-range values.

    See Also
    --------
    viterbi : Viterbi decoding from observation likelihoods
    viterbi_discriminative : Viterbi decoding for discriminative
        (mutually exclusive) state predictions

    Examples
    --------
    >>> trans = np.array([[0.9, 0.1], [0.3, 0.7]])
    >>> prob = np.array([0.1, 0.7, 0.4, 0.3, 0.8, 0.9, 0.8, 0.2, 0.6, 0.3])
    >>> librosa.sequence.viterbi_binary(prob, trans, p_state=0.5, p_init=0.5)
    array([[0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])
    '''
    prob = np.atleast_2d(prob)

    n_states, n_steps = prob.shape

    # Broadcast a shared 2x2 matrix across all states; otherwise require one
    # 2x2 matrix per state.
    if transition.shape == (2, 2):
        transition = np.tile(transition, (n_states, 1, 1))
    elif transition.shape != (n_states, 2, 2):
        raise ParameterError('transition.shape={}, must be (2,2) or '
                             '(n_states, 2, 2)={}'.format(transition.shape, (n_states)))

    if np.any(transition < 0) or not np.allclose(transition.sum(axis=-1), 1):
        raise ParameterError('Invalid transition matrix: must be non-negative '
                             'and sum to 1 on each row.')

    if np.any(prob < 0) or np.any(prob > 1):
        raise ParameterError('Invalid probability values: prob must be between [0, 1]')

    # Default marginals/initial distribution: 0.5 per state.
    if p_state is None:
        p_state = np.full(n_states, 0.5)
    else:
        p_state = np.atleast_1d(p_state)

    if p_state.shape != (n_states,) or np.any(p_state < 0) or np.any(p_state > 1):
        raise ParameterError('Invalid marginal state distributions: p_state={}'.format(p_state))

    if p_init is None:
        p_init = np.full(n_states, 0.5)
    else:
        p_init = np.atleast_1d(p_init)

    if p_init.shape != (n_states,) or np.any(p_init < 0) or np.any(p_init > 1):
        raise ParameterError('Invalid initial state distributions: p_init={}'.format(p_init))

    states = np.empty((n_states, n_steps), dtype=int)
    logp = np.empty(n_states)

    for state in range(n_states):
        # Build the two-class (inactive/active) sub-problem for this label.
        prob_s = np.vstack((1 - prob[state], prob[state]))
        marginal_s = np.array([1 - p_state[state], p_state[state]])
        init_s = np.array([1 - p_init[state], p_init[state]])

        states[state], logp[state] = viterbi_discriminative(
            prob_s, transition[state],
            p_state=marginal_s,
            p_init=init_s,
            return_logp=True)

    if return_logp:
        return states, logp

    return states
def function[viterbi_binary, parameter[prob, transition, p_state, p_init, return_logp]]: constant[Viterbi decoding from binary (multi-label), discriminative state predictions. Given a sequence of conditional state predictions `prob[s, t]`, indicating the conditional likelihood of state `s` being active conditional on observation at time `t`, and a 2*2 transition matrix `transition` which encodes the conditional probability of moving from state `s` to state `~s` (not-`s`), the Viterbi algorithm computes the most likely sequence of states from the observations. This function differs from `viterbi_discriminative` in that it does not assume the states to be mutually exclusive. `viterbi_binary` is implemented by transforming the multi-label decoding problem to a collection of binary Viterbi problems (one for each *state* or label). The output is a binary matrix `states[s, t]` indicating whether each state `s` is active at time `t`. Parameters ---------- prob : np.ndarray [shape=(n_steps,) or (n_states, n_steps)], non-negative `prob[s, t]` is the probability of state `s` being active conditional on the observation at time `t`. Must be non-negative and less than 1. If `prob` is 1-dimensional, it is expanded to shape `(1, n_steps)`. transition : np.ndarray [shape=(2, 2) or (n_states, 2, 2)], non-negative If 2-dimensional, the same transition matrix is applied to each sub-problem. `transition[0, i]` is the probability of the state going from inactive to `i`, `transition[1, i]` is the probability of the state going from active to `i`. Each row must sum to 1. If 3-dimensional, `transition[s]` is interpreted as the 2x2 transition matrix for state label `s`. p_state : np.ndarray [shape=(n_states,)] Optional: marginal probability for each state (between [0,1]). If not provided, a uniform distribution (0.5 for each state) is assumed. p_init : np.ndarray [shape=(n_states,)] Optional: initial state distribution. If not provided, it is assumed to be uniform. 
return_logp : bool If `True`, return the log-likelihood of the state sequence. Returns ------- Either `states` or `(states, logp)`: states : np.ndarray [shape=(n_states, n_steps)] The most likely state sequence. logp : np.ndarray [shape=(n_states,)] If `return_logp=True`, the log probability of each state activation sequence `states` See Also -------- viterbi : Viterbi decoding from observation likelihoods viterbi_discriminative : Viterbi decoding for discriminative (mutually exclusive) state predictions Examples -------- In this example, we have a sequence of binary state likelihoods that we want to de-noise under the assumption that state changes are relatively uncommon. Positive predictions should only be retained if they persist for multiple steps, and any transient predictions should be considered as errors. This use case arises frequently in problems such as instrument recognition, where state activations tend to be stable over time, but subject to abrupt changes (e.g., when an instrument joins the mix). We assume that the 0 state has a self-transition probability of 90%, and the 1 state has a self-transition probability of 70%. We assume the marginal and initial probability of either state is 50%. 
>>> trans = np.array([[0.9, 0.1], [0.3, 0.7]]) >>> prob = np.array([0.1, 0.7, 0.4, 0.3, 0.8, 0.9, 0.8, 0.2, 0.6, 0.3]) >>> librosa.sequence.viterbi_binary(prob, trans, p_state=0.5, p_init=0.5) array([[0, 0, 0, 0, 1, 1, 1, 0, 0, 0]]) ] variable[prob] assign[=] call[name[np].atleast_2d, parameter[name[prob]]] <ast.Tuple object at 0x7da1b0530460> assign[=] name[prob].shape if compare[name[transition].shape equal[==] tuple[[<ast.Constant object at 0x7da1b0530640>, <ast.Constant object at 0x7da1b0530670>]]] begin[:] variable[transition] assign[=] call[name[np].tile, parameter[name[transition], tuple[[<ast.Name object at 0x7da1b05307f0>, <ast.Constant object at 0x7da1b0530820>, <ast.Constant object at 0x7da1b0530850>]]]] if <ast.BoolOp object at 0x7da1b0530be0> begin[:] <ast.Raise object at 0x7da1b0530f40> if <ast.BoolOp object at 0x7da18c4cc520> begin[:] <ast.Raise object at 0x7da18c4ce7d0> if compare[name[p_state] is constant[None]] begin[:] variable[p_state] assign[=] call[name[np].empty, parameter[name[n_states]]] call[name[p_state].fill, parameter[constant[0.5]]] if <ast.BoolOp object at 0x7da18c4cda20> begin[:] <ast.Raise object at 0x7da18dc9beb0> if compare[name[p_init] is constant[None]] begin[:] variable[p_init] assign[=] call[name[np].empty, parameter[name[n_states]]] call[name[p_init].fill, parameter[constant[0.5]]] if <ast.BoolOp object at 0x7da20c794b50> begin[:] <ast.Raise object at 0x7da20c7959f0> variable[states] assign[=] call[name[np].empty, parameter[tuple[[<ast.Name object at 0x7da20c794dc0>, <ast.Name object at 0x7da20c795660>]]]] variable[logp] assign[=] call[name[np].empty, parameter[name[n_states]]] variable[prob_binary] assign[=] call[name[np].empty, parameter[tuple[[<ast.Constant object at 0x7da20c794be0>, <ast.Name object at 0x7da20c795990>]]]] variable[p_state_binary] assign[=] call[name[np].empty, parameter[constant[2]]] variable[p_init_binary] assign[=] call[name[np].empty, parameter[constant[2]]] for taget[name[state]] in 
starred[call[name[range], parameter[name[n_states]]]] begin[:] call[name[prob_binary]][constant[0]] assign[=] binary_operation[constant[1] - call[name[prob]][name[state]]] call[name[prob_binary]][constant[1]] assign[=] call[name[prob]][name[state]] call[name[p_state_binary]][constant[0]] assign[=] binary_operation[constant[1] - call[name[p_state]][name[state]]] call[name[p_state_binary]][constant[1]] assign[=] call[name[p_state]][name[state]] call[name[p_init_binary]][constant[0]] assign[=] binary_operation[constant[1] - call[name[p_init]][name[state]]] call[name[p_init_binary]][constant[1]] assign[=] call[name[p_init]][name[state]] <ast.Tuple object at 0x7da18f09e1a0> assign[=] call[name[viterbi_discriminative], parameter[name[prob_binary], call[name[transition]][name[state]]]] if name[return_logp] begin[:] return[tuple[[<ast.Name object at 0x7da18f09c850>, <ast.Name object at 0x7da18f09cca0>]]] return[name[states]]
keyword[def] identifier[viterbi_binary] ( identifier[prob] , identifier[transition] , identifier[p_state] = keyword[None] , identifier[p_init] = keyword[None] , identifier[return_logp] = keyword[False] ): literal[string] identifier[prob] = identifier[np] . identifier[atleast_2d] ( identifier[prob] ) identifier[n_states] , identifier[n_steps] = identifier[prob] . identifier[shape] keyword[if] identifier[transition] . identifier[shape] ==( literal[int] , literal[int] ): identifier[transition] = identifier[np] . identifier[tile] ( identifier[transition] ,( identifier[n_states] , literal[int] , literal[int] )) keyword[elif] identifier[transition] . identifier[shape] !=( identifier[n_states] , literal[int] , literal[int] ): keyword[raise] identifier[ParameterError] ( literal[string] literal[string] . identifier[format] ( identifier[transition] . identifier[shape] ,( identifier[n_states] ))) keyword[if] identifier[np] . identifier[any] ( identifier[transition] < literal[int] ) keyword[or] keyword[not] identifier[np] . identifier[allclose] ( identifier[transition] . identifier[sum] ( identifier[axis] =- literal[int] ), literal[int] ): keyword[raise] identifier[ParameterError] ( literal[string] literal[string] ) keyword[if] identifier[np] . identifier[any] ( identifier[prob] < literal[int] ) keyword[or] identifier[np] . identifier[any] ( identifier[prob] > literal[int] ): keyword[raise] identifier[ParameterError] ( literal[string] ) keyword[if] identifier[p_state] keyword[is] keyword[None] : identifier[p_state] = identifier[np] . identifier[empty] ( identifier[n_states] ) identifier[p_state] . identifier[fill] ( literal[int] ) keyword[else] : identifier[p_state] = identifier[np] . identifier[atleast_1d] ( identifier[p_state] ) keyword[if] identifier[p_state] . identifier[shape] !=( identifier[n_states] ,) keyword[or] identifier[np] . identifier[any] ( identifier[p_state] < literal[int] ) keyword[or] identifier[np] . 
identifier[any] ( identifier[p_state] > literal[int] ): keyword[raise] identifier[ParameterError] ( literal[string] . identifier[format] ( identifier[p_state] )) keyword[if] identifier[p_init] keyword[is] keyword[None] : identifier[p_init] = identifier[np] . identifier[empty] ( identifier[n_states] ) identifier[p_init] . identifier[fill] ( literal[int] ) keyword[else] : identifier[p_init] = identifier[np] . identifier[atleast_1d] ( identifier[p_init] ) keyword[if] identifier[p_init] . identifier[shape] !=( identifier[n_states] ,) keyword[or] identifier[np] . identifier[any] ( identifier[p_init] < literal[int] ) keyword[or] identifier[np] . identifier[any] ( identifier[p_init] > literal[int] ): keyword[raise] identifier[ParameterError] ( literal[string] . identifier[format] ( identifier[p_init] )) identifier[states] = identifier[np] . identifier[empty] (( identifier[n_states] , identifier[n_steps] ), identifier[dtype] = identifier[int] ) identifier[logp] = identifier[np] . identifier[empty] ( identifier[n_states] ) identifier[prob_binary] = identifier[np] . identifier[empty] (( literal[int] , identifier[n_steps] )) identifier[p_state_binary] = identifier[np] . identifier[empty] ( literal[int] ) identifier[p_init_binary] = identifier[np] . 
identifier[empty] ( literal[int] ) keyword[for] identifier[state] keyword[in] identifier[range] ( identifier[n_states] ): identifier[prob_binary] [ literal[int] ]= literal[int] - identifier[prob] [ identifier[state] ] identifier[prob_binary] [ literal[int] ]= identifier[prob] [ identifier[state] ] identifier[p_state_binary] [ literal[int] ]= literal[int] - identifier[p_state] [ identifier[state] ] identifier[p_state_binary] [ literal[int] ]= identifier[p_state] [ identifier[state] ] identifier[p_init_binary] [ literal[int] ]= literal[int] - identifier[p_init] [ identifier[state] ] identifier[p_init_binary] [ literal[int] ]= identifier[p_init] [ identifier[state] ] identifier[states] [ identifier[state] ,:], identifier[logp] [ identifier[state] ]= identifier[viterbi_discriminative] ( identifier[prob_binary] , identifier[transition] [ identifier[state] ], identifier[p_state] = identifier[p_state_binary] , identifier[p_init] = identifier[p_init_binary] , identifier[return_logp] = keyword[True] ) keyword[if] identifier[return_logp] : keyword[return] identifier[states] , identifier[logp] keyword[return] identifier[states]
def viterbi_binary(prob, transition, p_state=None, p_init=None, return_logp=False):
    """Viterbi decoding from binary (multi-label), discriminative state predictions.

    Decodes each state label independently by reducing the multi-label
    problem to one binary Viterbi sub-problem per state and delegating
    each to `viterbi_discriminative`.

    Parameters
    ----------
    prob : np.ndarray [shape=(n_steps,) or (n_states, n_steps)], non-negative
        ``prob[s, t]`` is the probability of state ``s`` being active at
        time ``t``.  Values must lie in [0, 1].  A 1-dimensional input is
        promoted to shape ``(1, n_steps)``.
    transition : np.ndarray [shape=(2, 2) or (n_states, 2, 2)], non-negative
        Binary transition matrix; if 2-dimensional, the same matrix is
        used for every state.  Each row must sum to 1.
    p_state : np.ndarray [shape=(n_states,)] or None
        Marginal probability of each state being active; defaults to 0.5
        for every state.
    p_init : np.ndarray [shape=(n_states,)] or None
        Initial activation distribution for each state; defaults to 0.5
        for every state.
    return_logp : bool
        If True, also return the log-probability of each state's decoded
        activation sequence.

    Returns
    -------
    states : np.ndarray [shape=(n_states, n_steps), dtype=int]
        Binary activation matrix: ``states[s, t]`` is 1 iff state ``s``
        is active at time ``t``.
    logp : np.ndarray [shape=(n_states,)]
        Only returned when ``return_logp`` is True.

    Raises
    ------
    ParameterError
        If any input has an invalid shape or contains invalid
        probability values.
    """
    prob = np.atleast_2d(prob)
    n_states, n_steps = prob.shape

    # Broadcast a single shared 2x2 transition matrix across all states.
    if transition.shape == (2, 2):
        transition = np.tile(transition, (n_states, 1, 1))
    elif transition.shape != (n_states, 2, 2):
        # BUG FIX: the original passed `(n_states)` -- which is just the
        # integer n_states -- as the format argument, so the message showed
        # "(n_states, 2, 2)=<int>" instead of the expected shape tuple.
        raise ParameterError('transition.shape={}, must be (2,2) or '
                             '(n_states, 2, 2)={}'.format(transition.shape,
                                                          (n_states, 2, 2)))

    if np.any(transition < 0) or not np.allclose(transition.sum(axis=-1), 1):
        raise ParameterError('Invalid transition matrix: must be non-negative '
                             'and sum to 1 on each row.')

    if np.any(prob < 0) or np.any(prob > 1):
        raise ParameterError('Invalid probability values: prob must be between [0, 1]')

    if p_state is None:
        p_state = np.empty(n_states)
        p_state.fill(0.5)
    else:
        p_state = np.atleast_1d(p_state)
        if p_state.shape != (n_states,) or np.any(p_state < 0) or np.any(p_state > 1):
            raise ParameterError('Invalid marginal state distributions: p_state={}'.format(p_state))

    if p_init is None:
        p_init = np.empty(n_states)
        p_init.fill(0.5)
    else:
        p_init = np.atleast_1d(p_init)
        if p_init.shape != (n_states,) or np.any(p_init < 0) or np.any(p_init > 1):
            raise ParameterError('Invalid initial state distributions: p_init={}'.format(p_init))

    states = np.empty((n_states, n_steps), dtype=int)
    logp = np.empty(n_states)

    # Scratch buffers reused across the per-state binary sub-problems.
    prob_binary = np.empty((2, n_steps))
    p_state_binary = np.empty(2)
    p_init_binary = np.empty(2)

    for state in range(n_states):
        # Row 0 is the "inactive" likelihood, row 1 the "active" one.
        prob_binary[0] = 1 - prob[state]
        prob_binary[1] = prob[state]
        p_state_binary[0] = 1 - p_state[state]
        p_state_binary[1] = p_state[state]
        p_init_binary[0] = 1 - p_init[state]
        p_init_binary[1] = p_init[state]

        states[state, :], logp[state] = viterbi_discriminative(
            prob_binary, transition[state],
            p_state=p_state_binary,
            p_init=p_init_binary,
            return_logp=True)

    if return_logp:
        return states, logp

    return states
def repeat_masker_iterator(fh, alignment_index=None, header=True, verbose=False):
  """
  Iterator for repeatmasker coordinate annotation files. These files describe
  the location of repeat occurrences. There is (optionally) a two-line header
  with the names of the fields (ignored by the iterator, if present). Each
  line is a record of an occurrence. The description of fields for each line
  is given in from_repeat_masker_string.

  :param fh: stream-like object, or string filename, to load the annotations
             from
  :param alignment_index: an IndexedFile for full alignments; keys should be
                          repeat-masker IDs
  :param header: if True, expect and discard the two-line header; otherwise
                 we will expect there is no header
  :param verbose: if True, output additional status messages about progress
                  to stderr.
  """
  strm = fh
  # BUG FIX: was `type(fh).__name__ == "str"`, which fails for str
  # subclasses and is non-idiomatic; isinstance handles both.
  if isinstance(fh, str):
    strm = open(fh)

  # try to get an idea of how much data we have...
  if verbose:
    try:
      total = os.path.getsize(strm.name)
      pind = ProgressIndicator(totalToDo=total,
                               messagePrefix="completed",
                               messageSuffix="of processing " + strm.name)
    except AttributeError as e:
      # stream has no name/size (e.g. an in-memory buffer); disable progress
      sys.stderr.write(str(e))
      sys.stderr.write("completed [unknown] of processing index")
      verbose = False

  if header:
    # chomp first 2 lines
    next(strm)
    next(strm)

  for line in strm:
    if verbose:
      pind.done = strm.tell()
      pind.showProgress()

    line = line.strip()
    # skip blank lines (was `line == ""`; truthiness test is equivalent
    # after strip and idiomatic)
    if not line:
      continue

    rto = retrotransposon.from_repeat_masker_string(line)
    if alignment_index is not None:
      rto.pairwise_alignment =\
          JustInTimePairwiseAlignment(alignment_index, rto.uniq_id)
    yield rto
def function[repeat_masker_iterator, parameter[fh, alignment_index, header, verbose]]: constant[ Iterator for repeatmasker coordinate annotation files. These files describe the location of repeat occurrences. There is (optionally) a two-line header with the names of the fields (ignored by the iterator, if present). Each line is a record of an occurrence. The description of fields for each line is given in from_repeat_masker_string. :param fh: stream-like object, or string filename, to load the annotations from :param alignment_index: an IndexedFile for full alignments; keys should be repeat-masker IDs :param header: if True, expect and discard the two-line header; otherwise we will expect there is no header :param verbose: if True, output additional status messages about progress to stderr. ] variable[strm] assign[=] name[fh] if compare[call[name[type], parameter[name[fh]]].__name__ equal[==] constant[str]] begin[:] variable[strm] assign[=] call[name[open], parameter[name[fh]]] if name[verbose] begin[:] <ast.Try object at 0x7da204566110> if name[header] begin[:] call[name[next], parameter[name[strm]]] call[name[next], parameter[name[strm]]] for taget[name[line]] in starred[name[strm]] begin[:] if name[verbose] begin[:] name[pind].done assign[=] call[name[strm].tell, parameter[]] call[name[pind].showProgress, parameter[]] variable[line] assign[=] call[name[line].strip, parameter[]] if compare[name[line] equal[==] constant[]] begin[:] continue variable[rto] assign[=] call[name[retrotransposon].from_repeat_masker_string, parameter[name[line]]] if compare[name[alignment_index] is_not constant[None]] begin[:] name[rto].pairwise_alignment assign[=] call[name[JustInTimePairwiseAlignment], parameter[name[alignment_index], name[rto].uniq_id]] <ast.Yield object at 0x7da2043469b0>
keyword[def] identifier[repeat_masker_iterator] ( identifier[fh] , identifier[alignment_index] = keyword[None] , identifier[header] = keyword[True] , identifier[verbose] = keyword[False] ): literal[string] identifier[strm] = identifier[fh] keyword[if] identifier[type] ( identifier[fh] ). identifier[__name__] == literal[string] : identifier[strm] = identifier[open] ( identifier[fh] ) keyword[if] identifier[verbose] : keyword[try] : identifier[total] = identifier[os] . identifier[path] . identifier[getsize] ( identifier[strm] . identifier[name] ) identifier[pind] = identifier[ProgressIndicator] ( identifier[totalToDo] = identifier[total] , identifier[messagePrefix] = literal[string] , identifier[messageSuffix] = literal[string] + identifier[strm] . identifier[name] ) keyword[except] identifier[AttributeError] keyword[as] identifier[e] : identifier[sys] . identifier[stderr] . identifier[write] ( identifier[str] ( identifier[e] )) identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) identifier[verbose] = keyword[False] keyword[if] identifier[header] : identifier[next] ( identifier[strm] ) identifier[next] ( identifier[strm] ) keyword[for] identifier[line] keyword[in] identifier[strm] : keyword[if] identifier[verbose] : identifier[pind] . identifier[done] = identifier[strm] . identifier[tell] () identifier[pind] . identifier[showProgress] () identifier[line] = identifier[line] . identifier[strip] () keyword[if] identifier[line] == literal[string] : keyword[continue] identifier[rto] = identifier[retrotransposon] . identifier[from_repeat_masker_string] ( identifier[line] ) keyword[if] identifier[alignment_index] keyword[is] keyword[not] keyword[None] : identifier[rto] . identifier[pairwise_alignment] = identifier[JustInTimePairwiseAlignment] ( identifier[alignment_index] , identifier[rto] . identifier[uniq_id] ) keyword[yield] identifier[rto]
def repeat_masker_iterator(fh, alignment_index=None, header=True, verbose=False):
  """
  Yield repeat-occurrence records from a repeatmasker coordinate annotation
  file.  An optional two-line field-name header is discarded; every other
  non-blank line is parsed with from_repeat_masker_string.

  :param fh: stream-like object, or string filename, to load the annotations
             from
  :param alignment_index: an IndexedFile for full alignments; keys should be
                          repeat-masker IDs
  :param header: if True, expect and discard the two-line header; otherwise
                 we will expect there is no header
  :param verbose: if True, output additional status messages about progress
                  to stderr.
  """
  stream = open(fh) if type(fh).__name__ == "str" else fh

  # Size the input, if possible, so a progress indicator can be displayed.
  if verbose:
    try:
      pind = ProgressIndicator(totalToDo=os.path.getsize(stream.name),
                               messagePrefix="completed",
                               messageSuffix="of processing " + stream.name)
    except AttributeError as e:
      sys.stderr.write(str(e))
      sys.stderr.write("completed [unknown] of processing index")
      verbose = False

  if header:
    # discard the two header lines
    for _ in (0, 1):
      next(stream)

  for raw in stream:
    if verbose:
      pind.done = stream.tell()
      pind.showProgress()

    record = raw.strip()
    if not record:
      continue

    occurrence = retrotransposon.from_repeat_masker_string(record)
    if alignment_index is not None:
      occurrence.pairwise_alignment = \
          JustInTimePairwiseAlignment(alignment_index, occurrence.uniq_id)
    yield occurrence
def remove_option(self, section, option):
    """Remove an option."""
    # A falsy section name (or the DEFAULT section) targets the defaults
    # mapping; any other section must already exist.
    if section and section != DEFAULTSECT:
        try:
            sectdict = self._sections[section]
        except KeyError:
            raise NoSectionError(section)
    else:
        sectdict = self._defaults
    option = self.optionxform(option)
    if option in sectdict:
        del sectdict[option]
        return True
    return False
def function[remove_option, parameter[self, section, option]]: constant[Remove an option.] if <ast.BoolOp object at 0x7da18f00da20> begin[:] variable[sectdict] assign[=] name[self]._defaults variable[option] assign[=] call[name[self].optionxform, parameter[name[option]]] variable[existed] assign[=] compare[name[option] in name[sectdict]] if name[existed] begin[:] <ast.Delete object at 0x7da18eb56b30> return[name[existed]]
keyword[def] identifier[remove_option] ( identifier[self] , identifier[section] , identifier[option] ): literal[string] keyword[if] keyword[not] identifier[section] keyword[or] identifier[section] == identifier[DEFAULTSECT] : identifier[sectdict] = identifier[self] . identifier[_defaults] keyword[else] : keyword[try] : identifier[sectdict] = identifier[self] . identifier[_sections] [ identifier[section] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[NoSectionError] ( identifier[section] ) identifier[option] = identifier[self] . identifier[optionxform] ( identifier[option] ) identifier[existed] = identifier[option] keyword[in] identifier[sectdict] keyword[if] identifier[existed] : keyword[del] identifier[sectdict] [ identifier[option] ] keyword[return] identifier[existed]
def remove_option(self, section, option):
    """Remove an option."""
    if not section or section == DEFAULTSECT:
        sect = self._defaults
    else:
        # section lookup is EAFP: a missing section surfaces as
        # NoSectionError rather than a raw KeyError
        try:
            sect = self._sections[section]
        except KeyError:
            raise NoSectionError(section)
    key = self.optionxform(option)
    found = key in sect
    if found:
        del sect[key]
    return found
def f_delete_items(self, iterator, *args, **kwargs):
    """Deletes items from storage on disk.

    Per default the item is NOT removed from the trajectory.

    Links are NOT deleted on the hard disk, please delete links manually
    before deleting data!

    :param iterator:
        A sequence of items you want to remove. Either the instances
        themselves or strings with the names of the items.
    :param remove_from_trajectory:
        If items should also be removed from trajectory. Default is `False`.
    :param args: Additional arguments passed to the storage service
    :param kwargs: Additional keyword arguments passed to the storage service

    If you use the standard hdf5 storage service, you can pass the
    following additional keyword arguments:

    :param delete_only:
        You can partially delete leaf nodes. Specify a list of parts of the
        result node that should be deleted like
        `delete_only=['mystuff','otherstuff']`. This will only delete the
        hdf5 sub parts `mystuff` and `otherstuff` from disk.  BE CAREFUL,
        erasing data partly happens at your own risk; depending on how
        complex the loading process of your result node is, you might not
        be able to reconstruct any data due to partially deleting some of
        it.  Names must be given as they were stored to HDF5, which may
        differ from the names in the trajectory container.  If a node given
        in `delete_only` cannot be found, a warning is issued.  Massive
        deletion will fragment your HDF5 file; avoid changing data on disk
        whenever you can.  To erase a full node, ignore this argument or
        set it to `None`.
    :param remove_from_item:
        If data that you want to delete from storage should also be removed
        from the items in `iterator` if they contain these. Default is
        `False`.
    :param recursive:
        If you want to delete a group node and it has children you need to
        set `recursive` to `True`. Default is `False`.
    """
    remove_from_trajectory = kwargs.pop('remove_from_trajectory', False)
    # `recursive` is read but deliberately NOT popped: the storage service
    # also receives it via kwargs (through _fetch_items), as before.
    recursive = kwargs.get('recursive', False)

    # Format the request in a form that is understood by the storage
    # service, aka (msg, item, args, kwargs).
    fetched_items = self._nn_interface._fetch_items(REMOVE, iterator, args, kwargs)

    if not fetched_items:
        self._logger.warning('Your removal was not successful, could not find a single '
                             'item to remove.')
        return

    try:
        self._storage_service.store(pypetconstants.LIST, fetched_items,
                                    trajectory_name=self.v_name)
    except Exception:
        # BUG FIX: was a bare `except:`, which also logged on
        # SystemExit/KeyboardInterrupt; the exception is still re-raised.
        self._logger.error('Could not remove `%s` from the trajectory. Maybe the'
                           ' item(s) was/were never stored to disk.' %
                           str(fetched_items))
        raise

    for _, item, _, _ in fetched_items:
        if remove_from_trajectory:
            self._nn_interface._remove_node_or_leaf(item, recursive=recursive)
        else:
            # keep the node in the trajectory but mark it as no longer
            # backed by on-disk storage
            item._stored = False
def function[f_delete_items, parameter[self, iterator]]: constant[Deletes items from storage on disk. Per default the item is NOT removed from the trajectory. Links are NOT deleted on the hard disk, please delete links manually before deleting data! :param iterator: A sequence of items you want to remove. Either the instances themselves or strings with the names of the items. :param remove_from_trajectory: If items should also be removed from trajectory. Default is `False`. :param args: Additional arguments passed to the storage service :param kwargs: Additional keyword arguments passed to the storage service If you use the standard hdf5 storage service, you can pass the following additional keyword argument: :param delete_only: You can partially delete leaf nodes. Specify a list of parts of the result node that should be deleted like `delete_only=['mystuff','otherstuff']`. This wil only delete the hdf5 sub parts `mystuff` and `otherstuff` from disk. BE CAREFUL, erasing data partly happens at your own risk. Depending on how complex the loading process of your result node is, you might not be able to reconstruct any data due to partially deleting some of it. Be aware that you need to specify the names of parts as they were stored to HDF5. Depending on how your leaf construction works, this may differ from the names the data might have in your leaf in the trajectory container. If the hdf5 nodes you specified in `delete_only` cannot be found a warning is issued. Note that massive deletion will fragment your HDF5 file. Try to avoid changing data on disk whenever you can. If you want to erase a full node, simply ignore this argument or set to `None`. :param remove_from_item: If data that you want to delete from storage should also be removed from the items in `iterator` if they contain these. Default is `False`. :param recursive: If you want to delete a group node and it has children you need to set `recursive` to `True. Default is `False`. 
] variable[remove_from_trajectory] assign[=] call[name[kwargs].pop, parameter[constant[remove_from_trajectory], constant[False]]] variable[recursive] assign[=] call[name[kwargs].get, parameter[constant[recursive], constant[False]]] variable[fetched_items] assign[=] call[name[self]._nn_interface._fetch_items, parameter[name[REMOVE], name[iterator], name[args], name[kwargs]]] if name[fetched_items] begin[:] <ast.Try object at 0x7da1b03b8e20> for taget[tuple[[<ast.Name object at 0x7da1b03b99c0>, <ast.Name object at 0x7da1b03b83a0>, <ast.Name object at 0x7da1b03b8040>, <ast.Name object at 0x7da1b03ba3b0>]]] in starred[name[fetched_items]] begin[:] if name[remove_from_trajectory] begin[:] call[name[self]._nn_interface._remove_node_or_leaf, parameter[name[item]]]
keyword[def] identifier[f_delete_items] ( identifier[self] , identifier[iterator] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[remove_from_trajectory] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[recursive] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ) identifier[fetched_items] = identifier[self] . identifier[_nn_interface] . identifier[_fetch_items] ( identifier[REMOVE] , identifier[iterator] , identifier[args] , identifier[kwargs] ) keyword[if] identifier[fetched_items] : keyword[try] : identifier[self] . identifier[_storage_service] . identifier[store] ( identifier[pypetconstants] . identifier[LIST] , identifier[fetched_items] , identifier[trajectory_name] = identifier[self] . identifier[v_name] ) keyword[except] : identifier[self] . identifier[_logger] . identifier[error] ( literal[string] literal[string] % identifier[str] ( identifier[fetched_items] )) keyword[raise] keyword[for] identifier[_] , identifier[item] , identifier[dummy1] , identifier[dummy2] keyword[in] identifier[fetched_items] : keyword[if] identifier[remove_from_trajectory] : identifier[self] . identifier[_nn_interface] . identifier[_remove_node_or_leaf] ( identifier[item] , identifier[recursive] = identifier[recursive] ) keyword[else] : identifier[item] . identifier[_stored] = keyword[False] keyword[else] : identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] literal[string] )
def f_delete_items(self, iterator, *args, **kwargs): """Deletes items from storage on disk. Per default the item is NOT removed from the trajectory. Links are NOT deleted on the hard disk, please delete links manually before deleting data! :param iterator: A sequence of items you want to remove. Either the instances themselves or strings with the names of the items. :param remove_from_trajectory: If items should also be removed from trajectory. Default is `False`. :param args: Additional arguments passed to the storage service :param kwargs: Additional keyword arguments passed to the storage service If you use the standard hdf5 storage service, you can pass the following additional keyword argument: :param delete_only: You can partially delete leaf nodes. Specify a list of parts of the result node that should be deleted like `delete_only=['mystuff','otherstuff']`. This wil only delete the hdf5 sub parts `mystuff` and `otherstuff` from disk. BE CAREFUL, erasing data partly happens at your own risk. Depending on how complex the loading process of your result node is, you might not be able to reconstruct any data due to partially deleting some of it. Be aware that you need to specify the names of parts as they were stored to HDF5. Depending on how your leaf construction works, this may differ from the names the data might have in your leaf in the trajectory container. If the hdf5 nodes you specified in `delete_only` cannot be found a warning is issued. Note that massive deletion will fragment your HDF5 file. Try to avoid changing data on disk whenever you can. If you want to erase a full node, simply ignore this argument or set to `None`. :param remove_from_item: If data that you want to delete from storage should also be removed from the items in `iterator` if they contain these. Default is `False`. :param recursive: If you want to delete a group node and it has children you need to set `recursive` to `True. Default is `False`. 
""" remove_from_trajectory = kwargs.pop('remove_from_trajectory', False) recursive = kwargs.get('recursive', False) # Will format the request in a form that is understood by the storage service # aka (msg, item, args, kwargs) fetched_items = self._nn_interface._fetch_items(REMOVE, iterator, args, kwargs) if fetched_items: try: self._storage_service.store(pypetconstants.LIST, fetched_items, trajectory_name=self.v_name) # depends on [control=['try'], data=[]] except: self._logger.error('Could not remove `%s` from the trajectory. Maybe the item(s) was/were never stored to disk.' % str(fetched_items)) raise # depends on [control=['except'], data=[]] for (_, item, dummy1, dummy2) in fetched_items: if remove_from_trajectory: self._nn_interface._remove_node_or_leaf(item, recursive=recursive) # depends on [control=['if'], data=[]] else: item._stored = False # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: self._logger.warning('Your removal was not successful, could not find a single item to remove.')
def update(self, list_id, webhook_id, data): """ Update the settings for an existing webhook. :param list_id: The unique id for the list :type list_id: :py:class:`str` :param webhook_id: The unique id for the webhook :type webhook_id: :py:class:`str` """ self.list_id = list_id self.webhook_id = webhook_id return self._mc_client._patch(url=self._build_path(list_id, 'webhooks', webhook_id), data=data)
def function[update, parameter[self, list_id, webhook_id, data]]: constant[ Update the settings for an existing webhook. :param list_id: The unique id for the list :type list_id: :py:class:`str` :param webhook_id: The unique id for the webhook :type webhook_id: :py:class:`str` ] name[self].list_id assign[=] name[list_id] name[self].webhook_id assign[=] name[webhook_id] return[call[name[self]._mc_client._patch, parameter[]]]
keyword[def] identifier[update] ( identifier[self] , identifier[list_id] , identifier[webhook_id] , identifier[data] ): literal[string] identifier[self] . identifier[list_id] = identifier[list_id] identifier[self] . identifier[webhook_id] = identifier[webhook_id] keyword[return] identifier[self] . identifier[_mc_client] . identifier[_patch] ( identifier[url] = identifier[self] . identifier[_build_path] ( identifier[list_id] , literal[string] , identifier[webhook_id] ), identifier[data] = identifier[data] )
def update(self, list_id, webhook_id, data): """ Update the settings for an existing webhook. :param list_id: The unique id for the list :type list_id: :py:class:`str` :param webhook_id: The unique id for the webhook :type webhook_id: :py:class:`str` """ self.list_id = list_id self.webhook_id = webhook_id return self._mc_client._patch(url=self._build_path(list_id, 'webhooks', webhook_id), data=data)
def _find_and_replace(self, date_string, captures): """ :warning: when multiple tz matches exist the last sorted capture will trump :param date_string: :return: date_string, tz_string """ # add timezones to replace cloned_replacements = copy.copy(REPLACEMENTS) # don't mutate for tz_string in captures.get("timezones", []): cloned_replacements.update({tz_string: " "}) date_string = date_string.lower() for key, replacement in cloned_replacements.items(): # we really want to match all permutations of the key surrounded by whitespace chars except one # for example: consider the key = 'to' # 1. match 'to ' # 2. match ' to' # 3. match ' to ' # but never match r'(\s|)to(\s|)' which would make 'october' > 'ocber' date_string = re.sub( r"(^|\s)" + key + r"(\s|$)", replacement, date_string, flags=re.IGNORECASE, ) return date_string, self._pop_tz_string(sorted(captures.get("timezones", [])))
def function[_find_and_replace, parameter[self, date_string, captures]]: constant[ :warning: when multiple tz matches exist the last sorted capture will trump :param date_string: :return: date_string, tz_string ] variable[cloned_replacements] assign[=] call[name[copy].copy, parameter[name[REPLACEMENTS]]] for taget[name[tz_string]] in starred[call[name[captures].get, parameter[constant[timezones], list[[]]]]] begin[:] call[name[cloned_replacements].update, parameter[dictionary[[<ast.Name object at 0x7da1b063ea40>], [<ast.Constant object at 0x7da1b063ff70>]]]] variable[date_string] assign[=] call[name[date_string].lower, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b063c160>, <ast.Name object at 0x7da1b063c2e0>]]] in starred[call[name[cloned_replacements].items, parameter[]]] begin[:] variable[date_string] assign[=] call[name[re].sub, parameter[binary_operation[binary_operation[constant[(^|\s)] + name[key]] + constant[(\s|$)]], name[replacement], name[date_string]]] return[tuple[[<ast.Name object at 0x7da1b063dfc0>, <ast.Call object at 0x7da1b063fdc0>]]]
keyword[def] identifier[_find_and_replace] ( identifier[self] , identifier[date_string] , identifier[captures] ): literal[string] identifier[cloned_replacements] = identifier[copy] . identifier[copy] ( identifier[REPLACEMENTS] ) keyword[for] identifier[tz_string] keyword[in] identifier[captures] . identifier[get] ( literal[string] ,[]): identifier[cloned_replacements] . identifier[update] ({ identifier[tz_string] : literal[string] }) identifier[date_string] = identifier[date_string] . identifier[lower] () keyword[for] identifier[key] , identifier[replacement] keyword[in] identifier[cloned_replacements] . identifier[items] (): identifier[date_string] = identifier[re] . identifier[sub] ( literal[string] + identifier[key] + literal[string] , identifier[replacement] , identifier[date_string] , identifier[flags] = identifier[re] . identifier[IGNORECASE] , ) keyword[return] identifier[date_string] , identifier[self] . identifier[_pop_tz_string] ( identifier[sorted] ( identifier[captures] . identifier[get] ( literal[string] ,[])))
def _find_and_replace(self, date_string, captures): """ :warning: when multiple tz matches exist the last sorted capture will trump :param date_string: :return: date_string, tz_string """ # add timezones to replace cloned_replacements = copy.copy(REPLACEMENTS) # don't mutate for tz_string in captures.get('timezones', []): cloned_replacements.update({tz_string: ' '}) # depends on [control=['for'], data=['tz_string']] date_string = date_string.lower() for (key, replacement) in cloned_replacements.items(): # we really want to match all permutations of the key surrounded by whitespace chars except one # for example: consider the key = 'to' # 1. match 'to ' # 2. match ' to' # 3. match ' to ' # but never match r'(\s|)to(\s|)' which would make 'october' > 'ocber' date_string = re.sub('(^|\\s)' + key + '(\\s|$)', replacement, date_string, flags=re.IGNORECASE) # depends on [control=['for'], data=[]] return (date_string, self._pop_tz_string(sorted(captures.get('timezones', []))))
def _symbol_bars( self, symbols, size, _from=None, to=None, limit=None): ''' Query historic_agg either minute or day in parallel for multiple symbols, and return in dict. symbols: list[str] size: str ('day', 'minute') _from: str or pd.Timestamp to: str or pd.Timestamp limit: str or int return: dict[str -> pd.DataFrame] ''' assert size in ('day', 'minute') # temp workaround for less bars after masking by # market hours query_limit = limit if query_limit is not None: query_limit *= 2 @skip_http_error((404, 504)) def fetch(symbol): df = self._api.polygon.historic_agg( size, symbol, _from, to, query_limit).df # zipline -> right label # API result -> left label (beginning of bucket) if size == 'minute': df.index += pd.Timedelta('1min') # mask out bars outside market hours mask = self._cal.minutes_in_range( df.index[0], df.index[-1], ).tz_convert(NY) df = df.reindex(mask) if limit is not None: df = df.iloc[-limit:] return df return parallelize(fetch)(symbols)
def function[_symbol_bars, parameter[self, symbols, size, _from, to, limit]]: constant[ Query historic_agg either minute or day in parallel for multiple symbols, and return in dict. symbols: list[str] size: str ('day', 'minute') _from: str or pd.Timestamp to: str or pd.Timestamp limit: str or int return: dict[str -> pd.DataFrame] ] assert[compare[name[size] in tuple[[<ast.Constant object at 0x7da18c4cc3d0>, <ast.Constant object at 0x7da18c4cfa60>]]]] variable[query_limit] assign[=] name[limit] if compare[name[query_limit] is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da18c4cc340> def function[fetch, parameter[symbol]]: variable[df] assign[=] call[name[self]._api.polygon.historic_agg, parameter[name[size], name[symbol], name[_from], name[to], name[query_limit]]].df if compare[name[size] equal[==] constant[minute]] begin[:] <ast.AugAssign object at 0x7da18c4cf5b0> variable[mask] assign[=] call[call[name[self]._cal.minutes_in_range, parameter[call[name[df].index][constant[0]], call[name[df].index][<ast.UnaryOp object at 0x7da18c4cd900>]]].tz_convert, parameter[name[NY]]] variable[df] assign[=] call[name[df].reindex, parameter[name[mask]]] if compare[name[limit] is_not constant[None]] begin[:] variable[df] assign[=] call[name[df].iloc][<ast.Slice object at 0x7da18c4ceef0>] return[name[df]] return[call[call[name[parallelize], parameter[name[fetch]]], parameter[name[symbols]]]]
keyword[def] identifier[_symbol_bars] ( identifier[self] , identifier[symbols] , identifier[size] , identifier[_from] = keyword[None] , identifier[to] = keyword[None] , identifier[limit] = keyword[None] ): literal[string] keyword[assert] identifier[size] keyword[in] ( literal[string] , literal[string] ) identifier[query_limit] = identifier[limit] keyword[if] identifier[query_limit] keyword[is] keyword[not] keyword[None] : identifier[query_limit] *= literal[int] @ identifier[skip_http_error] (( literal[int] , literal[int] )) keyword[def] identifier[fetch] ( identifier[symbol] ): identifier[df] = identifier[self] . identifier[_api] . identifier[polygon] . identifier[historic_agg] ( identifier[size] , identifier[symbol] , identifier[_from] , identifier[to] , identifier[query_limit] ). identifier[df] keyword[if] identifier[size] == literal[string] : identifier[df] . identifier[index] += identifier[pd] . identifier[Timedelta] ( literal[string] ) identifier[mask] = identifier[self] . identifier[_cal] . identifier[minutes_in_range] ( identifier[df] . identifier[index] [ literal[int] ], identifier[df] . identifier[index] [- literal[int] ], ). identifier[tz_convert] ( identifier[NY] ) identifier[df] = identifier[df] . identifier[reindex] ( identifier[mask] ) keyword[if] identifier[limit] keyword[is] keyword[not] keyword[None] : identifier[df] = identifier[df] . identifier[iloc] [- identifier[limit] :] keyword[return] identifier[df] keyword[return] identifier[parallelize] ( identifier[fetch] )( identifier[symbols] )
def _symbol_bars(self, symbols, size, _from=None, to=None, limit=None): """ Query historic_agg either minute or day in parallel for multiple symbols, and return in dict. symbols: list[str] size: str ('day', 'minute') _from: str or pd.Timestamp to: str or pd.Timestamp limit: str or int return: dict[str -> pd.DataFrame] """ assert size in ('day', 'minute') # temp workaround for less bars after masking by # market hours query_limit = limit if query_limit is not None: query_limit *= 2 # depends on [control=['if'], data=['query_limit']] @skip_http_error((404, 504)) def fetch(symbol): df = self._api.polygon.historic_agg(size, symbol, _from, to, query_limit).df # zipline -> right label # API result -> left label (beginning of bucket) if size == 'minute': df.index += pd.Timedelta('1min') # mask out bars outside market hours mask = self._cal.minutes_in_range(df.index[0], df.index[-1]).tz_convert(NY) df = df.reindex(mask) # depends on [control=['if'], data=[]] if limit is not None: df = df.iloc[-limit:] # depends on [control=['if'], data=['limit']] return df return parallelize(fetch)(symbols)
def leaf_list(cls, name, parent=None, interleave=None): """Create _list_ node for a leaf-list.""" node = cls("_list_", parent, interleave=interleave) node.attr["name"] = name node.keys = None node.minEl = "0" node.maxEl = None node.occur = 3 return node
def function[leaf_list, parameter[cls, name, parent, interleave]]: constant[Create _list_ node for a leaf-list.] variable[node] assign[=] call[name[cls], parameter[constant[_list_], name[parent]]] call[name[node].attr][constant[name]] assign[=] name[name] name[node].keys assign[=] constant[None] name[node].minEl assign[=] constant[0] name[node].maxEl assign[=] constant[None] name[node].occur assign[=] constant[3] return[name[node]]
keyword[def] identifier[leaf_list] ( identifier[cls] , identifier[name] , identifier[parent] = keyword[None] , identifier[interleave] = keyword[None] ): literal[string] identifier[node] = identifier[cls] ( literal[string] , identifier[parent] , identifier[interleave] = identifier[interleave] ) identifier[node] . identifier[attr] [ literal[string] ]= identifier[name] identifier[node] . identifier[keys] = keyword[None] identifier[node] . identifier[minEl] = literal[string] identifier[node] . identifier[maxEl] = keyword[None] identifier[node] . identifier[occur] = literal[int] keyword[return] identifier[node]
def leaf_list(cls, name, parent=None, interleave=None): """Create _list_ node for a leaf-list.""" node = cls('_list_', parent, interleave=interleave) node.attr['name'] = name node.keys = None node.minEl = '0' node.maxEl = None node.occur = 3 return node
def pip(name): '''Parse requirements file''' with io.open(os.path.join('requirements', '{0}.pip'.format(name))) as f: return f.readlines()
def function[pip, parameter[name]]: constant[Parse requirements file] with call[name[io].open, parameter[call[name[os].path.join, parameter[constant[requirements], call[constant[{0}.pip].format, parameter[name[name]]]]]]] begin[:] return[call[name[f].readlines, parameter[]]]
keyword[def] identifier[pip] ( identifier[name] ): literal[string] keyword[with] identifier[io] . identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( literal[string] , literal[string] . identifier[format] ( identifier[name] ))) keyword[as] identifier[f] : keyword[return] identifier[f] . identifier[readlines] ()
def pip(name): """Parse requirements file""" with io.open(os.path.join('requirements', '{0}.pip'.format(name))) as f: return f.readlines() # depends on [control=['with'], data=['f']]
def get_imports(self, ast_body): """ Return all the import statements given an AST body (AST nodes). Args: ast_body (compiled code's body): the body to filter. Returns: list of dict: the import statements. """ imports = [] for node in ast_body: if isinstance(node, ast.Import): imports.extend({'target': name.name, 'lineno': node.lineno} for name in node.names) elif isinstance(node, ast.ImportFrom): for name in node.names: name = ( self.absolute_name(self.depth - node.level) + '.' if node.level > 0 else '' ) + ( node.module + '.' if node.module else '' ) + name.name imports.append({'target': name, 'lineno': node.lineno}) elif isinstance(node, Module.RECURSIVE_NODES): imports.extend(self.get_imports(node.body)) if isinstance(node, ast.Try): imports.extend(self.get_imports(node.finalbody)) return imports
def function[get_imports, parameter[self, ast_body]]: constant[ Return all the import statements given an AST body (AST nodes). Args: ast_body (compiled code's body): the body to filter. Returns: list of dict: the import statements. ] variable[imports] assign[=] list[[]] for taget[name[node]] in starred[name[ast_body]] begin[:] if call[name[isinstance], parameter[name[node], name[ast].Import]] begin[:] call[name[imports].extend, parameter[<ast.GeneratorExp object at 0x7da2044c2d70>]] return[name[imports]]
keyword[def] identifier[get_imports] ( identifier[self] , identifier[ast_body] ): literal[string] identifier[imports] =[] keyword[for] identifier[node] keyword[in] identifier[ast_body] : keyword[if] identifier[isinstance] ( identifier[node] , identifier[ast] . identifier[Import] ): identifier[imports] . identifier[extend] ({ literal[string] : identifier[name] . identifier[name] , literal[string] : identifier[node] . identifier[lineno] } keyword[for] identifier[name] keyword[in] identifier[node] . identifier[names] ) keyword[elif] identifier[isinstance] ( identifier[node] , identifier[ast] . identifier[ImportFrom] ): keyword[for] identifier[name] keyword[in] identifier[node] . identifier[names] : identifier[name] =( identifier[self] . identifier[absolute_name] ( identifier[self] . identifier[depth] - identifier[node] . identifier[level] )+ literal[string] keyword[if] identifier[node] . identifier[level] > literal[int] keyword[else] literal[string] )+( identifier[node] . identifier[module] + literal[string] keyword[if] identifier[node] . identifier[module] keyword[else] literal[string] )+ identifier[name] . identifier[name] identifier[imports] . identifier[append] ({ literal[string] : identifier[name] , literal[string] : identifier[node] . identifier[lineno] }) keyword[elif] identifier[isinstance] ( identifier[node] , identifier[Module] . identifier[RECURSIVE_NODES] ): identifier[imports] . identifier[extend] ( identifier[self] . identifier[get_imports] ( identifier[node] . identifier[body] )) keyword[if] identifier[isinstance] ( identifier[node] , identifier[ast] . identifier[Try] ): identifier[imports] . identifier[extend] ( identifier[self] . identifier[get_imports] ( identifier[node] . identifier[finalbody] )) keyword[return] identifier[imports]
def get_imports(self, ast_body): """ Return all the import statements given an AST body (AST nodes). Args: ast_body (compiled code's body): the body to filter. Returns: list of dict: the import statements. """ imports = [] for node in ast_body: if isinstance(node, ast.Import): imports.extend(({'target': name.name, 'lineno': node.lineno} for name in node.names)) # depends on [control=['if'], data=[]] elif isinstance(node, ast.ImportFrom): for name in node.names: name = (self.absolute_name(self.depth - node.level) + '.' if node.level > 0 else '') + (node.module + '.' if node.module else '') + name.name imports.append({'target': name, 'lineno': node.lineno}) # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]] elif isinstance(node, Module.RECURSIVE_NODES): imports.extend(self.get_imports(node.body)) if isinstance(node, ast.Try): imports.extend(self.get_imports(node.finalbody)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']] return imports
def challenge_hash(peer_challenge, authenticator_challenge, username): """ChallengeHash""" sha_hash = hashlib.sha1() sha_hash.update(peer_challenge) sha_hash.update(authenticator_challenge) sha_hash.update(username) return sha_hash.digest()[:8]
def function[challenge_hash, parameter[peer_challenge, authenticator_challenge, username]]: constant[ChallengeHash] variable[sha_hash] assign[=] call[name[hashlib].sha1, parameter[]] call[name[sha_hash].update, parameter[name[peer_challenge]]] call[name[sha_hash].update, parameter[name[authenticator_challenge]]] call[name[sha_hash].update, parameter[name[username]]] return[call[call[name[sha_hash].digest, parameter[]]][<ast.Slice object at 0x7da2046210f0>]]
keyword[def] identifier[challenge_hash] ( identifier[peer_challenge] , identifier[authenticator_challenge] , identifier[username] ): literal[string] identifier[sha_hash] = identifier[hashlib] . identifier[sha1] () identifier[sha_hash] . identifier[update] ( identifier[peer_challenge] ) identifier[sha_hash] . identifier[update] ( identifier[authenticator_challenge] ) identifier[sha_hash] . identifier[update] ( identifier[username] ) keyword[return] identifier[sha_hash] . identifier[digest] ()[: literal[int] ]
def challenge_hash(peer_challenge, authenticator_challenge, username): """ChallengeHash""" sha_hash = hashlib.sha1() sha_hash.update(peer_challenge) sha_hash.update(authenticator_challenge) sha_hash.update(username) return sha_hash.digest()[:8]
async def apply(self, sender: str, recipient: str, mailbox: str, append_msg: AppendMessage) \ -> Tuple[Optional[str], AppendMessage]: """Run the filter and return the mailbox where it should be appended, or None to discard, and the message to be appended, which is usually the same as ``append_msg``. Args: sender: The envelope sender of the message. recipient: The envelope recipient of the message. mailbox: The intended mailbox to append the message. append_msg: The message to be appended. raises: :exc:`~pymap.exceptions.AppendFailure` """ ...
<ast.AsyncFunctionDef object at 0x7da20e9551e0>
keyword[async] keyword[def] identifier[apply] ( identifier[self] , identifier[sender] : identifier[str] , identifier[recipient] : identifier[str] , identifier[mailbox] : identifier[str] , identifier[append_msg] : identifier[AppendMessage] )-> identifier[Tuple] [ identifier[Optional] [ identifier[str] ], identifier[AppendMessage] ]: literal[string] ...
async def apply(self, sender: str, recipient: str, mailbox: str, append_msg: AppendMessage) -> Tuple[Optional[str], AppendMessage]: """Run the filter and return the mailbox where it should be appended, or None to discard, and the message to be appended, which is usually the same as ``append_msg``. Args: sender: The envelope sender of the message. recipient: The envelope recipient of the message. mailbox: The intended mailbox to append the message. append_msg: The message to be appended. raises: :exc:`~pymap.exceptions.AppendFailure` """ ...
def wait_for_ready(self, instance_id, limit=14400, delay=10, pending=False): """Determine if a Server is ready. A server is ready when no transactions are running on it. :param int instance_id: The instance ID with the pending transaction :param int limit: The maximum amount of seconds to wait. :param int delay: The number of seconds to sleep before checks. Defaults to 10. """ now = time.time() until = now + limit mask = "mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]" instance = self.get_hardware(instance_id, mask=mask) while now <= until: if utils.is_ready(instance, pending): return True transaction = utils.lookup(instance, 'activeTransaction', 'transactionStatus', 'friendlyName') snooze = min(delay, until - now) LOGGER.info("%s - %d not ready. Auto retry in %ds", transaction, instance_id, snooze) time.sleep(snooze) instance = self.get_hardware(instance_id, mask=mask) now = time.time() LOGGER.info("Waiting for %d expired.", instance_id) return False
def function[wait_for_ready, parameter[self, instance_id, limit, delay, pending]]: constant[Determine if a Server is ready. A server is ready when no transactions are running on it. :param int instance_id: The instance ID with the pending transaction :param int limit: The maximum amount of seconds to wait. :param int delay: The number of seconds to sleep before checks. Defaults to 10. ] variable[now] assign[=] call[name[time].time, parameter[]] variable[until] assign[=] binary_operation[name[now] + name[limit]] variable[mask] assign[=] constant[mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]] variable[instance] assign[=] call[name[self].get_hardware, parameter[name[instance_id]]] while compare[name[now] less_or_equal[<=] name[until]] begin[:] if call[name[utils].is_ready, parameter[name[instance], name[pending]]] begin[:] return[constant[True]] variable[transaction] assign[=] call[name[utils].lookup, parameter[name[instance], constant[activeTransaction], constant[transactionStatus], constant[friendlyName]]] variable[snooze] assign[=] call[name[min], parameter[name[delay], binary_operation[name[until] - name[now]]]] call[name[LOGGER].info, parameter[constant[%s - %d not ready. Auto retry in %ds], name[transaction], name[instance_id], name[snooze]]] call[name[time].sleep, parameter[name[snooze]]] variable[instance] assign[=] call[name[self].get_hardware, parameter[name[instance_id]]] variable[now] assign[=] call[name[time].time, parameter[]] call[name[LOGGER].info, parameter[constant[Waiting for %d expired.], name[instance_id]]] return[constant[False]]
keyword[def] identifier[wait_for_ready] ( identifier[self] , identifier[instance_id] , identifier[limit] = literal[int] , identifier[delay] = literal[int] , identifier[pending] = keyword[False] ): literal[string] identifier[now] = identifier[time] . identifier[time] () identifier[until] = identifier[now] + identifier[limit] identifier[mask] = literal[string] identifier[instance] = identifier[self] . identifier[get_hardware] ( identifier[instance_id] , identifier[mask] = identifier[mask] ) keyword[while] identifier[now] <= identifier[until] : keyword[if] identifier[utils] . identifier[is_ready] ( identifier[instance] , identifier[pending] ): keyword[return] keyword[True] identifier[transaction] = identifier[utils] . identifier[lookup] ( identifier[instance] , literal[string] , literal[string] , literal[string] ) identifier[snooze] = identifier[min] ( identifier[delay] , identifier[until] - identifier[now] ) identifier[LOGGER] . identifier[info] ( literal[string] , identifier[transaction] , identifier[instance_id] , identifier[snooze] ) identifier[time] . identifier[sleep] ( identifier[snooze] ) identifier[instance] = identifier[self] . identifier[get_hardware] ( identifier[instance_id] , identifier[mask] = identifier[mask] ) identifier[now] = identifier[time] . identifier[time] () identifier[LOGGER] . identifier[info] ( literal[string] , identifier[instance_id] ) keyword[return] keyword[False]
def wait_for_ready(self, instance_id, limit=14400, delay=10, pending=False): """Determine if a Server is ready. A server is ready when no transactions are running on it. :param int instance_id: The instance ID with the pending transaction :param int limit: The maximum amount of seconds to wait. :param int delay: The number of seconds to sleep before checks. Defaults to 10. """ now = time.time() until = now + limit mask = 'mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]' instance = self.get_hardware(instance_id, mask=mask) while now <= until: if utils.is_ready(instance, pending): return True # depends on [control=['if'], data=[]] transaction = utils.lookup(instance, 'activeTransaction', 'transactionStatus', 'friendlyName') snooze = min(delay, until - now) LOGGER.info('%s - %d not ready. Auto retry in %ds', transaction, instance_id, snooze) time.sleep(snooze) instance = self.get_hardware(instance_id, mask=mask) now = time.time() # depends on [control=['while'], data=['now', 'until']] LOGGER.info('Waiting for %d expired.', instance_id) return False
def class2md(self, cls, depth=2): """Takes a class and creates markdown text to document its methods and variables. """ section = "#" * depth subsection = "#" * (depth + 2) clsname = cls.__name__ modname = cls.__module__ header = clsname path = self.get_src_path(cls) doc = self.doc2md(cls) try: init = self.func2md(cls.__init__, clsname=clsname) except (ValueError, TypeError): # this happens if __init__ is outside the repo init = "" variables = [] for name, obj in getmembers(cls, lambda a: not (inspect.isroutine(a) or inspect.ismethod(a))): if not name.startswith("_") and type(obj) == property: comments = self.doc2md(obj) or inspect.getcomments(obj) comments = "\n %s" % comments if comments else "" variables.append("\n%s %s.%s%s\n" % (subsection, clsname, name, comments)) handlers = [] for name, obj in getmembers(cls, inspect.ismethoddescriptor): if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname: handlers.append("\n%s %s.%s\n *Handler*" % (subsection, clsname, name)) methods = [] for name, obj in getmembers(cls, inspect.ismethod): if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname and name not in handlers: methods.append(self.func2md(obj, clsname=clsname, depth=depth + 1)) string = CLASS_TEMPLATE.format(section=section, header=header, path=path, doc=doc if doc else "", init=init, variables="".join(variables), handlers="".join(handlers), methods="".join(methods)) return string
def function[class2md, parameter[self, cls, depth]]: constant[Takes a class and creates markdown text to document its methods and variables. ] variable[section] assign[=] binary_operation[constant[#] * name[depth]] variable[subsection] assign[=] binary_operation[constant[#] * binary_operation[name[depth] + constant[2]]] variable[clsname] assign[=] name[cls].__name__ variable[modname] assign[=] name[cls].__module__ variable[header] assign[=] name[clsname] variable[path] assign[=] call[name[self].get_src_path, parameter[name[cls]]] variable[doc] assign[=] call[name[self].doc2md, parameter[name[cls]]] <ast.Try object at 0x7da204347280> variable[variables] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da204345f90>, <ast.Name object at 0x7da204344700>]]] in starred[call[name[getmembers], parameter[name[cls], <ast.Lambda object at 0x7da204346890>]]] begin[:] if <ast.BoolOp object at 0x7da204347700> begin[:] variable[comments] assign[=] <ast.BoolOp object at 0x7da2043444f0> variable[comments] assign[=] <ast.IfExp object at 0x7da204347c70> call[name[variables].append, parameter[binary_operation[constant[ %s %s.%s%s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204347c40>, <ast.Name object at 0x7da204347190>, <ast.Name object at 0x7da204344a90>, <ast.Name object at 0x7da204345f60>]]]]] variable[handlers] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2043470d0>, <ast.Name object at 0x7da2101f4dc0>]]] in starred[call[name[getmembers], parameter[name[cls], name[inspect].ismethoddescriptor]]] begin[:] if <ast.BoolOp object at 0x7da237d346a0> begin[:] call[name[handlers].append, parameter[binary_operation[constant[ %s %s.%s *Handler*] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812c80>, <ast.Name object at 0x7da18f8105b0>, <ast.Name object at 0x7da18f812d10>]]]]] variable[methods] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18f813d60>, <ast.Name object at 0x7da18f812830>]]] in 
starred[call[name[getmembers], parameter[name[cls], name[inspect].ismethod]]] begin[:] if <ast.BoolOp object at 0x7da18f811b40> begin[:] call[name[methods].append, parameter[call[name[self].func2md, parameter[name[obj]]]]] variable[string] assign[=] call[name[CLASS_TEMPLATE].format, parameter[]] return[name[string]]
keyword[def] identifier[class2md] ( identifier[self] , identifier[cls] , identifier[depth] = literal[int] ): literal[string] identifier[section] = literal[string] * identifier[depth] identifier[subsection] = literal[string] *( identifier[depth] + literal[int] ) identifier[clsname] = identifier[cls] . identifier[__name__] identifier[modname] = identifier[cls] . identifier[__module__] identifier[header] = identifier[clsname] identifier[path] = identifier[self] . identifier[get_src_path] ( identifier[cls] ) identifier[doc] = identifier[self] . identifier[doc2md] ( identifier[cls] ) keyword[try] : identifier[init] = identifier[self] . identifier[func2md] ( identifier[cls] . identifier[__init__] , identifier[clsname] = identifier[clsname] ) keyword[except] ( identifier[ValueError] , identifier[TypeError] ): identifier[init] = literal[string] identifier[variables] =[] keyword[for] identifier[name] , identifier[obj] keyword[in] identifier[getmembers] ( identifier[cls] , keyword[lambda] identifier[a] : keyword[not] ( identifier[inspect] . identifier[isroutine] ( identifier[a] ) keyword[or] identifier[inspect] . identifier[ismethod] ( identifier[a] ))): keyword[if] keyword[not] identifier[name] . identifier[startswith] ( literal[string] ) keyword[and] identifier[type] ( identifier[obj] )== identifier[property] : identifier[comments] = identifier[self] . identifier[doc2md] ( identifier[obj] ) keyword[or] identifier[inspect] . identifier[getcomments] ( identifier[obj] ) identifier[comments] = literal[string] % identifier[comments] keyword[if] identifier[comments] keyword[else] literal[string] identifier[variables] . identifier[append] ( literal[string] %( identifier[subsection] , identifier[clsname] , identifier[name] , identifier[comments] )) identifier[handlers] =[] keyword[for] identifier[name] , identifier[obj] keyword[in] identifier[getmembers] ( identifier[cls] , identifier[inspect] . identifier[ismethoddescriptor] ): keyword[if] keyword[not] identifier[name] . 
identifier[startswith] ( literal[string] ) keyword[and] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[obj] . identifier[__module__] == identifier[modname] : identifier[handlers] . identifier[append] ( literal[string] %( identifier[subsection] , identifier[clsname] , identifier[name] )) identifier[methods] =[] keyword[for] identifier[name] , identifier[obj] keyword[in] identifier[getmembers] ( identifier[cls] , identifier[inspect] . identifier[ismethod] ): keyword[if] keyword[not] identifier[name] . identifier[startswith] ( literal[string] ) keyword[and] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[obj] . identifier[__module__] == identifier[modname] keyword[and] identifier[name] keyword[not] keyword[in] identifier[handlers] : identifier[methods] . identifier[append] ( identifier[self] . identifier[func2md] ( identifier[obj] , identifier[clsname] = identifier[clsname] , identifier[depth] = identifier[depth] + literal[int] )) identifier[string] = identifier[CLASS_TEMPLATE] . identifier[format] ( identifier[section] = identifier[section] , identifier[header] = identifier[header] , identifier[path] = identifier[path] , identifier[doc] = identifier[doc] keyword[if] identifier[doc] keyword[else] literal[string] , identifier[init] = identifier[init] , identifier[variables] = literal[string] . identifier[join] ( identifier[variables] ), identifier[handlers] = literal[string] . identifier[join] ( identifier[handlers] ), identifier[methods] = literal[string] . identifier[join] ( identifier[methods] )) keyword[return] identifier[string]
def class2md(self, cls, depth=2): """Takes a class and creates markdown text to document its methods and variables. """ section = '#' * depth subsection = '#' * (depth + 2) clsname = cls.__name__ modname = cls.__module__ header = clsname path = self.get_src_path(cls) doc = self.doc2md(cls) try: init = self.func2md(cls.__init__, clsname=clsname) # depends on [control=['try'], data=[]] except (ValueError, TypeError): # this happens if __init__ is outside the repo init = '' # depends on [control=['except'], data=[]] variables = [] for (name, obj) in getmembers(cls, lambda a: not (inspect.isroutine(a) or inspect.ismethod(a))): if not name.startswith('_') and type(obj) == property: comments = self.doc2md(obj) or inspect.getcomments(obj) comments = '\n %s' % comments if comments else '' variables.append('\n%s %s.%s%s\n' % (subsection, clsname, name, comments)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] handlers = [] for (name, obj) in getmembers(cls, inspect.ismethoddescriptor): if not name.startswith('_') and hasattr(obj, '__module__') and (obj.__module__ == modname): handlers.append('\n%s %s.%s\n *Handler*' % (subsection, clsname, name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] methods = [] for (name, obj) in getmembers(cls, inspect.ismethod): if not name.startswith('_') and hasattr(obj, '__module__') and (obj.__module__ == modname) and (name not in handlers): methods.append(self.func2md(obj, clsname=clsname, depth=depth + 1)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] string = CLASS_TEMPLATE.format(section=section, header=header, path=path, doc=doc if doc else '', init=init, variables=''.join(variables), handlers=''.join(handlers), methods=''.join(methods)) return string
def _get_item_labels(self, item, no_from_study=False): """ Returns the labels for the XNAT subject and sessions given the frequency and provided IDs. """ subject_id = self.inv_map_subject_id(item.subject_id) visit_id = self.inv_map_visit_id(item.visit_id) subj_label, sess_label = self._get_labels( item.frequency, subject_id, visit_id) if not no_from_study and item.from_study is not None: sess_label += '_' + item.from_study return (subj_label, sess_label)
def function[_get_item_labels, parameter[self, item, no_from_study]]: constant[ Returns the labels for the XNAT subject and sessions given the frequency and provided IDs. ] variable[subject_id] assign[=] call[name[self].inv_map_subject_id, parameter[name[item].subject_id]] variable[visit_id] assign[=] call[name[self].inv_map_visit_id, parameter[name[item].visit_id]] <ast.Tuple object at 0x7da20cabedd0> assign[=] call[name[self]._get_labels, parameter[name[item].frequency, name[subject_id], name[visit_id]]] if <ast.BoolOp object at 0x7da18bc70070> begin[:] <ast.AugAssign object at 0x7da18bc71600> return[tuple[[<ast.Name object at 0x7da18bc709d0>, <ast.Name object at 0x7da18bc73940>]]]
keyword[def] identifier[_get_item_labels] ( identifier[self] , identifier[item] , identifier[no_from_study] = keyword[False] ): literal[string] identifier[subject_id] = identifier[self] . identifier[inv_map_subject_id] ( identifier[item] . identifier[subject_id] ) identifier[visit_id] = identifier[self] . identifier[inv_map_visit_id] ( identifier[item] . identifier[visit_id] ) identifier[subj_label] , identifier[sess_label] = identifier[self] . identifier[_get_labels] ( identifier[item] . identifier[frequency] , identifier[subject_id] , identifier[visit_id] ) keyword[if] keyword[not] identifier[no_from_study] keyword[and] identifier[item] . identifier[from_study] keyword[is] keyword[not] keyword[None] : identifier[sess_label] += literal[string] + identifier[item] . identifier[from_study] keyword[return] ( identifier[subj_label] , identifier[sess_label] )
def _get_item_labels(self, item, no_from_study=False): """ Returns the labels for the XNAT subject and sessions given the frequency and provided IDs. """ subject_id = self.inv_map_subject_id(item.subject_id) visit_id = self.inv_map_visit_id(item.visit_id) (subj_label, sess_label) = self._get_labels(item.frequency, subject_id, visit_id) if not no_from_study and item.from_study is not None: sess_label += '_' + item.from_study # depends on [control=['if'], data=[]] return (subj_label, sess_label)
def get_network_params(self): """ get network params """ ip = self.__address[0] mask = b'' gate = b'' cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'IPAddress\x00', 1024) if cmd_response.get('status'): ip = (self.__data.split(b'=', 1)[-1].split(b'\x00')[0]) cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'NetMask\x00', 1024) if cmd_response.get('status'): mask = (self.__data.split(b'=', 1)[-1].split(b'\x00')[0]) cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'GATEIPAddress\x00', 1024) if cmd_response.get('status'): gate = (self.__data.split(b'=', 1)[-1].split(b'\x00')[0]) return {'ip': ip.decode(), 'mask': mask.decode(), 'gateway': gate.decode()}
def function[get_network_params, parameter[self]]: constant[ get network params ] variable[ip] assign[=] call[name[self].__address][constant[0]] variable[mask] assign[=] constant[b''] variable[gate] assign[=] constant[b''] variable[cmd_response] assign[=] call[name[self].__send_command, parameter[name[const].CMD_OPTIONS_RRQ, constant[b'IPAddress\x00'], constant[1024]]] if call[name[cmd_response].get, parameter[constant[status]]] begin[:] variable[ip] assign[=] call[call[call[call[name[self].__data.split, parameter[constant[b'='], constant[1]]]][<ast.UnaryOp object at 0x7da1b1e65a20>].split, parameter[constant[b'\x00']]]][constant[0]] variable[cmd_response] assign[=] call[name[self].__send_command, parameter[name[const].CMD_OPTIONS_RRQ, constant[b'NetMask\x00'], constant[1024]]] if call[name[cmd_response].get, parameter[constant[status]]] begin[:] variable[mask] assign[=] call[call[call[call[name[self].__data.split, parameter[constant[b'='], constant[1]]]][<ast.UnaryOp object at 0x7da1b1e649a0>].split, parameter[constant[b'\x00']]]][constant[0]] variable[cmd_response] assign[=] call[name[self].__send_command, parameter[name[const].CMD_OPTIONS_RRQ, constant[b'GATEIPAddress\x00'], constant[1024]]] if call[name[cmd_response].get, parameter[constant[status]]] begin[:] variable[gate] assign[=] call[call[call[call[name[self].__data.split, parameter[constant[b'='], constant[1]]]][<ast.UnaryOp object at 0x7da1b1e675b0>].split, parameter[constant[b'\x00']]]][constant[0]] return[dictionary[[<ast.Constant object at 0x7da1b1e66ad0>, <ast.Constant object at 0x7da1b1e65900>, <ast.Constant object at 0x7da1b1e67a60>], [<ast.Call object at 0x7da1b1e64700>, <ast.Call object at 0x7da1b1e648e0>, <ast.Call object at 0x7da1b1e66a40>]]]
keyword[def] identifier[get_network_params] ( identifier[self] ): literal[string] identifier[ip] = identifier[self] . identifier[__address] [ literal[int] ] identifier[mask] = literal[string] identifier[gate] = literal[string] identifier[cmd_response] = identifier[self] . identifier[__send_command] ( identifier[const] . identifier[CMD_OPTIONS_RRQ] , literal[string] , literal[int] ) keyword[if] identifier[cmd_response] . identifier[get] ( literal[string] ): identifier[ip] =( identifier[self] . identifier[__data] . identifier[split] ( literal[string] , literal[int] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]) identifier[cmd_response] = identifier[self] . identifier[__send_command] ( identifier[const] . identifier[CMD_OPTIONS_RRQ] , literal[string] , literal[int] ) keyword[if] identifier[cmd_response] . identifier[get] ( literal[string] ): identifier[mask] =( identifier[self] . identifier[__data] . identifier[split] ( literal[string] , literal[int] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]) identifier[cmd_response] = identifier[self] . identifier[__send_command] ( identifier[const] . identifier[CMD_OPTIONS_RRQ] , literal[string] , literal[int] ) keyword[if] identifier[cmd_response] . identifier[get] ( literal[string] ): identifier[gate] =( identifier[self] . identifier[__data] . identifier[split] ( literal[string] , literal[int] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]) keyword[return] { literal[string] : identifier[ip] . identifier[decode] (), literal[string] : identifier[mask] . identifier[decode] (), literal[string] : identifier[gate] . identifier[decode] ()}
def get_network_params(self): """ get network params """ ip = self.__address[0] mask = b'' gate = b'' cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'IPAddress\x00', 1024) if cmd_response.get('status'): ip = self.__data.split(b'=', 1)[-1].split(b'\x00')[0] # depends on [control=['if'], data=[]] cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'NetMask\x00', 1024) if cmd_response.get('status'): mask = self.__data.split(b'=', 1)[-1].split(b'\x00')[0] # depends on [control=['if'], data=[]] cmd_response = self.__send_command(const.CMD_OPTIONS_RRQ, b'GATEIPAddress\x00', 1024) if cmd_response.get('status'): gate = self.__data.split(b'=', 1)[-1].split(b'\x00')[0] # depends on [control=['if'], data=[]] return {'ip': ip.decode(), 'mask': mask.decode(), 'gateway': gate.decode()}
def format_time(time_string): ''' Format time string with invalid time elements in hours/minutes/seconds Format for the timestring needs to be "%Y_%m_%d_%H_%M_%S" e.g. 2014_03_31_24_10_11 => 2014_04_01_00_10_11 ''' subseconds = False data = time_string.split("_") hours, minutes, seconds = int(data[3]), int(data[4]), int(data[5]) date = datetime.datetime.strptime("_".join(data[:3]), "%Y_%m_%d") subsec = 0.0 if len(data) == 7: if float(data[6]) != 0: subsec = float(data[6]) / 10**len(data[6]) subseconds = True date_time = date + \ datetime.timedelta(hours=hours, minutes=minutes, seconds=seconds + subsec) return date_time, subseconds
def function[format_time, parameter[time_string]]: constant[ Format time string with invalid time elements in hours/minutes/seconds Format for the timestring needs to be "%Y_%m_%d_%H_%M_%S" e.g. 2014_03_31_24_10_11 => 2014_04_01_00_10_11 ] variable[subseconds] assign[=] constant[False] variable[data] assign[=] call[name[time_string].split, parameter[constant[_]]] <ast.Tuple object at 0x7da20c993df0> assign[=] tuple[[<ast.Call object at 0x7da20c9927a0>, <ast.Call object at 0x7da20c992dd0>, <ast.Call object at 0x7da20c992ad0>]] variable[date] assign[=] call[name[datetime].datetime.strptime, parameter[call[constant[_].join, parameter[call[name[data]][<ast.Slice object at 0x7da20c992b90>]]], constant[%Y_%m_%d]]] variable[subsec] assign[=] constant[0.0] if compare[call[name[len], parameter[name[data]]] equal[==] constant[7]] begin[:] if compare[call[name[float], parameter[call[name[data]][constant[6]]]] not_equal[!=] constant[0]] begin[:] variable[subsec] assign[=] binary_operation[call[name[float], parameter[call[name[data]][constant[6]]]] / binary_operation[constant[10] ** call[name[len], parameter[call[name[data]][constant[6]]]]]] variable[subseconds] assign[=] constant[True] variable[date_time] assign[=] binary_operation[name[date] + call[name[datetime].timedelta, parameter[]]] return[tuple[[<ast.Name object at 0x7da20c991240>, <ast.Name object at 0x7da20c9914e0>]]]
keyword[def] identifier[format_time] ( identifier[time_string] ): literal[string] identifier[subseconds] = keyword[False] identifier[data] = identifier[time_string] . identifier[split] ( literal[string] ) identifier[hours] , identifier[minutes] , identifier[seconds] = identifier[int] ( identifier[data] [ literal[int] ]), identifier[int] ( identifier[data] [ literal[int] ]), identifier[int] ( identifier[data] [ literal[int] ]) identifier[date] = identifier[datetime] . identifier[datetime] . identifier[strptime] ( literal[string] . identifier[join] ( identifier[data] [: literal[int] ]), literal[string] ) identifier[subsec] = literal[int] keyword[if] identifier[len] ( identifier[data] )== literal[int] : keyword[if] identifier[float] ( identifier[data] [ literal[int] ])!= literal[int] : identifier[subsec] = identifier[float] ( identifier[data] [ literal[int] ])/ literal[int] ** identifier[len] ( identifier[data] [ literal[int] ]) identifier[subseconds] = keyword[True] identifier[date_time] = identifier[date] + identifier[datetime] . identifier[timedelta] ( identifier[hours] = identifier[hours] , identifier[minutes] = identifier[minutes] , identifier[seconds] = identifier[seconds] + identifier[subsec] ) keyword[return] identifier[date_time] , identifier[subseconds]
def format_time(time_string): """ Format time string with invalid time elements in hours/minutes/seconds Format for the timestring needs to be "%Y_%m_%d_%H_%M_%S" e.g. 2014_03_31_24_10_11 => 2014_04_01_00_10_11 """ subseconds = False data = time_string.split('_') (hours, minutes, seconds) = (int(data[3]), int(data[4]), int(data[5])) date = datetime.datetime.strptime('_'.join(data[:3]), '%Y_%m_%d') subsec = 0.0 if len(data) == 7: if float(data[6]) != 0: subsec = float(data[6]) / 10 ** len(data[6]) subseconds = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] date_time = date + datetime.timedelta(hours=hours, minutes=minutes, seconds=seconds + subsec) return (date_time, subseconds)
def fromtree(cls, tree): """ Create a METS from an ElementTree or Element. :param ElementTree tree: ElementTree to build a METS document from. """ mets = cls() mets.tree = tree mets._parse_tree(tree) return mets
def function[fromtree, parameter[cls, tree]]: constant[ Create a METS from an ElementTree or Element. :param ElementTree tree: ElementTree to build a METS document from. ] variable[mets] assign[=] call[name[cls], parameter[]] name[mets].tree assign[=] name[tree] call[name[mets]._parse_tree, parameter[name[tree]]] return[name[mets]]
keyword[def] identifier[fromtree] ( identifier[cls] , identifier[tree] ): literal[string] identifier[mets] = identifier[cls] () identifier[mets] . identifier[tree] = identifier[tree] identifier[mets] . identifier[_parse_tree] ( identifier[tree] ) keyword[return] identifier[mets]
def fromtree(cls, tree): """ Create a METS from an ElementTree or Element. :param ElementTree tree: ElementTree to build a METS document from. """ mets = cls() mets.tree = tree mets._parse_tree(tree) return mets
def finalize(self): """Output the default sprite names found in the project.""" print('{} default sprite names found:'.format(self.total_default)) for name in self.list_default: print(name)
def function[finalize, parameter[self]]: constant[Output the default sprite names found in the project.] call[name[print], parameter[call[constant[{} default sprite names found:].format, parameter[name[self].total_default]]]] for taget[name[name]] in starred[name[self].list_default] begin[:] call[name[print], parameter[name[name]]]
keyword[def] identifier[finalize] ( identifier[self] ): literal[string] identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[total_default] )) keyword[for] identifier[name] keyword[in] identifier[self] . identifier[list_default] : identifier[print] ( identifier[name] )
def finalize(self): """Output the default sprite names found in the project.""" print('{} default sprite names found:'.format(self.total_default)) for name in self.list_default: print(name) # depends on [control=['for'], data=['name']]
def visible_devices(self): """Unify all visible devices across all connected adapters Returns: dict: A dictionary mapping UUIDs to device information dictionaries """ devs = {} for device_id, adapters in self._devices.items(): dev = None max_signal = None best_adapter = None for adapter_id, devinfo in adapters.items(): connstring = "adapter/{0}/{1}".format(adapter_id, devinfo['connection_string']) if dev is None: dev = copy.deepcopy(devinfo) del dev['connection_string'] if 'adapters' not in dev: dev['adapters'] = [] best_adapter = adapter_id dev['adapters'].append((adapter_id, devinfo['signal_strength'], connstring)) if max_signal is None: max_signal = devinfo['signal_strength'] elif devinfo['signal_strength'] > max_signal: max_signal = devinfo['signal_strength'] best_adapter = adapter_id # If device has been seen in no adapters, it will get expired # don't return it if dev is None: continue dev['connection_string'] = "device/%x" % dev['uuid'] dev['adapters'] = sorted(dev['adapters'], key=lambda x: x[1], reverse=True) dev['best_adapter'] = best_adapter dev['signal_strength'] = max_signal devs[device_id] = dev return devs
def function[visible_devices, parameter[self]]: constant[Unify all visible devices across all connected adapters Returns: dict: A dictionary mapping UUIDs to device information dictionaries ] variable[devs] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da204622e60>, <ast.Name object at 0x7da204620a00>]]] in starred[call[name[self]._devices.items, parameter[]]] begin[:] variable[dev] assign[=] constant[None] variable[max_signal] assign[=] constant[None] variable[best_adapter] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da204620310>, <ast.Name object at 0x7da204622e00>]]] in starred[call[name[adapters].items, parameter[]]] begin[:] variable[connstring] assign[=] call[constant[adapter/{0}/{1}].format, parameter[name[adapter_id], call[name[devinfo]][constant[connection_string]]]] if compare[name[dev] is constant[None]] begin[:] variable[dev] assign[=] call[name[copy].deepcopy, parameter[name[devinfo]]] <ast.Delete object at 0x7da204621cc0> if compare[constant[adapters] <ast.NotIn object at 0x7da2590d7190> name[dev]] begin[:] call[name[dev]][constant[adapters]] assign[=] list[[]] variable[best_adapter] assign[=] name[adapter_id] call[call[name[dev]][constant[adapters]].append, parameter[tuple[[<ast.Name object at 0x7da2046226e0>, <ast.Subscript object at 0x7da204620eb0>, <ast.Name object at 0x7da204620be0>]]]] if compare[name[max_signal] is constant[None]] begin[:] variable[max_signal] assign[=] call[name[devinfo]][constant[signal_strength]] if compare[name[dev] is constant[None]] begin[:] continue call[name[dev]][constant[connection_string]] assign[=] binary_operation[constant[device/%x] <ast.Mod object at 0x7da2590d6920> call[name[dev]][constant[uuid]]] call[name[dev]][constant[adapters]] assign[=] call[name[sorted], parameter[call[name[dev]][constant[adapters]]]] call[name[dev]][constant[best_adapter]] assign[=] name[best_adapter] call[name[dev]][constant[signal_strength]] assign[=] name[max_signal] 
call[name[devs]][name[device_id]] assign[=] name[dev] return[name[devs]]
keyword[def] identifier[visible_devices] ( identifier[self] ): literal[string] identifier[devs] ={} keyword[for] identifier[device_id] , identifier[adapters] keyword[in] identifier[self] . identifier[_devices] . identifier[items] (): identifier[dev] = keyword[None] identifier[max_signal] = keyword[None] identifier[best_adapter] = keyword[None] keyword[for] identifier[adapter_id] , identifier[devinfo] keyword[in] identifier[adapters] . identifier[items] (): identifier[connstring] = literal[string] . identifier[format] ( identifier[adapter_id] , identifier[devinfo] [ literal[string] ]) keyword[if] identifier[dev] keyword[is] keyword[None] : identifier[dev] = identifier[copy] . identifier[deepcopy] ( identifier[devinfo] ) keyword[del] identifier[dev] [ literal[string] ] keyword[if] literal[string] keyword[not] keyword[in] identifier[dev] : identifier[dev] [ literal[string] ]=[] identifier[best_adapter] = identifier[adapter_id] identifier[dev] [ literal[string] ]. identifier[append] (( identifier[adapter_id] , identifier[devinfo] [ literal[string] ], identifier[connstring] )) keyword[if] identifier[max_signal] keyword[is] keyword[None] : identifier[max_signal] = identifier[devinfo] [ literal[string] ] keyword[elif] identifier[devinfo] [ literal[string] ]> identifier[max_signal] : identifier[max_signal] = identifier[devinfo] [ literal[string] ] identifier[best_adapter] = identifier[adapter_id] keyword[if] identifier[dev] keyword[is] keyword[None] : keyword[continue] identifier[dev] [ literal[string] ]= literal[string] % identifier[dev] [ literal[string] ] identifier[dev] [ literal[string] ]= identifier[sorted] ( identifier[dev] [ literal[string] ], identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] ) identifier[dev] [ literal[string] ]= identifier[best_adapter] identifier[dev] [ literal[string] ]= identifier[max_signal] identifier[devs] [ identifier[device_id] ]= identifier[dev] keyword[return] 
identifier[devs]
def visible_devices(self): """Unify all visible devices across all connected adapters Returns: dict: A dictionary mapping UUIDs to device information dictionaries """ devs = {} for (device_id, adapters) in self._devices.items(): dev = None max_signal = None best_adapter = None for (adapter_id, devinfo) in adapters.items(): connstring = 'adapter/{0}/{1}'.format(adapter_id, devinfo['connection_string']) if dev is None: dev = copy.deepcopy(devinfo) del dev['connection_string'] # depends on [control=['if'], data=['dev']] if 'adapters' not in dev: dev['adapters'] = [] best_adapter = adapter_id # depends on [control=['if'], data=['dev']] dev['adapters'].append((adapter_id, devinfo['signal_strength'], connstring)) if max_signal is None: max_signal = devinfo['signal_strength'] # depends on [control=['if'], data=['max_signal']] elif devinfo['signal_strength'] > max_signal: max_signal = devinfo['signal_strength'] best_adapter = adapter_id # depends on [control=['if'], data=['max_signal']] # depends on [control=['for'], data=[]] # If device has been seen in no adapters, it will get expired # don't return it if dev is None: continue # depends on [control=['if'], data=[]] dev['connection_string'] = 'device/%x' % dev['uuid'] dev['adapters'] = sorted(dev['adapters'], key=lambda x: x[1], reverse=True) dev['best_adapter'] = best_adapter dev['signal_strength'] = max_signal devs[device_id] = dev # depends on [control=['for'], data=[]] return devs
def imresize(img, size, interpolate="bilinear", channel_first=False, **kwargs): """ Resize ``img`` to ``size``. As default, the shape of input image has to be (height, width, channel). Args: img (numpy.ndarray): Input image. size (tuple of int): Output shape. The order is (width, height). interpolate (str): Interpolation method. This argument is depend on the backend. If you want to specify this argument, you should pay much attention to which backend you use now. What you can select is below: - pil backend: ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. - cv2 backend: ["nearest", "bilinear", "bicubic", "lanczos"]. Default is "bilinear" for both backends. channel_first (bool): If True, the shape of the output array is (channel, height, width) for RGB image. Default is False. Returns: numpy.ndarray """ return backend_manager.module.imresize(img, size, interpolate=interpolate, channel_first=channel_first, **kwargs)
def function[imresize, parameter[img, size, interpolate, channel_first]]: constant[ Resize ``img`` to ``size``. As default, the shape of input image has to be (height, width, channel). Args: img (numpy.ndarray): Input image. size (tuple of int): Output shape. The order is (width, height). interpolate (str): Interpolation method. This argument is depend on the backend. If you want to specify this argument, you should pay much attention to which backend you use now. What you can select is below: - pil backend: ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. - cv2 backend: ["nearest", "bilinear", "bicubic", "lanczos"]. Default is "bilinear" for both backends. channel_first (bool): If True, the shape of the output array is (channel, height, width) for RGB image. Default is False. Returns: numpy.ndarray ] return[call[name[backend_manager].module.imresize, parameter[name[img], name[size]]]]
keyword[def] identifier[imresize] ( identifier[img] , identifier[size] , identifier[interpolate] = literal[string] , identifier[channel_first] = keyword[False] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[backend_manager] . identifier[module] . identifier[imresize] ( identifier[img] , identifier[size] , identifier[interpolate] = identifier[interpolate] , identifier[channel_first] = identifier[channel_first] ,** identifier[kwargs] )
def imresize(img, size, interpolate='bilinear', channel_first=False, **kwargs): """ Resize ``img`` to ``size``. As default, the shape of input image has to be (height, width, channel). Args: img (numpy.ndarray): Input image. size (tuple of int): Output shape. The order is (width, height). interpolate (str): Interpolation method. This argument is depend on the backend. If you want to specify this argument, you should pay much attention to which backend you use now. What you can select is below: - pil backend: ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"]. - cv2 backend: ["nearest", "bilinear", "bicubic", "lanczos"]. Default is "bilinear" for both backends. channel_first (bool): If True, the shape of the output array is (channel, height, width) for RGB image. Default is False. Returns: numpy.ndarray """ return backend_manager.module.imresize(img, size, interpolate=interpolate, channel_first=channel_first, **kwargs)
def forward(A, pobs, pi, T=None, alpha_out=None): """Compute P( obs | A, B, pi ) and all forward coefficients. Parameters ---------- A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i pi : ndarray((N), dtype = float) initial distribution of hidden states T : int, optional, default = None trajectory length. If not given, T = pobs.shape[0] will be used. alpha_out : ndarray((T,N), dtype = float), optional, default = None containter for the alpha result variables. If None, a new container will be created. Returns ------- logprob : float The probability to observe the sequence `ob` with the model given by `A`, `B` and `pi`. alpha : ndarray((T,N), dtype = float), optional, default = None alpha[t,i] is the ith forward coefficient of time t. These can be used in many different algorithms related to HMMs. """ if __impl__ == __IMPL_PYTHON__: return ip.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=config.dtype) elif __impl__ == __IMPL_C__: return ic.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=config.dtype) else: raise RuntimeError('Nonexisting implementation selected: '+str(__impl__))
def function[forward, parameter[A, pobs, pi, T, alpha_out]]: constant[Compute P( obs | A, B, pi ) and all forward coefficients. Parameters ---------- A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i pi : ndarray((N), dtype = float) initial distribution of hidden states T : int, optional, default = None trajectory length. If not given, T = pobs.shape[0] will be used. alpha_out : ndarray((T,N), dtype = float), optional, default = None containter for the alpha result variables. If None, a new container will be created. Returns ------- logprob : float The probability to observe the sequence `ob` with the model given by `A`, `B` and `pi`. alpha : ndarray((T,N), dtype = float), optional, default = None alpha[t,i] is the ith forward coefficient of time t. These can be used in many different algorithms related to HMMs. ] if compare[name[__impl__] equal[==] name[__IMPL_PYTHON__]] begin[:] return[call[name[ip].forward, parameter[name[A], name[pobs], name[pi]]]]
keyword[def] identifier[forward] ( identifier[A] , identifier[pobs] , identifier[pi] , identifier[T] = keyword[None] , identifier[alpha_out] = keyword[None] ): literal[string] keyword[if] identifier[__impl__] == identifier[__IMPL_PYTHON__] : keyword[return] identifier[ip] . identifier[forward] ( identifier[A] , identifier[pobs] , identifier[pi] , identifier[T] = identifier[T] , identifier[alpha_out] = identifier[alpha_out] , identifier[dtype] = identifier[config] . identifier[dtype] ) keyword[elif] identifier[__impl__] == identifier[__IMPL_C__] : keyword[return] identifier[ic] . identifier[forward] ( identifier[A] , identifier[pobs] , identifier[pi] , identifier[T] = identifier[T] , identifier[alpha_out] = identifier[alpha_out] , identifier[dtype] = identifier[config] . identifier[dtype] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] + identifier[str] ( identifier[__impl__] ))
def forward(A, pobs, pi, T=None, alpha_out=None): """Compute P( obs | A, B, pi ) and all forward coefficients. Parameters ---------- A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i pi : ndarray((N), dtype = float) initial distribution of hidden states T : int, optional, default = None trajectory length. If not given, T = pobs.shape[0] will be used. alpha_out : ndarray((T,N), dtype = float), optional, default = None containter for the alpha result variables. If None, a new container will be created. Returns ------- logprob : float The probability to observe the sequence `ob` with the model given by `A`, `B` and `pi`. alpha : ndarray((T,N), dtype = float), optional, default = None alpha[t,i] is the ith forward coefficient of time t. These can be used in many different algorithms related to HMMs. """ if __impl__ == __IMPL_PYTHON__: return ip.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=config.dtype) # depends on [control=['if'], data=[]] elif __impl__ == __IMPL_C__: return ic.forward(A, pobs, pi, T=T, alpha_out=alpha_out, dtype=config.dtype) # depends on [control=['if'], data=[]] else: raise RuntimeError('Nonexisting implementation selected: ' + str(__impl__))
def _unpack_images(self, rdata): """ Set image data from RESTBase response """ image = rdata.get('image') # /page/mobile-sections-lead originalimage = rdata.get('originalimage') # /page/summary thumbnail = rdata.get('thumbnail') # /page/summary if image or originalimage or thumbnail: if 'image' not in self.data: self.data['image'] = [] def file_url(info): """ put image source in url and set file key """ if 'source' in info: info['url'] = info['source'] info['file'] = info['source'].split('/')[-1] del info['source'] return info if image: img = {'kind': 'restbase-image'} img.update(image) self.data['image'].append(file_url(img)) if originalimage: img = {'kind': 'restbase-original'} img.update(originalimage) self.data['image'].append(file_url(img)) if thumbnail: img = {'kind': 'restbase-thumb'} img.update(thumbnail) self.data['image'].append(file_url(img))
def function[_unpack_images, parameter[self, rdata]]: constant[ Set image data from RESTBase response ] variable[image] assign[=] call[name[rdata].get, parameter[constant[image]]] variable[originalimage] assign[=] call[name[rdata].get, parameter[constant[originalimage]]] variable[thumbnail] assign[=] call[name[rdata].get, parameter[constant[thumbnail]]] if <ast.BoolOp object at 0x7da1b138d5a0> begin[:] if compare[constant[image] <ast.NotIn object at 0x7da2590d7190> name[self].data] begin[:] call[name[self].data][constant[image]] assign[=] list[[]] def function[file_url, parameter[info]]: constant[ put image source in url and set file key ] if compare[constant[source] in name[info]] begin[:] call[name[info]][constant[url]] assign[=] call[name[info]][constant[source]] call[name[info]][constant[file]] assign[=] call[call[call[name[info]][constant[source]].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b1295de0>] <ast.Delete object at 0x7da1b1295990> return[name[info]] if name[image] begin[:] variable[img] assign[=] dictionary[[<ast.Constant object at 0x7da1b1294190>], [<ast.Constant object at 0x7da1b12953c0>]] call[name[img].update, parameter[name[image]]] call[call[name[self].data][constant[image]].append, parameter[call[name[file_url], parameter[name[img]]]]] if name[originalimage] begin[:] variable[img] assign[=] dictionary[[<ast.Constant object at 0x7da1b1296530>], [<ast.Constant object at 0x7da1b1297c10>]] call[name[img].update, parameter[name[originalimage]]] call[call[name[self].data][constant[image]].append, parameter[call[name[file_url], parameter[name[img]]]]] if name[thumbnail] begin[:] variable[img] assign[=] dictionary[[<ast.Constant object at 0x7da1b1294b20>], [<ast.Constant object at 0x7da1b1297a30>]] call[name[img].update, parameter[name[thumbnail]]] call[call[name[self].data][constant[image]].append, parameter[call[name[file_url], parameter[name[img]]]]]
keyword[def] identifier[_unpack_images] ( identifier[self] , identifier[rdata] ): literal[string] identifier[image] = identifier[rdata] . identifier[get] ( literal[string] ) identifier[originalimage] = identifier[rdata] . identifier[get] ( literal[string] ) identifier[thumbnail] = identifier[rdata] . identifier[get] ( literal[string] ) keyword[if] identifier[image] keyword[or] identifier[originalimage] keyword[or] identifier[thumbnail] : keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[data] : identifier[self] . identifier[data] [ literal[string] ]=[] keyword[def] identifier[file_url] ( identifier[info] ): literal[string] keyword[if] literal[string] keyword[in] identifier[info] : identifier[info] [ literal[string] ]= identifier[info] [ literal[string] ] identifier[info] [ literal[string] ]= identifier[info] [ literal[string] ]. identifier[split] ( literal[string] )[- literal[int] ] keyword[del] identifier[info] [ literal[string] ] keyword[return] identifier[info] keyword[if] identifier[image] : identifier[img] ={ literal[string] : literal[string] } identifier[img] . identifier[update] ( identifier[image] ) identifier[self] . identifier[data] [ literal[string] ]. identifier[append] ( identifier[file_url] ( identifier[img] )) keyword[if] identifier[originalimage] : identifier[img] ={ literal[string] : literal[string] } identifier[img] . identifier[update] ( identifier[originalimage] ) identifier[self] . identifier[data] [ literal[string] ]. identifier[append] ( identifier[file_url] ( identifier[img] )) keyword[if] identifier[thumbnail] : identifier[img] ={ literal[string] : literal[string] } identifier[img] . identifier[update] ( identifier[thumbnail] ) identifier[self] . identifier[data] [ literal[string] ]. identifier[append] ( identifier[file_url] ( identifier[img] ))
def _unpack_images(self, rdata): """ Set image data from RESTBase response """ image = rdata.get('image') # /page/mobile-sections-lead originalimage = rdata.get('originalimage') # /page/summary thumbnail = rdata.get('thumbnail') # /page/summary if image or originalimage or thumbnail: if 'image' not in self.data: self.data['image'] = [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] def file_url(info): """ put image source in url and set file key """ if 'source' in info: info['url'] = info['source'] info['file'] = info['source'].split('/')[-1] del info['source'] # depends on [control=['if'], data=['info']] return info if image: img = {'kind': 'restbase-image'} img.update(image) self.data['image'].append(file_url(img)) # depends on [control=['if'], data=[]] if originalimage: img = {'kind': 'restbase-original'} img.update(originalimage) self.data['image'].append(file_url(img)) # depends on [control=['if'], data=[]] if thumbnail: img = {'kind': 'restbase-thumb'} img.update(thumbnail) self.data['image'].append(file_url(img)) # depends on [control=['if'], data=[]]
def _set_manifest_data(self, files_list): """ Write manifest files :param files_list: list :return: """ if files_list: data = ",".join(files_list) self.s3.put_object(Bucket=self.sitename, Key=self.manifest_file, Body=data, ACL='private')
def function[_set_manifest_data, parameter[self, files_list]]: constant[ Write manifest files :param files_list: list :return: ] if name[files_list] begin[:] variable[data] assign[=] call[constant[,].join, parameter[name[files_list]]] call[name[self].s3.put_object, parameter[]]
keyword[def] identifier[_set_manifest_data] ( identifier[self] , identifier[files_list] ): literal[string] keyword[if] identifier[files_list] : identifier[data] = literal[string] . identifier[join] ( identifier[files_list] ) identifier[self] . identifier[s3] . identifier[put_object] ( identifier[Bucket] = identifier[self] . identifier[sitename] , identifier[Key] = identifier[self] . identifier[manifest_file] , identifier[Body] = identifier[data] , identifier[ACL] = literal[string] )
def _set_manifest_data(self, files_list): """ Write manifest files :param files_list: list :return: """ if files_list: data = ','.join(files_list) self.s3.put_object(Bucket=self.sitename, Key=self.manifest_file, Body=data, ACL='private') # depends on [control=['if'], data=[]]
def post_signup(self, user, login_user=None, send_email=None): """Executes post signup actions: sending the signal, logging in the user and sending the welcome email """ self.signup_signal.send(self, user=user) if (login_user is None and self.options["login_user_on_signup"]) or login_user: self._login(user, user.signup_provider) to_email = getattr(user, self.options["email_column"], None) if to_email and ((send_email is None and self.options["send_welcome_email"]) or send_email): template = "users/welcome.txt" if self.options["send_welcome_email"] == True else self.options["send_welcome_email"] current_app.features.emails.send(to_email, template, user=user)
def function[post_signup, parameter[self, user, login_user, send_email]]: constant[Executes post signup actions: sending the signal, logging in the user and sending the welcome email ] call[name[self].signup_signal.send, parameter[name[self]]] if <ast.BoolOp object at 0x7da18f09dc00> begin[:] call[name[self]._login, parameter[name[user], name[user].signup_provider]] variable[to_email] assign[=] call[name[getattr], parameter[name[user], call[name[self].options][constant[email_column]], constant[None]]] if <ast.BoolOp object at 0x7da18f09f880> begin[:] variable[template] assign[=] <ast.IfExp object at 0x7da18f09cdc0> call[name[current_app].features.emails.send, parameter[name[to_email], name[template]]]
keyword[def] identifier[post_signup] ( identifier[self] , identifier[user] , identifier[login_user] = keyword[None] , identifier[send_email] = keyword[None] ): literal[string] identifier[self] . identifier[signup_signal] . identifier[send] ( identifier[self] , identifier[user] = identifier[user] ) keyword[if] ( identifier[login_user] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[options] [ literal[string] ]) keyword[or] identifier[login_user] : identifier[self] . identifier[_login] ( identifier[user] , identifier[user] . identifier[signup_provider] ) identifier[to_email] = identifier[getattr] ( identifier[user] , identifier[self] . identifier[options] [ literal[string] ], keyword[None] ) keyword[if] identifier[to_email] keyword[and] (( identifier[send_email] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[options] [ literal[string] ]) keyword[or] identifier[send_email] ): identifier[template] = literal[string] keyword[if] identifier[self] . identifier[options] [ literal[string] ]== keyword[True] keyword[else] identifier[self] . identifier[options] [ literal[string] ] identifier[current_app] . identifier[features] . identifier[emails] . identifier[send] ( identifier[to_email] , identifier[template] , identifier[user] = identifier[user] )
def post_signup(self, user, login_user=None, send_email=None): """Executes post signup actions: sending the signal, logging in the user and sending the welcome email """ self.signup_signal.send(self, user=user) if login_user is None and self.options['login_user_on_signup'] or login_user: self._login(user, user.signup_provider) # depends on [control=['if'], data=[]] to_email = getattr(user, self.options['email_column'], None) if to_email and (send_email is None and self.options['send_welcome_email'] or send_email): template = 'users/welcome.txt' if self.options['send_welcome_email'] == True else self.options['send_welcome_email'] current_app.features.emails.send(to_email, template, user=user) # depends on [control=['if'], data=[]]
def get_release_id(self, package_name: str, version: str) -> bytes: """ Returns the 32 byte identifier of a release for the given package name and version, if they are available on the current registry. """ validate_package_name(package_name) validate_package_version(version) self._validate_set_registry() return self.registry._get_release_id(package_name, version)
def function[get_release_id, parameter[self, package_name, version]]: constant[ Returns the 32 byte identifier of a release for the given package name and version, if they are available on the current registry. ] call[name[validate_package_name], parameter[name[package_name]]] call[name[validate_package_version], parameter[name[version]]] call[name[self]._validate_set_registry, parameter[]] return[call[name[self].registry._get_release_id, parameter[name[package_name], name[version]]]]
keyword[def] identifier[get_release_id] ( identifier[self] , identifier[package_name] : identifier[str] , identifier[version] : identifier[str] )-> identifier[bytes] : literal[string] identifier[validate_package_name] ( identifier[package_name] ) identifier[validate_package_version] ( identifier[version] ) identifier[self] . identifier[_validate_set_registry] () keyword[return] identifier[self] . identifier[registry] . identifier[_get_release_id] ( identifier[package_name] , identifier[version] )
def get_release_id(self, package_name: str, version: str) -> bytes: """ Returns the 32 byte identifier of a release for the given package name and version, if they are available on the current registry. """ validate_package_name(package_name) validate_package_version(version) self._validate_set_registry() return self.registry._get_release_id(package_name, version)
def _show(self): """ Return a list of unsorted bridge details. """ p = _runshell([brctlexe, 'show', self.name], "Could not show %s." % self.name) return p.stdout.read().split()[7:]
def function[_show, parameter[self]]: constant[ Return a list of unsorted bridge details. ] variable[p] assign[=] call[name[_runshell], parameter[list[[<ast.Name object at 0x7da1b0ce4310>, <ast.Constant object at 0x7da1b0ce7880>, <ast.Attribute object at 0x7da1b0ce44f0>]], binary_operation[constant[Could not show %s.] <ast.Mod object at 0x7da2590d6920> name[self].name]]] return[call[call[call[name[p].stdout.read, parameter[]].split, parameter[]]][<ast.Slice object at 0x7da1b0ce4220>]]
keyword[def] identifier[_show] ( identifier[self] ): literal[string] identifier[p] = identifier[_runshell] ([ identifier[brctlexe] , literal[string] , identifier[self] . identifier[name] ], literal[string] % identifier[self] . identifier[name] ) keyword[return] identifier[p] . identifier[stdout] . identifier[read] (). identifier[split] ()[ literal[int] :]
def _show(self): """ Return a list of unsorted bridge details. """ p = _runshell([brctlexe, 'show', self.name], 'Could not show %s.' % self.name) return p.stdout.read().split()[7:]
def get_ccc_handle_from_uuid(self, uuid): """Utility function to retrieve the client characteristic configuration descriptor handle for a given characteristic. Args: uuid (str): a string containing the hex-encoded UUID Returns: None if an error occurs, otherwise an integer handle. """ if uuid in self.uuid_cccds: return self.uuid_cccds[uuid].handle char = self.get_characteristic_from_uuid(uuid) if char is None: return None ccc = char.get_descriptor_by_uuid(UUID_GATT_CCC) if ccc is not None: self.uuid_cccds[uuid] = ccc return None if ccc is None else ccc.handle
def function[get_ccc_handle_from_uuid, parameter[self, uuid]]: constant[Utility function to retrieve the client characteristic configuration descriptor handle for a given characteristic. Args: uuid (str): a string containing the hex-encoded UUID Returns: None if an error occurs, otherwise an integer handle. ] if compare[name[uuid] in name[self].uuid_cccds] begin[:] return[call[name[self].uuid_cccds][name[uuid]].handle] variable[char] assign[=] call[name[self].get_characteristic_from_uuid, parameter[name[uuid]]] if compare[name[char] is constant[None]] begin[:] return[constant[None]] variable[ccc] assign[=] call[name[char].get_descriptor_by_uuid, parameter[name[UUID_GATT_CCC]]] if compare[name[ccc] is_not constant[None]] begin[:] call[name[self].uuid_cccds][name[uuid]] assign[=] name[ccc] return[<ast.IfExp object at 0x7da1b15f7220>]
keyword[def] identifier[get_ccc_handle_from_uuid] ( identifier[self] , identifier[uuid] ): literal[string] keyword[if] identifier[uuid] keyword[in] identifier[self] . identifier[uuid_cccds] : keyword[return] identifier[self] . identifier[uuid_cccds] [ identifier[uuid] ]. identifier[handle] identifier[char] = identifier[self] . identifier[get_characteristic_from_uuid] ( identifier[uuid] ) keyword[if] identifier[char] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[ccc] = identifier[char] . identifier[get_descriptor_by_uuid] ( identifier[UUID_GATT_CCC] ) keyword[if] identifier[ccc] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[uuid_cccds] [ identifier[uuid] ]= identifier[ccc] keyword[return] keyword[None] keyword[if] identifier[ccc] keyword[is] keyword[None] keyword[else] identifier[ccc] . identifier[handle]
def get_ccc_handle_from_uuid(self, uuid): """Utility function to retrieve the client characteristic configuration descriptor handle for a given characteristic. Args: uuid (str): a string containing the hex-encoded UUID Returns: None if an error occurs, otherwise an integer handle. """ if uuid in self.uuid_cccds: return self.uuid_cccds[uuid].handle # depends on [control=['if'], data=['uuid']] char = self.get_characteristic_from_uuid(uuid) if char is None: return None # depends on [control=['if'], data=[]] ccc = char.get_descriptor_by_uuid(UUID_GATT_CCC) if ccc is not None: self.uuid_cccds[uuid] = ccc # depends on [control=['if'], data=['ccc']] return None if ccc is None else ccc.handle
def _CreateStyleForRoute(self, doc, route): """Create a KML Style element for the route. The style sets the line colour if the route colour is specified. The line thickness is set depending on the vehicle type. Args: doc: The KML Document ElementTree.Element instance. route: The transitfeed.Route to create the style for. Returns: The id of the style as a string. """ style_id = 'route_%s' % route.route_id style = ET.SubElement(doc, 'Style', {'id': style_id}) linestyle = ET.SubElement(style, 'LineStyle') width = ET.SubElement(linestyle, 'width') type_to_width = {0: '3', # Tram 1: '3', # Subway 2: '5', # Rail 3: '1'} # Bus width.text = type_to_width.get(route.route_type, '1') if route.route_color: color = ET.SubElement(linestyle, 'color') red = route.route_color[0:2].lower() green = route.route_color[2:4].lower() blue = route.route_color[4:6].lower() color.text = 'ff%s%s%s' % (blue, green, red) return style_id
def function[_CreateStyleForRoute, parameter[self, doc, route]]: constant[Create a KML Style element for the route. The style sets the line colour if the route colour is specified. The line thickness is set depending on the vehicle type. Args: doc: The KML Document ElementTree.Element instance. route: The transitfeed.Route to create the style for. Returns: The id of the style as a string. ] variable[style_id] assign[=] binary_operation[constant[route_%s] <ast.Mod object at 0x7da2590d6920> name[route].route_id] variable[style] assign[=] call[name[ET].SubElement, parameter[name[doc], constant[Style], dictionary[[<ast.Constant object at 0x7da1b17a90c0>], [<ast.Name object at 0x7da1b17ab400>]]]] variable[linestyle] assign[=] call[name[ET].SubElement, parameter[name[style], constant[LineStyle]]] variable[width] assign[=] call[name[ET].SubElement, parameter[name[linestyle], constant[width]]] variable[type_to_width] assign[=] dictionary[[<ast.Constant object at 0x7da1b17a8b20>, <ast.Constant object at 0x7da1b17a9c60>, <ast.Constant object at 0x7da1b17a9270>, <ast.Constant object at 0x7da1b17a9510>], [<ast.Constant object at 0x7da1b17ab970>, <ast.Constant object at 0x7da1b17abd60>, <ast.Constant object at 0x7da1b17abd90>, <ast.Constant object at 0x7da1b17abcd0>]] name[width].text assign[=] call[name[type_to_width].get, parameter[name[route].route_type, constant[1]]] if name[route].route_color begin[:] variable[color] assign[=] call[name[ET].SubElement, parameter[name[linestyle], constant[color]]] variable[red] assign[=] call[call[name[route].route_color][<ast.Slice object at 0x7da1b17abe20>].lower, parameter[]] variable[green] assign[=] call[call[name[route].route_color][<ast.Slice object at 0x7da1b17aa410>].lower, parameter[]] variable[blue] assign[=] call[call[name[route].route_color][<ast.Slice object at 0x7da1b17aa7d0>].lower, parameter[]] name[color].text assign[=] binary_operation[constant[ff%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 
0x7da1b17a91b0>, <ast.Name object at 0x7da1b17a8bb0>, <ast.Name object at 0x7da1b17aab60>]]] return[name[style_id]]
keyword[def] identifier[_CreateStyleForRoute] ( identifier[self] , identifier[doc] , identifier[route] ): literal[string] identifier[style_id] = literal[string] % identifier[route] . identifier[route_id] identifier[style] = identifier[ET] . identifier[SubElement] ( identifier[doc] , literal[string] ,{ literal[string] : identifier[style_id] }) identifier[linestyle] = identifier[ET] . identifier[SubElement] ( identifier[style] , literal[string] ) identifier[width] = identifier[ET] . identifier[SubElement] ( identifier[linestyle] , literal[string] ) identifier[type_to_width] ={ literal[int] : literal[string] , literal[int] : literal[string] , literal[int] : literal[string] , literal[int] : literal[string] } identifier[width] . identifier[text] = identifier[type_to_width] . identifier[get] ( identifier[route] . identifier[route_type] , literal[string] ) keyword[if] identifier[route] . identifier[route_color] : identifier[color] = identifier[ET] . identifier[SubElement] ( identifier[linestyle] , literal[string] ) identifier[red] = identifier[route] . identifier[route_color] [ literal[int] : literal[int] ]. identifier[lower] () identifier[green] = identifier[route] . identifier[route_color] [ literal[int] : literal[int] ]. identifier[lower] () identifier[blue] = identifier[route] . identifier[route_color] [ literal[int] : literal[int] ]. identifier[lower] () identifier[color] . identifier[text] = literal[string] %( identifier[blue] , identifier[green] , identifier[red] ) keyword[return] identifier[style_id]
def _CreateStyleForRoute(self, doc, route): """Create a KML Style element for the route. The style sets the line colour if the route colour is specified. The line thickness is set depending on the vehicle type. Args: doc: The KML Document ElementTree.Element instance. route: The transitfeed.Route to create the style for. Returns: The id of the style as a string. """ style_id = 'route_%s' % route.route_id style = ET.SubElement(doc, 'Style', {'id': style_id}) linestyle = ET.SubElement(style, 'LineStyle') width = ET.SubElement(linestyle, 'width') # Tram # Subway # Rail type_to_width = {0: '3', 1: '3', 2: '5', 3: '1'} # Bus width.text = type_to_width.get(route.route_type, '1') if route.route_color: color = ET.SubElement(linestyle, 'color') red = route.route_color[0:2].lower() green = route.route_color[2:4].lower() blue = route.route_color[4:6].lower() color.text = 'ff%s%s%s' % (blue, green, red) # depends on [control=['if'], data=[]] return style_id
def CreateGroup(self, GroupName): """Creates a custom contact group. :Parameters: GroupName : unicode Group name. :return: A group object. :rtype: `Group` :see: `DeleteGroup` """ groups = self.CustomGroups self._DoCommand('CREATE GROUP %s' % tounicode(GroupName)) for g in self.CustomGroups: if g not in groups and g.DisplayName == GroupName: return g raise SkypeError(0, 'Group creating failed')
def function[CreateGroup, parameter[self, GroupName]]: constant[Creates a custom contact group. :Parameters: GroupName : unicode Group name. :return: A group object. :rtype: `Group` :see: `DeleteGroup` ] variable[groups] assign[=] name[self].CustomGroups call[name[self]._DoCommand, parameter[binary_operation[constant[CREATE GROUP %s] <ast.Mod object at 0x7da2590d6920> call[name[tounicode], parameter[name[GroupName]]]]]] for taget[name[g]] in starred[name[self].CustomGroups] begin[:] if <ast.BoolOp object at 0x7da1b23457e0> begin[:] return[name[g]] <ast.Raise object at 0x7da1b2347280>
keyword[def] identifier[CreateGroup] ( identifier[self] , identifier[GroupName] ): literal[string] identifier[groups] = identifier[self] . identifier[CustomGroups] identifier[self] . identifier[_DoCommand] ( literal[string] % identifier[tounicode] ( identifier[GroupName] )) keyword[for] identifier[g] keyword[in] identifier[self] . identifier[CustomGroups] : keyword[if] identifier[g] keyword[not] keyword[in] identifier[groups] keyword[and] identifier[g] . identifier[DisplayName] == identifier[GroupName] : keyword[return] identifier[g] keyword[raise] identifier[SkypeError] ( literal[int] , literal[string] )
def CreateGroup(self, GroupName): """Creates a custom contact group. :Parameters: GroupName : unicode Group name. :return: A group object. :rtype: `Group` :see: `DeleteGroup` """ groups = self.CustomGroups self._DoCommand('CREATE GROUP %s' % tounicode(GroupName)) for g in self.CustomGroups: if g not in groups and g.DisplayName == GroupName: return g # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['g']] raise SkypeError(0, 'Group creating failed')
def _parse_repo_file(filename): ''' Turn a single repo file into a dict ''' parsed = configparser.ConfigParser() config = {} try: parsed.read(filename) except configparser.MissingSectionHeaderError as err: log.error( 'Failed to parse file %s, error: %s', filename, err.message ) return ('', {}) for section in parsed._sections: section_dict = dict(parsed._sections[section]) section_dict.pop('__name__', None) config[section] = section_dict # Try to extract header comments, as well as comments for each repo. Read # from the beginning of the file and assume any leading comments are # header comments. Continue to read each section header and then find the # comments for each repo. headers = '' section = None with salt.utils.files.fopen(filename, 'r') as repofile: for line in repofile: line = salt.utils.stringutils.to_unicode(line) line = line.strip() if line.startswith('#'): if section is None: headers += line + '\n' else: try: comments = config[section].setdefault('comments', []) comments.append(line[1:].lstrip()) except KeyError: log.debug( 'Found comment in %s which does not appear to ' 'belong to any repo section: %s', filename, line ) elif line.startswith('[') and line.endswith(']'): section = line[1:-1] return (headers, salt.utils.data.decode(config))
def function[_parse_repo_file, parameter[filename]]: constant[ Turn a single repo file into a dict ] variable[parsed] assign[=] call[name[configparser].ConfigParser, parameter[]] variable[config] assign[=] dictionary[[], []] <ast.Try object at 0x7da18f58f4c0> for taget[name[section]] in starred[name[parsed]._sections] begin[:] variable[section_dict] assign[=] call[name[dict], parameter[call[name[parsed]._sections][name[section]]]] call[name[section_dict].pop, parameter[constant[__name__], constant[None]]] call[name[config]][name[section]] assign[=] name[section_dict] variable[headers] assign[=] constant[] variable[section] assign[=] constant[None] with call[name[salt].utils.files.fopen, parameter[name[filename], constant[r]]] begin[:] for taget[name[line]] in starred[name[repofile]] begin[:] variable[line] assign[=] call[name[salt].utils.stringutils.to_unicode, parameter[name[line]]] variable[line] assign[=] call[name[line].strip, parameter[]] if call[name[line].startswith, parameter[constant[#]]] begin[:] if compare[name[section] is constant[None]] begin[:] <ast.AugAssign object at 0x7da18f58e7d0> return[tuple[[<ast.Name object at 0x7da18f58e2f0>, <ast.Call object at 0x7da18f58db70>]]]
keyword[def] identifier[_parse_repo_file] ( identifier[filename] ): literal[string] identifier[parsed] = identifier[configparser] . identifier[ConfigParser] () identifier[config] ={} keyword[try] : identifier[parsed] . identifier[read] ( identifier[filename] ) keyword[except] identifier[configparser] . identifier[MissingSectionHeaderError] keyword[as] identifier[err] : identifier[log] . identifier[error] ( literal[string] , identifier[filename] , identifier[err] . identifier[message] ) keyword[return] ( literal[string] ,{}) keyword[for] identifier[section] keyword[in] identifier[parsed] . identifier[_sections] : identifier[section_dict] = identifier[dict] ( identifier[parsed] . identifier[_sections] [ identifier[section] ]) identifier[section_dict] . identifier[pop] ( literal[string] , keyword[None] ) identifier[config] [ identifier[section] ]= identifier[section_dict] identifier[headers] = literal[string] identifier[section] = keyword[None] keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[filename] , literal[string] ) keyword[as] identifier[repofile] : keyword[for] identifier[line] keyword[in] identifier[repofile] : identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] ) identifier[line] = identifier[line] . identifier[strip] () keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[if] identifier[section] keyword[is] keyword[None] : identifier[headers] += identifier[line] + literal[string] keyword[else] : keyword[try] : identifier[comments] = identifier[config] [ identifier[section] ]. identifier[setdefault] ( literal[string] ,[]) identifier[comments] . identifier[append] ( identifier[line] [ literal[int] :]. identifier[lstrip] ()) keyword[except] identifier[KeyError] : identifier[log] . 
identifier[debug] ( literal[string] literal[string] , identifier[filename] , identifier[line] ) keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] identifier[line] . identifier[endswith] ( literal[string] ): identifier[section] = identifier[line] [ literal[int] :- literal[int] ] keyword[return] ( identifier[headers] , identifier[salt] . identifier[utils] . identifier[data] . identifier[decode] ( identifier[config] ))
def _parse_repo_file(filename):
    """
    Turn a single repo file into a dict.

    Returns a ``(headers, config)`` tuple: ``headers`` is the block of
    comment lines found before the first section header, and ``config``
    maps each section name to a dict of its options, with any per-repo
    comment lines collected under a ``comments`` key.
    """
    cfg = configparser.ConfigParser()
    repos = {}
    try:
        cfg.read(filename)
    except configparser.MissingSectionHeaderError as err:
        log.error('Failed to parse file %s, error: %s', filename, err.message)
        return ('', {})

    for sect_name in cfg._sections:
        opts = dict(cfg._sections[sect_name])
        # configparser's internal section dicts carry a '__name__' entry.
        opts.pop('__name__', None)
        repos[sect_name] = opts

    # Walk the file again to recover comments, which configparser drops.
    # Anything before the first section header counts as the file header;
    # later comment lines are attached to the section they appear under.
    headers = ''
    section = None
    with salt.utils.files.fopen(filename, 'r') as repofile:
        for raw_line in repofile:
            stripped = salt.utils.stringutils.to_unicode(raw_line).strip()
            if stripped.startswith('#'):
                if section is None:
                    headers += stripped + '\n'
                else:
                    try:
                        repos[section].setdefault('comments', []).append(
                            stripped[1:].lstrip())
                    except KeyError:
                        log.debug(
                            'Found comment in %s which does not appear to '
                            'belong to any repo section: %s',
                            filename, stripped)
            elif stripped.startswith('[') and stripped.endswith(']'):
                section = stripped[1:-1]

    return (headers, salt.utils.data.decode(repos))
def remove_my_api_key_from_groups(self, body, **kwargs):  # noqa: E501
    """Remove API key from groups.

    An endpoint for removing an API key from groups. **Example usage:**
    `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups
    -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]'
    -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'`

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.remove_my_api_key_from_groups(body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param list[str] body: A list of IDs of the groups to be updated. (required)
    :return: UpdatedResponse
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this wrapper only want the response body, never the
    # (data, status, headers) triple the *_with_http_info variant returns.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous mode: hand back the request thread immediately.
        return self.remove_my_api_key_from_groups_with_http_info(body, **kwargs)  # noqa: E501
    return self.remove_my_api_key_from_groups_with_http_info(body, **kwargs)  # noqa: E501
def function[remove_my_api_key_from_groups, parameter[self, body]]: constant[Remove API key from groups. # noqa: E501 An endpoint for removing API key from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_my_api_key_from_groups(body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:] return[call[name[self].remove_my_api_key_from_groups_with_http_info, parameter[name[body]]]]
keyword[def] identifier[remove_my_api_key_from_groups] ( identifier[self] , identifier[body] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[remove_my_api_key_from_groups_with_http_info] ( identifier[body] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[remove_my_api_key_from_groups_with_http_info] ( identifier[body] ,** identifier[kwargs] ) keyword[return] identifier[data]
def remove_my_api_key_from_groups(self, body, **kwargs): # noqa: E501 "Remove API key from groups. # noqa: E501\n\n An endpoint for removing API key from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.remove_my_api_key_from_groups(body, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param list[str] body: A list of IDs of the groups to be updated. (required)\n :return: UpdatedResponse\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.remove_my_api_key_from_groups_with_http_info(body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.remove_my_api_key_from_groups_with_http_info(body, **kwargs) # noqa: E501 return data
def get_securities(self, page=1, **filter_param):
    """
    Queries /<security_type> endpoint to return a paged list of securities.

    At most one keyword filter is honoured. The remote endpoints silently
    ignore unknown query params, so an unrecognised filter name raises
    ``PyChartsRequestException`` here instead of wasting a request.
    """
    url_path = self._build_url_path(None, None)
    params = {'page': page}
    if filter_param:
        filter_name, filter_value = filter_param.popitem()
        # Guard clause: reject filters the API would just ignore.
        if filter_name not in self.VALID_SECURITY_FILTERS:
            raise exceptions.PyChartsRequestException(
                'Invalid filter param. Must be one of: {0}'.format(
                    ','.join(self.VALID_SECURITY_FILTERS)))
        params[filter_name] = filter_value
    return self._get_data(url_path, params)
def function[get_securities, parameter[self, page]]: constant[ Queries /<security_type> endpoint to return a paged list of securities. ] variable[url_path] assign[=] call[name[self]._build_url_path, parameter[constant[None], constant[None]]] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2844b50>], [<ast.Name object at 0x7da1b28448b0>]] if name[filter_param] begin[:] variable[query_filter] assign[=] call[name[filter_param].popitem, parameter[]] if compare[call[name[query_filter]][constant[0]] in name[self].VALID_SECURITY_FILTERS] begin[:] call[name[params]][call[name[query_filter]][constant[0]]] assign[=] call[name[query_filter]][constant[1]] return[call[name[self]._get_data, parameter[name[url_path], name[params]]]]
keyword[def] identifier[get_securities] ( identifier[self] , identifier[page] = literal[int] ,** identifier[filter_param] ): literal[string] identifier[url_path] = identifier[self] . identifier[_build_url_path] ( keyword[None] , keyword[None] ) identifier[params] ={ literal[string] : identifier[page] } keyword[if] identifier[filter_param] : identifier[query_filter] = identifier[filter_param] . identifier[popitem] () keyword[if] identifier[query_filter] [ literal[int] ] keyword[in] identifier[self] . identifier[VALID_SECURITY_FILTERS] : identifier[params] [ identifier[query_filter] [ literal[int] ]]= identifier[query_filter] [ literal[int] ] keyword[else] : identifier[error_msg] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[self] . identifier[VALID_SECURITY_FILTERS] )) keyword[raise] identifier[exceptions] . identifier[PyChartsRequestException] ( identifier[error_msg] ) keyword[return] identifier[self] . identifier[_get_data] ( identifier[url_path] , identifier[params] )
def get_securities(self, page=1, **filter_param): """ Queries /<security_type> endpoint to return a paged list of securities. """ url_path = self._build_url_path(None, None) params = {'page': page} # the endpoints respond just fine to invaliid query params, # they just ignore them, but the the real value of the endpoints # is only revealed when using the filters, so let's not waste # requests on filters that don't do anything. if filter_param: query_filter = filter_param.popitem() if query_filter[0] in self.VALID_SECURITY_FILTERS: params[query_filter[0]] = query_filter[1] # depends on [control=['if'], data=[]] else: error_msg = 'Invalid filter param. Must be one of: {0}'.format(','.join(self.VALID_SECURITY_FILTERS)) raise exceptions.PyChartsRequestException(error_msg) # depends on [control=['if'], data=[]] return self._get_data(url_path, params)
def get_service_endpoints_by_names(self, project, endpoint_names, type=None, auth_schemes=None, include_failed=None, include_details=None):
    """GetServiceEndpointsByNames.

    [Preview API] Get the service endpoints by name.
    :param str project: Project ID or project name
    :param [str] endpoint_names: Names of the service endpoints.
    :param str type: Type of the service endpoints.
    :param [str] auth_schemes: Authorization schemes used for service endpoints.
    :param bool include_failed: Failed flag for service endpoints.
    :param bool include_details: Flag to include more details for service endpoints. This is for internal use only and the flag will be treated as false for all other requests
    :rtype: [ServiceEndpoint]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')

    # List-valued filters travel as comma-separated strings on the wire.
    if endpoint_names is not None:
        endpoint_names = ",".join(endpoint_names)
    if auth_schemes is not None:
        auth_schemes = ",".join(auth_schemes)

    # (wire name, python name, value, serializer type) for every optional
    # query parameter; only non-None values are serialized.
    param_spec = [
        ('endpointNames', 'endpoint_names', endpoint_names, 'str'),
        ('type', 'type', type, 'str'),
        ('authSchemes', 'auth_schemes', auth_schemes, 'str'),
        ('includeFailed', 'include_failed', include_failed, 'bool'),
        ('includeDetails', 'include_details', include_details, 'bool'),
    ]
    query_parameters = {}
    for wire_name, py_name, value, kind in param_spec:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(py_name, value, kind)

    response = self._send(http_method='GET',
                          location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
                          version='5.0-preview.2',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response))
def function[get_service_endpoints_by_names, parameter[self, project, endpoint_names, type, auth_schemes, include_failed, include_details]]: constant[GetServiceEndpointsByNames. [Preview API] Get the service endpoints by name. :param str project: Project ID or project name :param [str] endpoint_names: Names of the service endpoints. :param str type: Type of the service endpoints. :param [str] auth_schemes: Authorization schemes used for service endpoints. :param bool include_failed: Failed flag for service endpoints. :param bool include_details: Flag to include more details for service endpoints. This is for internal use only and the flag will be treated as false for all other requests :rtype: [ServiceEndpoint] ] variable[route_values] assign[=] dictionary[[], []] if compare[name[project] is_not constant[None]] begin[:] call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]] variable[query_parameters] assign[=] dictionary[[], []] if compare[name[endpoint_names] is_not constant[None]] begin[:] variable[endpoint_names] assign[=] call[constant[,].join, parameter[name[endpoint_names]]] call[name[query_parameters]][constant[endpointNames]] assign[=] call[name[self]._serialize.query, parameter[constant[endpoint_names], name[endpoint_names], constant[str]]] if compare[name[type] is_not constant[None]] begin[:] call[name[query_parameters]][constant[type]] assign[=] call[name[self]._serialize.query, parameter[constant[type], name[type], constant[str]]] if compare[name[auth_schemes] is_not constant[None]] begin[:] variable[auth_schemes] assign[=] call[constant[,].join, parameter[name[auth_schemes]]] call[name[query_parameters]][constant[authSchemes]] assign[=] call[name[self]._serialize.query, parameter[constant[auth_schemes], name[auth_schemes], constant[str]]] if compare[name[include_failed] is_not constant[None]] begin[:] call[name[query_parameters]][constant[includeFailed]] 
assign[=] call[name[self]._serialize.query, parameter[constant[include_failed], name[include_failed], constant[bool]]] if compare[name[include_details] is_not constant[None]] begin[:] call[name[query_parameters]][constant[includeDetails]] assign[=] call[name[self]._serialize.query, parameter[constant[include_details], name[include_details], constant[bool]]] variable[response] assign[=] call[name[self]._send, parameter[]] return[call[name[self]._deserialize, parameter[constant[[ServiceEndpoint]], call[name[self]._unwrap_collection, parameter[name[response]]]]]]
keyword[def] identifier[get_service_endpoints_by_names] ( identifier[self] , identifier[project] , identifier[endpoint_names] , identifier[type] = keyword[None] , identifier[auth_schemes] = keyword[None] , identifier[include_failed] = keyword[None] , identifier[include_details] = keyword[None] ): literal[string] identifier[route_values] ={} keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] ) identifier[query_parameters] ={} keyword[if] identifier[endpoint_names] keyword[is] keyword[not] keyword[None] : identifier[endpoint_names] = literal[string] . identifier[join] ( identifier[endpoint_names] ) identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[endpoint_names] , literal[string] ) keyword[if] identifier[type] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[type] , literal[string] ) keyword[if] identifier[auth_schemes] keyword[is] keyword[not] keyword[None] : identifier[auth_schemes] = literal[string] . identifier[join] ( identifier[auth_schemes] ) identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[auth_schemes] , literal[string] ) keyword[if] identifier[include_failed] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[include_failed] , literal[string] ) keyword[if] identifier[include_details] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . 
identifier[query] ( literal[string] , identifier[include_details] , literal[string] ) identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] , identifier[location_id] = literal[string] , identifier[version] = literal[string] , identifier[route_values] = identifier[route_values] , identifier[query_parameters] = identifier[query_parameters] ) keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] ))
def get_service_endpoints_by_names(self, project, endpoint_names, type=None, auth_schemes=None, include_failed=None, include_details=None): """GetServiceEndpointsByNames. [Preview API] Get the service endpoints by name. :param str project: Project ID or project name :param [str] endpoint_names: Names of the service endpoints. :param str type: Type of the service endpoints. :param [str] auth_schemes: Authorization schemes used for service endpoints. :param bool include_failed: Failed flag for service endpoints. :param bool include_details: Flag to include more details for service endpoints. This is for internal use only and the flag will be treated as false for all other requests :rtype: [ServiceEndpoint] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']] query_parameters = {} if endpoint_names is not None: endpoint_names = ','.join(endpoint_names) query_parameters['endpointNames'] = self._serialize.query('endpoint_names', endpoint_names, 'str') # depends on [control=['if'], data=['endpoint_names']] if type is not None: query_parameters['type'] = self._serialize.query('type', type, 'str') # depends on [control=['if'], data=['type']] if auth_schemes is not None: auth_schemes = ','.join(auth_schemes) query_parameters['authSchemes'] = self._serialize.query('auth_schemes', auth_schemes, 'str') # depends on [control=['if'], data=['auth_schemes']] if include_failed is not None: query_parameters['includeFailed'] = self._serialize.query('include_failed', include_failed, 'bool') # depends on [control=['if'], data=['include_failed']] if include_details is not None: query_parameters['includeDetails'] = self._serialize.query('include_details', include_details, 'bool') # depends on [control=['if'], data=['include_details']] response = self._send(http_method='GET', location_id='e85f1c62-adfc-4b74-b618-11a150fb195e', version='5.0-preview.2', 
route_values=route_values, query_parameters=query_parameters) return self._deserialize('[ServiceEndpoint]', self._unwrap_collection(response))
def categorical_to_numeric(table):
    """Encode categorical columns to numeric by converting each category to an integer value.

    Parameters
    ----------
    table : pandas.DataFrame
        Table with categorical columns to encode.

    Returns
    -------
    encoded : pandas.DataFrame
        Table with categorical columns encoded as numeric. Numeric columns
        in the input table remain unchanged.
    """

    def _encode(col):
        # Categorical dtype: the integer category codes are the encoding.
        if is_categorical_dtype(col.dtype):
            return col.cat.codes
        if col.dtype.char == "O":
            # Object dtype: either the values are already integer-like, or
            # each distinct (stably sorted) value is mapped onto 0..n-1.
            try:
                return col.astype(numpy.int64)
            except ValueError:
                levels = col.dropna().unique()
                levels.sort(kind="mergesort")
                return col.replace(levels, numpy.arange(levels.shape[0]))
        # Booleans become 0/1 integers.
        if col.dtype == bool:
            return col.astype(numpy.int64)
        # Numeric columns pass through unchanged.
        return col

    if isinstance(table, pandas.Series):
        return pandas.Series(_encode(table), name=table.name, index=table.index)
    if _pandas_version_under0p23:
        # Older pandas spelled "return a Series per column" as reduce=False.
        return table.apply(_encode, axis=0, reduce=False)
    return table.apply(_encode, axis=0, result_type='reduce')
def function[categorical_to_numeric, parameter[table]]: constant[Encode categorical columns to numeric by converting each category to an integer value. Parameters ---------- table : pandas.DataFrame Table with categorical columns to encode. Returns ------- encoded : pandas.DataFrame Table with categorical columns encoded as numeric. Numeric columns in the input table remain unchanged. ] def function[transform, parameter[column]]: if call[name[is_categorical_dtype], parameter[name[column].dtype]] begin[:] return[name[column].cat.codes] if compare[name[column].dtype.char equal[==] constant[O]] begin[:] <ast.Try object at 0x7da1b170dc90> return[name[nc]] return[name[column]] if call[name[isinstance], parameter[name[table], name[pandas].Series]] begin[:] return[call[name[pandas].Series, parameter[call[name[transform], parameter[name[table]]]]]]
keyword[def] identifier[categorical_to_numeric] ( identifier[table] ): literal[string] keyword[def] identifier[transform] ( identifier[column] ): keyword[if] identifier[is_categorical_dtype] ( identifier[column] . identifier[dtype] ): keyword[return] identifier[column] . identifier[cat] . identifier[codes] keyword[if] identifier[column] . identifier[dtype] . identifier[char] == literal[string] : keyword[try] : identifier[nc] = identifier[column] . identifier[astype] ( identifier[numpy] . identifier[int64] ) keyword[except] identifier[ValueError] : identifier[classes] = identifier[column] . identifier[dropna] (). identifier[unique] () identifier[classes] . identifier[sort] ( identifier[kind] = literal[string] ) identifier[nc] = identifier[column] . identifier[replace] ( identifier[classes] , identifier[numpy] . identifier[arange] ( identifier[classes] . identifier[shape] [ literal[int] ])) keyword[return] identifier[nc] keyword[elif] identifier[column] . identifier[dtype] == identifier[bool] : keyword[return] identifier[column] . identifier[astype] ( identifier[numpy] . identifier[int64] ) keyword[return] identifier[column] keyword[if] identifier[isinstance] ( identifier[table] , identifier[pandas] . identifier[Series] ): keyword[return] identifier[pandas] . identifier[Series] ( identifier[transform] ( identifier[table] ), identifier[name] = identifier[table] . identifier[name] , identifier[index] = identifier[table] . identifier[index] ) keyword[else] : keyword[if] identifier[_pandas_version_under0p23] : keyword[return] identifier[table] . identifier[apply] ( identifier[transform] , identifier[axis] = literal[int] , identifier[reduce] = keyword[False] ) keyword[else] : keyword[return] identifier[table] . identifier[apply] ( identifier[transform] , identifier[axis] = literal[int] , identifier[result_type] = literal[string] )
def categorical_to_numeric(table): """Encode categorical columns to numeric by converting each category to an integer value. Parameters ---------- table : pandas.DataFrame Table with categorical columns to encode. Returns ------- encoded : pandas.DataFrame Table with categorical columns encoded as numeric. Numeric columns in the input table remain unchanged. """ def transform(column): if is_categorical_dtype(column.dtype): return column.cat.codes # depends on [control=['if'], data=[]] if column.dtype.char == 'O': try: nc = column.astype(numpy.int64) # depends on [control=['try'], data=[]] except ValueError: classes = column.dropna().unique() classes.sort(kind='mergesort') nc = column.replace(classes, numpy.arange(classes.shape[0])) # depends on [control=['except'], data=[]] return nc # depends on [control=['if'], data=[]] elif column.dtype == bool: return column.astype(numpy.int64) # depends on [control=['if'], data=[]] return column if isinstance(table, pandas.Series): return pandas.Series(transform(table), name=table.name, index=table.index) # depends on [control=['if'], data=[]] elif _pandas_version_under0p23: return table.apply(transform, axis=0, reduce=False) # depends on [control=['if'], data=[]] else: return table.apply(transform, axis=0, result_type='reduce')
def ensure_path(path, mode=0o777):
    """Ensure that path exists in a multiprocessing safe way.

    If the path does not exist, recursively create it and its parent
    directories using the provided mode. If the path already exists, do
    nothing. The umask is cleared to enable the mode to be set, and then
    reset to the original value after the mode is set.

    Parameters
    ----------
    path : str
        file system path to a non-existent directory that should be created.
    mode : int
        octal representation of the mode to use when creating the directory.

    Raises
    ------
    OSError
        If os.makedirs raises an OSError for any reason other than if the
        directory already exists.
    """
    if not path:
        return
    # Clear the umask so `mode` is applied exactly as requested.
    umask = os.umask(0)
    try:
        os.makedirs(path, mode)
    except OSError as e:
        # A concurrently-created (or pre-existing) directory is fine;
        # anything else is a real failure.
        if e.errno != errno.EEXIST:
            raise
    finally:
        # Bug fix: always restore the umask. Previously any OSError from
        # makedirs (including the swallowed EEXIST) left the process umask
        # stuck at 0 for the rest of the program.
        os.umask(umask)
def function[ensure_path, parameter[path, mode]]: constant[Ensure that path exists in a multiprocessing safe way. If the path does not exist, recursively create it and its parent directories using the provided mode. If the path already exists, do nothing. The umask is cleared to enable the mode to be set, and then reset to the original value after the mode is set. Parameters ---------- path : str file system path to a non-existent directory that should be created. mode : int octal representation of the mode to use when creating the directory. Raises ------ OSError If os.makedirs raises an OSError for any reason other than if the directory already exists. ] if name[path] begin[:] <ast.Try object at 0x7da2045644f0>
keyword[def] identifier[ensure_path] ( identifier[path] , identifier[mode] = literal[int] ): literal[string] keyword[if] identifier[path] : keyword[try] : identifier[umask] = identifier[os] . identifier[umask] ( literal[int] ) identifier[os] . identifier[makedirs] ( identifier[path] , identifier[mode] ) identifier[os] . identifier[umask] ( identifier[umask] ) keyword[except] identifier[OSError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[errno] != identifier[errno] . identifier[EEXIST] : keyword[raise]
def ensure_path(path, mode=511): """Ensure that path exists in a multiprocessing safe way. If the path does not exist, recursively create it and its parent directories using the provided mode. If the path already exists, do nothing. The umask is cleared to enable the mode to be set, and then reset to the original value after the mode is set. Parameters ---------- path : str file system path to a non-existent directory that should be created. mode : int octal representation of the mode to use when creating the directory. Raises ------ OSError If os.makedirs raises an OSError for any reason other than if the directory already exists. """ if path: try: umask = os.umask(0) os.makedirs(path, mode) os.umask(umask) # depends on [control=['try'], data=[]] except OSError as e: if e.errno != errno.EEXIST: raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
def each(coro, iterable, limit=0, loop=None, collect=False, timeout=None,
         return_exceptions=False, *args, **kw):
    """
    Concurrently iterate the values yielded by ``iterable``, feeding each
    one to the asynchronous coroutine ``coro``.

    When ``collect`` is True the values returned by the coroutines are
    gathered into an ordered list mirroring the origin iterable (the
    equivalent of ``paco.map()``); otherwise ``None`` is returned.

    Concurrency can be capped via ``limit``, and all coroutines are
    executed in the same loop.

    This function is a coroutine and can be composed in a pipeline chain
    with the ``|`` operator.

    Arguments:
        coro (coroutinefunction): coroutine iterator function that accepts
            iterable values.
        iterable (iterable|asynchronousiterable): an iterable collection
            yielding coroutines functions.
        limit (int): max iteration concurrency limit. Use ``0`` for no limit.
        collect (bool): return yielded values from coroutines. Default False.
        loop (asyncio.BaseEventLoop): optional event loop to use.
        return_exceptions (bool): enable/disable returning exceptions in
            case of error. ``collect`` param must be True.
        timeout (int|float): maximum number of seconds to wait before
            returning; ``None`` (the default) means no limit.
        *args (mixed): optional variadic arguments to pass to the coroutine
            iterable function.

    Returns:
        results (list): ordered list of values yielded by coroutines

    Raises:
        TypeError: in case of invalid input arguments.

    Usage::

        async def mul_2(num):
            return num * 2

        await paco.each(mul_2, [1, 2, 3, 4, 5])
        # => None

        await paco.each(mul_2, [1, 2, 3, 4, 5], collect=True)
        # => [2, 4, 6, 8, 10]

    """
    assert_corofunction(coro=coro)
    assert_iter(iterable=iterable)

    # Pre-size an ordered results buffer only when the caller asked for
    # the mapped values back; otherwise nothing is retained.
    results = [None] * len(iterable) if collect else None

    executor = ConcurrentExecutor(limit=limit, loop=loop)

    @asyncio.coroutine
    def runner(position, element):
        outcome = yield from safe_run(coro(element, *args, **kw),
                                      return_exceptions=return_exceptions)
        if collect:
            # Store by origin position so output order matches input order.
            results[position] = outcome
        return outcome

    # Schedule one runner per input element.
    for position, element in enumerate(iterable):
        executor.add(runner(position, element))

    # Block until every scheduled coroutine has finished.
    yield from executor.run(return_exceptions=return_exceptions,
                            ignore_empty=True,
                            timeout=timeout)

    return results
def function[each, parameter[coro, iterable, limit, loop, collect, timeout, return_exceptions]]: constant[ Concurrently iterates values yielded from an iterable, passing them to an asynchronous coroutine. You can optionally collect yielded values passing collect=True param, which would be equivalent to `paco.map()``. Mapped values will be returned as an ordered list. Items order is preserved based on origin iterable order. Concurrency level can be configurable via `limit` param. All coroutines will be executed in the same loop. This function is a coroutine. This function can be composed in a pipeline chain with ``|`` operator. Arguments: coro (coroutinefunction): coroutine iterator function that accepts iterable values. iterable (iterable|asynchronousiterable): an iterable collection yielding coroutines functions. limit (int): max iteration concurrency limit. Use ``0`` for no limit. collect (bool): return yielded values from coroutines. Default False. loop (asyncio.BaseEventLoop): optional event loop to use. return_exceptions (bool): enable/disable returning exceptions in case of error. `collect` param must be True. timeout (int|float): timeout can be used to control the maximum number of seconds to wait before returning. timeout can be an int or float. If timeout is not specified or None, there is no limit to the wait time. *args (mixed): optional variadic arguments to pass to the coroutine iterable function. Returns: results (list): ordered list of values yielded by coroutines Raises: TypeError: in case of invalid input arguments. 
Usage:: async def mul_2(num): return num * 2 await paco.each(mul_2, [1, 2, 3, 4, 5]) # => None await paco.each(mul_2, [1, 2, 3, 4, 5], collect=True) # => [2, 4, 6, 8, 10] ] call[name[assert_corofunction], parameter[]] call[name[assert_iter], parameter[]] variable[results] assign[=] constant[None] if name[collect] begin[:] variable[results] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18bcc8580>]] * call[name[len], parameter[name[iterable]]]] variable[pool] assign[=] call[name[ConcurrentExecutor], parameter[]] def function[collector, parameter[index, item]]: variable[result] assign[=] <ast.YieldFrom object at 0x7da18bcc8ac0> if name[collect] begin[:] call[name[results]][name[index]] assign[=] name[result] return[name[result]] for taget[tuple[[<ast.Name object at 0x7da18bcc8550>, <ast.Name object at 0x7da18bccb760>]]] in starred[call[name[enumerate], parameter[name[iterable]]]] begin[:] call[name[pool].add, parameter[call[name[collector], parameter[name[index], name[value]]]]] <ast.YieldFrom object at 0x7da20e9b2c80> return[name[results]]
keyword[def] identifier[each] ( identifier[coro] , identifier[iterable] , identifier[limit] = literal[int] , identifier[loop] = keyword[None] , identifier[collect] = keyword[False] , identifier[timeout] = keyword[None] , identifier[return_exceptions] = keyword[False] ,* identifier[args] ,** identifier[kw] ): literal[string] identifier[assert_corofunction] ( identifier[coro] = identifier[coro] ) identifier[assert_iter] ( identifier[iterable] = identifier[iterable] ) identifier[results] = keyword[None] keyword[if] identifier[collect] : identifier[results] =[ keyword[None] ]* identifier[len] ( identifier[iterable] ) identifier[pool] = identifier[ConcurrentExecutor] ( identifier[limit] = identifier[limit] , identifier[loop] = identifier[loop] ) @ identifier[asyncio] . identifier[coroutine] keyword[def] identifier[collector] ( identifier[index] , identifier[item] ): identifier[result] = keyword[yield] keyword[from] identifier[safe_run] ( identifier[coro] ( identifier[item] ,* identifier[args] ,** identifier[kw] ), identifier[return_exceptions] = identifier[return_exceptions] ) keyword[if] identifier[collect] : identifier[results] [ identifier[index] ]= identifier[result] keyword[return] identifier[result] keyword[for] identifier[index] , identifier[value] keyword[in] identifier[enumerate] ( identifier[iterable] ): identifier[pool] . identifier[add] ( identifier[collector] ( identifier[index] , identifier[value] )) keyword[yield] keyword[from] identifier[pool] . identifier[run] ( identifier[return_exceptions] = identifier[return_exceptions] , identifier[ignore_empty] = keyword[True] , identifier[timeout] = identifier[timeout] ) keyword[return] identifier[results]
def each(coro, iterable, limit=0, loop=None, collect=False, timeout=None, return_exceptions=False, *args, **kw):
    """
    Concurrently iterates values yielded from an iterable, passing them
    to an asynchronous coroutine.

    You can optionally collect yielded values passing collect=True param,
    which would be equivalent to ``paco.map()``.

    Mapped values will be returned as an ordered list.
    Items order is preserved based on origin iterable order.

    Concurrency level can be configurable via ``limit`` param.

    All coroutines will be executed in the same loop.

    This function is a coroutine.
    This function can be composed in a pipeline chain with ``|`` operator.

    Arguments:
        coro (coroutinefunction): coroutine iterator function that accepts
            iterable values.
        iterable (iterable|asynchronousiterable): an iterable collection
            yielding coroutines functions.
        limit (int): max iteration concurrency limit. Use ``0`` for no limit.
        collect (bool): return yielded values from coroutines. Default False.
        loop (asyncio.BaseEventLoop): optional event loop to use.
        return_exceptions (bool): enable/disable returning exceptions in case
            of error. ``collect`` param must be True.
        timeout (int|float): timeout can be used to control the maximum number
            of seconds to wait before returning. timeout can be an int or
            float. If timeout is not specified or None, there is no limit to
            the wait time.
        *args (mixed): optional variadic arguments to pass to the coroutine
            iterable function.

    Returns:
        results (list): ordered list of values yielded by coroutines

    Raises:
        TypeError: in case of invalid input arguments.

    Usage::

        async def mul_2(num):
            return num * 2

        await paco.each(mul_2, [1, 2, 3, 4, 5])
        # => None

        await paco.each(mul_2, [1, 2, 3, 4, 5], collect=True)
        # => [2, 4, 6, 8, 10]
    """
    # Validate arguments up front so a bad coro/iterable fails before
    # any task is scheduled.
    assert_corofunction(coro=coro)
    assert_iter(iterable=iterable)
    # By default do not collect yielded values from coroutines
    results = None
    if collect:
        # Store ordered results: preallocate one slot per input item so
        # completion order cannot scramble output order.
        # NOTE(review): len(iterable) presumably requires a sized iterable
        # when collect=True — confirm against callers.
        results = [None] * len(iterable) # depends on [control=['if'], data=[]]
    # Create concurrent executor
    pool = ConcurrentExecutor(limit=limit, loop=loop)

    @asyncio.coroutine
    def collector(index, item):
        # safe_run captures the exception as the result when
        # return_exceptions=True instead of letting it propagate.
        result = (yield from safe_run(coro(item, *args, **kw), return_exceptions=return_exceptions))
        if collect:
            # Write into the slot matching the item's origin position.
            results[index] = result # depends on [control=['if'], data=[]]
        return result
    # Iterate and pass elements to coroutine
    for (index, value) in enumerate(iterable):
        pool.add(collector(index, value)) # depends on [control=['for'], data=[]]
    # Wait until all the coroutines finishes
    yield from pool.run(return_exceptions=return_exceptions, ignore_empty=True, timeout=timeout)
    # Returns list of mapped results in order (None unless collect=True)
    return results
def parse_bamPEFragmentSize(self):
    """Find bamPEFragmentSize output. Supports the --table option

    Parses every log matched by the 'deeptools/bamPEFragmentSizeTable'
    search pattern into ``self.deeptools_bamPEFragmentSize`` (mapping of
    sample name -> metrics dict), then adds up to three report sections:
    a read-length metrics table, a fragment-length metrics table (only
    when at least one paired-end sample is present), and a read/fragment
    length distribution line plot.

    Returns:
        int: number of samples parsed.
    """
    self.deeptools_bamPEFragmentSize = dict()
    for f in self.find_log_files('deeptools/bamPEFragmentSizeTable'):
        parsed_data = self.parseBamPEFile(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_bamPEFragmentSize:
                # Same sample name seen in more than one log file —
                # the later file wins.
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_bamPEFragmentSize[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section='bamPEFragmentSize')

    if len(self.deeptools_bamPEFragmentSize) > 0:
        # Table headers for the read-length metrics (present for all
        # samples, single-end or paired-end). 'shared_key' keeps the
        # read-length columns on a common colour scale.
        headersSE = OrderedDict()
        headersSE["Reads Sampled"] = {
            'title': '# Sampled',
            'description': 'Number of reads sampled',
            'format': '{:,.0f}'
        }
        headersSE["Read Len. Min."] = {
            'title': 'Min',
            'description': 'Minimum read length',
            'format': '{:,.0f}',
            'shared_key': 'read_length'
        }
        headersSE["Read Len. 1st. Qu."] = {
            'title': '1st Quartile',
            'description': '1st quartile read length',
            'format': '{:,.0f}',
            'shared_key': 'read_length'
        }
        headersSE["Read Len. Mean"] = {
            'title': 'Mean',
            'description': 'Mean read length',
            'shared_key': 'read_length'
        }
        headersSE["Read Len. Median"] = {
            'title': 'Median',
            'description': 'Median read length',
            'format': '{:,.0f}',
            'shared_key': 'read_length'
        }
        headersSE["Read Len. 3rd Qu."] = {
            'title': '3rd Quartile',
            'description': '3rd quartile read length',
            'format': '{:,.0f}',
            'shared_key': 'read_length'
        }
        headersSE["Read Len. Max"] = {
            'title': 'Max',
            'description': 'Maximum read length',
            'format': '{:,.0f}',
            'shared_key': 'read_length'
        }
        headersSE["Read Len. Std."] = {
            'title': 'Std. Dev.',
            'description': 'read length standard deviation',
            'shared_key': 'read_length'
        }
        headersSE["Read Med. Abs. Dev."] = {
            'title': 'MAD',
            'description': 'read length median absolute deviation',
            'shared_key': 'read_length'
        }
        config = {'namespace': 'deepTools bamPEFragmentSize'}
        self.add_section(
            name = "Read length metrics",
            anchor = "deeptools_readlengths",
            plot = table.plot(self.deeptools_bamPEFragmentSize, headersSE, config)
        )

        # Table headers for the fragment-length metrics (paired-end only).
        headersPE = OrderedDict()
        headersPE["Frag. Sampled"] = {
            'title': '# Sampled',
            'description': 'Number of fragments sampled',
            'format': '{:,.0f}'
        }
        headersPE["Frag. Len. Min."] = {
            'title': 'Min',
            'description': 'Minimum fragment length',
            'format': '{:,.0f}',
            'shared_key': 'frag_length'
        }
        headersPE["Frag. Len. 1st. Qu."] = {
            'title': '1st Quartile',
            'description': '1st quartile fragment length',
            'format': '{:,.0f}',
            'shared_key': 'frag_length'
        }
        headersPE["Frag. Len. Mean"] = {
            'title': 'Mean',
            'description': 'Mean fragment length',
            'format': '{:,.0f}',
            'shared_key': 'frag_length'
        }
        headersPE["Frag. Len. Median"] = {
            'title': 'Median',
            'description': 'Median fragment length',
            'format': '{:,.0f}',
            'shared_key': 'frag_length'
        }
        headersPE["Frag. Len. 3rd Qu."] = {
            'title': '3rd Quartile',
            'description': '3rd quartile fragment length',
            'format': '{:,.0f}',
            'shared_key': 'frag_length'
        }
        headersPE["Frag. Len. Max"] = {
            'title': 'Max',
            'description': 'Maximum fragment length',
            'format': '{:,.0f}',
            'shared_key': 'frag_length'
        }
        headersPE["Frag. Len. Std."] = {
            'title': 'Std. Dev.',
            'description': 'Fragment length standard deviation',
            'shared_key': 'frag_length'
        }
        headersPE["Frag. Med. Abs. Dev."] = {
            'title': 'MAD',
            'description': 'Fragment length median absolute deviation',
            'shared_key': 'frag_length'
        }

        # Are there any PE datasets?
        # A sample is treated as paired-end iff its metrics include
        # 'Frag. Len. Min.'.
        PE = False
        for k, v in self.deeptools_bamPEFragmentSize.items():
            if 'Frag. Len. Min.' in v:
                PE = True
                break
        if PE:
            self.add_section(name="Fragment length metrics",
                             anchor="deeptools_fragmentlengths",
                             plot=table.plot(self.deeptools_bamPEFragmentSize, headersPE, config))

        # Read length plot
        # Two datasets: read lengths (all samples) and fragment lengths
        # (PE samples only), switchable via data_labels.
        config = {
            'data_labels': [
                {
                    'name': "Read length distribution",
                    'title': "Read length distribution",
                    'ylab': "Read length (bases)"
                },
                {
                    'name': "Fragment length distribution",
                    'title': "Fragment length distribution",
                    'ylab': "Fragment length (bases)"
                }
            ],
            'id': 'deeptools_readlengthsPlot',
            'title': 'deepTools: Read/Fragment length distribution',
            'namespace': 'deepTools bamPEFragmentSize',
            'ylab': "Read length (bases)",
            'xlab': "Percentile"
        }
        # Build percentile -> length series for each sample.
        SE = dict()
        PE = dict()
        for k, v in self.deeptools_bamPEFragmentSize.items():
            SE[k] = {0: v['Read Len. Min.'],
                     10: v['Read Len. 10%'],
                     20: v['Read Len. 20%'],
                     25: v['Read Len. 1st. Qu.'],
                     30: v['Read Len. 30%'],
                     40: v['Read Len. 40%'],
                     50: v['Read Len. Median'],
                     60: v['Read Len. 60%'],
                     70: v['Read Len. 70%'],
                     75: v['Read Len. 3rd Qu.'],
                     80: v['Read Len. 80%'],
                     90: v['Read Len. 90%'],
                     99: v['Read Len. 99%'],
                     100: v['Read Len. Max']}
            # Skip fragment-length series for single-end samples.
            if 'Frag. Len. Min.' not in v:
                continue
            PE[k] = {0: v['Frag. Len. Min.'],
                     10: v['Frag. Len. 10%'],
                     20: v['Frag. Len. 20%'],
                     25: v['Frag. Len. 1st. Qu.'],
                     30: v['Frag. Len. 30%'],
                     40: v['Frag. Len. 40%'],
                     50: v['Frag. Len. Median'],
                     60: v['Frag. Len. 60%'],
                     70: v['Frag. Len. 70%'],
                     75: v['Frag. Len. 3rd Qu.'],
                     80: v['Frag. Len. 80%'],
                     90: v['Frag. Len. 90%'],
                     99: v['Frag. Len. 99%'],
                     100: v['Frag. Len. Max']}
        self.add_section(
            name = "Read/fragment length distribution",
            anchor = "deeptools_fragmentlengths_dist",
            plot = linegraph.plot([SE, PE], config)
        )

    return len(self.deeptools_bamPEFragmentSize)
def function[parse_bamPEFragmentSize, parameter[self]]: constant[Find bamPEFragmentSize output. Supports the --table option] name[self].deeptools_bamPEFragmentSize assign[=] call[name[dict], parameter[]] for taget[name[f]] in starred[call[name[self].find_log_files, parameter[constant[deeptools/bamPEFragmentSizeTable]]]] begin[:] variable[parsed_data] assign[=] call[name[self].parseBamPEFile, parameter[name[f]]] for taget[tuple[[<ast.Name object at 0x7da18bc721d0>, <ast.Name object at 0x7da18bc73550>]]] in starred[call[name[parsed_data].items, parameter[]]] begin[:] if compare[name[k] in name[self].deeptools_bamPEFragmentSize] begin[:] call[name[log].warning, parameter[call[constant[Replacing duplicate sample {}.].format, parameter[name[k]]]]] call[name[self].deeptools_bamPEFragmentSize][name[k]] assign[=] name[v] if compare[call[name[len], parameter[name[parsed_data]]] greater[>] constant[0]] begin[:] call[name[self].add_data_source, parameter[name[f]]] if compare[call[name[len], parameter[name[self].deeptools_bamPEFragmentSize]] greater[>] constant[0]] begin[:] variable[headersSE] assign[=] call[name[OrderedDict], parameter[]] call[name[headersSE]][constant[Reads Sampled]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72e60>, <ast.Constant object at 0x7da18bc73910>, <ast.Constant object at 0x7da18bc729b0>], [<ast.Constant object at 0x7da18bc708b0>, <ast.Constant object at 0x7da18bc73610>, <ast.Constant object at 0x7da18bc71540>]] call[name[headersSE]][constant[Read Len. Min.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70b80>, <ast.Constant object at 0x7da18bc73d30>, <ast.Constant object at 0x7da18bc72890>, <ast.Constant object at 0x7da18bc72290>], [<ast.Constant object at 0x7da18bc71fc0>, <ast.Constant object at 0x7da18bc73070>, <ast.Constant object at 0x7da18bc72cb0>, <ast.Constant object at 0x7da18bc72ce0>]] call[name[headersSE]][constant[Read Len. 1st. 
Qu.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70250>, <ast.Constant object at 0x7da18bc70d60>, <ast.Constant object at 0x7da18bc73eb0>, <ast.Constant object at 0x7da18bc71c60>], [<ast.Constant object at 0x7da18bc70b50>, <ast.Constant object at 0x7da18bc70b20>, <ast.Constant object at 0x7da18bc70f40>, <ast.Constant object at 0x7da18bc72320>]] call[name[headersSE]][constant[Read Len. Mean]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70340>, <ast.Constant object at 0x7da18bc71bd0>, <ast.Constant object at 0x7da18bc73940>], [<ast.Constant object at 0x7da18bc70460>, <ast.Constant object at 0x7da18bc70eb0>, <ast.Constant object at 0x7da18bc727d0>]] call[name[headersSE]][constant[Read Len. Median]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73c70>, <ast.Constant object at 0x7da18bc70070>, <ast.Constant object at 0x7da18bc73d00>, <ast.Constant object at 0x7da18bc71210>], [<ast.Constant object at 0x7da18bc70940>, <ast.Constant object at 0x7da18bc729e0>, <ast.Constant object at 0x7da18bc708e0>, <ast.Constant object at 0x7da18bc725c0>]] call[name[headersSE]][constant[Read Len. 3rd Qu.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc715d0>, <ast.Constant object at 0x7da18bc720b0>, <ast.Constant object at 0x7da18bc70e50>, <ast.Constant object at 0x7da18bc730d0>], [<ast.Constant object at 0x7da18bc73730>, <ast.Constant object at 0x7da18bc703d0>, <ast.Constant object at 0x7da18bc71e10>, <ast.Constant object at 0x7da18bc73430>]] call[name[headersSE]][constant[Read Len. Max]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72b30>, <ast.Constant object at 0x7da18bc71de0>, <ast.Constant object at 0x7da18bc73c10>, <ast.Constant object at 0x7da18bc71480>], [<ast.Constant object at 0x7da18bc71ae0>, <ast.Constant object at 0x7da18bc73220>, <ast.Constant object at 0x7da18bc70fd0>, <ast.Constant object at 0x7da18bc72ad0>]] call[name[headersSE]][constant[Read Len. 
Std.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73b80>, <ast.Constant object at 0x7da18bc73040>, <ast.Constant object at 0x7da18bc71f30>], [<ast.Constant object at 0x7da18bc73fd0>, <ast.Constant object at 0x7da18bc72dd0>, <ast.Constant object at 0x7da18bc733d0>]] call[name[headersSE]][constant[Read Med. Abs. Dev.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73f40>, <ast.Constant object at 0x7da18bc72920>, <ast.Constant object at 0x7da18bc73760>], [<ast.Constant object at 0x7da18bc722f0>, <ast.Constant object at 0x7da18bc71270>, <ast.Constant object at 0x7da18bc73640>]] variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70610>], [<ast.Constant object at 0x7da18bc72590>]] call[name[self].add_section, parameter[]] variable[headersPE] assign[=] call[name[OrderedDict], parameter[]] call[name[headersPE]][constant[Frag. Sampled]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70370>, <ast.Constant object at 0x7da18bc72770>, <ast.Constant object at 0x7da18bc72260>], [<ast.Constant object at 0x7da18bc709d0>, <ast.Constant object at 0x7da18bc73c40>, <ast.Constant object at 0x7da18bc70d30>]] call[name[headersPE]][constant[Frag. Len. Min.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc738e0>, <ast.Constant object at 0x7da18bc73fa0>, <ast.Constant object at 0x7da18bc71ff0>, <ast.Constant object at 0x7da18bc73250>], [<ast.Constant object at 0x7da18bc73ca0>, <ast.Constant object at 0x7da18bc70700>, <ast.Constant object at 0x7da18bc701f0>, <ast.Constant object at 0x7da18bc71e40>]] call[name[headersPE]][constant[Frag. Len. 1st. Qu.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73f10>, <ast.Constant object at 0x7da18bc70d90>, <ast.Constant object at 0x7da18bc72b90>, <ast.Constant object at 0x7da18bc70f10>], [<ast.Constant object at 0x7da18bc700d0>, <ast.Constant object at 0x7da18bc73520>, <ast.Constant object at 0x7da18bc72440>, <ast.Constant object at 0x7da18bc702e0>]] call[name[headersPE]][constant[Frag. 
Len. Mean]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72530>, <ast.Constant object at 0x7da18bc736d0>, <ast.Constant object at 0x7da18bc713f0>, <ast.Constant object at 0x7da18bc73a60>], [<ast.Constant object at 0x7da18bc724d0>, <ast.Constant object at 0x7da18bc73cd0>, <ast.Constant object at 0x7da18bc71db0>, <ast.Constant object at 0x7da18bc73580>]] call[name[headersPE]][constant[Frag. Len. Median]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc73df0>, <ast.Constant object at 0x7da18bc71720>, <ast.Constant object at 0x7da18bc73160>, <ast.Constant object at 0x7da18bc73820>], [<ast.Constant object at 0x7da18bc71c00>, <ast.Constant object at 0x7da18bc733a0>, <ast.Constant object at 0x7da18bc70df0>, <ast.Constant object at 0x7da18bc71030>]] call[name[headersPE]][constant[Frag. Len. 3rd Qu.]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70190>, <ast.Constant object at 0x7da18bc70820>, <ast.Constant object at 0x7da18bc716c0>, <ast.Constant object at 0x7da18bc71120>], [<ast.Constant object at 0x7da18bc71c90>, <ast.Constant object at 0x7da18bc71900>, <ast.Constant object at 0x7da18bc72950>, <ast.Constant object at 0x7da18bc709a0>]] call[name[headersPE]][constant[Frag. Len. Max]] assign[=] dictionary[[<ast.Constant object at 0x7da18bc703a0>, <ast.Constant object at 0x7da18bc71690>, <ast.Constant object at 0x7da18bc737f0>, <ast.Constant object at 0x7da18c4cf850>], [<ast.Constant object at 0x7da18c4cd5d0>, <ast.Constant object at 0x7da18c4cf9a0>, <ast.Constant object at 0x7da18c4cc310>, <ast.Constant object at 0x7da18c4cf670>]] call[name[headersPE]][constant[Frag. Len. Std.]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cce50>, <ast.Constant object at 0x7da18c4cf7f0>, <ast.Constant object at 0x7da18c4cd780>], [<ast.Constant object at 0x7da18c4cdbd0>, <ast.Constant object at 0x7da18c4cf010>, <ast.Constant object at 0x7da18c4cf880>]] call[name[headersPE]][constant[Frag. Med. Abs. 
Dev.]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf4c0>, <ast.Constant object at 0x7da18c4cc4c0>, <ast.Constant object at 0x7da18c4cd1b0>], [<ast.Constant object at 0x7da18c4cd840>, <ast.Constant object at 0x7da18c4cfa90>, <ast.Constant object at 0x7da18c4cc6a0>]] variable[PE] assign[=] constant[False] for taget[tuple[[<ast.Name object at 0x7da18c4ccd30>, <ast.Name object at 0x7da18c4cdfc0>]]] in starred[call[name[self].deeptools_bamPEFragmentSize.items, parameter[]]] begin[:] if compare[constant[Frag. Len. Min.] in name[v]] begin[:] variable[PE] assign[=] constant[True] break if name[PE] begin[:] call[name[self].add_section, parameter[]] variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cfd90>, <ast.Constant object at 0x7da18c4ccb20>, <ast.Constant object at 0x7da18c4cc940>, <ast.Constant object at 0x7da18c4cde40>, <ast.Constant object at 0x7da18c4cf730>, <ast.Constant object at 0x7da18c4cd9f0>], [<ast.List object at 0x7da18c4cdb40>, <ast.Constant object at 0x7da18c4cc160>, <ast.Constant object at 0x7da18c4cddb0>, <ast.Constant object at 0x7da18c4cc790>, <ast.Constant object at 0x7da18c4cf640>, <ast.Constant object at 0x7da18c4cd8d0>]] variable[SE] assign[=] call[name[dict], parameter[]] variable[PE] assign[=] call[name[dict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da18c4cc370>, <ast.Name object at 0x7da18c4ce020>]]] in starred[call[name[self].deeptools_bamPEFragmentSize.items, parameter[]]] begin[:] call[name[SE]][name[k]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf820>, <ast.Constant object at 0x7da18c4ccc70>, <ast.Constant object at 0x7da18c4cfbe0>, <ast.Constant object at 0x7da18c4cff70>, <ast.Constant object at 0x7da18c4ce650>, <ast.Constant object at 0x7da18c4cc670>, <ast.Constant object at 0x7da18c4cd7e0>, <ast.Constant object at 0x7da18c4ceb00>, <ast.Constant object at 0x7da18c4cc580>, <ast.Constant object at 0x7da18c4cf1f0>, <ast.Constant object at 0x7da18c4ce8f0>, <ast.Constant object at 
0x7da18c4cd300>, <ast.Constant object at 0x7da18c4cc400>, <ast.Constant object at 0x7da18c4cc7f0>], [<ast.Subscript object at 0x7da18c4ccd00>, <ast.Subscript object at 0x7da18c4cf4f0>, <ast.Subscript object at 0x7da18c4cdab0>, <ast.Subscript object at 0x7da18c4ce6b0>, <ast.Subscript object at 0x7da18c4ce050>, <ast.Subscript object at 0x7da18c4ce4a0>, <ast.Subscript object at 0x7da18c4cf250>, <ast.Subscript object at 0x7da18c4cfb50>, <ast.Subscript object at 0x7da18c4cfd00>, <ast.Subscript object at 0x7da18c4cd6c0>, <ast.Subscript object at 0x7da18c4ce9e0>, <ast.Subscript object at 0x7da18c4cc8e0>, <ast.Subscript object at 0x7da18c4ce530>, <ast.Subscript object at 0x7da18c4cdde0>]] if compare[constant[Frag. Len. Min.] <ast.NotIn object at 0x7da2590d7190> name[v]] begin[:] continue call[name[PE]][name[k]] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cd1e0>, <ast.Constant object at 0x7da18c4cf220>, <ast.Constant object at 0x7da18c4cd510>, <ast.Constant object at 0x7da18c4cdea0>, <ast.Constant object at 0x7da18c4ceb30>, <ast.Constant object at 0x7da18c4cc490>, <ast.Constant object at 0x7da18c4cdc30>, <ast.Constant object at 0x7da18c4ceda0>, <ast.Constant object at 0x7da18c4ce290>, <ast.Constant object at 0x7da18c4cd690>, <ast.Constant object at 0x7da18c4cf550>, <ast.Constant object at 0x7da18c4ce320>, <ast.Constant object at 0x7da18c4ccc40>, <ast.Constant object at 0x7da18c4cefe0>], [<ast.Subscript object at 0x7da18c4cdb10>, <ast.Subscript object at 0x7da18c4cfa60>, <ast.Subscript object at 0x7da18c4ce860>, <ast.Subscript object at 0x7da18c4cd3f0>, <ast.Subscript object at 0x7da18c4cf310>, <ast.Subscript object at 0x7da18c4ce440>, <ast.Subscript object at 0x7da18c4cedd0>, <ast.Subscript object at 0x7da18c4cded0>, <ast.Subscript object at 0x7da18c4ceb90>, <ast.Subscript object at 0x7da18c4ccbb0>, <ast.Subscript object at 0x7da18c4cdf30>, <ast.Subscript object at 0x7da18c4cd660>, <ast.Subscript object at 0x7da18c4ce6e0>, <ast.Subscript object at 
0x7da18c4cf520>]] call[name[self].add_section, parameter[]] return[call[name[len], parameter[name[self].deeptools_bamPEFragmentSize]]]
keyword[def] identifier[parse_bamPEFragmentSize] ( identifier[self] ): literal[string] identifier[self] . identifier[deeptools_bamPEFragmentSize] = identifier[dict] () keyword[for] identifier[f] keyword[in] identifier[self] . identifier[find_log_files] ( literal[string] ): identifier[parsed_data] = identifier[self] . identifier[parseBamPEFile] ( identifier[f] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[parsed_data] . identifier[items] (): keyword[if] identifier[k] keyword[in] identifier[self] . identifier[deeptools_bamPEFragmentSize] : identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[k] )) identifier[self] . identifier[deeptools_bamPEFragmentSize] [ identifier[k] ]= identifier[v] keyword[if] identifier[len] ( identifier[parsed_data] )> literal[int] : identifier[self] . identifier[add_data_source] ( identifier[f] , identifier[section] = literal[string] ) keyword[if] identifier[len] ( identifier[self] . identifier[deeptools_bamPEFragmentSize] )> literal[int] : identifier[headersSE] = identifier[OrderedDict] () identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] 
]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersSE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[config] ={ literal[string] : literal[string] } identifier[self] . identifier[add_section] ( identifier[name] = literal[string] , identifier[anchor] = literal[string] , identifier[plot] = identifier[table] . identifier[plot] ( identifier[self] . identifier[deeptools_bamPEFragmentSize] , identifier[headersSE] , identifier[config] ) ) identifier[headersPE] = identifier[OrderedDict] () identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] 
: literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[headersPE] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[PE] = keyword[False] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[deeptools_bamPEFragmentSize] . identifier[items] (): keyword[if] literal[string] keyword[in] identifier[v] : identifier[PE] = keyword[True] keyword[break] keyword[if] identifier[PE] : identifier[self] . identifier[add_section] ( identifier[name] = literal[string] , identifier[anchor] = literal[string] , identifier[plot] = identifier[table] . identifier[plot] ( identifier[self] . identifier[deeptools_bamPEFragmentSize] , identifier[headersPE] , identifier[config] )) identifier[config] ={ literal[string] :[ { literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }, { literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } ], literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } identifier[SE] = identifier[dict] () identifier[PE] = identifier[dict] () keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[deeptools_bamPEFragmentSize] . 
identifier[items] (): identifier[SE] [ identifier[k] ]={ literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ]} keyword[if] literal[string] keyword[not] keyword[in] identifier[v] : keyword[continue] identifier[PE] [ identifier[k] ]={ literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ], literal[int] : identifier[v] [ literal[string] ]} identifier[self] . identifier[add_section] ( identifier[name] = literal[string] , identifier[anchor] = literal[string] , identifier[plot] = identifier[linegraph] . identifier[plot] ([ identifier[SE] , identifier[PE] ], identifier[config] ) ) keyword[return] identifier[len] ( identifier[self] . identifier[deeptools_bamPEFragmentSize] )
def parse_bamPEFragmentSize(self): """Find bamPEFragmentSize output. Supports the --table option""" self.deeptools_bamPEFragmentSize = dict() for f in self.find_log_files('deeptools/bamPEFragmentSizeTable'): parsed_data = self.parseBamPEFile(f) for (k, v) in parsed_data.items(): if k in self.deeptools_bamPEFragmentSize: log.warning('Replacing duplicate sample {}.'.format(k)) # depends on [control=['if'], data=['k']] self.deeptools_bamPEFragmentSize[k] = v # depends on [control=['for'], data=[]] if len(parsed_data) > 0: self.add_data_source(f, section='bamPEFragmentSize') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] if len(self.deeptools_bamPEFragmentSize) > 0: headersSE = OrderedDict() headersSE['Reads Sampled'] = {'title': '# Sampled', 'description': 'Number of reads sampled', 'format': '{:,.0f}'} headersSE['Read Len. Min.'] = {'title': 'Min', 'description': 'Minimum read length', 'format': '{:,.0f}', 'shared_key': 'read_length'} headersSE['Read Len. 1st. Qu.'] = {'title': '1st Quartile', 'description': '1st quartile read length', 'format': '{:,.0f}', 'shared_key': 'read_length'} headersSE['Read Len. Mean'] = {'title': 'Mean', 'description': 'Mean read length', 'shared_key': 'read_length'} headersSE['Read Len. Median'] = {'title': 'Median', 'description': 'Median read length', 'format': '{:,.0f}', 'shared_key': 'read_length'} headersSE['Read Len. 3rd Qu.'] = {'title': '3rd Quartile', 'description': '3rd quartile read length', 'format': '{:,.0f}', 'shared_key': 'read_length'} headersSE['Read Len. Max'] = {'title': 'Max', 'description': 'Maximum read length', 'format': '{:,.0f}', 'shared_key': 'read_length'} headersSE['Read Len. Std.'] = {'title': 'Std. Dev.', 'description': 'read length standard deviation', 'shared_key': 'read_length'} headersSE['Read Med. Abs. 
Dev.'] = {'title': 'MAD', 'description': 'read length median absolute deviation', 'shared_key': 'read_length'} config = {'namespace': 'deepTools bamPEFragmentSize'} self.add_section(name='Read length metrics', anchor='deeptools_readlengths', plot=table.plot(self.deeptools_bamPEFragmentSize, headersSE, config)) headersPE = OrderedDict() headersPE['Frag. Sampled'] = {'title': '# Sampled', 'description': 'Number of fragments sampled', 'format': '{:,.0f}'} headersPE['Frag. Len. Min.'] = {'title': 'Min', 'description': 'Minimum fragment length', 'format': '{:,.0f}', 'shared_key': 'frag_length'} headersPE['Frag. Len. 1st. Qu.'] = {'title': '1st Quartile', 'description': '1st quartile fragment length', 'format': '{:,.0f}', 'shared_key': 'frag_length'} headersPE['Frag. Len. Mean'] = {'title': 'Mean', 'description': 'Mean fragment length', 'format': '{:,.0f}', 'shared_key': 'frag_length'} headersPE['Frag. Len. Median'] = {'title': 'Median', 'description': 'Median fragment length', 'format': '{:,.0f}', 'shared_key': 'frag_length'} headersPE['Frag. Len. 3rd Qu.'] = {'title': '3rd Quartile', 'description': '3rd quartile fragment length', 'format': '{:,.0f}', 'shared_key': 'frag_length'} headersPE['Frag. Len. Max'] = {'title': 'Max', 'description': 'Maximum fragment length', 'format': '{:,.0f}', 'shared_key': 'frag_length'} headersPE['Frag. Len. Std.'] = {'title': 'Std. Dev.', 'description': 'Fragment length standard deviation', 'shared_key': 'frag_length'} headersPE['Frag. Med. Abs. Dev.'] = {'title': 'MAD', 'description': 'Fragment length median absolute deviation', 'shared_key': 'frag_length'} # Are there any PE datasets? PE = False for (k, v) in self.deeptools_bamPEFragmentSize.items(): if 'Frag. Len. Min.' 
in v: PE = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if PE: self.add_section(name='Fragment length metrics', anchor='deeptools_fragmentlengths', plot=table.plot(self.deeptools_bamPEFragmentSize, headersPE, config)) # depends on [control=['if'], data=[]] # Read length plot config = {'data_labels': [{'name': 'Read length distribution', 'title': 'Read length distribution', 'ylab': 'Read length (bases)'}, {'name': 'Fragment length distribution', 'title': 'Fragment length distribution', 'ylab': 'Fragment length (bases)'}], 'id': 'deeptools_readlengthsPlot', 'title': 'deepTools: Read/Fragment length distribution', 'namespace': 'deepTools bamPEFragmentSize', 'ylab': 'Read length (bases)', 'xlab': 'Percentile'} SE = dict() PE = dict() for (k, v) in self.deeptools_bamPEFragmentSize.items(): SE[k] = {0: v['Read Len. Min.'], 10: v['Read Len. 10%'], 20: v['Read Len. 20%'], 25: v['Read Len. 1st. Qu.'], 30: v['Read Len. 30%'], 40: v['Read Len. 40%'], 50: v['Read Len. Median'], 60: v['Read Len. 60%'], 70: v['Read Len. 70%'], 75: v['Read Len. 3rd Qu.'], 80: v['Read Len. 80%'], 90: v['Read Len. 90%'], 99: v['Read Len. 99%'], 100: v['Read Len. Max']} if 'Frag. Len. Min.' not in v: continue # depends on [control=['if'], data=[]] PE[k] = {0: v['Frag. Len. Min.'], 10: v['Frag. Len. 10%'], 20: v['Frag. Len. 20%'], 25: v['Frag. Len. 1st. Qu.'], 30: v['Frag. Len. 30%'], 40: v['Frag. Len. 40%'], 50: v['Frag. Len. Median'], 60: v['Frag. Len. 60%'], 70: v['Frag. Len. 70%'], 75: v['Frag. Len. 3rd Qu.'], 80: v['Frag. Len. 80%'], 90: v['Frag. Len. 90%'], 99: v['Frag. Len. 99%'], 100: v['Frag. Len. Max']} # depends on [control=['for'], data=[]] self.add_section(name='Read/fragment length distribution', anchor='deeptools_fragmentlengths_dist', plot=linegraph.plot([SE, PE], config)) # depends on [control=['if'], data=[]] return len(self.deeptools_bamPEFragmentSize)
def rename_file_group_to_serial_nums(file_lst): """Will rename all files in file_lst to a padded serial number plus its extension :param file_lst: list of path.py paths """ file_lst.sort() c = 1 for f in file_lst: dirname = get_abspath(f.dirname()) fdest = f.joinpath(dirname, "{0:04d}".format(c) + OUTPUT_DICOM_EXTENSION) log.info('Renaming {0} to {1}'.format(f, fdest)) f.rename(fdest) c += 1
def function[rename_file_group_to_serial_nums, parameter[file_lst]]: constant[Will rename all files in file_lst to a padded serial number plus its extension :param file_lst: list of path.py paths ] call[name[file_lst].sort, parameter[]] variable[c] assign[=] constant[1] for taget[name[f]] in starred[name[file_lst]] begin[:] variable[dirname] assign[=] call[name[get_abspath], parameter[call[name[f].dirname, parameter[]]]] variable[fdest] assign[=] call[name[f].joinpath, parameter[name[dirname], binary_operation[call[constant[{0:04d}].format, parameter[name[c]]] + name[OUTPUT_DICOM_EXTENSION]]]] call[name[log].info, parameter[call[constant[Renaming {0} to {1}].format, parameter[name[f], name[fdest]]]]] call[name[f].rename, parameter[name[fdest]]] <ast.AugAssign object at 0x7da1afe3be50>
keyword[def] identifier[rename_file_group_to_serial_nums] ( identifier[file_lst] ): literal[string] identifier[file_lst] . identifier[sort] () identifier[c] = literal[int] keyword[for] identifier[f] keyword[in] identifier[file_lst] : identifier[dirname] = identifier[get_abspath] ( identifier[f] . identifier[dirname] ()) identifier[fdest] = identifier[f] . identifier[joinpath] ( identifier[dirname] , literal[string] . identifier[format] ( identifier[c] )+ identifier[OUTPUT_DICOM_EXTENSION] ) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[f] , identifier[fdest] )) identifier[f] . identifier[rename] ( identifier[fdest] ) identifier[c] += literal[int]
def rename_file_group_to_serial_nums(file_lst): """Will rename all files in file_lst to a padded serial number plus its extension :param file_lst: list of path.py paths """ file_lst.sort() c = 1 for f in file_lst: dirname = get_abspath(f.dirname()) fdest = f.joinpath(dirname, '{0:04d}'.format(c) + OUTPUT_DICOM_EXTENSION) log.info('Renaming {0} to {1}'.format(f, fdest)) f.rename(fdest) c += 1 # depends on [control=['for'], data=['f']]
def check_groups_on_profile_update(sender, instance, created, *args, **kwargs): """ Trigger check when main character or state changes. """ AutogroupsConfig.objects.update_groups_for_user(instance.user)
def function[check_groups_on_profile_update, parameter[sender, instance, created]]: constant[ Trigger check when main character or state changes. ] call[name[AutogroupsConfig].objects.update_groups_for_user, parameter[name[instance].user]]
keyword[def] identifier[check_groups_on_profile_update] ( identifier[sender] , identifier[instance] , identifier[created] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[AutogroupsConfig] . identifier[objects] . identifier[update_groups_for_user] ( identifier[instance] . identifier[user] )
def check_groups_on_profile_update(sender, instance, created, *args, **kwargs): """ Trigger check when main character or state changes. """ AutogroupsConfig.objects.update_groups_for_user(instance.user)
def deserialize(raw): """Instantiate :py:class:`ofxclient.Account` subclass from dictionary :param raw: serilized Account :param type: dict as given by :py:meth:`~ofxclient.Account.serialize` :rtype: subclass of :py:class:`ofxclient.Account` """ from ofxclient.institution import Institution institution = Institution.deserialize(raw['institution']) del raw['institution'] del raw['local_id'] if 'broker_id' in raw: a = BrokerageAccount(institution=institution, **raw) elif 'routing_number' in raw: a = BankAccount(institution=institution, **raw) else: a = CreditCardAccount(institution=institution, **raw) return a
def function[deserialize, parameter[raw]]: constant[Instantiate :py:class:`ofxclient.Account` subclass from dictionary :param raw: serilized Account :param type: dict as given by :py:meth:`~ofxclient.Account.serialize` :rtype: subclass of :py:class:`ofxclient.Account` ] from relative_module[ofxclient.institution] import module[Institution] variable[institution] assign[=] call[name[Institution].deserialize, parameter[call[name[raw]][constant[institution]]]] <ast.Delete object at 0x7da18f00f2e0> <ast.Delete object at 0x7da18f00ded0> if compare[constant[broker_id] in name[raw]] begin[:] variable[a] assign[=] call[name[BrokerageAccount], parameter[]] return[name[a]]
keyword[def] identifier[deserialize] ( identifier[raw] ): literal[string] keyword[from] identifier[ofxclient] . identifier[institution] keyword[import] identifier[Institution] identifier[institution] = identifier[Institution] . identifier[deserialize] ( identifier[raw] [ literal[string] ]) keyword[del] identifier[raw] [ literal[string] ] keyword[del] identifier[raw] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[raw] : identifier[a] = identifier[BrokerageAccount] ( identifier[institution] = identifier[institution] ,** identifier[raw] ) keyword[elif] literal[string] keyword[in] identifier[raw] : identifier[a] = identifier[BankAccount] ( identifier[institution] = identifier[institution] ,** identifier[raw] ) keyword[else] : identifier[a] = identifier[CreditCardAccount] ( identifier[institution] = identifier[institution] ,** identifier[raw] ) keyword[return] identifier[a]
def deserialize(raw): """Instantiate :py:class:`ofxclient.Account` subclass from dictionary :param raw: serilized Account :param type: dict as given by :py:meth:`~ofxclient.Account.serialize` :rtype: subclass of :py:class:`ofxclient.Account` """ from ofxclient.institution import Institution institution = Institution.deserialize(raw['institution']) del raw['institution'] del raw['local_id'] if 'broker_id' in raw: a = BrokerageAccount(institution=institution, **raw) # depends on [control=['if'], data=['raw']] elif 'routing_number' in raw: a = BankAccount(institution=institution, **raw) # depends on [control=['if'], data=['raw']] else: a = CreditCardAccount(institution=institution, **raw) return a
def resolved_task(cls, task): """Task instance representing 'task', if any""" for t in cls.tasks: if t is task or t.execute is task: return t
def function[resolved_task, parameter[cls, task]]: constant[Task instance representing 'task', if any] for taget[name[t]] in starred[name[cls].tasks] begin[:] if <ast.BoolOp object at 0x7da1b242b280> begin[:] return[name[t]]
keyword[def] identifier[resolved_task] ( identifier[cls] , identifier[task] ): literal[string] keyword[for] identifier[t] keyword[in] identifier[cls] . identifier[tasks] : keyword[if] identifier[t] keyword[is] identifier[task] keyword[or] identifier[t] . identifier[execute] keyword[is] identifier[task] : keyword[return] identifier[t]
def resolved_task(cls, task): """Task instance representing 'task', if any""" for t in cls.tasks: if t is task or t.execute is task: return t # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']]
def get_public_ip(self, addr_family=None, *args, **kwargs): """Alias for get_ip('public')""" return self.get_ip('public', addr_family, *args, **kwargs)
def function[get_public_ip, parameter[self, addr_family]]: constant[Alias for get_ip('public')] return[call[name[self].get_ip, parameter[constant[public], name[addr_family], <ast.Starred object at 0x7da1b0e33d90>]]]
keyword[def] identifier[get_public_ip] ( identifier[self] , identifier[addr_family] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[get_ip] ( literal[string] , identifier[addr_family] ,* identifier[args] ,** identifier[kwargs] )
def get_public_ip(self, addr_family=None, *args, **kwargs): """Alias for get_ip('public')""" return self.get_ip('public', addr_family, *args, **kwargs)
def get_interfaces_counters(self): """Return interfaces counters.""" query = junos_views.junos_iface_counter_table(self.device) query.get() interface_counters = {} for interface, counters in query.items(): interface_counters[interface] = { k: v if v is not None else -1 for k, v in counters } return interface_counters
def function[get_interfaces_counters, parameter[self]]: constant[Return interfaces counters.] variable[query] assign[=] call[name[junos_views].junos_iface_counter_table, parameter[name[self].device]] call[name[query].get, parameter[]] variable[interface_counters] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b1cc2920>, <ast.Name object at 0x7da1b1cc3df0>]]] in starred[call[name[query].items, parameter[]]] begin[:] call[name[interface_counters]][name[interface]] assign[=] <ast.DictComp object at 0x7da1b1cc0250> return[name[interface_counters]]
keyword[def] identifier[get_interfaces_counters] ( identifier[self] ): literal[string] identifier[query] = identifier[junos_views] . identifier[junos_iface_counter_table] ( identifier[self] . identifier[device] ) identifier[query] . identifier[get] () identifier[interface_counters] ={} keyword[for] identifier[interface] , identifier[counters] keyword[in] identifier[query] . identifier[items] (): identifier[interface_counters] [ identifier[interface] ]={ identifier[k] : identifier[v] keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] keyword[else] - literal[int] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[counters] } keyword[return] identifier[interface_counters]
def get_interfaces_counters(self): """Return interfaces counters.""" query = junos_views.junos_iface_counter_table(self.device) query.get() interface_counters = {} for (interface, counters) in query.items(): interface_counters[interface] = {k: v if v is not None else -1 for (k, v) in counters} # depends on [control=['for'], data=[]] return interface_counters
def get_aws_regions(*, force=False): """Load a list of AWS regions from the AWS static data. Args: force (`bool`): Force fetch list of regions even if we already have a cached version Returns: :obj:`list` of `str` """ from cloud_inquisitor.config import dbconfig global __regions if force or not __regions: logger.debug('Loading list of AWS regions from static data') data = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json').json() rgx = re.compile(dbconfig.get('ignored_aws_regions_regexp', default='(^cn-|GLOBAL|-gov)'), re.I) __regions = sorted(list({x['region'] for x in data['prefixes'] if not rgx.search(x['region'])})) return __regions
def function[get_aws_regions, parameter[]]: constant[Load a list of AWS regions from the AWS static data. Args: force (`bool`): Force fetch list of regions even if we already have a cached version Returns: :obj:`list` of `str` ] from relative_module[cloud_inquisitor.config] import module[dbconfig] <ast.Global object at 0x7da1b1e92d10> if <ast.BoolOp object at 0x7da1b1e92ad0> begin[:] call[name[logger].debug, parameter[constant[Loading list of AWS regions from static data]]] variable[data] assign[=] call[call[name[requests].get, parameter[constant[https://ip-ranges.amazonaws.com/ip-ranges.json]]].json, parameter[]] variable[rgx] assign[=] call[name[re].compile, parameter[call[name[dbconfig].get, parameter[constant[ignored_aws_regions_regexp]]], name[re].I]] variable[__regions] assign[=] call[name[sorted], parameter[call[name[list], parameter[<ast.SetComp object at 0x7da1b20f91b0>]]]] return[name[__regions]]
keyword[def] identifier[get_aws_regions] (*, identifier[force] = keyword[False] ): literal[string] keyword[from] identifier[cloud_inquisitor] . identifier[config] keyword[import] identifier[dbconfig] keyword[global] identifier[__regions] keyword[if] identifier[force] keyword[or] keyword[not] identifier[__regions] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[data] = identifier[requests] . identifier[get] ( literal[string] ). identifier[json] () identifier[rgx] = identifier[re] . identifier[compile] ( identifier[dbconfig] . identifier[get] ( literal[string] , identifier[default] = literal[string] ), identifier[re] . identifier[I] ) identifier[__regions] = identifier[sorted] ( identifier[list] ({ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[data] [ literal[string] ] keyword[if] keyword[not] identifier[rgx] . identifier[search] ( identifier[x] [ literal[string] ])})) keyword[return] identifier[__regions]
def get_aws_regions(*, force=False): """Load a list of AWS regions from the AWS static data. Args: force (`bool`): Force fetch list of regions even if we already have a cached version Returns: :obj:`list` of `str` """ from cloud_inquisitor.config import dbconfig global __regions if force or not __regions: logger.debug('Loading list of AWS regions from static data') data = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json').json() rgx = re.compile(dbconfig.get('ignored_aws_regions_regexp', default='(^cn-|GLOBAL|-gov)'), re.I) __regions = sorted(list({x['region'] for x in data['prefixes'] if not rgx.search(x['region'])})) # depends on [control=['if'], data=[]] return __regions
def email_address_to_list(email_address): """Convert an email address to a list.""" realname, address = email.utils.parseaddr(email_address) return ( [address, realname] if realname and address else [email_address, email_address] )
def function[email_address_to_list, parameter[email_address]]: constant[Convert an email address to a list.] <ast.Tuple object at 0x7da204564700> assign[=] call[name[email].utils.parseaddr, parameter[name[email_address]]] return[<ast.IfExp object at 0x7da204564df0>]
keyword[def] identifier[email_address_to_list] ( identifier[email_address] ): literal[string] identifier[realname] , identifier[address] = identifier[email] . identifier[utils] . identifier[parseaddr] ( identifier[email_address] ) keyword[return] ( [ identifier[address] , identifier[realname] ] keyword[if] identifier[realname] keyword[and] identifier[address] keyword[else] [ identifier[email_address] , identifier[email_address] ] )
def email_address_to_list(email_address): """Convert an email address to a list.""" (realname, address) = email.utils.parseaddr(email_address) return [address, realname] if realname and address else [email_address, email_address]
def validate_services_by_name(self, sentry_services): """Validate system service status by service name, automatically detecting init system based on Ubuntu release codename. :param sentry_services: dict with sentry keys and svc list values :returns: None if successful, Failure string message otherwise """ self.log.debug('Checking status of system services...') # Point at which systemd became a thing systemd_switch = self.ubuntu_releases.index('vivid') for sentry_unit, services_list in six.iteritems(sentry_services): # Get lsb_release codename from unit release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) if ret: return ret for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or service_name in ['rabbitmq-server', 'apache2', 'memcached']): # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) output, code = sentry_unit.run(cmd) service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) output, code = sentry_unit.run(cmd) service_running = code == 0 and "start/running" in output self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) if not service_running: return u"command `{}` returned {} {}".format( cmd, output, str(code)) return None
def function[validate_services_by_name, parameter[self, sentry_services]]: constant[Validate system service status by service name, automatically detecting init system based on Ubuntu release codename. :param sentry_services: dict with sentry keys and svc list values :returns: None if successful, Failure string message otherwise ] call[name[self].log.debug, parameter[constant[Checking status of system services...]]] variable[systemd_switch] assign[=] call[name[self].ubuntu_releases.index, parameter[constant[vivid]]] for taget[tuple[[<ast.Name object at 0x7da18bc73820>, <ast.Name object at 0x7da18bc72410>]]] in starred[call[name[six].iteritems, parameter[name[sentry_services]]]] begin[:] <ast.Tuple object at 0x7da18bc71ff0> assign[=] call[name[self].get_ubuntu_release_from_sentry, parameter[name[sentry_unit]]] if name[ret] begin[:] return[name[ret]] for taget[name[service_name]] in starred[name[services_list]] begin[:] if <ast.BoolOp object at 0x7da18bc73910> begin[:] variable[cmd] assign[=] call[constant[sudo service {} status].format, parameter[name[service_name]]] <ast.Tuple object at 0x7da18bc70640> assign[=] call[name[sentry_unit].run, parameter[name[cmd]]] variable[service_running] assign[=] compare[name[code] equal[==] constant[0]] call[name[self].log.debug, parameter[call[constant[{} `{}` returned {}].format, parameter[call[name[sentry_unit].info][constant[unit_name]], name[cmd], name[code]]]]] if <ast.UnaryOp object at 0x7da18bc72a70> begin[:] return[call[constant[command `{}` returned {} {}].format, parameter[name[cmd], name[output], call[name[str], parameter[name[code]]]]]] return[constant[None]]
keyword[def] identifier[validate_services_by_name] ( identifier[self] , identifier[sentry_services] ): literal[string] identifier[self] . identifier[log] . identifier[debug] ( literal[string] ) identifier[systemd_switch] = identifier[self] . identifier[ubuntu_releases] . identifier[index] ( literal[string] ) keyword[for] identifier[sentry_unit] , identifier[services_list] keyword[in] identifier[six] . identifier[iteritems] ( identifier[sentry_services] ): identifier[release] , identifier[ret] = identifier[self] . identifier[get_ubuntu_release_from_sentry] ( identifier[sentry_unit] ) keyword[if] identifier[ret] : keyword[return] identifier[ret] keyword[for] identifier[service_name] keyword[in] identifier[services_list] : keyword[if] ( identifier[self] . identifier[ubuntu_releases] . identifier[index] ( identifier[release] )>= identifier[systemd_switch] keyword[or] identifier[service_name] keyword[in] [ literal[string] , literal[string] , literal[string] ]): identifier[cmd] = literal[string] . identifier[format] ( identifier[service_name] ) identifier[output] , identifier[code] = identifier[sentry_unit] . identifier[run] ( identifier[cmd] ) identifier[service_running] = identifier[code] == literal[int] keyword[elif] identifier[self] . identifier[ubuntu_releases] . identifier[index] ( identifier[release] )< identifier[systemd_switch] : identifier[cmd] = literal[string] . identifier[format] ( identifier[service_name] ) identifier[output] , identifier[code] = identifier[sentry_unit] . identifier[run] ( identifier[cmd] ) identifier[service_running] = identifier[code] == literal[int] keyword[and] literal[string] keyword[in] identifier[output] identifier[self] . identifier[log] . identifier[debug] ( literal[string] literal[string] . identifier[format] ( identifier[sentry_unit] . identifier[info] [ literal[string] ], identifier[cmd] , identifier[code] )) keyword[if] keyword[not] identifier[service_running] : keyword[return] literal[string] . 
identifier[format] ( identifier[cmd] , identifier[output] , identifier[str] ( identifier[code] )) keyword[return] keyword[None]
def validate_services_by_name(self, sentry_services): """Validate system service status by service name, automatically detecting init system based on Ubuntu release codename. :param sentry_services: dict with sentry keys and svc list values :returns: None if successful, Failure string message otherwise """ self.log.debug('Checking status of system services...') # Point at which systemd became a thing systemd_switch = self.ubuntu_releases.index('vivid') for (sentry_unit, services_list) in six.iteritems(sentry_services): # Get lsb_release codename from unit (release, ret) = self.get_ubuntu_release_from_sentry(sentry_unit) if ret: return ret # depends on [control=['if'], data=[]] for service_name in services_list: if self.ubuntu_releases.index(release) >= systemd_switch or service_name in ['rabbitmq-server', 'apache2', 'memcached']: # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) (output, code) = sentry_unit.run(cmd) service_running = code == 0 # depends on [control=['if'], data=[]] elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) (output, code) = sentry_unit.run(cmd) service_running = code == 0 and 'start/running' in output # depends on [control=['if'], data=[]] self.log.debug('{} `{}` returned {}'.format(sentry_unit.info['unit_name'], cmd, code)) if not service_running: return u'command `{}` returned {} {}'.format(cmd, output, str(code)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['service_name']] # depends on [control=['for'], data=[]] return None
async def multipart_parser(request, file_handler=default_file_handler): """ :param file_handler: callable to save file, this should always return the file path :return: dictionary containing files and data """ multipart_data = { 'files': {}, 'data': {} } if request.content_type == 'multipart/form-data': reader = MultipartReader.from_response(request) while True: part = await reader.next() if part is None: break if isinstance(part, BodyPartReader): if part.filename: # if body is binary file if file_handler: # in case we just want to parse data and not save file actually e.g. in validator file_data = await part.read(decode=True) file_data = part.decode(file_data) file_path = await file_handler(part.filename, file_data, part.headers[CONTENT_TYPE]) else: file_path = part.filename multipart_data['files'][part.name] = file_path elif part.text(): # if body is text text = await part.text() multipart_data['data'][part.name] = text else: # if body is json or form (not text), not handling this continue else: # if part is recursive multipart , not handling this right now # TODO: do recursive call to handle this continue else: try: multipart_data['data'] = await request.json() except json.JSONDecodeError: pass return multipart_data
<ast.AsyncFunctionDef object at 0x7da20e9b2770>
keyword[async] keyword[def] identifier[multipart_parser] ( identifier[request] , identifier[file_handler] = identifier[default_file_handler] ): literal[string] identifier[multipart_data] ={ literal[string] :{}, literal[string] :{} } keyword[if] identifier[request] . identifier[content_type] == literal[string] : identifier[reader] = identifier[MultipartReader] . identifier[from_response] ( identifier[request] ) keyword[while] keyword[True] : identifier[part] = keyword[await] identifier[reader] . identifier[next] () keyword[if] identifier[part] keyword[is] keyword[None] : keyword[break] keyword[if] identifier[isinstance] ( identifier[part] , identifier[BodyPartReader] ): keyword[if] identifier[part] . identifier[filename] : keyword[if] identifier[file_handler] : identifier[file_data] = keyword[await] identifier[part] . identifier[read] ( identifier[decode] = keyword[True] ) identifier[file_data] = identifier[part] . identifier[decode] ( identifier[file_data] ) identifier[file_path] = keyword[await] identifier[file_handler] ( identifier[part] . identifier[filename] , identifier[file_data] , identifier[part] . identifier[headers] [ identifier[CONTENT_TYPE] ]) keyword[else] : identifier[file_path] = identifier[part] . identifier[filename] identifier[multipart_data] [ literal[string] ][ identifier[part] . identifier[name] ]= identifier[file_path] keyword[elif] identifier[part] . identifier[text] (): identifier[text] = keyword[await] identifier[part] . identifier[text] () identifier[multipart_data] [ literal[string] ][ identifier[part] . identifier[name] ]= identifier[text] keyword[else] : keyword[continue] keyword[else] : keyword[continue] keyword[else] : keyword[try] : identifier[multipart_data] [ literal[string] ]= keyword[await] identifier[request] . identifier[json] () keyword[except] identifier[json] . identifier[JSONDecodeError] : keyword[pass] keyword[return] identifier[multipart_data]
async def multipart_parser(request, file_handler=default_file_handler): """ :param file_handler: callable to save file, this should always return the file path :return: dictionary containing files and data """ multipart_data = {'files': {}, 'data': {}} if request.content_type == 'multipart/form-data': reader = MultipartReader.from_response(request) while True: part = await reader.next() if part is None: break # depends on [control=['if'], data=[]] if isinstance(part, BodyPartReader): if part.filename: # if body is binary file if file_handler: # in case we just want to parse data and not save file actually e.g. in validator file_data = await part.read(decode=True) file_data = part.decode(file_data) file_path = await file_handler(part.filename, file_data, part.headers[CONTENT_TYPE]) # depends on [control=['if'], data=[]] else: file_path = part.filename multipart_data['files'][part.name] = file_path # depends on [control=['if'], data=[]] elif part.text(): # if body is text text = await part.text() multipart_data['data'][part.name] = text # depends on [control=['if'], data=[]] else: # if body is json or form (not text), not handling this continue # depends on [control=['if'], data=[]] else: # if part is recursive multipart , not handling this right now # TODO: do recursive call to handle this continue # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] else: try: multipart_data['data'] = await request.json() # depends on [control=['try'], data=[]] except json.JSONDecodeError: pass # depends on [control=['except'], data=[]] return multipart_data
def check_lifecycle(self): """ Tests if the state of the component must be updated, based on its own state and on the state of its dependencies """ with self._lock: # Validation flags was_valid = self.state == StoredInstance.VALID can_validate = self.state not in ( StoredInstance.VALIDATING, StoredInstance.VALID, ) # Test the validity of all handlers handlers_valid = self.__safe_handlers_callback( "is_valid", break_on_false=True ) if was_valid and not handlers_valid: # A dependency is missing self.invalidate(True) elif ( can_validate and handlers_valid and self._ipopo_service.running ): # We're all good self.validate(True)
def function[check_lifecycle, parameter[self]]: constant[ Tests if the state of the component must be updated, based on its own state and on the state of its dependencies ] with name[self]._lock begin[:] variable[was_valid] assign[=] compare[name[self].state equal[==] name[StoredInstance].VALID] variable[can_validate] assign[=] compare[name[self].state <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da1b0401600>, <ast.Attribute object at 0x7da1b0402a40>]]] variable[handlers_valid] assign[=] call[name[self].__safe_handlers_callback, parameter[constant[is_valid]]] if <ast.BoolOp object at 0x7da20c6e7f70> begin[:] call[name[self].invalidate, parameter[constant[True]]]
keyword[def] identifier[check_lifecycle] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[_lock] : identifier[was_valid] = identifier[self] . identifier[state] == identifier[StoredInstance] . identifier[VALID] identifier[can_validate] = identifier[self] . identifier[state] keyword[not] keyword[in] ( identifier[StoredInstance] . identifier[VALIDATING] , identifier[StoredInstance] . identifier[VALID] , ) identifier[handlers_valid] = identifier[self] . identifier[__safe_handlers_callback] ( literal[string] , identifier[break_on_false] = keyword[True] ) keyword[if] identifier[was_valid] keyword[and] keyword[not] identifier[handlers_valid] : identifier[self] . identifier[invalidate] ( keyword[True] ) keyword[elif] ( identifier[can_validate] keyword[and] identifier[handlers_valid] keyword[and] identifier[self] . identifier[_ipopo_service] . identifier[running] ): identifier[self] . identifier[validate] ( keyword[True] )
def check_lifecycle(self): """ Tests if the state of the component must be updated, based on its own state and on the state of its dependencies """ with self._lock: # Validation flags was_valid = self.state == StoredInstance.VALID can_validate = self.state not in (StoredInstance.VALIDATING, StoredInstance.VALID) # Test the validity of all handlers handlers_valid = self.__safe_handlers_callback('is_valid', break_on_false=True) if was_valid and (not handlers_valid): # A dependency is missing self.invalidate(True) # depends on [control=['if'], data=[]] elif can_validate and handlers_valid and self._ipopo_service.running: # We're all good self.validate(True) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
def remove_all_matching(self, address=None, name=None): """ Remove all HostsEntry instances from the Hosts object where the supplied ip address or name matches :param address: An ipv4 or ipv6 address :param name: A host name :return: None """ if self.entries: if address and name: func = lambda entry: not entry.is_real_entry() or (entry.address != address and name not in entry.names) elif address: func = lambda entry: not entry.is_real_entry() or entry.address != address elif name: func = lambda entry: not entry.is_real_entry() or name not in entry.names else: raise ValueError('No address or name was specified for removal.') self.entries = list(filter(func, self.entries))
def function[remove_all_matching, parameter[self, address, name]]: constant[ Remove all HostsEntry instances from the Hosts object where the supplied ip address or name matches :param address: An ipv4 or ipv6 address :param name: A host name :return: None ] if name[self].entries begin[:] if <ast.BoolOp object at 0x7da1b07813f0> begin[:] variable[func] assign[=] <ast.Lambda object at 0x7da1b0782c80> name[self].entries assign[=] call[name[list], parameter[call[name[filter], parameter[name[func], name[self].entries]]]]
keyword[def] identifier[remove_all_matching] ( identifier[self] , identifier[address] = keyword[None] , identifier[name] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[entries] : keyword[if] identifier[address] keyword[and] identifier[name] : identifier[func] = keyword[lambda] identifier[entry] : keyword[not] identifier[entry] . identifier[is_real_entry] () keyword[or] ( identifier[entry] . identifier[address] != identifier[address] keyword[and] identifier[name] keyword[not] keyword[in] identifier[entry] . identifier[names] ) keyword[elif] identifier[address] : identifier[func] = keyword[lambda] identifier[entry] : keyword[not] identifier[entry] . identifier[is_real_entry] () keyword[or] identifier[entry] . identifier[address] != identifier[address] keyword[elif] identifier[name] : identifier[func] = keyword[lambda] identifier[entry] : keyword[not] identifier[entry] . identifier[is_real_entry] () keyword[or] identifier[name] keyword[not] keyword[in] identifier[entry] . identifier[names] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[entries] = identifier[list] ( identifier[filter] ( identifier[func] , identifier[self] . identifier[entries] ))
def remove_all_matching(self, address=None, name=None): """ Remove all HostsEntry instances from the Hosts object where the supplied ip address or name matches :param address: An ipv4 or ipv6 address :param name: A host name :return: None """ if self.entries: if address and name: func = lambda entry: not entry.is_real_entry() or (entry.address != address and name not in entry.names) # depends on [control=['if'], data=[]] elif address: func = lambda entry: not entry.is_real_entry() or entry.address != address # depends on [control=['if'], data=[]] elif name: func = lambda entry: not entry.is_real_entry() or name not in entry.names # depends on [control=['if'], data=[]] else: raise ValueError('No address or name was specified for removal.') self.entries = list(filter(func, self.entries)) # depends on [control=['if'], data=[]]
def substrings_indexes(seq, reverse=False): """Yield all substrings and their positions in *seq* The items yielded will be a tuple of the form ``(substr, i, j)``, where ``substr == seq[i:j]``. This function only works for iterables that support slicing, such as ``str`` objects. >>> for item in substrings_indexes('more'): ... print(item) ('m', 0, 1) ('o', 1, 2) ('r', 2, 3) ('e', 3, 4) ('mo', 0, 2) ('or', 1, 3) ('re', 2, 4) ('mor', 0, 3) ('ore', 1, 4) ('more', 0, 4) Set *reverse* to ``True`` to yield the same items in the opposite order. """ r = range(1, len(seq) + 1) if reverse: r = reversed(r) return ( (seq[i:i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) )
def function[substrings_indexes, parameter[seq, reverse]]: constant[Yield all substrings and their positions in *seq* The items yielded will be a tuple of the form ``(substr, i, j)``, where ``substr == seq[i:j]``. This function only works for iterables that support slicing, such as ``str`` objects. >>> for item in substrings_indexes('more'): ... print(item) ('m', 0, 1) ('o', 1, 2) ('r', 2, 3) ('e', 3, 4) ('mo', 0, 2) ('or', 1, 3) ('re', 2, 4) ('mor', 0, 3) ('ore', 1, 4) ('more', 0, 4) Set *reverse* to ``True`` to yield the same items in the opposite order. ] variable[r] assign[=] call[name[range], parameter[constant[1], binary_operation[call[name[len], parameter[name[seq]]] + constant[1]]]] if name[reverse] begin[:] variable[r] assign[=] call[name[reversed], parameter[name[r]]] return[<ast.GeneratorExp object at 0x7da1b22ad510>]
keyword[def] identifier[substrings_indexes] ( identifier[seq] , identifier[reverse] = keyword[False] ): literal[string] identifier[r] = identifier[range] ( literal[int] , identifier[len] ( identifier[seq] )+ literal[int] ) keyword[if] identifier[reverse] : identifier[r] = identifier[reversed] ( identifier[r] ) keyword[return] ( ( identifier[seq] [ identifier[i] : identifier[i] + identifier[L] ], identifier[i] , identifier[i] + identifier[L] ) keyword[for] identifier[L] keyword[in] identifier[r] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[seq] )- identifier[L] + literal[int] ) )
def substrings_indexes(seq, reverse=False): """Yield all substrings and their positions in *seq* The items yielded will be a tuple of the form ``(substr, i, j)``, where ``substr == seq[i:j]``. This function only works for iterables that support slicing, such as ``str`` objects. >>> for item in substrings_indexes('more'): ... print(item) ('m', 0, 1) ('o', 1, 2) ('r', 2, 3) ('e', 3, 4) ('mo', 0, 2) ('or', 1, 3) ('re', 2, 4) ('mor', 0, 3) ('ore', 1, 4) ('more', 0, 4) Set *reverse* to ``True`` to yield the same items in the opposite order. """ r = range(1, len(seq) + 1) if reverse: r = reversed(r) # depends on [control=['if'], data=[]] return ((seq[i:i + L], i, i + L) for L in r for i in range(len(seq) - L + 1))
def gather_candidates(self): """Gather candidates from the slave environments. The candidates are stored in :attr:`candidates`, overriding any previous candidates. """ async def slave_task(addr): r_manager = await self.env.connect(addr) return await r_manager.get_candidates() if self._single_env: self._candidates = self.env.candidates else: mgrs = self.get_managers() tasks = create_tasks(slave_task, mgrs) self._candidates = run(tasks)
def function[gather_candidates, parameter[self]]: constant[Gather candidates from the slave environments. The candidates are stored in :attr:`candidates`, overriding any previous candidates. ] <ast.AsyncFunctionDef object at 0x7da2046238b0> if name[self]._single_env begin[:] name[self]._candidates assign[=] name[self].env.candidates
keyword[def] identifier[gather_candidates] ( identifier[self] ): literal[string] keyword[async] keyword[def] identifier[slave_task] ( identifier[addr] ): identifier[r_manager] = keyword[await] identifier[self] . identifier[env] . identifier[connect] ( identifier[addr] ) keyword[return] keyword[await] identifier[r_manager] . identifier[get_candidates] () keyword[if] identifier[self] . identifier[_single_env] : identifier[self] . identifier[_candidates] = identifier[self] . identifier[env] . identifier[candidates] keyword[else] : identifier[mgrs] = identifier[self] . identifier[get_managers] () identifier[tasks] = identifier[create_tasks] ( identifier[slave_task] , identifier[mgrs] ) identifier[self] . identifier[_candidates] = identifier[run] ( identifier[tasks] )
def gather_candidates(self): """Gather candidates from the slave environments. The candidates are stored in :attr:`candidates`, overriding any previous candidates. """ async def slave_task(addr): r_manager = await self.env.connect(addr) return await r_manager.get_candidates() if self._single_env: self._candidates = self.env.candidates # depends on [control=['if'], data=[]] else: mgrs = self.get_managers() tasks = create_tasks(slave_task, mgrs) self._candidates = run(tasks)
def BuildFindSpecs(self, environment_variables=None): """Build find specification from a filter file. Args: environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables. Returns: list[dfvfs.FindSpec]: find specification. """ path_attributes = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.lower() attribute_value = environment_variable.value if not isinstance(attribute_value, py2to3.STRING_TYPES): continue # Remove the drive letter. if len(attribute_value) > 2 and attribute_value[1] == ':': _, _, attribute_value = attribute_value.rpartition(':') if attribute_value.startswith('\\'): attribute_value = attribute_value.replace('\\', '/') path_attributes[attribute_name] = attribute_value find_specs = [] with open(self._path, 'r') as file_object: for line in file_object: line = line.strip() if line.startswith('#'): continue if path_attributes: try: line = line.format(**path_attributes) except KeyError as exception: logger.error(( 'Unable to expand path filter: {0:s} with error: ' '{1!s}').format(line, exception)) continue if not line.startswith('/'): logger.warning(( 'The path filter must be defined as an absolute path: ' '{0:s}').format(line)) continue # Convert the path filters into a list of path segments and strip # the root path segment. path_segments = line.split('/') path_segments.pop(0) if not path_segments[-1]: logger.warning( 'Empty last path segment in path filter: {0:s}'.format(line)) continue find_spec = file_system_searcher.FindSpec( location_regex=path_segments, case_sensitive=False) find_specs.append(find_spec) return find_specs
def function[BuildFindSpecs, parameter[self, environment_variables]]: constant[Build find specification from a filter file. Args: environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables. Returns: list[dfvfs.FindSpec]: find specification. ] variable[path_attributes] assign[=] dictionary[[], []] if name[environment_variables] begin[:] for taget[name[environment_variable]] in starred[name[environment_variables]] begin[:] variable[attribute_name] assign[=] call[name[environment_variable].name.lower, parameter[]] variable[attribute_value] assign[=] name[environment_variable].value if <ast.UnaryOp object at 0x7da20c9925c0> begin[:] continue if <ast.BoolOp object at 0x7da20c9922c0> begin[:] <ast.Tuple object at 0x7da20c9906a0> assign[=] call[name[attribute_value].rpartition, parameter[constant[:]]] if call[name[attribute_value].startswith, parameter[constant[\]]] begin[:] variable[attribute_value] assign[=] call[name[attribute_value].replace, parameter[constant[\], constant[/]]] call[name[path_attributes]][name[attribute_name]] assign[=] name[attribute_value] variable[find_specs] assign[=] list[[]] with call[name[open], parameter[name[self]._path, constant[r]]] begin[:] for taget[name[line]] in starred[name[file_object]] begin[:] variable[line] assign[=] call[name[line].strip, parameter[]] if call[name[line].startswith, parameter[constant[#]]] begin[:] continue if name[path_attributes] begin[:] <ast.Try object at 0x7da20c993ac0> if <ast.UnaryOp object at 0x7da20c991a20> begin[:] call[name[logger].warning, parameter[call[constant[The path filter must be defined as an absolute path: {0:s}].format, parameter[name[line]]]]] continue variable[path_segments] assign[=] call[name[line].split, parameter[constant[/]]] call[name[path_segments].pop, parameter[constant[0]]] if <ast.UnaryOp object at 0x7da20c993730> begin[:] call[name[logger].warning, parameter[call[constant[Empty last path segment in path filter: {0:s}].format, 
parameter[name[line]]]]] continue variable[find_spec] assign[=] call[name[file_system_searcher].FindSpec, parameter[]] call[name[find_specs].append, parameter[name[find_spec]]] return[name[find_specs]]
keyword[def] identifier[BuildFindSpecs] ( identifier[self] , identifier[environment_variables] = keyword[None] ): literal[string] identifier[path_attributes] ={} keyword[if] identifier[environment_variables] : keyword[for] identifier[environment_variable] keyword[in] identifier[environment_variables] : identifier[attribute_name] = identifier[environment_variable] . identifier[name] . identifier[lower] () identifier[attribute_value] = identifier[environment_variable] . identifier[value] keyword[if] keyword[not] identifier[isinstance] ( identifier[attribute_value] , identifier[py2to3] . identifier[STRING_TYPES] ): keyword[continue] keyword[if] identifier[len] ( identifier[attribute_value] )> literal[int] keyword[and] identifier[attribute_value] [ literal[int] ]== literal[string] : identifier[_] , identifier[_] , identifier[attribute_value] = identifier[attribute_value] . identifier[rpartition] ( literal[string] ) keyword[if] identifier[attribute_value] . identifier[startswith] ( literal[string] ): identifier[attribute_value] = identifier[attribute_value] . identifier[replace] ( literal[string] , literal[string] ) identifier[path_attributes] [ identifier[attribute_name] ]= identifier[attribute_value] identifier[find_specs] =[] keyword[with] identifier[open] ( identifier[self] . identifier[_path] , literal[string] ) keyword[as] identifier[file_object] : keyword[for] identifier[line] keyword[in] identifier[file_object] : identifier[line] = identifier[line] . identifier[strip] () keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[continue] keyword[if] identifier[path_attributes] : keyword[try] : identifier[line] = identifier[line] . identifier[format] (** identifier[path_attributes] ) keyword[except] identifier[KeyError] keyword[as] identifier[exception] : identifier[logger] . identifier[error] (( literal[string] literal[string] ). 
identifier[format] ( identifier[line] , identifier[exception] )) keyword[continue] keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ): identifier[logger] . identifier[warning] (( literal[string] literal[string] ). identifier[format] ( identifier[line] )) keyword[continue] identifier[path_segments] = identifier[line] . identifier[split] ( literal[string] ) identifier[path_segments] . identifier[pop] ( literal[int] ) keyword[if] keyword[not] identifier[path_segments] [- literal[int] ]: identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[line] )) keyword[continue] identifier[find_spec] = identifier[file_system_searcher] . identifier[FindSpec] ( identifier[location_regex] = identifier[path_segments] , identifier[case_sensitive] = keyword[False] ) identifier[find_specs] . identifier[append] ( identifier[find_spec] ) keyword[return] identifier[find_specs]
def BuildFindSpecs(self, environment_variables=None): """Build find specification from a filter file. Args: environment_variables (Optional[list[EnvironmentVariableArtifact]]): environment variables. Returns: list[dfvfs.FindSpec]: find specification. """ path_attributes = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.lower() attribute_value = environment_variable.value if not isinstance(attribute_value, py2to3.STRING_TYPES): continue # depends on [control=['if'], data=[]] # Remove the drive letter. if len(attribute_value) > 2 and attribute_value[1] == ':': (_, _, attribute_value) = attribute_value.rpartition(':') # depends on [control=['if'], data=[]] if attribute_value.startswith('\\'): attribute_value = attribute_value.replace('\\', '/') # depends on [control=['if'], data=[]] path_attributes[attribute_name] = attribute_value # depends on [control=['for'], data=['environment_variable']] # depends on [control=['if'], data=[]] find_specs = [] with open(self._path, 'r') as file_object: for line in file_object: line = line.strip() if line.startswith('#'): continue # depends on [control=['if'], data=[]] if path_attributes: try: line = line.format(**path_attributes) # depends on [control=['try'], data=[]] except KeyError as exception: logger.error('Unable to expand path filter: {0:s} with error: {1!s}'.format(line, exception)) continue # depends on [control=['except'], data=['exception']] # depends on [control=['if'], data=[]] if not line.startswith('/'): logger.warning('The path filter must be defined as an absolute path: {0:s}'.format(line)) continue # depends on [control=['if'], data=[]] # Convert the path filters into a list of path segments and strip # the root path segment. 
path_segments = line.split('/') path_segments.pop(0) if not path_segments[-1]: logger.warning('Empty last path segment in path filter: {0:s}'.format(line)) continue # depends on [control=['if'], data=[]] find_spec = file_system_searcher.FindSpec(location_regex=path_segments, case_sensitive=False) find_specs.append(find_spec) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['file_object']] return find_specs
def verts_str(verts, pad=1): r""" makes a string from a list of integer verticies """ if verts is None: return 'None' fmtstr = ', '.join(['%' + six.text_type(pad) + 'd' + ', %' + six.text_type(pad) + 'd'] * 1) return ', '.join(['(' + fmtstr % vert + ')' for vert in verts])
def function[verts_str, parameter[verts, pad]]: constant[ makes a string from a list of integer verticies ] if compare[name[verts] is constant[None]] begin[:] return[constant[None]] variable[fmtstr] assign[=] call[constant[, ].join, parameter[binary_operation[list[[<ast.BinOp object at 0x7da1b24c42e0>]] * constant[1]]]] return[call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b24c5960>]]]
keyword[def] identifier[verts_str] ( identifier[verts] , identifier[pad] = literal[int] ): literal[string] keyword[if] identifier[verts] keyword[is] keyword[None] : keyword[return] literal[string] identifier[fmtstr] = literal[string] . identifier[join] ([ literal[string] + identifier[six] . identifier[text_type] ( identifier[pad] )+ literal[string] + literal[string] + identifier[six] . identifier[text_type] ( identifier[pad] )+ literal[string] ]* literal[int] ) keyword[return] literal[string] . identifier[join] ([ literal[string] + identifier[fmtstr] % identifier[vert] + literal[string] keyword[for] identifier[vert] keyword[in] identifier[verts] ])
def verts_str(verts, pad=1): """ makes a string from a list of integer verticies """ if verts is None: return 'None' # depends on [control=['if'], data=[]] fmtstr = ', '.join(['%' + six.text_type(pad) + 'd' + ', %' + six.text_type(pad) + 'd'] * 1) return ', '.join(['(' + fmtstr % vert + ')' for vert in verts])
def _prompt_choice(var_name, options): ''' Prompt the user to choose between a list of options, index each one by adding an enumerator based on https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py#L51 :param var_name: The question to ask the user :type var_name: ``str`` :param options: A list of options :type options: ``list`` of ``tupple`` :rtype: ``tuple`` :returns: The selected user ''' choice_map = OrderedDict( ('{0}'.format(i), value) for i, value in enumerate(options, 1) if value[0] != 'test' ) choices = choice_map.keys() default = '1' choice_lines = ['{0} - {1} - {2}'.format(c[0], c[1][0], c[1][1]) for c in choice_map.items()] prompt = '\n'.join(( 'Select {0}:'.format(var_name), '\n'.join(choice_lines), 'Choose from {0}'.format(', '.join(choices)) )) user_choice = click.prompt( prompt, type=click.Choice(choices), default=default ) return choice_map[user_choice]
def function[_prompt_choice, parameter[var_name, options]]: constant[ Prompt the user to choose between a list of options, index each one by adding an enumerator based on https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py#L51 :param var_name: The question to ask the user :type var_name: ``str`` :param options: A list of options :type options: ``list`` of ``tupple`` :rtype: ``tuple`` :returns: The selected user ] variable[choice_map] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da18ede6800>]] variable[choices] assign[=] call[name[choice_map].keys, parameter[]] variable[default] assign[=] constant[1] variable[choice_lines] assign[=] <ast.ListComp object at 0x7da18c4cd210> variable[prompt] assign[=] call[constant[ ].join, parameter[tuple[[<ast.Call object at 0x7da18c4cdc90>, <ast.Call object at 0x7da18c4cd780>, <ast.Call object at 0x7da18c4cdba0>]]]] variable[user_choice] assign[=] call[name[click].prompt, parameter[name[prompt]]] return[call[name[choice_map]][name[user_choice]]]
keyword[def] identifier[_prompt_choice] ( identifier[var_name] , identifier[options] ): literal[string] identifier[choice_map] = identifier[OrderedDict] ( ( literal[string] . identifier[format] ( identifier[i] ), identifier[value] ) keyword[for] identifier[i] , identifier[value] keyword[in] identifier[enumerate] ( identifier[options] , literal[int] ) keyword[if] identifier[value] [ literal[int] ]!= literal[string] ) identifier[choices] = identifier[choice_map] . identifier[keys] () identifier[default] = literal[string] identifier[choice_lines] =[ literal[string] . identifier[format] ( identifier[c] [ literal[int] ], identifier[c] [ literal[int] ][ literal[int] ], identifier[c] [ literal[int] ][ literal[int] ]) keyword[for] identifier[c] keyword[in] identifier[choice_map] . identifier[items] ()] identifier[prompt] = literal[string] . identifier[join] (( literal[string] . identifier[format] ( identifier[var_name] ), literal[string] . identifier[join] ( identifier[choice_lines] ), literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[choices] )) )) identifier[user_choice] = identifier[click] . identifier[prompt] ( identifier[prompt] , identifier[type] = identifier[click] . identifier[Choice] ( identifier[choices] ), identifier[default] = identifier[default] ) keyword[return] identifier[choice_map] [ identifier[user_choice] ]
def _prompt_choice(var_name, options): """ Prompt the user to choose between a list of options, index each one by adding an enumerator based on https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py#L51 :param var_name: The question to ask the user :type var_name: ``str`` :param options: A list of options :type options: ``list`` of ``tupple`` :rtype: ``tuple`` :returns: The selected user """ choice_map = OrderedDict((('{0}'.format(i), value) for (i, value) in enumerate(options, 1) if value[0] != 'test')) choices = choice_map.keys() default = '1' choice_lines = ['{0} - {1} - {2}'.format(c[0], c[1][0], c[1][1]) for c in choice_map.items()] prompt = '\n'.join(('Select {0}:'.format(var_name), '\n'.join(choice_lines), 'Choose from {0}'.format(', '.join(choices)))) user_choice = click.prompt(prompt, type=click.Choice(choices), default=default) return choice_map[user_choice]
def u128(self, name, value=None, align=None): """Add an unsigned 16 byte integer field to template. This is an convenience method that simply calls `Uint` keyword with predefined length.""" self.uint(16, name, value, align)
def function[u128, parameter[self, name, value, align]]: constant[Add an unsigned 16 byte integer field to template. This is an convenience method that simply calls `Uint` keyword with predefined length.] call[name[self].uint, parameter[constant[16], name[name], name[value], name[align]]]
keyword[def] identifier[u128] ( identifier[self] , identifier[name] , identifier[value] = keyword[None] , identifier[align] = keyword[None] ): literal[string] identifier[self] . identifier[uint] ( literal[int] , identifier[name] , identifier[value] , identifier[align] )
def u128(self, name, value=None, align=None): """Add an unsigned 16 byte integer field to template. This is an convenience method that simply calls `Uint` keyword with predefined length.""" self.uint(16, name, value, align)
def make_dist_mat(xy1, xy2, longlat=True): """ Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- xy1 : numpy.array The first set of coordinates as [(x, y), (x, y), (x, y)]. xy2 : numpy.array The second set of coordinates as [(x, y), (x, y), (x, y)]. longlat : boolean, optionnal Whether the coordinates are in geographic (longitude/latitude) format or not (default: False) Returns ------- mat_dist : numpy.array The distance matrix between xy1 and xy2 """ if longlat: return hav_dist(xy1[:, None], xy2) else: d0 = np.subtract.outer(xy1[:, 0], xy2[:, 0]) d1 = np.subtract.outer(xy1[:, 1], xy2[:, 1]) return np.hypot(d0, d1)
def function[make_dist_mat, parameter[xy1, xy2, longlat]]: constant[ Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- xy1 : numpy.array The first set of coordinates as [(x, y), (x, y), (x, y)]. xy2 : numpy.array The second set of coordinates as [(x, y), (x, y), (x, y)]. longlat : boolean, optionnal Whether the coordinates are in geographic (longitude/latitude) format or not (default: False) Returns ------- mat_dist : numpy.array The distance matrix between xy1 and xy2 ] if name[longlat] begin[:] return[call[name[hav_dist], parameter[call[name[xy1]][tuple[[<ast.Slice object at 0x7da1b0b803a0>, <ast.Constant object at 0x7da1b0b803d0>]]], name[xy2]]]]
keyword[def] identifier[make_dist_mat] ( identifier[xy1] , identifier[xy2] , identifier[longlat] = keyword[True] ): literal[string] keyword[if] identifier[longlat] : keyword[return] identifier[hav_dist] ( identifier[xy1] [:, keyword[None] ], identifier[xy2] ) keyword[else] : identifier[d0] = identifier[np] . identifier[subtract] . identifier[outer] ( identifier[xy1] [:, literal[int] ], identifier[xy2] [:, literal[int] ]) identifier[d1] = identifier[np] . identifier[subtract] . identifier[outer] ( identifier[xy1] [:, literal[int] ], identifier[xy2] [:, literal[int] ]) keyword[return] identifier[np] . identifier[hypot] ( identifier[d0] , identifier[d1] )
def make_dist_mat(xy1, xy2, longlat=True): """ Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- xy1 : numpy.array The first set of coordinates as [(x, y), (x, y), (x, y)]. xy2 : numpy.array The second set of coordinates as [(x, y), (x, y), (x, y)]. longlat : boolean, optionnal Whether the coordinates are in geographic (longitude/latitude) format or not (default: False) Returns ------- mat_dist : numpy.array The distance matrix between xy1 and xy2 """ if longlat: return hav_dist(xy1[:, None], xy2) # depends on [control=['if'], data=[]] else: d0 = np.subtract.outer(xy1[:, 0], xy2[:, 0]) d1 = np.subtract.outer(xy1[:, 1], xy2[:, 1]) return np.hypot(d0, d1)
def _damerau_levenshtein(a, b): """Returns Damerau-Levenshtein edit distance from a to b.""" memo = {} def distance(x, y): """Recursively defined string distance with memoization.""" if (x, y) in memo: return memo[x, y] if not x: d = len(y) elif not y: d = len(x) else: d = min( distance(x[1:], y) + 1, # correct an insertion error distance(x, y[1:]) + 1, # correct a deletion error distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]: # Correct a transposition. t = distance(x[2:], y[2:]) + 1 if d > t: d = t memo[x, y] = d return d return distance(a, b)
def function[_damerau_levenshtein, parameter[a, b]]: constant[Returns Damerau-Levenshtein edit distance from a to b.] variable[memo] assign[=] dictionary[[], []] def function[distance, parameter[x, y]]: constant[Recursively defined string distance with memoization.] if compare[tuple[[<ast.Name object at 0x7da1b18bc7c0>, <ast.Name object at 0x7da1b18bcb80>]] in name[memo]] begin[:] return[call[name[memo]][tuple[[<ast.Name object at 0x7da1b18bdcc0>, <ast.Name object at 0x7da1b18bcd30>]]]] if <ast.UnaryOp object at 0x7da1b18bef50> begin[:] variable[d] assign[=] call[name[len], parameter[name[y]]] call[name[memo]][tuple[[<ast.Name object at 0x7da1b18bd3c0>, <ast.Name object at 0x7da1b18bd810>]]] assign[=] name[d] return[name[d]] return[call[name[distance], parameter[name[a], name[b]]]]
keyword[def] identifier[_damerau_levenshtein] ( identifier[a] , identifier[b] ): literal[string] identifier[memo] ={} keyword[def] identifier[distance] ( identifier[x] , identifier[y] ): literal[string] keyword[if] ( identifier[x] , identifier[y] ) keyword[in] identifier[memo] : keyword[return] identifier[memo] [ identifier[x] , identifier[y] ] keyword[if] keyword[not] identifier[x] : identifier[d] = identifier[len] ( identifier[y] ) keyword[elif] keyword[not] identifier[y] : identifier[d] = identifier[len] ( identifier[x] ) keyword[else] : identifier[d] = identifier[min] ( identifier[distance] ( identifier[x] [ literal[int] :], identifier[y] )+ literal[int] , identifier[distance] ( identifier[x] , identifier[y] [ literal[int] :])+ literal[int] , identifier[distance] ( identifier[x] [ literal[int] :], identifier[y] [ literal[int] :])+( identifier[x] [ literal[int] ]!= identifier[y] [ literal[int] ])) keyword[if] identifier[len] ( identifier[x] )>= literal[int] keyword[and] identifier[len] ( identifier[y] )>= literal[int] keyword[and] identifier[x] [ literal[int] ]== identifier[y] [ literal[int] ] keyword[and] identifier[x] [ literal[int] ]== identifier[y] [ literal[int] ]: identifier[t] = identifier[distance] ( identifier[x] [ literal[int] :], identifier[y] [ literal[int] :])+ literal[int] keyword[if] identifier[d] > identifier[t] : identifier[d] = identifier[t] identifier[memo] [ identifier[x] , identifier[y] ]= identifier[d] keyword[return] identifier[d] keyword[return] identifier[distance] ( identifier[a] , identifier[b] )
def _damerau_levenshtein(a, b): """Returns Damerau-Levenshtein edit distance from a to b.""" memo = {} def distance(x, y): """Recursively defined string distance with memoization.""" if (x, y) in memo: return memo[x, y] # depends on [control=['if'], data=['memo']] if not x: d = len(y) # depends on [control=['if'], data=[]] elif not y: d = len(x) # depends on [control=['if'], data=[]] else: # correct an insertion error # correct a deletion error d = min(distance(x[1:], y) + 1, distance(x, y[1:]) + 1, distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character if len(x) >= 2 and len(y) >= 2 and (x[0] == y[1]) and (x[1] == y[0]): # Correct a transposition. t = distance(x[2:], y[2:]) + 1 if d > t: d = t # depends on [control=['if'], data=['d', 't']] # depends on [control=['if'], data=[]] memo[x, y] = d return d return distance(a, b)
def numeric_range(cls, field, from_value, to_value, include_lower=None, include_upper=None): ''' http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently. ''' instance = cls(numeric_range={field: {'from': from_value, 'to': to_value}}) if include_lower is not None: instance['numeric_range'][field]['include_lower'] = include_lower if include_upper is not None: instance['numeric_range'][field]['include_upper'] = include_upper return instance
def function[numeric_range, parameter[cls, field, from_value, to_value, include_lower, include_upper]]: constant[ http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently. ] variable[instance] assign[=] call[name[cls], parameter[]] if compare[name[include_lower] is_not constant[None]] begin[:] call[call[call[name[instance]][constant[numeric_range]]][name[field]]][constant[include_lower]] assign[=] name[include_lower] if compare[name[include_upper] is_not constant[None]] begin[:] call[call[call[name[instance]][constant[numeric_range]]][name[field]]][constant[include_upper]] assign[=] name[include_upper] return[name[instance]]
keyword[def] identifier[numeric_range] ( identifier[cls] , identifier[field] , identifier[from_value] , identifier[to_value] , identifier[include_lower] = keyword[None] , identifier[include_upper] = keyword[None] ): literal[string] identifier[instance] = identifier[cls] ( identifier[numeric_range] ={ identifier[field] :{ literal[string] : identifier[from_value] , literal[string] : identifier[to_value] }}) keyword[if] identifier[include_lower] keyword[is] keyword[not] keyword[None] : identifier[instance] [ literal[string] ][ identifier[field] ][ literal[string] ]= identifier[include_lower] keyword[if] identifier[include_upper] keyword[is] keyword[not] keyword[None] : identifier[instance] [ literal[string] ][ identifier[field] ][ literal[string] ]= identifier[include_upper] keyword[return] identifier[instance]
def numeric_range(cls, field, from_value, to_value, include_lower=None, include_upper=None): """ http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently. """ instance = cls(numeric_range={field: {'from': from_value, 'to': to_value}}) if include_lower is not None: instance['numeric_range'][field]['include_lower'] = include_lower # depends on [control=['if'], data=['include_lower']] if include_upper is not None: instance['numeric_range'][field]['include_upper'] = include_upper # depends on [control=['if'], data=['include_upper']] return instance
def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = (proxy and scheme != 'https') using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith('socks') url = request.path_url if is_proxied_http_request and not using_socks_proxy: url = urldefragauth(request.url) return url
def function[request_url, parameter[self, request, proxies]]: constant[Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str ] variable[proxy] assign[=] call[name[select_proxy], parameter[name[request].url, name[proxies]]] variable[scheme] assign[=] call[name[urlparse], parameter[name[request].url]].scheme variable[is_proxied_http_request] assign[=] <ast.BoolOp object at 0x7da18ede7dc0> variable[using_socks_proxy] assign[=] constant[False] if name[proxy] begin[:] variable[proxy_scheme] assign[=] call[call[name[urlparse], parameter[name[proxy]]].scheme.lower, parameter[]] variable[using_socks_proxy] assign[=] call[name[proxy_scheme].startswith, parameter[constant[socks]]] variable[url] assign[=] name[request].path_url if <ast.BoolOp object at 0x7da18ede6d40> begin[:] variable[url] assign[=] call[name[urldefragauth], parameter[name[request].url]] return[name[url]]
keyword[def] identifier[request_url] ( identifier[self] , identifier[request] , identifier[proxies] ): literal[string] identifier[proxy] = identifier[select_proxy] ( identifier[request] . identifier[url] , identifier[proxies] ) identifier[scheme] = identifier[urlparse] ( identifier[request] . identifier[url] ). identifier[scheme] identifier[is_proxied_http_request] =( identifier[proxy] keyword[and] identifier[scheme] != literal[string] ) identifier[using_socks_proxy] = keyword[False] keyword[if] identifier[proxy] : identifier[proxy_scheme] = identifier[urlparse] ( identifier[proxy] ). identifier[scheme] . identifier[lower] () identifier[using_socks_proxy] = identifier[proxy_scheme] . identifier[startswith] ( literal[string] ) identifier[url] = identifier[request] . identifier[path_url] keyword[if] identifier[is_proxied_http_request] keyword[and] keyword[not] identifier[using_socks_proxy] : identifier[url] = identifier[urldefragauth] ( identifier[request] . identifier[url] ) keyword[return] identifier[url]
def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = proxy and scheme != 'https' using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith('socks') # depends on [control=['if'], data=[]] url = request.path_url if is_proxied_http_request and (not using_socks_proxy): url = urldefragauth(request.url) # depends on [control=['if'], data=[]] return url
def get_instance(self, payload): """ Build an instance of MobileInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileInstance """ return MobileInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def function[get_instance, parameter[self, payload]]: constant[ Build an instance of MobileInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileInstance ] return[call[name[MobileInstance], parameter[name[self]._version, name[payload]]]]
keyword[def] identifier[get_instance] ( identifier[self] , identifier[payload] ): literal[string] keyword[return] identifier[MobileInstance] ( identifier[self] . identifier[_version] , identifier[payload] , identifier[account_sid] = identifier[self] . identifier[_solution] [ literal[string] ],)
def get_instance(self, payload): """ Build an instance of MobileInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileInstance :rtype: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileInstance """ return MobileInstance(self._version, payload, account_sid=self._solution['account_sid'])
def show_all(key): ''' Context: - abbr - metadata - bill - sources - nav_active Templates: - billy/web/public/bill_all_{key}.html - where key is passed in, like "actions", etc. ''' def func(request, abbr, session, bill_id, key): # get fixed version fixed_bill_id = fix_bill_id(bill_id) # redirect if URL's id isn't fixed id without spaces if fixed_bill_id.replace(' ', '') != bill_id: return redirect('bill', abbr=abbr, session=session, bill_id=fixed_bill_id.replace(' ', '')) bill = db.bills.find_one({settings.LEVEL_FIELD: abbr, 'session': session, 'bill_id': fixed_bill_id}) if bill is None: raise Http404('no bill found {0} {1} {2}'.format(abbr, session, bill_id)) return render(request, templatename('bill_all_%s' % key), dict(abbr=abbr, metadata=Metadata.get_object(abbr), bill=bill, sources=bill['sources'], nav_active='bills')) return func
def function[show_all, parameter[key]]: constant[ Context: - abbr - metadata - bill - sources - nav_active Templates: - billy/web/public/bill_all_{key}.html - where key is passed in, like "actions", etc. ] def function[func, parameter[request, abbr, session, bill_id, key]]: variable[fixed_bill_id] assign[=] call[name[fix_bill_id], parameter[name[bill_id]]] if compare[call[name[fixed_bill_id].replace, parameter[constant[ ], constant[]]] not_equal[!=] name[bill_id]] begin[:] return[call[name[redirect], parameter[constant[bill]]]] variable[bill] assign[=] call[name[db].bills.find_one, parameter[dictionary[[<ast.Attribute object at 0x7da1b26af610>, <ast.Constant object at 0x7da1b26af730>, <ast.Constant object at 0x7da1b26ac700>], [<ast.Name object at 0x7da1b26affd0>, <ast.Name object at 0x7da1b26afe50>, <ast.Name object at 0x7da1b26ac490>]]]] if compare[name[bill] is constant[None]] begin[:] <ast.Raise object at 0x7da1b26ad150> return[call[name[render], parameter[name[request], call[name[templatename], parameter[binary_operation[constant[bill_all_%s] <ast.Mod object at 0x7da2590d6920> name[key]]]], call[name[dict], parameter[]]]]] return[name[func]]
keyword[def] identifier[show_all] ( identifier[key] ): literal[string] keyword[def] identifier[func] ( identifier[request] , identifier[abbr] , identifier[session] , identifier[bill_id] , identifier[key] ): identifier[fixed_bill_id] = identifier[fix_bill_id] ( identifier[bill_id] ) keyword[if] identifier[fixed_bill_id] . identifier[replace] ( literal[string] , literal[string] )!= identifier[bill_id] : keyword[return] identifier[redirect] ( literal[string] , identifier[abbr] = identifier[abbr] , identifier[session] = identifier[session] , identifier[bill_id] = identifier[fixed_bill_id] . identifier[replace] ( literal[string] , literal[string] )) identifier[bill] = identifier[db] . identifier[bills] . identifier[find_one] ({ identifier[settings] . identifier[LEVEL_FIELD] : identifier[abbr] , literal[string] : identifier[session] , literal[string] : identifier[fixed_bill_id] }) keyword[if] identifier[bill] keyword[is] keyword[None] : keyword[raise] identifier[Http404] ( literal[string] . identifier[format] ( identifier[abbr] , identifier[session] , identifier[bill_id] )) keyword[return] identifier[render] ( identifier[request] , identifier[templatename] ( literal[string] % identifier[key] ), identifier[dict] ( identifier[abbr] = identifier[abbr] , identifier[metadata] = identifier[Metadata] . identifier[get_object] ( identifier[abbr] ), identifier[bill] = identifier[bill] , identifier[sources] = identifier[bill] [ literal[string] ], identifier[nav_active] = literal[string] )) keyword[return] identifier[func]
def show_all(key): """ Context: - abbr - metadata - bill - sources - nav_active Templates: - billy/web/public/bill_all_{key}.html - where key is passed in, like "actions", etc. """ def func(request, abbr, session, bill_id, key): # get fixed version fixed_bill_id = fix_bill_id(bill_id) # redirect if URL's id isn't fixed id without spaces if fixed_bill_id.replace(' ', '') != bill_id: return redirect('bill', abbr=abbr, session=session, bill_id=fixed_bill_id.replace(' ', '')) # depends on [control=['if'], data=[]] bill = db.bills.find_one({settings.LEVEL_FIELD: abbr, 'session': session, 'bill_id': fixed_bill_id}) if bill is None: raise Http404('no bill found {0} {1} {2}'.format(abbr, session, bill_id)) # depends on [control=['if'], data=[]] return render(request, templatename('bill_all_%s' % key), dict(abbr=abbr, metadata=Metadata.get_object(abbr), bill=bill, sources=bill['sources'], nav_active='bills')) return func
def rule_collection(href, cls): """ Rule collections insert a ``create`` and ``create_rule_section`` method into the collection. This collection type is returned when accessing rules through a reference, as:: policy = FirewallPolicy('mypolicy') policy.fw_ipv4_access_rules.create(....) policy.fw_ipv4_access_rules.create_rule_section(...) See the class types documentation, or use help():: print(help(policy.fw_ipv4_access_rules)) :rtype: SubElementCollection """ instance = cls(href=href) meth = getattr(instance, 'create') return type( cls.__name__, (SubElementCollection,), { 'create': meth, 'create_rule_section': instance.create_rule_section})(href, cls)
def function[rule_collection, parameter[href, cls]]: constant[ Rule collections insert a ``create`` and ``create_rule_section`` method into the collection. This collection type is returned when accessing rules through a reference, as:: policy = FirewallPolicy('mypolicy') policy.fw_ipv4_access_rules.create(....) policy.fw_ipv4_access_rules.create_rule_section(...) See the class types documentation, or use help():: print(help(policy.fw_ipv4_access_rules)) :rtype: SubElementCollection ] variable[instance] assign[=] call[name[cls], parameter[]] variable[meth] assign[=] call[name[getattr], parameter[name[instance], constant[create]]] return[call[call[name[type], parameter[name[cls].__name__, tuple[[<ast.Name object at 0x7da1b1b15e70>]], dictionary[[<ast.Constant object at 0x7da1b1b149a0>, <ast.Constant object at 0x7da1b1b16c20>], [<ast.Name object at 0x7da1b1b17670>, <ast.Attribute object at 0x7da1b1a970a0>]]]], parameter[name[href], name[cls]]]]
keyword[def] identifier[rule_collection] ( identifier[href] , identifier[cls] ): literal[string] identifier[instance] = identifier[cls] ( identifier[href] = identifier[href] ) identifier[meth] = identifier[getattr] ( identifier[instance] , literal[string] ) keyword[return] identifier[type] ( identifier[cls] . identifier[__name__] ,( identifier[SubElementCollection] ,),{ literal[string] : identifier[meth] , literal[string] : identifier[instance] . identifier[create_rule_section] })( identifier[href] , identifier[cls] )
def rule_collection(href, cls): """ Rule collections insert a ``create`` and ``create_rule_section`` method into the collection. This collection type is returned when accessing rules through a reference, as:: policy = FirewallPolicy('mypolicy') policy.fw_ipv4_access_rules.create(....) policy.fw_ipv4_access_rules.create_rule_section(...) See the class types documentation, or use help():: print(help(policy.fw_ipv4_access_rules)) :rtype: SubElementCollection """ instance = cls(href=href) meth = getattr(instance, 'create') return type(cls.__name__, (SubElementCollection,), {'create': meth, 'create_rule_section': instance.create_rule_section})(href, cls)
def get_spec(self): """ Return the Core ML spec """ if _mac_ver() >= (10, 14): return self.vggish_model.get_spec() else: vggish_model_file = VGGish() coreml_model_path = vggish_model_file.get_model_path(format='coreml') return MLModel(coreml_model_path).get_spec()
def function[get_spec, parameter[self]]: constant[ Return the Core ML spec ] if compare[call[name[_mac_ver], parameter[]] greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da204961de0>, <ast.Constant object at 0x7da204963a00>]]] begin[:] return[call[name[self].vggish_model.get_spec, parameter[]]]
keyword[def] identifier[get_spec] ( identifier[self] ): literal[string] keyword[if] identifier[_mac_ver] ()>=( literal[int] , literal[int] ): keyword[return] identifier[self] . identifier[vggish_model] . identifier[get_spec] () keyword[else] : identifier[vggish_model_file] = identifier[VGGish] () identifier[coreml_model_path] = identifier[vggish_model_file] . identifier[get_model_path] ( identifier[format] = literal[string] ) keyword[return] identifier[MLModel] ( identifier[coreml_model_path] ). identifier[get_spec] ()
def get_spec(self): """ Return the Core ML spec """ if _mac_ver() >= (10, 14): return self.vggish_model.get_spec() # depends on [control=['if'], data=[]] else: vggish_model_file = VGGish() coreml_model_path = vggish_model_file.get_model_path(format='coreml') return MLModel(coreml_model_path).get_spec()
def strip_tags(html): """Stripts HTML tags from text. Note fields on several Mambu entities come with additional HTML tags (they are rich text fields, I guess that's why). Sometimes they are useless, so stripping them is a good idea. """ from html.parser import HTMLParser class MLStripper(HTMLParser): """Aux class for stripping HTML tags. fields on several Mambu entities come with additional HTML tags (they are rich text fields, I guess that's why). Sometimes they are useless, so stripping them is a good idea. """ def __init__(self): try: super().__init__() # required for python3 except TypeError as e: pass # with python2 raises TypeError self.reset() self.fed = [] def handle_data(self, d): self.fed.append(d) def get_data(self): return ''.join(self.fed) s = MLStripper() s.feed(html.replace("&nbsp;"," ")) return s.get_data()
def function[strip_tags, parameter[html]]: constant[Stripts HTML tags from text. Note fields on several Mambu entities come with additional HTML tags (they are rich text fields, I guess that's why). Sometimes they are useless, so stripping them is a good idea. ] from relative_module[html.parser] import module[HTMLParser] class class[MLStripper, parameter[]] begin[:] constant[Aux class for stripping HTML tags. fields on several Mambu entities come with additional HTML tags (they are rich text fields, I guess that's why). Sometimes they are useless, so stripping them is a good idea. ] def function[__init__, parameter[self]]: <ast.Try object at 0x7da20c992530> call[name[self].reset, parameter[]] name[self].fed assign[=] list[[]] def function[handle_data, parameter[self, d]]: call[name[self].fed.append, parameter[name[d]]] def function[get_data, parameter[self]]: return[call[constant[].join, parameter[name[self].fed]]] variable[s] assign[=] call[name[MLStripper], parameter[]] call[name[s].feed, parameter[call[name[html].replace, parameter[constant[&nbsp;], constant[ ]]]]] return[call[name[s].get_data, parameter[]]]
keyword[def] identifier[strip_tags] ( identifier[html] ): literal[string] keyword[from] identifier[html] . identifier[parser] keyword[import] identifier[HTMLParser] keyword[class] identifier[MLStripper] ( identifier[HTMLParser] ): literal[string] keyword[def] identifier[__init__] ( identifier[self] ): keyword[try] : identifier[super] (). identifier[__init__] () keyword[except] identifier[TypeError] keyword[as] identifier[e] : keyword[pass] identifier[self] . identifier[reset] () identifier[self] . identifier[fed] =[] keyword[def] identifier[handle_data] ( identifier[self] , identifier[d] ): identifier[self] . identifier[fed] . identifier[append] ( identifier[d] ) keyword[def] identifier[get_data] ( identifier[self] ): keyword[return] literal[string] . identifier[join] ( identifier[self] . identifier[fed] ) identifier[s] = identifier[MLStripper] () identifier[s] . identifier[feed] ( identifier[html] . identifier[replace] ( literal[string] , literal[string] )) keyword[return] identifier[s] . identifier[get_data] ()
def strip_tags(html): """Stripts HTML tags from text. Note fields on several Mambu entities come with additional HTML tags (they are rich text fields, I guess that's why). Sometimes they are useless, so stripping them is a good idea. """ from html.parser import HTMLParser class MLStripper(HTMLParser): """Aux class for stripping HTML tags. fields on several Mambu entities come with additional HTML tags (they are rich text fields, I guess that's why). Sometimes they are useless, so stripping them is a good idea. """ def __init__(self): try: super().__init__() # required for python3 # depends on [control=['try'], data=[]] except TypeError as e: pass # with python2 raises TypeError # depends on [control=['except'], data=[]] self.reset() self.fed = [] def handle_data(self, d): self.fed.append(d) def get_data(self): return ''.join(self.fed) s = MLStripper() s.feed(html.replace('&nbsp;', ' ')) return s.get_data()
def delete_service_settings_on_scope_delete(sender, instance, **kwargs): """ If VM that contains service settings were deleted - all settings resources could be safely deleted from NC. """ for service_settings in ServiceSettings.objects.filter(scope=instance): service_settings.unlink_descendants() service_settings.delete()
def function[delete_service_settings_on_scope_delete, parameter[sender, instance]]: constant[ If VM that contains service settings were deleted - all settings resources could be safely deleted from NC. ] for taget[name[service_settings]] in starred[call[name[ServiceSettings].objects.filter, parameter[]]] begin[:] call[name[service_settings].unlink_descendants, parameter[]] call[name[service_settings].delete, parameter[]]
keyword[def] identifier[delete_service_settings_on_scope_delete] ( identifier[sender] , identifier[instance] ,** identifier[kwargs] ): literal[string] keyword[for] identifier[service_settings] keyword[in] identifier[ServiceSettings] . identifier[objects] . identifier[filter] ( identifier[scope] = identifier[instance] ): identifier[service_settings] . identifier[unlink_descendants] () identifier[service_settings] . identifier[delete] ()
def delete_service_settings_on_scope_delete(sender, instance, **kwargs): """ If VM that contains service settings were deleted - all settings resources could be safely deleted from NC. """ for service_settings in ServiceSettings.objects.filter(scope=instance): service_settings.unlink_descendants() service_settings.delete() # depends on [control=['for'], data=['service_settings']]