code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def post_mortem(tb=None, host='', port=5555, patch_stdstreams=False):
    """
    Start post-mortem debugging for the provided traceback object

    If no traceback is provided the debugger tries to obtain a traceback
    for the last unhandled exception.

    Example::

        try:
            # Some error-prone code
            assert ham == spam
        except:
            web_pdb.post_mortem()

    :param tb: traceback for post-mortem debugging
    :type tb: types.TracebackType
    :param host: web-UI hostname or IP-address
    :type host: str
    :param port: web-UI port. If ``port=-1``, choose a random port value
        between 32768 and 65536.
    :type port: int
    :param patch_stdstreams: redirect all standard input and output
        streams to the web-UI.
    :type patch_stdstreams: bool
    :raises ValueError: if no valid traceback is provided and the Python
        interpreter is not handling any exception
    """
    # handling the default
    if tb is None:
        # sys.exc_info() returns (type, value, traceback) if an exception is
        # being handled, otherwise it returns (None, None, None)
        t, v, tb = sys.exc_info()
        # Fail fast *before* formatting: the original checked tb only after
        # calling traceback.format_exception(None, None, None).
        if tb is None:
            raise ValueError('A valid traceback must be passed if no '
                             'exception is being handled')
        exc_data = traceback.format_exception(t, v, tb)
    else:
        exc_data = traceback.format_tb(tb)
    # Reuse a running debugger instance if there is one, otherwise start one.
    pdb = WebPdb.active_instance
    if pdb is None:
        pdb = WebPdb(host, port, patch_stdstreams)
    else:
        pdb.remove_trace()
    pdb.console.writeline('*** Web-PDB post-mortem ***\n')
    pdb.console.writeline(''.join(exc_data))
    pdb.reset()
    pdb.interaction(None, tb)
def function[post_mortem, parameter[tb, host, port, patch_stdstreams]]: constant[ Start post-mortem debugging for the provided traceback object If no traceback is provided the debugger tries to obtain a traceback for the last unhandled exception. Example:: try: # Some error-prone code assert ham == spam except: web_pdb.post_mortem() :param tb: traceback for post-mortem debugging :type tb: types.TracebackType :param host: web-UI hostname or IP-address :type host: str :param port: web-UI port. If ``port=-1``, choose a random port value between 32768 and 65536. :type port: int :param patch_stdstreams: redirect all standard input and output streams to the web-UI. :type patch_stdstreams: bool :raises ValueError: if no valid traceback is provided and the Python interpreter is not handling any exception ] if compare[name[tb] is constant[None]] begin[:] <ast.Tuple object at 0x7da1b0b9f820> assign[=] call[name[sys].exc_info, parameter[]] variable[exc_data] assign[=] call[name[traceback].format_exception, parameter[name[t], name[v], name[tb]]] if compare[name[tb] is constant[None]] begin[:] <ast.Raise object at 0x7da18eb56440> variable[pdb] assign[=] name[WebPdb].active_instance if compare[name[pdb] is constant[None]] begin[:] variable[pdb] assign[=] call[name[WebPdb], parameter[name[host], name[port], name[patch_stdstreams]]] call[name[pdb].console.writeline, parameter[constant[*** Web-PDB post-mortem *** ]]] call[name[pdb].console.writeline, parameter[call[constant[].join, parameter[name[exc_data]]]]] call[name[pdb].reset, parameter[]] call[name[pdb].interaction, parameter[constant[None], name[tb]]]
keyword[def] identifier[post_mortem] ( identifier[tb] = keyword[None] , identifier[host] = literal[string] , identifier[port] = literal[int] , identifier[patch_stdstreams] = keyword[False] ): literal[string] keyword[if] identifier[tb] keyword[is] keyword[None] : identifier[t] , identifier[v] , identifier[tb] = identifier[sys] . identifier[exc_info] () identifier[exc_data] = identifier[traceback] . identifier[format_exception] ( identifier[t] , identifier[v] , identifier[tb] ) keyword[else] : identifier[exc_data] = identifier[traceback] . identifier[format_tb] ( identifier[tb] ) keyword[if] identifier[tb] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[pdb] = identifier[WebPdb] . identifier[active_instance] keyword[if] identifier[pdb] keyword[is] keyword[None] : identifier[pdb] = identifier[WebPdb] ( identifier[host] , identifier[port] , identifier[patch_stdstreams] ) keyword[else] : identifier[pdb] . identifier[remove_trace] () identifier[pdb] . identifier[console] . identifier[writeline] ( literal[string] ) identifier[pdb] . identifier[console] . identifier[writeline] ( literal[string] . identifier[join] ( identifier[exc_data] )) identifier[pdb] . identifier[reset] () identifier[pdb] . identifier[interaction] ( keyword[None] , identifier[tb] )
def post_mortem(tb=None, host='', port=5555, patch_stdstreams=False): """ Start post-mortem debugging for the provided traceback object If no traceback is provided the debugger tries to obtain a traceback for the last unhandled exception. Example:: try: # Some error-prone code assert ham == spam except: web_pdb.post_mortem() :param tb: traceback for post-mortem debugging :type tb: types.TracebackType :param host: web-UI hostname or IP-address :type host: str :param port: web-UI port. If ``port=-1``, choose a random port value between 32768 and 65536. :type port: int :param patch_stdstreams: redirect all standard input and output streams to the web-UI. :type patch_stdstreams: bool :raises ValueError: if no valid traceback is provided and the Python interpreter is not handling any exception """ # handling the default if tb is None: # sys.exc_info() returns (type, value, traceback) if an exception is # being handled, otherwise it returns (None, None, None) (t, v, tb) = sys.exc_info() exc_data = traceback.format_exception(t, v, tb) # depends on [control=['if'], data=['tb']] else: exc_data = traceback.format_tb(tb) if tb is None: raise ValueError('A valid traceback must be passed if no exception is being handled') # depends on [control=['if'], data=[]] pdb = WebPdb.active_instance if pdb is None: pdb = WebPdb(host, port, patch_stdstreams) # depends on [control=['if'], data=['pdb']] else: pdb.remove_trace() pdb.console.writeline('*** Web-PDB post-mortem ***\n') pdb.console.writeline(''.join(exc_data)) pdb.reset() pdb.interaction(None, tb)
def to_matrix(self, index_regs=None, index_meta=None, columns_regs=None,
              columns_meta=None, values_regs=None, values_meta=None, **kwargs):
    """Pivot this GDataframe into a matrix.

    Thin wrapper around pandas ``pivot_table``: the resulting frame is
    indexed by ``index_meta + index_regs``, has ``columns_meta +
    columns_regs`` as columns and ``values_regs + values_meta`` as values.
    Any requested metadata attribute that is not already a region column is
    first projected onto the regions via ``project_meta``.

    :param index_regs: list of region fields to use as index
    :param index_meta: list of metadata attributes to use as index
    :param columns_regs: list of region fields to use as columns
    :param columns_meta: list of metadata attributes to use as columns
    :param values_regs: list of region fields to use as values
    :param values_meta: list of metadata attributes to use as values
    :param kwargs: forwarded verbatim to ``pivot_table``
    :return: the pivoted pandas DataFrame
    """
    def _as_list(arg):
        # Treat a missing argument as "no fields requested".
        return [] if arg is None else arg

    index_regs = _as_list(index_regs)
    index_meta = _as_list(index_meta)
    columns_regs = _as_list(columns_regs)
    columns_meta = _as_list(columns_meta)
    values_regs = _as_list(values_regs)
    values_meta = _as_list(values_meta)

    # Metadata attributes needed anywhere in the pivot that are not already
    # present as region columns must be projected first.
    requested_meta = set(index_meta) | set(columns_meta) | set(values_meta)
    meta_to_project = list(requested_meta - set(self.regs.columns))
    projected = self.project_meta(meta_to_project)

    return projected.regs.pivot_table(index=index_meta + index_regs,
                                      columns=columns_meta + columns_regs,
                                      values=values_regs + values_meta,
                                      **kwargs)
def function[to_matrix, parameter[self, index_regs, index_meta, columns_regs, columns_meta, values_regs, values_meta]]: constant[ Transforms the GDataframe to a pivot matrix having as index and columns the ones specified. This function is a wrapper around the pivot_table function of Pandas. :param index_regs: list of region fields to use as index :param index_meta: list of metadata attributes to use as index :param columns_regs: list of region fields to use as columns :param columns_meta: list of metadata attributes to use as columns :param values_regs: list of region fields to use as values :param values_meta: list of metadata attributes to use as values :param kwargs: other parameters to pass to the pivot_table function :return: a Pandas dataframe having as index the union of index_regs and index_meta, as columns the union of columns_regs and columns_meta and as values ths union of values_regs and values_meta ] variable[index_regs] assign[=] <ast.IfExp object at 0x7da1b1b0c550> variable[index_meta] assign[=] <ast.IfExp object at 0x7da1b1b0f820> variable[columns_regs] assign[=] <ast.IfExp object at 0x7da20c76e950> variable[columns_meta] assign[=] <ast.IfExp object at 0x7da20c76ce80> variable[values_regs] assign[=] <ast.IfExp object at 0x7da20c76cbb0> variable[values_meta] assign[=] <ast.IfExp object at 0x7da1b1a2cc10> variable[index_meta_s] assign[=] call[name[set], parameter[name[index_meta]]] variable[columns_meta_s] assign[=] call[name[set], parameter[name[columns_meta]]] variable[values_meta_s] assign[=] call[name[set], parameter[name[values_meta]]] variable[meta_to_project] assign[=] call[name[list], parameter[call[call[call[name[index_meta_s].union, parameter[name[columns_meta_s]]].union, parameter[name[values_meta_s]]].difference, parameter[call[name[set], parameter[name[self].regs.columns]]]]]] variable[res] assign[=] call[name[self].project_meta, parameter[name[meta_to_project]]] variable[pivot_columns] assign[=] binary_operation[name[columns_meta] + 
name[columns_regs]] variable[pivot_index] assign[=] binary_operation[name[index_meta] + name[index_regs]] variable[pivot_values] assign[=] binary_operation[name[values_regs] + name[values_meta]] return[call[name[res].regs.pivot_table, parameter[]]]
keyword[def] identifier[to_matrix] ( identifier[self] , identifier[index_regs] = keyword[None] , identifier[index_meta] = keyword[None] , identifier[columns_regs] = keyword[None] , identifier[columns_meta] = keyword[None] , identifier[values_regs] = keyword[None] , identifier[values_meta] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[index_regs] = identifier[index_regs] keyword[if] identifier[index_regs] keyword[is] keyword[not] keyword[None] keyword[else] [] identifier[index_meta] = identifier[index_meta] keyword[if] identifier[index_meta] keyword[is] keyword[not] keyword[None] keyword[else] [] identifier[columns_regs] = identifier[columns_regs] keyword[if] identifier[columns_regs] keyword[is] keyword[not] keyword[None] keyword[else] [] identifier[columns_meta] = identifier[columns_meta] keyword[if] identifier[columns_meta] keyword[is] keyword[not] keyword[None] keyword[else] [] identifier[values_regs] = identifier[values_regs] keyword[if] identifier[values_regs] keyword[is] keyword[not] keyword[None] keyword[else] [] identifier[values_meta] = identifier[values_meta] keyword[if] identifier[values_meta] keyword[is] keyword[not] keyword[None] keyword[else] [] identifier[index_meta_s] = identifier[set] ( identifier[index_meta] ) identifier[columns_meta_s] = identifier[set] ( identifier[columns_meta] ) identifier[values_meta_s] = identifier[set] ( identifier[values_meta] ) identifier[meta_to_project] = identifier[list] ( identifier[index_meta_s] . identifier[union] ( identifier[columns_meta_s] ). identifier[union] ( identifier[values_meta_s] ). identifier[difference] ( identifier[set] ( identifier[self] . identifier[regs] . identifier[columns] ))) identifier[res] = identifier[self] . 
identifier[project_meta] ( identifier[meta_to_project] ) identifier[pivot_columns] = identifier[columns_meta] + identifier[columns_regs] identifier[pivot_index] = identifier[index_meta] + identifier[index_regs] identifier[pivot_values] = identifier[values_regs] + identifier[values_meta] keyword[return] identifier[res] . identifier[regs] . identifier[pivot_table] ( identifier[index] = identifier[pivot_index] , identifier[columns] = identifier[pivot_columns] , identifier[values] = identifier[pivot_values] ,** identifier[kwargs] )
def to_matrix(self, index_regs=None, index_meta=None, columns_regs=None, columns_meta=None, values_regs=None, values_meta=None, **kwargs): """ Transforms the GDataframe to a pivot matrix having as index and columns the ones specified. This function is a wrapper around the pivot_table function of Pandas. :param index_regs: list of region fields to use as index :param index_meta: list of metadata attributes to use as index :param columns_regs: list of region fields to use as columns :param columns_meta: list of metadata attributes to use as columns :param values_regs: list of region fields to use as values :param values_meta: list of metadata attributes to use as values :param kwargs: other parameters to pass to the pivot_table function :return: a Pandas dataframe having as index the union of index_regs and index_meta, as columns the union of columns_regs and columns_meta and as values ths union of values_regs and values_meta """ index_regs = index_regs if index_regs is not None else [] index_meta = index_meta if index_meta is not None else [] columns_regs = columns_regs if columns_regs is not None else [] columns_meta = columns_meta if columns_meta is not None else [] values_regs = values_regs if values_regs is not None else [] values_meta = values_meta if values_meta is not None else [] index_meta_s = set(index_meta) columns_meta_s = set(columns_meta) values_meta_s = set(values_meta) meta_to_project = list(index_meta_s.union(columns_meta_s).union(values_meta_s).difference(set(self.regs.columns))) res = self.project_meta(meta_to_project) pivot_columns = columns_meta + columns_regs pivot_index = index_meta + index_regs pivot_values = values_regs + values_meta return res.regs.pivot_table(index=pivot_index, columns=pivot_columns, values=pivot_values, **kwargs)
def _py_code_variables(lines, executable, lparams, tab):
    """Adds the variable code lines for all the parameters in the executable.

    :arg lparams: a list of the local variable declarations made so far that need to be passed
      to the executable when it is called.
    """
    params = list(executable.ordered_parameters)
    # A Function's own return value is handled like an extra parameter.
    if type(executable).__name__ == "Function":
        params.append(executable)

    for param in params:
        _py_code_parameter(lines, param, "invar", lparams, tab)
        if param.direction == "(out)":
            # We need to reverse the order of the indices to match the fortran
            # code generation of the wrapper.
            remaining = ("outvar", "indices")
        else:
            remaining = ("indices", "outvar")
        for kind in remaining:
            _py_code_parameter(lines, param, kind, lparams, tab)
def function[_py_code_variables, parameter[lines, executable, lparams, tab]]: constant[Adds the variable code lines for all the parameters in the executable. :arg lparams: a list of the local variable declarations made so far that need to be passed to the executable when it is called. ] variable[allparams] assign[=] name[executable].ordered_parameters if compare[call[name[type], parameter[name[executable]]].__name__ equal[==] constant[Function]] begin[:] variable[allparams] assign[=] binary_operation[name[allparams] + list[[<ast.Name object at 0x7da20c6aad10>]]] for taget[name[p]] in starred[name[allparams]] begin[:] call[name[_py_code_parameter], parameter[name[lines], name[p], constant[invar], name[lparams], name[tab]]] if compare[name[p].direction equal[==] constant[(out)]] begin[:] call[name[_py_code_parameter], parameter[name[lines], name[p], constant[outvar], name[lparams], name[tab]]] call[name[_py_code_parameter], parameter[name[lines], name[p], constant[indices], name[lparams], name[tab]]]
keyword[def] identifier[_py_code_variables] ( identifier[lines] , identifier[executable] , identifier[lparams] , identifier[tab] ): literal[string] identifier[allparams] = identifier[executable] . identifier[ordered_parameters] keyword[if] identifier[type] ( identifier[executable] ). identifier[__name__] == literal[string] : identifier[allparams] = identifier[allparams] +[ identifier[executable] ] keyword[for] identifier[p] keyword[in] identifier[allparams] : identifier[_py_code_parameter] ( identifier[lines] , identifier[p] , literal[string] , identifier[lparams] , identifier[tab] ) keyword[if] identifier[p] . identifier[direction] == literal[string] : identifier[_py_code_parameter] ( identifier[lines] , identifier[p] , literal[string] , identifier[lparams] , identifier[tab] ) identifier[_py_code_parameter] ( identifier[lines] , identifier[p] , literal[string] , identifier[lparams] , identifier[tab] ) keyword[else] : identifier[_py_code_parameter] ( identifier[lines] , identifier[p] , literal[string] , identifier[lparams] , identifier[tab] ) identifier[_py_code_parameter] ( identifier[lines] , identifier[p] , literal[string] , identifier[lparams] , identifier[tab] )
def _py_code_variables(lines, executable, lparams, tab): """Adds the variable code lines for all the parameters in the executable. :arg lparams: a list of the local variable declarations made so far that need to be passed to the executable when it is called. """ allparams = executable.ordered_parameters if type(executable).__name__ == 'Function': allparams = allparams + [executable] # depends on [control=['if'], data=[]] for p in allparams: _py_code_parameter(lines, p, 'invar', lparams, tab) if p.direction == '(out)': #We need to reverse the order of the indices to match the fortran code #generation of the wrapper. _py_code_parameter(lines, p, 'outvar', lparams, tab) _py_code_parameter(lines, p, 'indices', lparams, tab) # depends on [control=['if'], data=[]] else: _py_code_parameter(lines, p, 'indices', lparams, tab) _py_code_parameter(lines, p, 'outvar', lparams, tab) # depends on [control=['for'], data=['p']]
def contourf_to_geojson(contourf, geojson_filepath=None, min_angle_deg=None,
                        ndigits=5, unit='', stroke_width=1, fill_opacity=.9,
                        geojson_properties=None, strdump=False, serialize=True):
    """Transform matplotlib.contourf to geojson with MultiPolygons."""
    polygon_features = []
    # One MP (multipolygon builder) per distinct (level, color) pair.
    mps = []
    contourf_idx = 0
    for coll in contourf.collections:
        color = coll.get_facecolor()
        for path in coll.get_paths():
            for coord in path.to_polygons():
                if min_angle_deg:
                    # Optionally simplify the polygon by dropping low-angle vertices.
                    coord = keep_high_angle(coord, min_angle_deg)
                # ndigits=0 disables rounding entirely (falsy), not "round to integers".
                coord = np.around(coord, ndigits) if ndigits else coord
                op = MP(contourf.levels[contourf_idx], rgb2hex(color[0]))
                if op in mps:
                    # NOTE(review): membership and the == below rely on MP.__eq__,
                    # presumably comparing level+color — confirm in the MP class.
                    # Coordinates are merged into every matching existing entry.
                    for i, k in enumerate(mps):
                        if k == op:
                            mps[i].add_coords(coord.tolist())
                else:
                    op.add_coords(coord.tolist())
                    mps.append(op)
        # One contour level per collection.
        contourf_idx += 1
    # starting here the multipolys will be extracted
    contourf_idx = 0
    for muli in mps:
        polygon = muli.mpoly()
        fcolor = muli.color
        properties = set_contourf_properties(stroke_width, fcolor, fill_opacity,
                                             contourf.levels, contourf_idx, unit)
        if geojson_properties:
            # Caller-supplied properties override/extend the generated ones.
            properties.update(geojson_properties)
        feature = Feature(geometry=polygon, properties=properties)
        polygon_features.append(feature)
        contourf_idx += 1
    feature_collection = FeatureCollection(polygon_features)
    # Either dump to file, return a string, or return the collection object,
    # depending on geojson_filepath / strdump / serialize.
    return _render_feature_collection(feature_collection, geojson_filepath,
                                      strdump, serialize)
def function[contourf_to_geojson, parameter[contourf, geojson_filepath, min_angle_deg, ndigits, unit, stroke_width, fill_opacity, geojson_properties, strdump, serialize]]: constant[Transform matplotlib.contourf to geojson with MultiPolygons.] variable[polygon_features] assign[=] list[[]] variable[mps] assign[=] list[[]] variable[contourf_idx] assign[=] constant[0] for taget[name[coll]] in starred[name[contourf].collections] begin[:] variable[color] assign[=] call[name[coll].get_facecolor, parameter[]] for taget[name[path]] in starred[call[name[coll].get_paths, parameter[]]] begin[:] for taget[name[coord]] in starred[call[name[path].to_polygons, parameter[]]] begin[:] if name[min_angle_deg] begin[:] variable[coord] assign[=] call[name[keep_high_angle], parameter[name[coord], name[min_angle_deg]]] variable[coord] assign[=] <ast.IfExp object at 0x7da1b10821a0> variable[op] assign[=] call[name[MP], parameter[call[name[contourf].levels][name[contourf_idx]], call[name[rgb2hex], parameter[call[name[color]][constant[0]]]]]] if compare[name[op] in name[mps]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1080ca0>, <ast.Name object at 0x7da1b1082a40>]]] in starred[call[name[enumerate], parameter[name[mps]]]] begin[:] if compare[name[k] equal[==] name[op]] begin[:] call[call[name[mps]][name[i]].add_coords, parameter[call[name[coord].tolist, parameter[]]]] <ast.AugAssign object at 0x7da1b1068b20> variable[contourf_idx] assign[=] constant[0] for taget[name[muli]] in starred[name[mps]] begin[:] variable[polygon] assign[=] call[name[muli].mpoly, parameter[]] variable[fcolor] assign[=] name[muli].color variable[properties] assign[=] call[name[set_contourf_properties], parameter[name[stroke_width], name[fcolor], name[fill_opacity], name[contourf].levels, name[contourf_idx], name[unit]]] if name[geojson_properties] begin[:] call[name[properties].update, parameter[name[geojson_properties]]] variable[feature] assign[=] call[name[Feature], parameter[]] 
call[name[polygon_features].append, parameter[name[feature]]] <ast.AugAssign object at 0x7da1b106b850> variable[feature_collection] assign[=] call[name[FeatureCollection], parameter[name[polygon_features]]] return[call[name[_render_feature_collection], parameter[name[feature_collection], name[geojson_filepath], name[strdump], name[serialize]]]]
keyword[def] identifier[contourf_to_geojson] ( identifier[contourf] , identifier[geojson_filepath] = keyword[None] , identifier[min_angle_deg] = keyword[None] , identifier[ndigits] = literal[int] , identifier[unit] = literal[string] , identifier[stroke_width] = literal[int] , identifier[fill_opacity] = literal[int] , identifier[geojson_properties] = keyword[None] , identifier[strdump] = keyword[False] , identifier[serialize] = keyword[True] ): literal[string] identifier[polygon_features] =[] identifier[mps] =[] identifier[contourf_idx] = literal[int] keyword[for] identifier[coll] keyword[in] identifier[contourf] . identifier[collections] : identifier[color] = identifier[coll] . identifier[get_facecolor] () keyword[for] identifier[path] keyword[in] identifier[coll] . identifier[get_paths] (): keyword[for] identifier[coord] keyword[in] identifier[path] . identifier[to_polygons] (): keyword[if] identifier[min_angle_deg] : identifier[coord] = identifier[keep_high_angle] ( identifier[coord] , identifier[min_angle_deg] ) identifier[coord] = identifier[np] . identifier[around] ( identifier[coord] , identifier[ndigits] ) keyword[if] identifier[ndigits] keyword[else] identifier[coord] identifier[op] = identifier[MP] ( identifier[contourf] . identifier[levels] [ identifier[contourf_idx] ], identifier[rgb2hex] ( identifier[color] [ literal[int] ])) keyword[if] identifier[op] keyword[in] identifier[mps] : keyword[for] identifier[i] , identifier[k] keyword[in] identifier[enumerate] ( identifier[mps] ): keyword[if] identifier[k] == identifier[op] : identifier[mps] [ identifier[i] ]. identifier[add_coords] ( identifier[coord] . identifier[tolist] ()) keyword[else] : identifier[op] . identifier[add_coords] ( identifier[coord] . identifier[tolist] ()) identifier[mps] . 
identifier[append] ( identifier[op] ) identifier[contourf_idx] += literal[int] identifier[contourf_idx] = literal[int] keyword[for] identifier[muli] keyword[in] identifier[mps] : identifier[polygon] = identifier[muli] . identifier[mpoly] () identifier[fcolor] = identifier[muli] . identifier[color] identifier[properties] = identifier[set_contourf_properties] ( identifier[stroke_width] , identifier[fcolor] , identifier[fill_opacity] , identifier[contourf] . identifier[levels] , identifier[contourf_idx] , identifier[unit] ) keyword[if] identifier[geojson_properties] : identifier[properties] . identifier[update] ( identifier[geojson_properties] ) identifier[feature] = identifier[Feature] ( identifier[geometry] = identifier[polygon] , identifier[properties] = identifier[properties] ) identifier[polygon_features] . identifier[append] ( identifier[feature] ) identifier[contourf_idx] += literal[int] identifier[feature_collection] = identifier[FeatureCollection] ( identifier[polygon_features] ) keyword[return] identifier[_render_feature_collection] ( identifier[feature_collection] , identifier[geojson_filepath] , identifier[strdump] , identifier[serialize] )
def contourf_to_geojson(contourf, geojson_filepath=None, min_angle_deg=None, ndigits=5, unit='', stroke_width=1, fill_opacity=0.9, geojson_properties=None, strdump=False, serialize=True): """Transform matplotlib.contourf to geojson with MultiPolygons.""" polygon_features = [] mps = [] contourf_idx = 0 for coll in contourf.collections: color = coll.get_facecolor() for path in coll.get_paths(): for coord in path.to_polygons(): if min_angle_deg: coord = keep_high_angle(coord, min_angle_deg) # depends on [control=['if'], data=[]] coord = np.around(coord, ndigits) if ndigits else coord op = MP(contourf.levels[contourf_idx], rgb2hex(color[0])) if op in mps: for (i, k) in enumerate(mps): if k == op: mps[i].add_coords(coord.tolist()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['op', 'mps']] else: op.add_coords(coord.tolist()) mps.append(op) # depends on [control=['for'], data=['coord']] # depends on [control=['for'], data=['path']] contourf_idx += 1 # depends on [control=['for'], data=['coll']] # starting here the multipolys will be extracted contourf_idx = 0 for muli in mps: polygon = muli.mpoly() fcolor = muli.color properties = set_contourf_properties(stroke_width, fcolor, fill_opacity, contourf.levels, contourf_idx, unit) if geojson_properties: properties.update(geojson_properties) # depends on [control=['if'], data=[]] feature = Feature(geometry=polygon, properties=properties) polygon_features.append(feature) contourf_idx += 1 # depends on [control=['for'], data=['muli']] feature_collection = FeatureCollection(polygon_features) return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize)
def GetSecurityToken(self, username, password):
    """
    Grabs a security Token to authenticate to Office 365 services

    :param username: Office 365 account name
    :param password: Office 365 account password
    :return: the binary security token text extracted from the STS response
    :raises Exception: if the response contains no BinarySecurityToken
        (wrong credentials or wrong root site)
    """
    # Escape the values interpolated into the SOAP body: credentials (or the
    # site URL) containing XML-special characters such as '&' or '<' would
    # otherwise produce a malformed request / allow XML injection.
    from xml.sax.saxutils import escape

    url = 'https://login.microsoftonline.com/extSTS.srf'
    body = """
    <s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
        xmlns:a="http://www.w3.org/2005/08/addressing"
        xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
    <s:Header>
        <a:Action s:mustUnderstand="1">http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue</a:Action>
        <a:ReplyTo>
        <a:Address>http://www.w3.org/2005/08/addressing/anonymous</a:Address>
        </a:ReplyTo>
        <a:To s:mustUnderstand="1">https://login.microsoftonline.com/extSTS.srf</a:To>
        <o:Security s:mustUnderstand="1"
            xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
        <o:UsernameToken>
            <o:Username>%s</o:Username>
            <o:Password>%s</o:Password>
        </o:UsernameToken>
        </o:Security>
    </s:Header>
    <s:Body>
        <t:RequestSecurityToken xmlns:t="http://schemas.xmlsoap.org/ws/2005/02/trust">
        <wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy">
            <a:EndpointReference>
            <a:Address>%s</a:Address>
            </a:EndpointReference>
        </wsp:AppliesTo>
        <t:KeyType>http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey</t:KeyType>
        <t:RequestType>http://schemas.xmlsoap.org/ws/2005/02/trust/Issue</t:RequestType>
        <t:TokenType>urn:oasis:names:tc:SAML:1.0:assertion</t:TokenType>
        </t:RequestSecurityToken>
    </s:Body>
    </s:Envelope>""" % (escape(username), escape(password),
                        escape(self.share_point_site))
    headers = {'accept': 'application/json;odata=verbose'}
    # NOTE(review): no timeout is set on this request — consider adding one.
    response = requests.post(url, body, headers=headers)
    xmldoc = etree.fromstring(response.content)
    # The STS returns the token inside a WS-Security BinarySecurityToken element.
    token = xmldoc.find(
        './/{http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd}BinarySecurityToken'
    )
    if token is not None:
        return token.text
    else:
        raise Exception('Check username/password and rootsite')
def function[GetSecurityToken, parameter[self, username, password]]: constant[ Grabs a security Token to authenticate to Office 365 services ] variable[url] assign[=] constant[https://login.microsoftonline.com/extSTS.srf] variable[body] assign[=] binary_operation[constant[ <s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://www.w3.org/2005/08/addressing" xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"> <s:Header> <a:Action s:mustUnderstand="1">http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue</a:Action> <a:ReplyTo> <a:Address>http://www.w3.org/2005/08/addressing/anonymous</a:Address> </a:ReplyTo> <a:To s:mustUnderstand="1">https://login.microsoftonline.com/extSTS.srf</a:To> <o:Security s:mustUnderstand="1" xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"> <o:UsernameToken> <o:Username>%s</o:Username> <o:Password>%s</o:Password> </o:UsernameToken> </o:Security> </s:Header> <s:Body> <t:RequestSecurityToken xmlns:t="http://schemas.xmlsoap.org/ws/2005/02/trust"> <wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy"> <a:EndpointReference> <a:Address>%s</a:Address> </a:EndpointReference> </wsp:AppliesTo> <t:KeyType>http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey</t:KeyType> <t:RequestType>http://schemas.xmlsoap.org/ws/2005/02/trust/Issue</t:RequestType> <t:TokenType>urn:oasis:names:tc:SAML:1.0:assertion</t:TokenType> </t:RequestSecurityToken> </s:Body> </s:Envelope>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1256290>, <ast.Name object at 0x7da1b1256050>, <ast.Attribute object at 0x7da1b1254ac0>]]] variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b1256920>], [<ast.Constant object at 0x7da1b1254460>]] variable[response] assign[=] call[name[requests].post, parameter[name[url], name[body]]] variable[xmldoc] assign[=] call[name[etree].fromstring, parameter[name[response].content]] 
variable[token] assign[=] call[name[xmldoc].find, parameter[constant[.//{http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd}BinarySecurityToken]]] if compare[name[token] is_not constant[None]] begin[:] return[name[token].text]
keyword[def] identifier[GetSecurityToken] ( identifier[self] , identifier[username] , identifier[password] ): literal[string] identifier[url] = literal[string] identifier[body] = literal[string] %( identifier[username] , identifier[password] , identifier[self] . identifier[share_point_site] ) identifier[headers] ={ literal[string] : literal[string] } identifier[response] = identifier[requests] . identifier[post] ( identifier[url] , identifier[body] , identifier[headers] = identifier[headers] ) identifier[xmldoc] = identifier[etree] . identifier[fromstring] ( identifier[response] . identifier[content] ) identifier[token] = identifier[xmldoc] . identifier[find] ( literal[string] ) keyword[if] identifier[token] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[token] . identifier[text] keyword[else] : keyword[raise] identifier[Exception] ( literal[string] )
def GetSecurityToken(self, username, password): """ Grabs a security Token to authenticate to Office 365 services """ url = 'https://login.microsoftonline.com/extSTS.srf' body = '\n <s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"\n xmlns:a="http://www.w3.org/2005/08/addressing"\n xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">\n <s:Header>\n <a:Action s:mustUnderstand="1">http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue</a:Action>\n <a:ReplyTo>\n <a:Address>http://www.w3.org/2005/08/addressing/anonymous</a:Address>\n </a:ReplyTo>\n <a:To s:mustUnderstand="1">https://login.microsoftonline.com/extSTS.srf</a:To>\n <o:Security s:mustUnderstand="1"\n xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">\n <o:UsernameToken>\n <o:Username>%s</o:Username>\n <o:Password>%s</o:Password>\n </o:UsernameToken>\n </o:Security>\n </s:Header>\n <s:Body>\n <t:RequestSecurityToken xmlns:t="http://schemas.xmlsoap.org/ws/2005/02/trust">\n <wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy">\n <a:EndpointReference>\n <a:Address>%s</a:Address>\n </a:EndpointReference>\n </wsp:AppliesTo>\n <t:KeyType>http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey</t:KeyType>\n <t:RequestType>http://schemas.xmlsoap.org/ws/2005/02/trust/Issue</t:RequestType>\n <t:TokenType>urn:oasis:names:tc:SAML:1.0:assertion</t:TokenType>\n </t:RequestSecurityToken>\n </s:Body>\n </s:Envelope>' % (username, password, self.share_point_site) headers = {'accept': 'application/json;odata=verbose'} response = requests.post(url, body, headers=headers) xmldoc = etree.fromstring(response.content) token = xmldoc.find('.//{http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd}BinarySecurityToken') if token is not None: return token.text # depends on [control=['if'], data=['token']] else: raise Exception('Check username/password and rootsite')
def regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask, pixel_scales, origin=(0.0, 0.0)): """Compute the (y,x) arc second coordinates at the centre of every pixel of a 2D mask array of shape (rows, columns). Coordinates are defined from the top-left corner, where the first unmasked pixel corresponds to index 0. The pixel \ at the top-left of the array has negative x and y values in arc seconds. The regular grid is returned on an array of shape (total_unmasked_pixels, 2). y coordinates are stored in the 0 \ index of the second dimension, x coordinates in the 1 index. Parameters ---------- mask : ndarray A 2D array of bools, where *False* values mean unmasked and are therefore included as part of the calculated \ regular grid. pixel_scales : (float, float) The (y,x) arc-second to pixel scales of the 2D mask array. origin : (float, flloat) The (y,x) origin of the 2D array, which the regular grid is shifted around. Returns -------- ndarray A regular grid of (y,x) arc-second coordinates at the centre of every pixel unmasked pixel on the 2D mask \ array. The regular grid array has dimensions (total_unmasked_pixels, 2). Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) regular_grid_1d = regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_2d = regular_grid_2d_from_shape_pixel_scales_and_origin(mask.shape, pixel_scales, origin) total_regular_pixels = mask_util.total_regular_pixels_from_mask(mask) regular_grid_1d = np.zeros(shape=(total_regular_pixels, 2)) pixel_count = 0 for y in range(mask.shape[0]): for x in range(mask.shape[1]): if not mask[y, x]: regular_grid_1d[pixel_count, :] = grid_2d[y, x] pixel_count += 1 return regular_grid_1d
def function[regular_grid_1d_masked_from_mask_pixel_scales_and_origin, parameter[mask, pixel_scales, origin]]: constant[Compute the (y,x) arc second coordinates at the centre of every pixel of a 2D mask array of shape (rows, columns). Coordinates are defined from the top-left corner, where the first unmasked pixel corresponds to index 0. The pixel at the top-left of the array has negative x and y values in arc seconds. The regular grid is returned on an array of shape (total_unmasked_pixels, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Parameters ---------- mask : ndarray A 2D array of bools, where *False* values mean unmasked and are therefore included as part of the calculated regular grid. pixel_scales : (float, float) The (y,x) arc-second to pixel scales of the 2D mask array. origin : (float, flloat) The (y,x) origin of the 2D array, which the regular grid is shifted around. Returns -------- ndarray A regular grid of (y,x) arc-second coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The regular grid array has dimensions (total_unmasked_pixels, 2). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) regular_grid_1d = regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) ] variable[grid_2d] assign[=] call[name[regular_grid_2d_from_shape_pixel_scales_and_origin], parameter[name[mask].shape, name[pixel_scales], name[origin]]] variable[total_regular_pixels] assign[=] call[name[mask_util].total_regular_pixels_from_mask, parameter[name[mask]]] variable[regular_grid_1d] assign[=] call[name[np].zeros, parameter[]] variable[pixel_count] assign[=] constant[0] for taget[name[y]] in starred[call[name[range], parameter[call[name[mask].shape][constant[0]]]]] begin[:] for taget[name[x]] in starred[call[name[range], parameter[call[name[mask].shape][constant[1]]]]] begin[:] if <ast.UnaryOp object at 0x7da204622e60> begin[:] call[name[regular_grid_1d]][tuple[[<ast.Name object at 0x7da204623670>, <ast.Slice object at 0x7da204623730>]]] assign[=] call[name[grid_2d]][tuple[[<ast.Name object at 0x7da204621e10>, <ast.Name object at 0x7da204622680>]]] <ast.AugAssign object at 0x7da204623190> return[name[regular_grid_1d]]
keyword[def] identifier[regular_grid_1d_masked_from_mask_pixel_scales_and_origin] ( identifier[mask] , identifier[pixel_scales] , identifier[origin] =( literal[int] , literal[int] )): literal[string] identifier[grid_2d] = identifier[regular_grid_2d_from_shape_pixel_scales_and_origin] ( identifier[mask] . identifier[shape] , identifier[pixel_scales] , identifier[origin] ) identifier[total_regular_pixels] = identifier[mask_util] . identifier[total_regular_pixels_from_mask] ( identifier[mask] ) identifier[regular_grid_1d] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[total_regular_pixels] , literal[int] )) identifier[pixel_count] = literal[int] keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[mask] . identifier[shape] [ literal[int] ]): keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[mask] . identifier[shape] [ literal[int] ]): keyword[if] keyword[not] identifier[mask] [ identifier[y] , identifier[x] ]: identifier[regular_grid_1d] [ identifier[pixel_count] ,:]= identifier[grid_2d] [ identifier[y] , identifier[x] ] identifier[pixel_count] += literal[int] keyword[return] identifier[regular_grid_1d]
def regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask, pixel_scales, origin=(0.0, 0.0)): """Compute the (y,x) arc second coordinates at the centre of every pixel of a 2D mask array of shape (rows, columns). Coordinates are defined from the top-left corner, where the first unmasked pixel corresponds to index 0. The pixel at the top-left of the array has negative x and y values in arc seconds. The regular grid is returned on an array of shape (total_unmasked_pixels, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Parameters ---------- mask : ndarray A 2D array of bools, where *False* values mean unmasked and are therefore included as part of the calculated regular grid. pixel_scales : (float, float) The (y,x) arc-second to pixel scales of the 2D mask array. origin : (float, flloat) The (y,x) origin of the 2D array, which the regular grid is shifted around. Returns -------- ndarray A regular grid of (y,x) arc-second coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The regular grid array has dimensions (total_unmasked_pixels, 2). Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) regular_grid_1d = regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_2d = regular_grid_2d_from_shape_pixel_scales_and_origin(mask.shape, pixel_scales, origin) total_regular_pixels = mask_util.total_regular_pixels_from_mask(mask) regular_grid_1d = np.zeros(shape=(total_regular_pixels, 2)) pixel_count = 0 for y in range(mask.shape[0]): for x in range(mask.shape[1]): if not mask[y, x]: regular_grid_1d[pixel_count, :] = grid_2d[y, x] pixel_count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['y']] return regular_grid_1d
def line_spacing(self): """ The spacing between baselines of successive lines in this paragraph. A float value indicates a number of lines. A |Length| value indicates a fixed spacing. Value is contained in `./a:lnSpc/a:spcPts/@val` or `./a:lnSpc/a:spcPct/@val`. Value is |None| if no element is present. """ lnSpc = self.lnSpc if lnSpc is None: return None if lnSpc.spcPts is not None: return lnSpc.spcPts.val return lnSpc.spcPct.val
def function[line_spacing, parameter[self]]: constant[ The spacing between baselines of successive lines in this paragraph. A float value indicates a number of lines. A |Length| value indicates a fixed spacing. Value is contained in `./a:lnSpc/a:spcPts/@val` or `./a:lnSpc/a:spcPct/@val`. Value is |None| if no element is present. ] variable[lnSpc] assign[=] name[self].lnSpc if compare[name[lnSpc] is constant[None]] begin[:] return[constant[None]] if compare[name[lnSpc].spcPts is_not constant[None]] begin[:] return[name[lnSpc].spcPts.val] return[name[lnSpc].spcPct.val]
keyword[def] identifier[line_spacing] ( identifier[self] ): literal[string] identifier[lnSpc] = identifier[self] . identifier[lnSpc] keyword[if] identifier[lnSpc] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[if] identifier[lnSpc] . identifier[spcPts] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[lnSpc] . identifier[spcPts] . identifier[val] keyword[return] identifier[lnSpc] . identifier[spcPct] . identifier[val]
def line_spacing(self): """ The spacing between baselines of successive lines in this paragraph. A float value indicates a number of lines. A |Length| value indicates a fixed spacing. Value is contained in `./a:lnSpc/a:spcPts/@val` or `./a:lnSpc/a:spcPct/@val`. Value is |None| if no element is present. """ lnSpc = self.lnSpc if lnSpc is None: return None # depends on [control=['if'], data=[]] if lnSpc.spcPts is not None: return lnSpc.spcPts.val # depends on [control=['if'], data=[]] return lnSpc.spcPct.val
def subscribe(self, user_token, topic): """ Subscribe a user to the given topic. :param str user_token: The token of the user. :param str topic: The topic. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ response = _request('POST', url=self.url_v1('/user/subscriptions/' + topic), user_agent=self.user_agent, user_token=user_token, ) _raise_for_status(response)
def function[subscribe, parameter[self, user_token, topic]]: constant[ Subscribe a user to the given topic. :param str user_token: The token of the user. :param str topic: The topic. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. ] variable[response] assign[=] call[name[_request], parameter[constant[POST]]] call[name[_raise_for_status], parameter[name[response]]]
keyword[def] identifier[subscribe] ( identifier[self] , identifier[user_token] , identifier[topic] ): literal[string] identifier[response] = identifier[_request] ( literal[string] , identifier[url] = identifier[self] . identifier[url_v1] ( literal[string] + identifier[topic] ), identifier[user_agent] = identifier[self] . identifier[user_agent] , identifier[user_token] = identifier[user_token] , ) identifier[_raise_for_status] ( identifier[response] )
def subscribe(self, user_token, topic): """ Subscribe a user to the given topic. :param str user_token: The token of the user. :param str topic: The topic. :raises `requests.exceptions.HTTPError`: If an HTTP error occurred. """ response = _request('POST', url=self.url_v1('/user/subscriptions/' + topic), user_agent=self.user_agent, user_token=user_token) _raise_for_status(response)
def construct_transformed_regex(annotations): """ construct a regex that matches possible fields in a transformed file annotations is a set of which keys in BARCODEINFO are present in the file """ re_string = '.*' if "cellular" in annotations: re_string += ":CELL_(?P<CB>.*)" if "molecular" in annotations: re_string += ":UMI_(?P<MB>\w*)" if "sample" in annotations: re_string += ":SAMPLE_(?P<SB>\w*)" if re_string == ".*": logger.error("No annotation present on this file, aborting.") sys.exit(1) return re_string
def function[construct_transformed_regex, parameter[annotations]]: constant[ construct a regex that matches possible fields in a transformed file annotations is a set of which keys in BARCODEINFO are present in the file ] variable[re_string] assign[=] constant[.*] if compare[constant[cellular] in name[annotations]] begin[:] <ast.AugAssign object at 0x7da20e9557b0> if compare[constant[molecular] in name[annotations]] begin[:] <ast.AugAssign object at 0x7da20e9556c0> if compare[constant[sample] in name[annotations]] begin[:] <ast.AugAssign object at 0x7da20e954d60> if compare[name[re_string] equal[==] constant[.*]] begin[:] call[name[logger].error, parameter[constant[No annotation present on this file, aborting.]]] call[name[sys].exit, parameter[constant[1]]] return[name[re_string]]
keyword[def] identifier[construct_transformed_regex] ( identifier[annotations] ): literal[string] identifier[re_string] = literal[string] keyword[if] literal[string] keyword[in] identifier[annotations] : identifier[re_string] += literal[string] keyword[if] literal[string] keyword[in] identifier[annotations] : identifier[re_string] += literal[string] keyword[if] literal[string] keyword[in] identifier[annotations] : identifier[re_string] += literal[string] keyword[if] identifier[re_string] == literal[string] : identifier[logger] . identifier[error] ( literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[return] identifier[re_string]
def construct_transformed_regex(annotations): """ construct a regex that matches possible fields in a transformed file annotations is a set of which keys in BARCODEINFO are present in the file """ re_string = '.*' if 'cellular' in annotations: re_string += ':CELL_(?P<CB>.*)' # depends on [control=['if'], data=[]] if 'molecular' in annotations: re_string += ':UMI_(?P<MB>\\w*)' # depends on [control=['if'], data=[]] if 'sample' in annotations: re_string += ':SAMPLE_(?P<SB>\\w*)' # depends on [control=['if'], data=[]] if re_string == '.*': logger.error('No annotation present on this file, aborting.') sys.exit(1) # depends on [control=['if'], data=[]] return re_string
def union(self, other, sort=None): """ Form the union of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- union : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) if len(other) == 0 or self.equals(other): return self._get_reconciled_name_object(other) if len(self) == 0: return other._get_reconciled_name_object(self) # TODO: is_dtype_union_equal is a hack around # 1. buggy set ops with duplicates (GH #13432) # 2. CategoricalIndex lacking setops (GH #10186) # Once those are fixed, this workaround can be removed if not is_dtype_union_equal(self.dtype, other.dtype): this = self.astype('O') other = other.astype('O') return this.union(other, sort=sort) # TODO(EA): setops-refactor, clean all this up if is_period_dtype(self) or is_datetime64tz_dtype(self): lvals = self._ndarray_values else: lvals = self._values if is_period_dtype(other) or is_datetime64tz_dtype(other): rvals = other._ndarray_values else: rvals = other._values if sort is None and self.is_monotonic and other.is_monotonic: try: result = self._outer_indexer(lvals, rvals)[0] except TypeError: # incomparable objects result = list(lvals) # worth making this faster? 
a very unusual case value_set = set(lvals) result.extend([x for x in rvals if x not in value_set]) else: indexer = self.get_indexer(other) indexer, = (indexer == -1).nonzero() if len(indexer) > 0: other_diff = algos.take_nd(rvals, indexer, allow_fill=False) result = _concat._concat_compat((lvals, other_diff)) else: result = lvals if sort is None: try: result = sorting.safe_sort(result) except TypeError as e: warnings.warn("{}, sort order is undefined for " "incomparable objects".format(e), RuntimeWarning, stacklevel=3) # for subclasses return self._wrap_setop_result(other, result)
def function[union, parameter[self, other, sort]]: constant[ Form the union of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- union : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64') ] call[name[self]._validate_sort_keyword, parameter[name[sort]]] call[name[self]._assert_can_do_setop, parameter[name[other]]] variable[other] assign[=] call[name[ensure_index], parameter[name[other]]] if <ast.BoolOp object at 0x7da18eb55f90> begin[:] return[call[name[self]._get_reconciled_name_object, parameter[name[other]]]] if compare[call[name[len], parameter[name[self]]] equal[==] constant[0]] begin[:] return[call[name[other]._get_reconciled_name_object, parameter[name[self]]]] if <ast.UnaryOp object at 0x7da18eb575e0> begin[:] variable[this] assign[=] call[name[self].astype, parameter[constant[O]]] variable[other] assign[=] call[name[other].astype, parameter[constant[O]]] return[call[name[this].union, parameter[name[other]]]] if <ast.BoolOp object at 0x7da18eb546d0> begin[:] variable[lvals] assign[=] name[self]._ndarray_values if <ast.BoolOp object at 0x7da18eb55c00> begin[:] variable[rvals] assign[=] name[other]._ndarray_values if <ast.BoolOp object at 0x7da18eb57fd0> begin[:] <ast.Try object at 0x7da18eb54f70> return[call[name[self]._wrap_setop_result, parameter[name[other], name[result]]]]
keyword[def] identifier[union] ( identifier[self] , identifier[other] , identifier[sort] = keyword[None] ): literal[string] identifier[self] . identifier[_validate_sort_keyword] ( identifier[sort] ) identifier[self] . identifier[_assert_can_do_setop] ( identifier[other] ) identifier[other] = identifier[ensure_index] ( identifier[other] ) keyword[if] identifier[len] ( identifier[other] )== literal[int] keyword[or] identifier[self] . identifier[equals] ( identifier[other] ): keyword[return] identifier[self] . identifier[_get_reconciled_name_object] ( identifier[other] ) keyword[if] identifier[len] ( identifier[self] )== literal[int] : keyword[return] identifier[other] . identifier[_get_reconciled_name_object] ( identifier[self] ) keyword[if] keyword[not] identifier[is_dtype_union_equal] ( identifier[self] . identifier[dtype] , identifier[other] . identifier[dtype] ): identifier[this] = identifier[self] . identifier[astype] ( literal[string] ) identifier[other] = identifier[other] . identifier[astype] ( literal[string] ) keyword[return] identifier[this] . identifier[union] ( identifier[other] , identifier[sort] = identifier[sort] ) keyword[if] identifier[is_period_dtype] ( identifier[self] ) keyword[or] identifier[is_datetime64tz_dtype] ( identifier[self] ): identifier[lvals] = identifier[self] . identifier[_ndarray_values] keyword[else] : identifier[lvals] = identifier[self] . identifier[_values] keyword[if] identifier[is_period_dtype] ( identifier[other] ) keyword[or] identifier[is_datetime64tz_dtype] ( identifier[other] ): identifier[rvals] = identifier[other] . identifier[_ndarray_values] keyword[else] : identifier[rvals] = identifier[other] . identifier[_values] keyword[if] identifier[sort] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[is_monotonic] keyword[and] identifier[other] . identifier[is_monotonic] : keyword[try] : identifier[result] = identifier[self] . 
identifier[_outer_indexer] ( identifier[lvals] , identifier[rvals] )[ literal[int] ] keyword[except] identifier[TypeError] : identifier[result] = identifier[list] ( identifier[lvals] ) identifier[value_set] = identifier[set] ( identifier[lvals] ) identifier[result] . identifier[extend] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[rvals] keyword[if] identifier[x] keyword[not] keyword[in] identifier[value_set] ]) keyword[else] : identifier[indexer] = identifier[self] . identifier[get_indexer] ( identifier[other] ) identifier[indexer] ,=( identifier[indexer] ==- literal[int] ). identifier[nonzero] () keyword[if] identifier[len] ( identifier[indexer] )> literal[int] : identifier[other_diff] = identifier[algos] . identifier[take_nd] ( identifier[rvals] , identifier[indexer] , identifier[allow_fill] = keyword[False] ) identifier[result] = identifier[_concat] . identifier[_concat_compat] (( identifier[lvals] , identifier[other_diff] )) keyword[else] : identifier[result] = identifier[lvals] keyword[if] identifier[sort] keyword[is] keyword[None] : keyword[try] : identifier[result] = identifier[sorting] . identifier[safe_sort] ( identifier[result] ) keyword[except] identifier[TypeError] keyword[as] identifier[e] : identifier[warnings] . identifier[warn] ( literal[string] literal[string] . identifier[format] ( identifier[e] ), identifier[RuntimeWarning] , identifier[stacklevel] = literal[int] ) keyword[return] identifier[self] . identifier[_wrap_setop_result] ( identifier[other] , identifier[result] )
def union(self, other, sort=None): """ Form the union of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- union : Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) if len(other) == 0 or self.equals(other): return self._get_reconciled_name_object(other) # depends on [control=['if'], data=[]] if len(self) == 0: return other._get_reconciled_name_object(self) # depends on [control=['if'], data=[]] # TODO: is_dtype_union_equal is a hack around # 1. buggy set ops with duplicates (GH #13432) # 2. 
CategoricalIndex lacking setops (GH #10186) # Once those are fixed, this workaround can be removed if not is_dtype_union_equal(self.dtype, other.dtype): this = self.astype('O') other = other.astype('O') return this.union(other, sort=sort) # depends on [control=['if'], data=[]] # TODO(EA): setops-refactor, clean all this up if is_period_dtype(self) or is_datetime64tz_dtype(self): lvals = self._ndarray_values # depends on [control=['if'], data=[]] else: lvals = self._values if is_period_dtype(other) or is_datetime64tz_dtype(other): rvals = other._ndarray_values # depends on [control=['if'], data=[]] else: rvals = other._values if sort is None and self.is_monotonic and other.is_monotonic: try: result = self._outer_indexer(lvals, rvals)[0] # depends on [control=['try'], data=[]] except TypeError: # incomparable objects result = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) result.extend([x for x in rvals if x not in value_set]) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: indexer = self.get_indexer(other) (indexer,) = (indexer == -1).nonzero() if len(indexer) > 0: other_diff = algos.take_nd(rvals, indexer, allow_fill=False) result = _concat._concat_compat((lvals, other_diff)) # depends on [control=['if'], data=[]] else: result = lvals if sort is None: try: result = sorting.safe_sort(result) # depends on [control=['try'], data=[]] except TypeError as e: warnings.warn('{}, sort order is undefined for incomparable objects'.format(e), RuntimeWarning, stacklevel=3) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # for subclasses return self._wrap_setop_result(other, result)
def create(self, name): """ Creates a configuration file named *name*. If there is already a configuration file with that name, the existing file is returned. :param name: The name of the configuration file. :type name: ``string`` :return: The :class:`ConfigurationFile` object. """ # This has to be overridden to handle the plumbing of creating # a ConfigurationFile (which is a Collection) instead of some # Entity. if not isinstance(name, basestring): raise ValueError("Invalid name: %s" % repr(name)) response = self.post(__conf=name) if response.status == 303: return self[name] elif response.status == 201: return ConfigurationFile(self.service, PATH_CONF % name, item=Stanza, state={'title': name}) else: raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status)
def function[create, parameter[self, name]]: constant[ Creates a configuration file named *name*. If there is already a configuration file with that name, the existing file is returned. :param name: The name of the configuration file. :type name: ``string`` :return: The :class:`ConfigurationFile` object. ] if <ast.UnaryOp object at 0x7da1b176af50> begin[:] <ast.Raise object at 0x7da1b1768e20> variable[response] assign[=] call[name[self].post, parameter[]] if compare[name[response].status equal[==] constant[303]] begin[:] return[call[name[self]][name[name]]]
keyword[def] identifier[create] ( identifier[self] , identifier[name] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[basestring] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[repr] ( identifier[name] )) identifier[response] = identifier[self] . identifier[post] ( identifier[__conf] = identifier[name] ) keyword[if] identifier[response] . identifier[status] == literal[int] : keyword[return] identifier[self] [ identifier[name] ] keyword[elif] identifier[response] . identifier[status] == literal[int] : keyword[return] identifier[ConfigurationFile] ( identifier[self] . identifier[service] , identifier[PATH_CONF] % identifier[name] , identifier[item] = identifier[Stanza] , identifier[state] ={ literal[string] : identifier[name] }) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[response] . identifier[status] )
def create(self, name): """ Creates a configuration file named *name*. If there is already a configuration file with that name, the existing file is returned. :param name: The name of the configuration file. :type name: ``string`` :return: The :class:`ConfigurationFile` object. """ # This has to be overridden to handle the plumbing of creating # a ConfigurationFile (which is a Collection) instead of some # Entity. if not isinstance(name, basestring): raise ValueError('Invalid name: %s' % repr(name)) # depends on [control=['if'], data=[]] response = self.post(__conf=name) if response.status == 303: return self[name] # depends on [control=['if'], data=[]] elif response.status == 201: return ConfigurationFile(self.service, PATH_CONF % name, item=Stanza, state={'title': name}) # depends on [control=['if'], data=[]] else: raise ValueError('Unexpected status code %s returned from creating a stanza' % response.status)
def read_bonedata(self, fid): """Read bone data from an acclaim skeleton file stream.""" bone_count = 0 lin = self.read_line(fid) while lin[0]!=':': parts = lin.split() if parts[0] == 'begin': bone_count += 1 self.vertices.append(vertex(name = '', id=np.NaN, meta={'name': [], 'id': [], 'offset': [], 'orientation': [], 'axis': [0., 0., 0.], 'axis_order': [], 'C': np.eye(3), 'Cinv': np.eye(3), 'channels': [], 'bodymass': [], 'confmass': [], 'order': [], 'rot_ind': [], 'pos_ind': [], 'limits': [], 'xyz': np.array([0., 0., 0.]), 'rot': np.eye(3)})) lin = self.read_line(fid) elif parts[0]=='id': self.vertices[bone_count].id = int(parts[1]) lin = self.read_line(fid) self.vertices[bone_count].children = [] elif parts[0]=='name': self.vertices[bone_count].name = parts[1] lin = self.read_line(fid) elif parts[0]=='direction': direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) lin = self.read_line(fid) elif parts[0]=='length': lgth = float(parts[1]) lin = self.read_line(fid) elif parts[0]=='axis': self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) # order is reversed compared to bvh self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower() lin = self.read_line(fid) elif parts[0]=='dof': order = [] for i in range(1, len(parts)): if parts[i]== 'rx': chan = 'Xrotation' order.append('x') elif parts[i] =='ry': chan = 'Yrotation' order.append('y') elif parts[i] == 'rz': chan = 'Zrotation' order.append('z') elif parts[i] == 'tx': chan = 'Xposition' elif parts[i] == 'ty': chan = 'Yposition' elif parts[i] == 'tz': chan = 'Zposition' elif parts[i] == 'l': chan = 'length' self.vertices[bone_count].meta['channels'].append(chan) # order is reversed compared to bvh self.vertices[bone_count].meta['order'] = order[::-1] lin = self.read_line(fid) elif parts[0]=='limits': self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]] lin = self.read_line(fid) while lin !='end': 
parts = lin.split() self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])]) lin = self.read_line(fid) self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits']) elif parts[0]=='end': self.vertices[bone_count].meta['offset'] = direction*lgth lin = self.read_line(fid) return lin
def function[read_bonedata, parameter[self, fid]]: constant[Read bone data from an acclaim skeleton file stream.] variable[bone_count] assign[=] constant[0] variable[lin] assign[=] call[name[self].read_line, parameter[name[fid]]] while compare[call[name[lin]][constant[0]] not_equal[!=] constant[:]] begin[:] variable[parts] assign[=] call[name[lin].split, parameter[]] if compare[call[name[parts]][constant[0]] equal[==] constant[begin]] begin[:] <ast.AugAssign object at 0x7da1b1b17940> call[name[self].vertices.append, parameter[call[name[vertex], parameter[]]]] variable[lin] assign[=] call[name[self].read_line, parameter[name[fid]]] return[name[lin]]
keyword[def] identifier[read_bonedata] ( identifier[self] , identifier[fid] ): literal[string] identifier[bone_count] = literal[int] identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[while] identifier[lin] [ literal[int] ]!= literal[string] : identifier[parts] = identifier[lin] . identifier[split] () keyword[if] identifier[parts] [ literal[int] ]== literal[string] : identifier[bone_count] += literal[int] identifier[self] . identifier[vertices] . identifier[append] ( identifier[vertex] ( identifier[name] = literal[string] , identifier[id] = identifier[np] . identifier[NaN] , identifier[meta] ={ literal[string] :[], literal[string] :[], literal[string] :[], literal[string] :[], literal[string] :[ literal[int] , literal[int] , literal[int] ], literal[string] :[], literal[string] : identifier[np] . identifier[eye] ( literal[int] ), literal[string] : identifier[np] . identifier[eye] ( literal[int] ), literal[string] :[], literal[string] :[], literal[string] :[], literal[string] :[], literal[string] :[], literal[string] :[], literal[string] :[], literal[string] : identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] ]), literal[string] : identifier[np] . identifier[eye] ( literal[int] )})) identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[id] = identifier[int] ( identifier[parts] [ literal[int] ]) identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[children] =[] keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[name] = identifier[parts] [ literal[int] ] identifier[lin] = identifier[self] . 
identifier[read_line] ( identifier[fid] ) keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[direction] = identifier[np] . identifier[array] ([ identifier[float] ( identifier[parts] [ literal[int] ]), identifier[float] ( identifier[parts] [ literal[int] ]), identifier[float] ( identifier[parts] [ literal[int] ])]) identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[lgth] = identifier[float] ( identifier[parts] [ literal[int] ]) identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]= identifier[np] . identifier[array] ([ identifier[float] ( identifier[parts] [ literal[int] ]), identifier[float] ( identifier[parts] [ literal[int] ]), identifier[float] ( identifier[parts] [ literal[int] ])]) identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]= identifier[parts] [- literal[int] ][::- literal[int] ]. identifier[lower] () identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[order] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[parts] )): keyword[if] identifier[parts] [ identifier[i] ]== literal[string] : identifier[chan] = literal[string] identifier[order] . identifier[append] ( literal[string] ) keyword[elif] identifier[parts] [ identifier[i] ]== literal[string] : identifier[chan] = literal[string] identifier[order] . identifier[append] ( literal[string] ) keyword[elif] identifier[parts] [ identifier[i] ]== literal[string] : identifier[chan] = literal[string] identifier[order] . 
identifier[append] ( literal[string] ) keyword[elif] identifier[parts] [ identifier[i] ]== literal[string] : identifier[chan] = literal[string] keyword[elif] identifier[parts] [ identifier[i] ]== literal[string] : identifier[chan] = literal[string] keyword[elif] identifier[parts] [ identifier[i] ]== literal[string] : identifier[chan] = literal[string] keyword[elif] identifier[parts] [ identifier[i] ]== literal[string] : identifier[chan] = literal[string] identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]. identifier[append] ( identifier[chan] ) identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]= identifier[order] [::- literal[int] ] identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]=[[ identifier[float] ( identifier[parts] [ literal[int] ][ literal[int] :]), identifier[float] ( identifier[parts] [ literal[int] ][:- literal[int] ])]] identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[while] identifier[lin] != literal[string] : identifier[parts] = identifier[lin] . identifier[split] () identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]. identifier[append] ([ identifier[float] ( identifier[parts] [ literal[int] ][ literal[int] :]), identifier[float] ( identifier[parts] [ literal[int] ][:- literal[int] ])]) identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]= identifier[np] . identifier[array] ( identifier[self] . identifier[vertices] [ identifier[bone_count] ]. 
identifier[meta] [ literal[string] ]) keyword[elif] identifier[parts] [ literal[int] ]== literal[string] : identifier[self] . identifier[vertices] [ identifier[bone_count] ]. identifier[meta] [ literal[string] ]= identifier[direction] * identifier[lgth] identifier[lin] = identifier[self] . identifier[read_line] ( identifier[fid] ) keyword[return] identifier[lin]
def read_bonedata(self, fid): """Read bone data from an acclaim skeleton file stream.""" bone_count = 0 lin = self.read_line(fid) while lin[0] != ':': parts = lin.split() if parts[0] == 'begin': bone_count += 1 self.vertices.append(vertex(name='', id=np.NaN, meta={'name': [], 'id': [], 'offset': [], 'orientation': [], 'axis': [0.0, 0.0, 0.0], 'axis_order': [], 'C': np.eye(3), 'Cinv': np.eye(3), 'channels': [], 'bodymass': [], 'confmass': [], 'order': [], 'rot_ind': [], 'pos_ind': [], 'limits': [], 'xyz': np.array([0.0, 0.0, 0.0]), 'rot': np.eye(3)})) lin = self.read_line(fid) # depends on [control=['if'], data=[]] elif parts[0] == 'id': self.vertices[bone_count].id = int(parts[1]) lin = self.read_line(fid) self.vertices[bone_count].children = [] # depends on [control=['if'], data=[]] elif parts[0] == 'name': self.vertices[bone_count].name = parts[1] lin = self.read_line(fid) # depends on [control=['if'], data=[]] elif parts[0] == 'direction': direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) lin = self.read_line(fid) # depends on [control=['if'], data=[]] elif parts[0] == 'length': lgth = float(parts[1]) lin = self.read_line(fid) # depends on [control=['if'], data=[]] elif parts[0] == 'axis': self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]), float(parts[2]), float(parts[3])]) # order is reversed compared to bvh self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower() lin = self.read_line(fid) # depends on [control=['if'], data=[]] elif parts[0] == 'dof': order = [] for i in range(1, len(parts)): if parts[i] == 'rx': chan = 'Xrotation' order.append('x') # depends on [control=['if'], data=[]] elif parts[i] == 'ry': chan = 'Yrotation' order.append('y') # depends on [control=['if'], data=[]] elif parts[i] == 'rz': chan = 'Zrotation' order.append('z') # depends on [control=['if'], data=[]] elif parts[i] == 'tx': chan = 'Xposition' # depends on [control=['if'], data=[]] elif parts[i] == 'ty': chan = 'Yposition' # 
depends on [control=['if'], data=[]] elif parts[i] == 'tz': chan = 'Zposition' # depends on [control=['if'], data=[]] elif parts[i] == 'l': chan = 'length' # depends on [control=['if'], data=[]] self.vertices[bone_count].meta['channels'].append(chan) # depends on [control=['for'], data=['i']] # order is reversed compared to bvh self.vertices[bone_count].meta['order'] = order[::-1] lin = self.read_line(fid) # depends on [control=['if'], data=[]] elif parts[0] == 'limits': self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]] lin = self.read_line(fid) while lin != 'end': parts = lin.split() self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])]) lin = self.read_line(fid) # depends on [control=['while'], data=['lin']] self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits']) # depends on [control=['if'], data=[]] elif parts[0] == 'end': self.vertices[bone_count].meta['offset'] = direction * lgth lin = self.read_line(fid) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] return lin
def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS): """Binds a set of scalar handlers for an inclusive range of low-nibble values. Args: tids (Sequence[int]): The Type IDs to bind to. scalar_factory (Callable): The factory for the scalar parsing function. This function can itself return a function representing a thunk to defer the scalar parsing or a direct value. lns (Sequence[int]): The low-nibble lengths to bind to. """ handler = partial(_length_scalar_handler, scalar_factory) return _bind_length_handlers(tids, handler, lns)
def function[_bind_length_scalar_handlers, parameter[tids, scalar_factory, lns]]: constant[Binds a set of scalar handlers for an inclusive range of low-nibble values. Args: tids (Sequence[int]): The Type IDs to bind to. scalar_factory (Callable): The factory for the scalar parsing function. This function can itself return a function representing a thunk to defer the scalar parsing or a direct value. lns (Sequence[int]): The low-nibble lengths to bind to. ] variable[handler] assign[=] call[name[partial], parameter[name[_length_scalar_handler], name[scalar_factory]]] return[call[name[_bind_length_handlers], parameter[name[tids], name[handler], name[lns]]]]
keyword[def] identifier[_bind_length_scalar_handlers] ( identifier[tids] , identifier[scalar_factory] , identifier[lns] = identifier[_NON_ZERO_LENGTH_LNS] ): literal[string] identifier[handler] = identifier[partial] ( identifier[_length_scalar_handler] , identifier[scalar_factory] ) keyword[return] identifier[_bind_length_handlers] ( identifier[tids] , identifier[handler] , identifier[lns] )
def _bind_length_scalar_handlers(tids, scalar_factory, lns=_NON_ZERO_LENGTH_LNS): """Binds a set of scalar handlers for an inclusive range of low-nibble values. Args: tids (Sequence[int]): The Type IDs to bind to. scalar_factory (Callable): The factory for the scalar parsing function. This function can itself return a function representing a thunk to defer the scalar parsing or a direct value. lns (Sequence[int]): The low-nibble lengths to bind to. """ handler = partial(_length_scalar_handler, scalar_factory) return _bind_length_handlers(tids, handler, lns)
def substitute_variables(cls, configuration, value, ref): """ Substitute variables in `value` from `configuration` where any path reference is relative to `ref`. Parameters ---------- configuration : dict configuration (required to resolve intra-document references) value : value to resolve substitutions for ref : str path to `value` in the `configuration` Returns ------- value : value after substitution """ if isinstance(value, str): # Substitute all intra-document references while True: match = cls.REF_PATTERN.search(value) if match is None: break path = os.path.join(os.path.dirname(ref), match.group('path')) try: value = value.replace( match.group(0), str(util.get_value(configuration, path))) except KeyError: raise KeyError(path) # Substitute all variable references while True: match = cls.VAR_PATTERN.search(value) if match is None: break value = value.replace( match.group(0), str(util.get_value(cls.VARIABLES, match.group('path'), '/'))) return value
def function[substitute_variables, parameter[cls, configuration, value, ref]]: constant[ Substitute variables in `value` from `configuration` where any path reference is relative to `ref`. Parameters ---------- configuration : dict configuration (required to resolve intra-document references) value : value to resolve substitutions for ref : str path to `value` in the `configuration` Returns ------- value : value after substitution ] if call[name[isinstance], parameter[name[value], name[str]]] begin[:] while constant[True] begin[:] variable[match] assign[=] call[name[cls].REF_PATTERN.search, parameter[name[value]]] if compare[name[match] is constant[None]] begin[:] break variable[path] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[ref]]], call[name[match].group, parameter[constant[path]]]]] <ast.Try object at 0x7da20e9b1cf0> while constant[True] begin[:] variable[match] assign[=] call[name[cls].VAR_PATTERN.search, parameter[name[value]]] if compare[name[match] is constant[None]] begin[:] break variable[value] assign[=] call[name[value].replace, parameter[call[name[match].group, parameter[constant[0]]], call[name[str], parameter[call[name[util].get_value, parameter[name[cls].VARIABLES, call[name[match].group, parameter[constant[path]]], constant[/]]]]]]] return[name[value]]
keyword[def] identifier[substitute_variables] ( identifier[cls] , identifier[configuration] , identifier[value] , identifier[ref] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ): keyword[while] keyword[True] : identifier[match] = identifier[cls] . identifier[REF_PATTERN] . identifier[search] ( identifier[value] ) keyword[if] identifier[match] keyword[is] keyword[None] : keyword[break] identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[ref] ), identifier[match] . identifier[group] ( literal[string] )) keyword[try] : identifier[value] = identifier[value] . identifier[replace] ( identifier[match] . identifier[group] ( literal[int] ), identifier[str] ( identifier[util] . identifier[get_value] ( identifier[configuration] , identifier[path] ))) keyword[except] identifier[KeyError] : keyword[raise] identifier[KeyError] ( identifier[path] ) keyword[while] keyword[True] : identifier[match] = identifier[cls] . identifier[VAR_PATTERN] . identifier[search] ( identifier[value] ) keyword[if] identifier[match] keyword[is] keyword[None] : keyword[break] identifier[value] = identifier[value] . identifier[replace] ( identifier[match] . identifier[group] ( literal[int] ), identifier[str] ( identifier[util] . identifier[get_value] ( identifier[cls] . identifier[VARIABLES] , identifier[match] . identifier[group] ( literal[string] ), literal[string] ))) keyword[return] identifier[value]
def substitute_variables(cls, configuration, value, ref): """ Substitute variables in `value` from `configuration` where any path reference is relative to `ref`. Parameters ---------- configuration : dict configuration (required to resolve intra-document references) value : value to resolve substitutions for ref : str path to `value` in the `configuration` Returns ------- value : value after substitution """ if isinstance(value, str): # Substitute all intra-document references while True: match = cls.REF_PATTERN.search(value) if match is None: break # depends on [control=['if'], data=[]] path = os.path.join(os.path.dirname(ref), match.group('path')) try: value = value.replace(match.group(0), str(util.get_value(configuration, path))) # depends on [control=['try'], data=[]] except KeyError: raise KeyError(path) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # Substitute all variable references while True: match = cls.VAR_PATTERN.search(value) if match is None: break # depends on [control=['if'], data=[]] value = value.replace(match.group(0), str(util.get_value(cls.VARIABLES, match.group('path'), '/'))) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] return value
def initrepo(repopath, bare, shared): """ Initialize an activegit repo. Default makes base shared repo that should be cloned for users """ ag = activegit.ActiveGit(repopath, bare=bare, shared=shared)
def function[initrepo, parameter[repopath, bare, shared]]: constant[ Initialize an activegit repo. Default makes base shared repo that should be cloned for users ] variable[ag] assign[=] call[name[activegit].ActiveGit, parameter[name[repopath]]]
keyword[def] identifier[initrepo] ( identifier[repopath] , identifier[bare] , identifier[shared] ): literal[string] identifier[ag] = identifier[activegit] . identifier[ActiveGit] ( identifier[repopath] , identifier[bare] = identifier[bare] , identifier[shared] = identifier[shared] )
def initrepo(repopath, bare, shared): """ Initialize an activegit repo. Default makes base shared repo that should be cloned for users """ ag = activegit.ActiveGit(repopath, bare=bare, shared=shared)
def do_where(self, taskid: int) -> None: """Show stack frames for a task""" task = task_by_id(taskid, self._loop) if task: self._sout.write(_format_stack(task)) self._sout.write('\n') else: self._sout.write('No task %d\n' % taskid)
def function[do_where, parameter[self, taskid]]: constant[Show stack frames for a task] variable[task] assign[=] call[name[task_by_id], parameter[name[taskid], name[self]._loop]] if name[task] begin[:] call[name[self]._sout.write, parameter[call[name[_format_stack], parameter[name[task]]]]] call[name[self]._sout.write, parameter[constant[ ]]]
keyword[def] identifier[do_where] ( identifier[self] , identifier[taskid] : identifier[int] )-> keyword[None] : literal[string] identifier[task] = identifier[task_by_id] ( identifier[taskid] , identifier[self] . identifier[_loop] ) keyword[if] identifier[task] : identifier[self] . identifier[_sout] . identifier[write] ( identifier[_format_stack] ( identifier[task] )) identifier[self] . identifier[_sout] . identifier[write] ( literal[string] ) keyword[else] : identifier[self] . identifier[_sout] . identifier[write] ( literal[string] % identifier[taskid] )
def do_where(self, taskid: int) -> None: """Show stack frames for a task""" task = task_by_id(taskid, self._loop) if task: self._sout.write(_format_stack(task)) self._sout.write('\n') # depends on [control=['if'], data=[]] else: self._sout.write('No task %d\n' % taskid)
def new_pic_inline(self, image_descriptor, width, height): """Return a newly-created `w:inline` element. The element contains the image specified by *image_descriptor* and is scaled based on the values of *width* and *height*. """ rId, image = self.get_or_add_image(image_descriptor) cx, cy = image.scaled_dimensions(width, height) shape_id, filename = self.next_id, image.filename return CT_Inline.new_pic_inline(shape_id, rId, filename, cx, cy)
def function[new_pic_inline, parameter[self, image_descriptor, width, height]]: constant[Return a newly-created `w:inline` element. The element contains the image specified by *image_descriptor* and is scaled based on the values of *width* and *height*. ] <ast.Tuple object at 0x7da1b216c580> assign[=] call[name[self].get_or_add_image, parameter[name[image_descriptor]]] <ast.Tuple object at 0x7da1b216ef80> assign[=] call[name[image].scaled_dimensions, parameter[name[width], name[height]]] <ast.Tuple object at 0x7da1b216d480> assign[=] tuple[[<ast.Attribute object at 0x7da1b216df30>, <ast.Attribute object at 0x7da1b216caf0>]] return[call[name[CT_Inline].new_pic_inline, parameter[name[shape_id], name[rId], name[filename], name[cx], name[cy]]]]
keyword[def] identifier[new_pic_inline] ( identifier[self] , identifier[image_descriptor] , identifier[width] , identifier[height] ): literal[string] identifier[rId] , identifier[image] = identifier[self] . identifier[get_or_add_image] ( identifier[image_descriptor] ) identifier[cx] , identifier[cy] = identifier[image] . identifier[scaled_dimensions] ( identifier[width] , identifier[height] ) identifier[shape_id] , identifier[filename] = identifier[self] . identifier[next_id] , identifier[image] . identifier[filename] keyword[return] identifier[CT_Inline] . identifier[new_pic_inline] ( identifier[shape_id] , identifier[rId] , identifier[filename] , identifier[cx] , identifier[cy] )
def new_pic_inline(self, image_descriptor, width, height): """Return a newly-created `w:inline` element. The element contains the image specified by *image_descriptor* and is scaled based on the values of *width* and *height*. """ (rId, image) = self.get_or_add_image(image_descriptor) (cx, cy) = image.scaled_dimensions(width, height) (shape_id, filename) = (self.next_id, image.filename) return CT_Inline.new_pic_inline(shape_id, rId, filename, cx, cy)
def http_call(self, url=None, **kwargs): """ Call the target URL via HTTP and return the JSON result """ if not url: url = self.search_url http_func, arg_name = self.get_http_method_arg_name() # Build the argument dictionary to pass in the http function _kwargs = { arg_name: kwargs, } # The actual HTTP call response = http_func( url=url.format(**kwargs), headers=self.get_http_headers(), **_kwargs ) # Error handling if response.status_code != 200: logger.warning('Invalid Request for `%s`', response.url) # Raising a "requests" exception response.raise_for_status() return response.json()
def function[http_call, parameter[self, url]]: constant[ Call the target URL via HTTP and return the JSON result ] if <ast.UnaryOp object at 0x7da20e956da0> begin[:] variable[url] assign[=] name[self].search_url <ast.Tuple object at 0x7da20c993640> assign[=] call[name[self].get_http_method_arg_name, parameter[]] variable[_kwargs] assign[=] dictionary[[<ast.Name object at 0x7da20c992440>], [<ast.Name object at 0x7da20c993460>]] variable[response] assign[=] call[name[http_func], parameter[]] if compare[name[response].status_code not_equal[!=] constant[200]] begin[:] call[name[logger].warning, parameter[constant[Invalid Request for `%s`], name[response].url]] call[name[response].raise_for_status, parameter[]] return[call[name[response].json, parameter[]]]
keyword[def] identifier[http_call] ( identifier[self] , identifier[url] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[url] : identifier[url] = identifier[self] . identifier[search_url] identifier[http_func] , identifier[arg_name] = identifier[self] . identifier[get_http_method_arg_name] () identifier[_kwargs] ={ identifier[arg_name] : identifier[kwargs] , } identifier[response] = identifier[http_func] ( identifier[url] = identifier[url] . identifier[format] (** identifier[kwargs] ), identifier[headers] = identifier[self] . identifier[get_http_headers] (), ** identifier[_kwargs] ) keyword[if] identifier[response] . identifier[status_code] != literal[int] : identifier[logger] . identifier[warning] ( literal[string] , identifier[response] . identifier[url] ) identifier[response] . identifier[raise_for_status] () keyword[return] identifier[response] . identifier[json] ()
def http_call(self, url=None, **kwargs): """ Call the target URL via HTTP and return the JSON result """ if not url: url = self.search_url # depends on [control=['if'], data=[]] (http_func, arg_name) = self.get_http_method_arg_name() # Build the argument dictionary to pass in the http function _kwargs = {arg_name: kwargs} # The actual HTTP call response = http_func(url=url.format(**kwargs), headers=self.get_http_headers(), **_kwargs) # Error handling if response.status_code != 200: logger.warning('Invalid Request for `%s`', response.url) # Raising a "requests" exception response.raise_for_status() # depends on [control=['if'], data=[]] return response.json()
def get_exports(self, volume=None, state=None, offset=None, limit=None): """ Fetches exports for this volume. :param volume: Optional volume identifier. :param state: Optional state. :param offset: Pagination offset. :param limit: Pagination limit. :return: Collection object. """ return self._api.exports.query(project=self.id, volume=volume, state=state, offset=offset, limit=limit)
def function[get_exports, parameter[self, volume, state, offset, limit]]: constant[ Fetches exports for this volume. :param volume: Optional volume identifier. :param state: Optional state. :param offset: Pagination offset. :param limit: Pagination limit. :return: Collection object. ] return[call[name[self]._api.exports.query, parameter[]]]
keyword[def] identifier[get_exports] ( identifier[self] , identifier[volume] = keyword[None] , identifier[state] = keyword[None] , identifier[offset] = keyword[None] , identifier[limit] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[_api] . identifier[exports] . identifier[query] ( identifier[project] = identifier[self] . identifier[id] , identifier[volume] = identifier[volume] , identifier[state] = identifier[state] , identifier[offset] = identifier[offset] , identifier[limit] = identifier[limit] )
def get_exports(self, volume=None, state=None, offset=None, limit=None): """ Fetches exports for this volume. :param volume: Optional volume identifier. :param state: Optional state. :param offset: Pagination offset. :param limit: Pagination limit. :return: Collection object. """ return self._api.exports.query(project=self.id, volume=volume, state=state, offset=offset, limit=limit)
def convert_path_to_api_gateway(path): """ Converts a Path from a Flask defined path to one that is accepted by Api Gateway Examples: '/id/<id>' => '/id/{id}' '/<path:proxy>' => '/{proxy+}' :param str path: Path to convert to Api Gateway defined path :return str: Path representing an Api Gateway path """ proxy_sub_path = FLASK_TO_APIGW_REGEX.sub(PROXY_PATH_PARAMS, path) # Replace the '<' and '>' with '{' and '}' respectively return proxy_sub_path.replace(LEFT_ANGLE_BRACKET, LEFT_BRACKET).replace(RIGHT_ANGLE_BRACKET, RIGHT_BRACKET)
def function[convert_path_to_api_gateway, parameter[path]]: constant[ Converts a Path from a Flask defined path to one that is accepted by Api Gateway Examples: '/id/<id>' => '/id/{id}' '/<path:proxy>' => '/{proxy+}' :param str path: Path to convert to Api Gateway defined path :return str: Path representing an Api Gateway path ] variable[proxy_sub_path] assign[=] call[name[FLASK_TO_APIGW_REGEX].sub, parameter[name[PROXY_PATH_PARAMS], name[path]]] return[call[call[name[proxy_sub_path].replace, parameter[name[LEFT_ANGLE_BRACKET], name[LEFT_BRACKET]]].replace, parameter[name[RIGHT_ANGLE_BRACKET], name[RIGHT_BRACKET]]]]
keyword[def] identifier[convert_path_to_api_gateway] ( identifier[path] ): literal[string] identifier[proxy_sub_path] = identifier[FLASK_TO_APIGW_REGEX] . identifier[sub] ( identifier[PROXY_PATH_PARAMS] , identifier[path] ) keyword[return] identifier[proxy_sub_path] . identifier[replace] ( identifier[LEFT_ANGLE_BRACKET] , identifier[LEFT_BRACKET] ). identifier[replace] ( identifier[RIGHT_ANGLE_BRACKET] , identifier[RIGHT_BRACKET] )
def convert_path_to_api_gateway(path): """ Converts a Path from a Flask defined path to one that is accepted by Api Gateway Examples: '/id/<id>' => '/id/{id}' '/<path:proxy>' => '/{proxy+}' :param str path: Path to convert to Api Gateway defined path :return str: Path representing an Api Gateway path """ proxy_sub_path = FLASK_TO_APIGW_REGEX.sub(PROXY_PATH_PARAMS, path) # Replace the '<' and '>' with '{' and '}' respectively return proxy_sub_path.replace(LEFT_ANGLE_BRACKET, LEFT_BRACKET).replace(RIGHT_ANGLE_BRACKET, RIGHT_BRACKET)
def train(hparams, *args): """Train your awesome model. :param hparams: The arguments to run the model with. """ # Initialize experiments and track all the hyperparameters exp = Experiment( name=hparams.test_tube_exp_name, # Location to save the metrics. save_dir=hparams.log_path, autosave=False, ) exp.argparse(hparams) # Pretend to train. x = torch.rand((1, hparams.x_val)) for train_step in range(0, 100): y = torch.rand((hparams.x_val, 1)) out = x.mm(y) exp.log({'fake_err': out.item()}) # Save exp when . exp.save()
def function[train, parameter[hparams]]: constant[Train your awesome model. :param hparams: The arguments to run the model with. ] variable[exp] assign[=] call[name[Experiment], parameter[]] call[name[exp].argparse, parameter[name[hparams]]] variable[x] assign[=] call[name[torch].rand, parameter[tuple[[<ast.Constant object at 0x7da1b03a5a80>, <ast.Attribute object at 0x7da1b03a48b0>]]]] for taget[name[train_step]] in starred[call[name[range], parameter[constant[0], constant[100]]]] begin[:] variable[y] assign[=] call[name[torch].rand, parameter[tuple[[<ast.Attribute object at 0x7da1b033a9e0>, <ast.Constant object at 0x7da1b0339570>]]]] variable[out] assign[=] call[name[x].mm, parameter[name[y]]] call[name[exp].log, parameter[dictionary[[<ast.Constant object at 0x7da1b033a050>], [<ast.Call object at 0x7da1b0338c70>]]]] call[name[exp].save, parameter[]]
keyword[def] identifier[train] ( identifier[hparams] ,* identifier[args] ): literal[string] identifier[exp] = identifier[Experiment] ( identifier[name] = identifier[hparams] . identifier[test_tube_exp_name] , identifier[save_dir] = identifier[hparams] . identifier[log_path] , identifier[autosave] = keyword[False] , ) identifier[exp] . identifier[argparse] ( identifier[hparams] ) identifier[x] = identifier[torch] . identifier[rand] (( literal[int] , identifier[hparams] . identifier[x_val] )) keyword[for] identifier[train_step] keyword[in] identifier[range] ( literal[int] , literal[int] ): identifier[y] = identifier[torch] . identifier[rand] (( identifier[hparams] . identifier[x_val] , literal[int] )) identifier[out] = identifier[x] . identifier[mm] ( identifier[y] ) identifier[exp] . identifier[log] ({ literal[string] : identifier[out] . identifier[item] ()}) identifier[exp] . identifier[save] ()
def train(hparams, *args): """Train your awesome model. :param hparams: The arguments to run the model with. """ # Initialize experiments and track all the hyperparameters # Location to save the metrics. exp = Experiment(name=hparams.test_tube_exp_name, save_dir=hparams.log_path, autosave=False) exp.argparse(hparams) # Pretend to train. x = torch.rand((1, hparams.x_val)) for train_step in range(0, 100): y = torch.rand((hparams.x_val, 1)) out = x.mm(y) exp.log({'fake_err': out.item()}) # depends on [control=['for'], data=[]] # Save exp when . exp.save()
def mutate(self): """Mutates current section.""" section = self.section project_name = self.project_name section.project_name = project_name self.contribute_runtime_dir() main = section.main_process main.set_naming_params(prefix='[%s] ' % project_name) main.set_pid_file( self.get_pid_filepath(), before_priv_drop=False, # For vacuum to cleanup properly. safe=True, ) section.master_process.set_basic_params( fifo_file=self.get_fifo_filepath(), ) # todo maybe autoreload in debug apps = section.applications apps.set_basic_params( manage_script_name=True, ) self.contribute_error_pages() self.contribute_static()
def function[mutate, parameter[self]]: constant[Mutates current section.] variable[section] assign[=] name[self].section variable[project_name] assign[=] name[self].project_name name[section].project_name assign[=] name[project_name] call[name[self].contribute_runtime_dir, parameter[]] variable[main] assign[=] name[section].main_process call[name[main].set_naming_params, parameter[]] call[name[main].set_pid_file, parameter[call[name[self].get_pid_filepath, parameter[]]]] call[name[section].master_process.set_basic_params, parameter[]] variable[apps] assign[=] name[section].applications call[name[apps].set_basic_params, parameter[]] call[name[self].contribute_error_pages, parameter[]] call[name[self].contribute_static, parameter[]]
keyword[def] identifier[mutate] ( identifier[self] ): literal[string] identifier[section] = identifier[self] . identifier[section] identifier[project_name] = identifier[self] . identifier[project_name] identifier[section] . identifier[project_name] = identifier[project_name] identifier[self] . identifier[contribute_runtime_dir] () identifier[main] = identifier[section] . identifier[main_process] identifier[main] . identifier[set_naming_params] ( identifier[prefix] = literal[string] % identifier[project_name] ) identifier[main] . identifier[set_pid_file] ( identifier[self] . identifier[get_pid_filepath] (), identifier[before_priv_drop] = keyword[False] , identifier[safe] = keyword[True] , ) identifier[section] . identifier[master_process] . identifier[set_basic_params] ( identifier[fifo_file] = identifier[self] . identifier[get_fifo_filepath] (), ) identifier[apps] = identifier[section] . identifier[applications] identifier[apps] . identifier[set_basic_params] ( identifier[manage_script_name] = keyword[True] , ) identifier[self] . identifier[contribute_error_pages] () identifier[self] . identifier[contribute_static] ()
def mutate(self): """Mutates current section.""" section = self.section project_name = self.project_name section.project_name = project_name self.contribute_runtime_dir() main = section.main_process main.set_naming_params(prefix='[%s] ' % project_name) # For vacuum to cleanup properly. main.set_pid_file(self.get_pid_filepath(), before_priv_drop=False, safe=True) section.master_process.set_basic_params(fifo_file=self.get_fifo_filepath()) # todo maybe autoreload in debug apps = section.applications apps.set_basic_params(manage_script_name=True) self.contribute_error_pages() self.contribute_static()
def AddEventTag(self, event_tag): """Adds an event tag. Args: event_tag (EventTag): an event tag. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed. """ self._RaiseIfNotWritable() self._storage_file.AddEventTag(event_tag) self._session.event_labels_counter['total'] += 1 for label in event_tag.labels: self._session.event_labels_counter[label] += 1 self.number_of_event_tags += 1
def function[AddEventTag, parameter[self, event_tag]]: constant[Adds an event tag. Args: event_tag (EventTag): an event tag. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed. ] call[name[self]._RaiseIfNotWritable, parameter[]] call[name[self]._storage_file.AddEventTag, parameter[name[event_tag]]] <ast.AugAssign object at 0x7da18c4cece0> for taget[name[label]] in starred[name[event_tag].labels] begin[:] <ast.AugAssign object at 0x7da18c4cfaf0> <ast.AugAssign object at 0x7da18c4cdab0>
keyword[def] identifier[AddEventTag] ( identifier[self] , identifier[event_tag] ): literal[string] identifier[self] . identifier[_RaiseIfNotWritable] () identifier[self] . identifier[_storage_file] . identifier[AddEventTag] ( identifier[event_tag] ) identifier[self] . identifier[_session] . identifier[event_labels_counter] [ literal[string] ]+= literal[int] keyword[for] identifier[label] keyword[in] identifier[event_tag] . identifier[labels] : identifier[self] . identifier[_session] . identifier[event_labels_counter] [ identifier[label] ]+= literal[int] identifier[self] . identifier[number_of_event_tags] += literal[int]
def AddEventTag(self, event_tag): """Adds an event tag. Args: event_tag (EventTag): an event tag. Raises: IOError: when the storage writer is closed. OSError: when the storage writer is closed. """ self._RaiseIfNotWritable() self._storage_file.AddEventTag(event_tag) self._session.event_labels_counter['total'] += 1 for label in event_tag.labels: self._session.event_labels_counter[label] += 1 # depends on [control=['for'], data=['label']] self.number_of_event_tags += 1
def run(function_name=None, function_input=None): """Triggers the execution environment entry point processor. Use this function in the program entry point code: .. code-block:: python import dxpy @dxpy.entry_point('main') def hello(i): pass dxpy.run() This method may be used to invoke the program either in a production environment (inside the execution environment) or for local debugging (in the debug harness), as follows: If the environment variable *DX_JOB_ID* is set, the processor retrieves the job with that ID from the API server. The job's *function* field indicates the function name to be invoked. That function name is looked up in the table of all methods decorated with *@dxpy.entry_point('name')* in the module from which :func:`run()` was called, and the matching method is invoked (with the job's input supplied as parameters). This is the mode of operation used in the DNAnexus execution environment. .. warning:: The parameters *function_name* and *function_input* are disregarded in this mode of operation. If the environment variable *DX_JOB_ID* is not set, the function name may be given in *function_name*; if not set, it is set by the environment variable *DX_TEST_FUNCTION*. The function input may be given in *function_input*; if not set, it is set by the local file *job_input.json* which is expected to be present. The absence of *DX_JOB_ID* signals to :func:`run()` that execution is happening in the debug harness. In this mode of operation, all calls to :func:`dxpy.bindings.dxjob.new_dxjob()` (and higher level handler methods which use it) are intercepted, and :func:`run()` is invoked instead with appropriate inputs. 
""" global RUN_COUNT RUN_COUNT += 1 dx_working_dir = os.getcwd() if dxpy.JOB_ID is not None: logging.basicConfig() try: logging.getLogger().addHandler(dxpy.DXLogHandler()) except dxpy.exceptions.DXError: print("TODO: FIXME: the EE client should die if logging is not available") job = dxpy.describe(dxpy.JOB_ID) else: if function_name is None: function_name = os.environ.get('DX_TEST_FUNCTION', 'main') if function_input is None: with open("job_input.json", "r") as fh: function_input = json.load(fh) job = {'function': function_name, 'input': function_input} with open("job_error_reserved_space", "w") as fh: fh.write("This file contains reserved space for writing job errors in case the filesystem becomes full.\n" + " "*1024*64) print("Invoking", job.get('function'), "with", job.get('input')) try: result = ENTRY_POINT_TABLE[job['function']](**job['input']) except dxpy.AppError as e: save_error(e, dx_working_dir, error_type="AppError") raise except Exception as e: save_error(e, dx_working_dir) raise if result is not None: # TODO: protect against client removing its original working directory os.chdir(dx_working_dir) if USING_PYTHON2: # On python-2 we need to use binary mode with open("job_output.json", "wb") as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write(b"\n") else: with open("job_output.json", "w") as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write("\n") return result
def function[run, parameter[function_name, function_input]]: constant[Triggers the execution environment entry point processor. Use this function in the program entry point code: .. code-block:: python import dxpy @dxpy.entry_point('main') def hello(i): pass dxpy.run() This method may be used to invoke the program either in a production environment (inside the execution environment) or for local debugging (in the debug harness), as follows: If the environment variable *DX_JOB_ID* is set, the processor retrieves the job with that ID from the API server. The job's *function* field indicates the function name to be invoked. That function name is looked up in the table of all methods decorated with *@dxpy.entry_point('name')* in the module from which :func:`run()` was called, and the matching method is invoked (with the job's input supplied as parameters). This is the mode of operation used in the DNAnexus execution environment. .. warning:: The parameters *function_name* and *function_input* are disregarded in this mode of operation. If the environment variable *DX_JOB_ID* is not set, the function name may be given in *function_name*; if not set, it is set by the environment variable *DX_TEST_FUNCTION*. The function input may be given in *function_input*; if not set, it is set by the local file *job_input.json* which is expected to be present. The absence of *DX_JOB_ID* signals to :func:`run()` that execution is happening in the debug harness. In this mode of operation, all calls to :func:`dxpy.bindings.dxjob.new_dxjob()` (and higher level handler methods which use it) are intercepted, and :func:`run()` is invoked instead with appropriate inputs. 
] <ast.Global object at 0x7da18eb56980> <ast.AugAssign object at 0x7da18eb56dd0> variable[dx_working_dir] assign[=] call[name[os].getcwd, parameter[]] if compare[name[dxpy].JOB_ID is_not constant[None]] begin[:] call[name[logging].basicConfig, parameter[]] <ast.Try object at 0x7da18eb57ac0> variable[job] assign[=] call[name[dxpy].describe, parameter[name[dxpy].JOB_ID]] with call[name[open], parameter[constant[job_error_reserved_space], constant[w]]] begin[:] call[name[fh].write, parameter[binary_operation[constant[This file contains reserved space for writing job errors in case the filesystem becomes full. ] + binary_operation[binary_operation[constant[ ] * constant[1024]] * constant[64]]]]] call[name[print], parameter[constant[Invoking], call[name[job].get, parameter[constant[function]]], constant[with], call[name[job].get, parameter[constant[input]]]]] <ast.Try object at 0x7da20c6c7760> if compare[name[result] is_not constant[None]] begin[:] call[name[os].chdir, parameter[name[dx_working_dir]]] if name[USING_PYTHON2] begin[:] with call[name[open], parameter[constant[job_output.json], constant[wb]]] begin[:] call[name[json].dump, parameter[name[result], name[fh]]] call[name[fh].write, parameter[constant[b'\n']]] return[name[result]]
keyword[def] identifier[run] ( identifier[function_name] = keyword[None] , identifier[function_input] = keyword[None] ): literal[string] keyword[global] identifier[RUN_COUNT] identifier[RUN_COUNT] += literal[int] identifier[dx_working_dir] = identifier[os] . identifier[getcwd] () keyword[if] identifier[dxpy] . identifier[JOB_ID] keyword[is] keyword[not] keyword[None] : identifier[logging] . identifier[basicConfig] () keyword[try] : identifier[logging] . identifier[getLogger] (). identifier[addHandler] ( identifier[dxpy] . identifier[DXLogHandler] ()) keyword[except] identifier[dxpy] . identifier[exceptions] . identifier[DXError] : identifier[print] ( literal[string] ) identifier[job] = identifier[dxpy] . identifier[describe] ( identifier[dxpy] . identifier[JOB_ID] ) keyword[else] : keyword[if] identifier[function_name] keyword[is] keyword[None] : identifier[function_name] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[function_input] keyword[is] keyword[None] : keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[fh] : identifier[function_input] = identifier[json] . identifier[load] ( identifier[fh] ) identifier[job] ={ literal[string] : identifier[function_name] , literal[string] : identifier[function_input] } keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[fh] : identifier[fh] . identifier[write] ( literal[string] + literal[string] * literal[int] * literal[int] ) identifier[print] ( literal[string] , identifier[job] . identifier[get] ( literal[string] ), literal[string] , identifier[job] . identifier[get] ( literal[string] )) keyword[try] : identifier[result] = identifier[ENTRY_POINT_TABLE] [ identifier[job] [ literal[string] ]](** identifier[job] [ literal[string] ]) keyword[except] identifier[dxpy] . 
identifier[AppError] keyword[as] identifier[e] : identifier[save_error] ( identifier[e] , identifier[dx_working_dir] , identifier[error_type] = literal[string] ) keyword[raise] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[save_error] ( identifier[e] , identifier[dx_working_dir] ) keyword[raise] keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] : identifier[os] . identifier[chdir] ( identifier[dx_working_dir] ) keyword[if] identifier[USING_PYTHON2] : keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[fh] : identifier[json] . identifier[dump] ( identifier[result] , identifier[fh] , identifier[indent] = literal[int] , identifier[cls] = identifier[DXJSONEncoder] ) identifier[fh] . identifier[write] ( literal[string] ) keyword[else] : keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[fh] : identifier[json] . identifier[dump] ( identifier[result] , identifier[fh] , identifier[indent] = literal[int] , identifier[cls] = identifier[DXJSONEncoder] ) identifier[fh] . identifier[write] ( literal[string] ) keyword[return] identifier[result]
def run(function_name=None, function_input=None): """Triggers the execution environment entry point processor. Use this function in the program entry point code: .. code-block:: python import dxpy @dxpy.entry_point('main') def hello(i): pass dxpy.run() This method may be used to invoke the program either in a production environment (inside the execution environment) or for local debugging (in the debug harness), as follows: If the environment variable *DX_JOB_ID* is set, the processor retrieves the job with that ID from the API server. The job's *function* field indicates the function name to be invoked. That function name is looked up in the table of all methods decorated with *@dxpy.entry_point('name')* in the module from which :func:`run()` was called, and the matching method is invoked (with the job's input supplied as parameters). This is the mode of operation used in the DNAnexus execution environment. .. warning:: The parameters *function_name* and *function_input* are disregarded in this mode of operation. If the environment variable *DX_JOB_ID* is not set, the function name may be given in *function_name*; if not set, it is set by the environment variable *DX_TEST_FUNCTION*. The function input may be given in *function_input*; if not set, it is set by the local file *job_input.json* which is expected to be present. The absence of *DX_JOB_ID* signals to :func:`run()` that execution is happening in the debug harness. In this mode of operation, all calls to :func:`dxpy.bindings.dxjob.new_dxjob()` (and higher level handler methods which use it) are intercepted, and :func:`run()` is invoked instead with appropriate inputs. 
""" global RUN_COUNT RUN_COUNT += 1 dx_working_dir = os.getcwd() if dxpy.JOB_ID is not None: logging.basicConfig() try: logging.getLogger().addHandler(dxpy.DXLogHandler()) # depends on [control=['try'], data=[]] except dxpy.exceptions.DXError: print('TODO: FIXME: the EE client should die if logging is not available') # depends on [control=['except'], data=[]] job = dxpy.describe(dxpy.JOB_ID) # depends on [control=['if'], data=[]] else: if function_name is None: function_name = os.environ.get('DX_TEST_FUNCTION', 'main') # depends on [control=['if'], data=['function_name']] if function_input is None: with open('job_input.json', 'r') as fh: function_input = json.load(fh) # depends on [control=['with'], data=['fh']] # depends on [control=['if'], data=['function_input']] job = {'function': function_name, 'input': function_input} with open('job_error_reserved_space', 'w') as fh: fh.write('This file contains reserved space for writing job errors in case the filesystem becomes full.\n' + ' ' * 1024 * 64) # depends on [control=['with'], data=['fh']] print('Invoking', job.get('function'), 'with', job.get('input')) try: result = ENTRY_POINT_TABLE[job['function']](**job['input']) # depends on [control=['try'], data=[]] except dxpy.AppError as e: save_error(e, dx_working_dir, error_type='AppError') raise # depends on [control=['except'], data=['e']] except Exception as e: save_error(e, dx_working_dir) raise # depends on [control=['except'], data=['e']] if result is not None: # TODO: protect against client removing its original working directory os.chdir(dx_working_dir) if USING_PYTHON2: # On python-2 we need to use binary mode with open('job_output.json', 'wb') as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write(b'\n') # depends on [control=['with'], data=['fh']] # depends on [control=['if'], data=[]] else: with open('job_output.json', 'w') as fh: json.dump(result, fh, indent=2, cls=DXJSONEncoder) fh.write('\n') # depends on [control=['with'], data=['fh']] # 
depends on [control=['if'], data=['result']] return result
def setnx(self, name, value): """ Set the value as a string in the key only if the key doesn't exist. :param name: str the name of the redis key :param value: :return: Future() """ with self.pipe as pipe: return pipe.setnx(self.redis_key(name), self.valueparse.encode(value))
def function[setnx, parameter[self, name, value]]: constant[ Set the value as a string in the key only if the key doesn't exist. :param name: str the name of the redis key :param value: :return: Future() ] with name[self].pipe begin[:] return[call[name[pipe].setnx, parameter[call[name[self].redis_key, parameter[name[name]]], call[name[self].valueparse.encode, parameter[name[value]]]]]]
keyword[def] identifier[setnx] ( identifier[self] , identifier[name] , identifier[value] ): literal[string] keyword[with] identifier[self] . identifier[pipe] keyword[as] identifier[pipe] : keyword[return] identifier[pipe] . identifier[setnx] ( identifier[self] . identifier[redis_key] ( identifier[name] ), identifier[self] . identifier[valueparse] . identifier[encode] ( identifier[value] ))
def setnx(self, name, value): """ Set the value as a string in the key only if the key doesn't exist. :param name: str the name of the redis key :param value: :return: Future() """ with self.pipe as pipe: return pipe.setnx(self.redis_key(name), self.valueparse.encode(value)) # depends on [control=['with'], data=['pipe']]
def remove(self, connection): """Remove the connection from the pool :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError :raises: ConnectionBusyError """ cid = id(connection) if cid not in self.connections: raise ConnectionNotFoundError(self.id, cid) self.connection_handle(connection).close() with self._lock: del self.connections[cid] LOGGER.debug('Pool %s removed connection %s', self.id, cid)
def function[remove, parameter[self, connection]]: constant[Remove the connection from the pool :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError :raises: ConnectionBusyError ] variable[cid] assign[=] call[name[id], parameter[name[connection]]] if compare[name[cid] <ast.NotIn object at 0x7da2590d7190> name[self].connections] begin[:] <ast.Raise object at 0x7da2054a4520> call[call[name[self].connection_handle, parameter[name[connection]]].close, parameter[]] with name[self]._lock begin[:] <ast.Delete object at 0x7da2054a77c0> call[name[LOGGER].debug, parameter[constant[Pool %s removed connection %s], name[self].id, name[cid]]]
keyword[def] identifier[remove] ( identifier[self] , identifier[connection] ): literal[string] identifier[cid] = identifier[id] ( identifier[connection] ) keyword[if] identifier[cid] keyword[not] keyword[in] identifier[self] . identifier[connections] : keyword[raise] identifier[ConnectionNotFoundError] ( identifier[self] . identifier[id] , identifier[cid] ) identifier[self] . identifier[connection_handle] ( identifier[connection] ). identifier[close] () keyword[with] identifier[self] . identifier[_lock] : keyword[del] identifier[self] . identifier[connections] [ identifier[cid] ] identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[self] . identifier[id] , identifier[cid] )
def remove(self, connection): """Remove the connection from the pool :param connection: The connection to remove :type connection: psycopg2.extensions.connection :raises: ConnectionNotFoundError :raises: ConnectionBusyError """ cid = id(connection) if cid not in self.connections: raise ConnectionNotFoundError(self.id, cid) # depends on [control=['if'], data=['cid']] self.connection_handle(connection).close() with self._lock: del self.connections[cid] # depends on [control=['with'], data=[]] LOGGER.debug('Pool %s removed connection %s', self.id, cid)
def extract_headings_from_history(history_lines): """Return list of dicts with version-like headers. We check for patterns like '2.10 (unreleased)', so with either 'unreleased' or a date between parenthesis as that's the format we're using. Just fix up your first heading and you should be set. As an alternative, we support an alternative format used by some zope/plone paster templates: '2.10 - unreleased' or '2.10 ~ unreleased' Note that new headers that zest.releaser sets are in our preferred form (so 'version (date)'). """ pattern = re.compile(r""" (?P<version>.+) # Version string \( # Opening ( (?P<date>.+) # Date \) # Closing ) \W*$ # Possible whitespace at end of line. """, re.VERBOSE) alt_pattern = re.compile(r""" ^ # Start of line (?P<version>.+) # Version string \ [-~]\ # space dash/twiggle space (?P<date>.+) # Date \W*$ # Possible whitespace at end of line. """, re.VERBOSE) headings = [] line_number = 0 for line in history_lines: match = pattern.search(line) alt_match = alt_pattern.search(line) if match: result = {'line': line_number, 'version': match.group('version').strip(), 'date': match.group('date'.strip())} headings.append(result) logger.debug("Found heading: %r", result) if alt_match: result = {'line': line_number, 'version': alt_match.group('version').strip(), 'date': alt_match.group('date'.strip())} headings.append(result) logger.debug("Found alternative heading: %r", result) line_number += 1 return headings
def function[extract_headings_from_history, parameter[history_lines]]: constant[Return list of dicts with version-like headers. We check for patterns like '2.10 (unreleased)', so with either 'unreleased' or a date between parenthesis as that's the format we're using. Just fix up your first heading and you should be set. As an alternative, we support an alternative format used by some zope/plone paster templates: '2.10 - unreleased' or '2.10 ~ unreleased' Note that new headers that zest.releaser sets are in our preferred form (so 'version (date)'). ] variable[pattern] assign[=] call[name[re].compile, parameter[constant[ (?P<version>.+) # Version string \( # Opening ( (?P<date>.+) # Date \) # Closing ) \W*$ # Possible whitespace at end of line. ], name[re].VERBOSE]] variable[alt_pattern] assign[=] call[name[re].compile, parameter[constant[ ^ # Start of line (?P<version>.+) # Version string \ [-~]\ # space dash/twiggle space (?P<date>.+) # Date \W*$ # Possible whitespace at end of line. ], name[re].VERBOSE]] variable[headings] assign[=] list[[]] variable[line_number] assign[=] constant[0] for taget[name[line]] in starred[name[history_lines]] begin[:] variable[match] assign[=] call[name[pattern].search, parameter[name[line]]] variable[alt_match] assign[=] call[name[alt_pattern].search, parameter[name[line]]] if name[match] begin[:] variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da1b1435510>, <ast.Constant object at 0x7da1b14367a0>, <ast.Constant object at 0x7da1b1436830>], [<ast.Name object at 0x7da1b1437400>, <ast.Call object at 0x7da1b1435cf0>, <ast.Call object at 0x7da1b1435b70>]] call[name[headings].append, parameter[name[result]]] call[name[logger].debug, parameter[constant[Found heading: %r], name[result]]] if name[alt_match] begin[:] variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da1b1602b30>, <ast.Constant object at 0x7da1b1601d50>, <ast.Constant object at 0x7da1b1602e90>], [<ast.Name object at 0x7da1b16021a0>, <ast.Call 
object at 0x7da1b1601a80>, <ast.Call object at 0x7da1b1603070>]] call[name[headings].append, parameter[name[result]]] call[name[logger].debug, parameter[constant[Found alternative heading: %r], name[result]]] <ast.AugAssign object at 0x7da1b1602740> return[name[headings]]
keyword[def] identifier[extract_headings_from_history] ( identifier[history_lines] ): literal[string] identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[VERBOSE] ) identifier[alt_pattern] = identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[VERBOSE] ) identifier[headings] =[] identifier[line_number] = literal[int] keyword[for] identifier[line] keyword[in] identifier[history_lines] : identifier[match] = identifier[pattern] . identifier[search] ( identifier[line] ) identifier[alt_match] = identifier[alt_pattern] . identifier[search] ( identifier[line] ) keyword[if] identifier[match] : identifier[result] ={ literal[string] : identifier[line_number] , literal[string] : identifier[match] . identifier[group] ( literal[string] ). identifier[strip] (), literal[string] : identifier[match] . identifier[group] ( literal[string] . identifier[strip] ())} identifier[headings] . identifier[append] ( identifier[result] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[result] ) keyword[if] identifier[alt_match] : identifier[result] ={ literal[string] : identifier[line_number] , literal[string] : identifier[alt_match] . identifier[group] ( literal[string] ). identifier[strip] (), literal[string] : identifier[alt_match] . identifier[group] ( literal[string] . identifier[strip] ())} identifier[headings] . identifier[append] ( identifier[result] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[result] ) identifier[line_number] += literal[int] keyword[return] identifier[headings]
def extract_headings_from_history(history_lines): """Return list of dicts with version-like headers. We check for patterns like '2.10 (unreleased)', so with either 'unreleased' or a date between parenthesis as that's the format we're using. Just fix up your first heading and you should be set. As an alternative, we support an alternative format used by some zope/plone paster templates: '2.10 - unreleased' or '2.10 ~ unreleased' Note that new headers that zest.releaser sets are in our preferred form (so 'version (date)'). """ pattern = re.compile('\n (?P<version>.+) # Version string\n \\( # Opening (\n (?P<date>.+) # Date\n \\) # Closing )\n \\W*$ # Possible whitespace at end of line.\n ', re.VERBOSE) alt_pattern = re.compile('\n ^ # Start of line\n (?P<version>.+) # Version string\n \\ [-~]\\ # space dash/twiggle space\n (?P<date>.+) # Date\n \\W*$ # Possible whitespace at end of line.\n ', re.VERBOSE) headings = [] line_number = 0 for line in history_lines: match = pattern.search(line) alt_match = alt_pattern.search(line) if match: result = {'line': line_number, 'version': match.group('version').strip(), 'date': match.group('date'.strip())} headings.append(result) logger.debug('Found heading: %r', result) # depends on [control=['if'], data=[]] if alt_match: result = {'line': line_number, 'version': alt_match.group('version').strip(), 'date': alt_match.group('date'.strip())} headings.append(result) logger.debug('Found alternative heading: %r', result) # depends on [control=['if'], data=[]] line_number += 1 # depends on [control=['for'], data=['line']] return headings
async def process_response(self, request, response): """If a reissue was requested, only reiisue if the response was a valid 2xx response """ if _REISSUE_KEY in request: if (response.started or not isinstance(response, web.Response) or response.status < 200 or response.status > 299): return await self.remember_ticket(request, request[_REISSUE_KEY])
<ast.AsyncFunctionDef object at 0x7da2054a58d0>
keyword[async] keyword[def] identifier[process_response] ( identifier[self] , identifier[request] , identifier[response] ): literal[string] keyword[if] identifier[_REISSUE_KEY] keyword[in] identifier[request] : keyword[if] ( identifier[response] . identifier[started] keyword[or] keyword[not] identifier[isinstance] ( identifier[response] , identifier[web] . identifier[Response] ) keyword[or] identifier[response] . identifier[status] < literal[int] keyword[or] identifier[response] . identifier[status] > literal[int] ): keyword[return] keyword[await] identifier[self] . identifier[remember_ticket] ( identifier[request] , identifier[request] [ identifier[_REISSUE_KEY] ])
async def process_response(self, request, response): """If a reissue was requested, only reiisue if the response was a valid 2xx response """ if _REISSUE_KEY in request: if response.started or not isinstance(response, web.Response) or response.status < 200 or (response.status > 299): return # depends on [control=['if'], data=[]] await self.remember_ticket(request, request[_REISSUE_KEY]) # depends on [control=['if'], data=['_REISSUE_KEY', 'request']]
def p_operation_definition1(self, p): """ operation_definition : operation_type name variable_definitions directives selection_set """ p[0] = self.operation_cls(p[1])( selections=p[5], name=p[2], variable_definitions=p[3], directives=p[4], )
def function[p_operation_definition1, parameter[self, p]]: constant[ operation_definition : operation_type name variable_definitions directives selection_set ] call[name[p]][constant[0]] assign[=] call[call[name[self].operation_cls, parameter[call[name[p]][constant[1]]]], parameter[]]
keyword[def] identifier[p_operation_definition1] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[self] . identifier[operation_cls] ( identifier[p] [ literal[int] ])( identifier[selections] = identifier[p] [ literal[int] ], identifier[name] = identifier[p] [ literal[int] ], identifier[variable_definitions] = identifier[p] [ literal[int] ], identifier[directives] = identifier[p] [ literal[int] ], )
def p_operation_definition1(self, p): """ operation_definition : operation_type name variable_definitions directives selection_set """ p[0] = self.operation_cls(p[1])(selections=p[5], name=p[2], variable_definitions=p[3], directives=p[4])
def Advertise(port, stype="SCOOP", sname="Broker", advertisername="Broker", location=""): """ stype = always SCOOP port = comma separated ports sname = broker unique name location = routable location (ip or dns) """ scoop.logger.info("Launching advertiser...") service = minusconf.Service(stype, port, sname, location) advertiser = minusconf.ThreadAdvertiser([service], advertisername) advertiser.start() scoop.logger.info("Advertiser launched.") return advertiser
def function[Advertise, parameter[port, stype, sname, advertisername, location]]: constant[ stype = always SCOOP port = comma separated ports sname = broker unique name location = routable location (ip or dns) ] call[name[scoop].logger.info, parameter[constant[Launching advertiser...]]] variable[service] assign[=] call[name[minusconf].Service, parameter[name[stype], name[port], name[sname], name[location]]] variable[advertiser] assign[=] call[name[minusconf].ThreadAdvertiser, parameter[list[[<ast.Name object at 0x7da18eb545b0>]], name[advertisername]]] call[name[advertiser].start, parameter[]] call[name[scoop].logger.info, parameter[constant[Advertiser launched.]]] return[name[advertiser]]
keyword[def] identifier[Advertise] ( identifier[port] , identifier[stype] = literal[string] , identifier[sname] = literal[string] , identifier[advertisername] = literal[string] , identifier[location] = literal[string] ): literal[string] identifier[scoop] . identifier[logger] . identifier[info] ( literal[string] ) identifier[service] = identifier[minusconf] . identifier[Service] ( identifier[stype] , identifier[port] , identifier[sname] , identifier[location] ) identifier[advertiser] = identifier[minusconf] . identifier[ThreadAdvertiser] ([ identifier[service] ], identifier[advertisername] ) identifier[advertiser] . identifier[start] () identifier[scoop] . identifier[logger] . identifier[info] ( literal[string] ) keyword[return] identifier[advertiser]
def Advertise(port, stype='SCOOP', sname='Broker', advertisername='Broker', location=''): """ stype = always SCOOP port = comma separated ports sname = broker unique name location = routable location (ip or dns) """ scoop.logger.info('Launching advertiser...') service = minusconf.Service(stype, port, sname, location) advertiser = minusconf.ThreadAdvertiser([service], advertisername) advertiser.start() scoop.logger.info('Advertiser launched.') return advertiser
def create(self, attributes=None, **kwargs): """ Creates a webhook with given attributes. """ return super(WebhooksProxy, self).create(resource_id=None, attributes=attributes)
def function[create, parameter[self, attributes]]: constant[ Creates a webhook with given attributes. ] return[call[call[name[super], parameter[name[WebhooksProxy], name[self]]].create, parameter[]]]
keyword[def] identifier[create] ( identifier[self] , identifier[attributes] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[super] ( identifier[WebhooksProxy] , identifier[self] ). identifier[create] ( identifier[resource_id] = keyword[None] , identifier[attributes] = identifier[attributes] )
def create(self, attributes=None, **kwargs): """ Creates a webhook with given attributes. """ return super(WebhooksProxy, self).create(resource_id=None, attributes=attributes)
def ellipse_from_second_moments(image, labels, indexes, wants_compactness = False): """Calculate measurements of ellipses equivalent to the second moments of labels image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation compactness (if asked for) some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis compactness is the variance of the radial distribution normalized by the area """ if len(indexes) == 0: return (np.zeros((0,2)), np.zeros((0,)), np.zeros((0,)), np.zeros((0,)),np.zeros((0,))) i,j = np.argwhere(labels != 0).transpose() return ellipse_from_second_moments_ijv(i,j,image[i,j], labels[i,j], indexes, wants_compactness)
def function[ellipse_from_second_moments, parameter[image, labels, indexes, wants_compactness]]: constant[Calculate measurements of ellipses equivalent to the second moments of labels image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation compactness (if asked for) some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. 
eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis compactness is the variance of the radial distribution normalized by the area ] if compare[call[name[len], parameter[name[indexes]]] equal[==] constant[0]] begin[:] return[tuple[[<ast.Call object at 0x7da20e954d60>, <ast.Call object at 0x7da20e9546a0>, <ast.Call object at 0x7da20e956b90>, <ast.Call object at 0x7da20e954ca0>, <ast.Call object at 0x7da20e9563e0>]]] <ast.Tuple object at 0x7da20e957610> assign[=] call[call[name[np].argwhere, parameter[compare[name[labels] not_equal[!=] constant[0]]]].transpose, parameter[]] return[call[name[ellipse_from_second_moments_ijv], parameter[name[i], name[j], call[name[image]][tuple[[<ast.Name object at 0x7da20e954670>, <ast.Name object at 0x7da20cabf0d0>]]], call[name[labels]][tuple[[<ast.Name object at 0x7da20cabd750>, <ast.Name object at 0x7da20cabc610>]]], name[indexes], name[wants_compactness]]]]
keyword[def] identifier[ellipse_from_second_moments] ( identifier[image] , identifier[labels] , identifier[indexes] , identifier[wants_compactness] = keyword[False] ): literal[string] keyword[if] identifier[len] ( identifier[indexes] )== literal[int] : keyword[return] ( identifier[np] . identifier[zeros] (( literal[int] , literal[int] )), identifier[np] . identifier[zeros] (( literal[int] ,)), identifier[np] . identifier[zeros] (( literal[int] ,)), identifier[np] . identifier[zeros] (( literal[int] ,)), identifier[np] . identifier[zeros] (( literal[int] ,))) identifier[i] , identifier[j] = identifier[np] . identifier[argwhere] ( identifier[labels] != literal[int] ). identifier[transpose] () keyword[return] identifier[ellipse_from_second_moments_ijv] ( identifier[i] , identifier[j] , identifier[image] [ identifier[i] , identifier[j] ], identifier[labels] [ identifier[i] , identifier[j] ], identifier[indexes] , identifier[wants_compactness] )
def ellipse_from_second_moments(image, labels, indexes, wants_compactness=False): """Calculate measurements of ellipses equivalent to the second moments of labels image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation compactness (if asked for) some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis compactness is the variance of the radial distribution normalized by the area """ if len(indexes) == 0: return (np.zeros((0, 2)), np.zeros((0,)), np.zeros((0,)), np.zeros((0,)), np.zeros((0,))) # depends on [control=['if'], data=[]] (i, j) = np.argwhere(labels != 0).transpose() return ellipse_from_second_moments_ijv(i, j, image[i, j], labels[i, j], indexes, wants_compactness)
def rouge_n(evaluated_sentences, reference_sentences, n=2): """ Computes ROUGE-N of two text collections of sentences. Sourece: http://research.microsoft.com/en-us/um/people/cyl/download/ papers/rouge-working-note-v1.3.1.pdf Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentences: The sentences from the referene set n: Size of ngram. Defaults to 2. Returns: A tuple (f1, precision, recall) for ROUGE-N Raises: ValueError: raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0: raise ValueError("Collections must contain at least 1 sentence.") evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences) reference_ngrams = _get_word_ngrams(n, reference_sentences) reference_count = len(reference_ngrams) evaluated_count = len(evaluated_ngrams) # Gets the overlapping ngrams between evaluated and reference overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams) overlapping_count = len(overlapping_ngrams) # Handle edge case. This isn't mathematically correct, but it's good enough if evaluated_count == 0: precision = 0.0 else: precision = overlapping_count / evaluated_count # Handle edge case for recall, same as for precision if reference_count == 0: recall = 0.0 else: recall = overlapping_count / reference_count f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8)) # return overlapping_count / reference_count return f1_score, precision, recall
def function[rouge_n, parameter[evaluated_sentences, reference_sentences, n]]: constant[ Computes ROUGE-N of two text collections of sentences. Sourece: http://research.microsoft.com/en-us/um/people/cyl/download/ papers/rouge-working-note-v1.3.1.pdf Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentences: The sentences from the referene set n: Size of ngram. Defaults to 2. Returns: A tuple (f1, precision, recall) for ROUGE-N Raises: ValueError: raises exception if a param has len <= 0 ] if <ast.BoolOp object at 0x7da2044c0820> begin[:] <ast.Raise object at 0x7da2044c16c0> variable[evaluated_ngrams] assign[=] call[name[_get_word_ngrams], parameter[name[n], name[evaluated_sentences]]] variable[reference_ngrams] assign[=] call[name[_get_word_ngrams], parameter[name[n], name[reference_sentences]]] variable[reference_count] assign[=] call[name[len], parameter[name[reference_ngrams]]] variable[evaluated_count] assign[=] call[name[len], parameter[name[evaluated_ngrams]]] variable[overlapping_ngrams] assign[=] call[name[evaluated_ngrams].intersection, parameter[name[reference_ngrams]]] variable[overlapping_count] assign[=] call[name[len], parameter[name[overlapping_ngrams]]] if compare[name[evaluated_count] equal[==] constant[0]] begin[:] variable[precision] assign[=] constant[0.0] if compare[name[reference_count] equal[==] constant[0]] begin[:] variable[recall] assign[=] constant[0.0] variable[f1_score] assign[=] binary_operation[constant[2.0] * binary_operation[binary_operation[name[precision] * name[recall]] / binary_operation[binary_operation[name[precision] + name[recall]] + constant[1e-08]]]] return[tuple[[<ast.Name object at 0x7da1b1d74460>, <ast.Name object at 0x7da1b1d743a0>, <ast.Name object at 0x7da1b1d764a0>]]]
keyword[def] identifier[rouge_n] ( identifier[evaluated_sentences] , identifier[reference_sentences] , identifier[n] = literal[int] ): literal[string] keyword[if] identifier[len] ( identifier[evaluated_sentences] )<= literal[int] keyword[or] identifier[len] ( identifier[reference_sentences] )<= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[evaluated_ngrams] = identifier[_get_word_ngrams] ( identifier[n] , identifier[evaluated_sentences] ) identifier[reference_ngrams] = identifier[_get_word_ngrams] ( identifier[n] , identifier[reference_sentences] ) identifier[reference_count] = identifier[len] ( identifier[reference_ngrams] ) identifier[evaluated_count] = identifier[len] ( identifier[evaluated_ngrams] ) identifier[overlapping_ngrams] = identifier[evaluated_ngrams] . identifier[intersection] ( identifier[reference_ngrams] ) identifier[overlapping_count] = identifier[len] ( identifier[overlapping_ngrams] ) keyword[if] identifier[evaluated_count] == literal[int] : identifier[precision] = literal[int] keyword[else] : identifier[precision] = identifier[overlapping_count] / identifier[evaluated_count] keyword[if] identifier[reference_count] == literal[int] : identifier[recall] = literal[int] keyword[else] : identifier[recall] = identifier[overlapping_count] / identifier[reference_count] identifier[f1_score] = literal[int] *(( identifier[precision] * identifier[recall] )/( identifier[precision] + identifier[recall] + literal[int] )) keyword[return] identifier[f1_score] , identifier[precision] , identifier[recall]
def rouge_n(evaluated_sentences, reference_sentences, n=2): """ Computes ROUGE-N of two text collections of sentences. Sourece: http://research.microsoft.com/en-us/um/people/cyl/download/ papers/rouge-working-note-v1.3.1.pdf Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentences: The sentences from the referene set n: Size of ngram. Defaults to 2. Returns: A tuple (f1, precision, recall) for ROUGE-N Raises: ValueError: raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0: raise ValueError('Collections must contain at least 1 sentence.') # depends on [control=['if'], data=[]] evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences) reference_ngrams = _get_word_ngrams(n, reference_sentences) reference_count = len(reference_ngrams) evaluated_count = len(evaluated_ngrams) # Gets the overlapping ngrams between evaluated and reference overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams) overlapping_count = len(overlapping_ngrams) # Handle edge case. This isn't mathematically correct, but it's good enough if evaluated_count == 0: precision = 0.0 # depends on [control=['if'], data=[]] else: precision = overlapping_count / evaluated_count # Handle edge case for recall, same as for precision if reference_count == 0: recall = 0.0 # depends on [control=['if'], data=[]] else: recall = overlapping_count / reference_count f1_score = 2.0 * (precision * recall / (precision + recall + 1e-08)) # return overlapping_count / reference_count return (f1_score, precision, recall)
def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'): """Updates the manual rank for all variants in a case Add a variant rank based on the rank score Whenever variants are added or removed from a case we need to update the variant rank Args: case_obj(Case) variant_type(str) """ # Get all variants sorted by rank score variants = self.variant_collection.find({ 'case_id': case_obj['_id'], 'category': category, 'variant_type': variant_type, }).sort('rank_score', pymongo.DESCENDING) LOG.info("Updating variant_rank for all variants") requests = [] for index, var_obj in enumerate(variants): if len(requests) > 5000: try: self.variant_collection.bulk_write(requests, ordered=False) requests = [] except BulkWriteError as err: LOG.warning("Updating variant rank failed") raise err operation = pymongo.UpdateOne( {'_id': var_obj['_id']}, { '$set': { 'variant_rank': index + 1, } }) requests.append(operation) #Update the final bulk try: self.variant_collection.bulk_write(requests, ordered=False) except BulkWriteError as err: LOG.warning("Updating variant rank failed") raise err LOG.info("Updating variant_rank done")
def function[update_variant_rank, parameter[self, case_obj, variant_type, category]]: constant[Updates the manual rank for all variants in a case Add a variant rank based on the rank score Whenever variants are added or removed from a case we need to update the variant rank Args: case_obj(Case) variant_type(str) ] variable[variants] assign[=] call[call[name[self].variant_collection.find, parameter[dictionary[[<ast.Constant object at 0x7da18f58c700>, <ast.Constant object at 0x7da18f58f910>, <ast.Constant object at 0x7da18f58e500>], [<ast.Subscript object at 0x7da18f58c310>, <ast.Name object at 0x7da18f58e140>, <ast.Name object at 0x7da18f58e0b0>]]]].sort, parameter[constant[rank_score], name[pymongo].DESCENDING]] call[name[LOG].info, parameter[constant[Updating variant_rank for all variants]]] variable[requests] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18f58e260>, <ast.Name object at 0x7da18f58d570>]]] in starred[call[name[enumerate], parameter[name[variants]]]] begin[:] if compare[call[name[len], parameter[name[requests]]] greater[>] constant[5000]] begin[:] <ast.Try object at 0x7da18f58e230> variable[operation] assign[=] call[name[pymongo].UpdateOne, parameter[dictionary[[<ast.Constant object at 0x7da18f58c6d0>], [<ast.Subscript object at 0x7da18f58d720>]], dictionary[[<ast.Constant object at 0x7da18f58ed70>], [<ast.Dict object at 0x7da18f58c670>]]]] call[name[requests].append, parameter[name[operation]]] <ast.Try object at 0x7da18f58d540> call[name[LOG].info, parameter[constant[Updating variant_rank done]]]
keyword[def] identifier[update_variant_rank] ( identifier[self] , identifier[case_obj] , identifier[variant_type] = literal[string] , identifier[category] = literal[string] ): literal[string] identifier[variants] = identifier[self] . identifier[variant_collection] . identifier[find] ({ literal[string] : identifier[case_obj] [ literal[string] ], literal[string] : identifier[category] , literal[string] : identifier[variant_type] , }). identifier[sort] ( literal[string] , identifier[pymongo] . identifier[DESCENDING] ) identifier[LOG] . identifier[info] ( literal[string] ) identifier[requests] =[] keyword[for] identifier[index] , identifier[var_obj] keyword[in] identifier[enumerate] ( identifier[variants] ): keyword[if] identifier[len] ( identifier[requests] )> literal[int] : keyword[try] : identifier[self] . identifier[variant_collection] . identifier[bulk_write] ( identifier[requests] , identifier[ordered] = keyword[False] ) identifier[requests] =[] keyword[except] identifier[BulkWriteError] keyword[as] identifier[err] : identifier[LOG] . identifier[warning] ( literal[string] ) keyword[raise] identifier[err] identifier[operation] = identifier[pymongo] . identifier[UpdateOne] ( { literal[string] : identifier[var_obj] [ literal[string] ]}, { literal[string] :{ literal[string] : identifier[index] + literal[int] , } }) identifier[requests] . identifier[append] ( identifier[operation] ) keyword[try] : identifier[self] . identifier[variant_collection] . identifier[bulk_write] ( identifier[requests] , identifier[ordered] = keyword[False] ) keyword[except] identifier[BulkWriteError] keyword[as] identifier[err] : identifier[LOG] . identifier[warning] ( literal[string] ) keyword[raise] identifier[err] identifier[LOG] . identifier[info] ( literal[string] )
def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'): """Updates the manual rank for all variants in a case Add a variant rank based on the rank score Whenever variants are added or removed from a case we need to update the variant rank Args: case_obj(Case) variant_type(str) """ # Get all variants sorted by rank score variants = self.variant_collection.find({'case_id': case_obj['_id'], 'category': category, 'variant_type': variant_type}).sort('rank_score', pymongo.DESCENDING) LOG.info('Updating variant_rank for all variants') requests = [] for (index, var_obj) in enumerate(variants): if len(requests) > 5000: try: self.variant_collection.bulk_write(requests, ordered=False) requests = [] # depends on [control=['try'], data=[]] except BulkWriteError as err: LOG.warning('Updating variant rank failed') raise err # depends on [control=['except'], data=['err']] # depends on [control=['if'], data=[]] operation = pymongo.UpdateOne({'_id': var_obj['_id']}, {'$set': {'variant_rank': index + 1}}) requests.append(operation) # depends on [control=['for'], data=[]] #Update the final bulk try: self.variant_collection.bulk_write(requests, ordered=False) # depends on [control=['try'], data=[]] except BulkWriteError as err: LOG.warning('Updating variant rank failed') raise err # depends on [control=['except'], data=['err']] LOG.info('Updating variant_rank done')
def validate_cmaps(cmaps): """Validate a dictionary of color lists Parameters ---------- cmaps: dict a mapping from a colormap name to a list of colors Raises ------ ValueError If one of the values in `cmaps` is not a color list Notes ----- For all items (listname, list) in `cmaps`, the reversed list is automatically inserted with the ``listname + '_r'`` key.""" cmaps = {validate_str(key): validate_colorlist(val) for key, val in cmaps} for key, val in six.iteritems(cmaps): cmaps.setdefault(key + '_r', val[::-1]) return cmaps
def function[validate_cmaps, parameter[cmaps]]: constant[Validate a dictionary of color lists Parameters ---------- cmaps: dict a mapping from a colormap name to a list of colors Raises ------ ValueError If one of the values in `cmaps` is not a color list Notes ----- For all items (listname, list) in `cmaps`, the reversed list is automatically inserted with the ``listname + '_r'`` key.] variable[cmaps] assign[=] <ast.DictComp object at 0x7da20c76f610> for taget[tuple[[<ast.Name object at 0x7da20c76f700>, <ast.Name object at 0x7da20c76d420>]]] in starred[call[name[six].iteritems, parameter[name[cmaps]]]] begin[:] call[name[cmaps].setdefault, parameter[binary_operation[name[key] + constant[_r]], call[name[val]][<ast.Slice object at 0x7da20c76cd60>]]] return[name[cmaps]]
keyword[def] identifier[validate_cmaps] ( identifier[cmaps] ): literal[string] identifier[cmaps] ={ identifier[validate_str] ( identifier[key] ): identifier[validate_colorlist] ( identifier[val] ) keyword[for] identifier[key] , identifier[val] keyword[in] identifier[cmaps] } keyword[for] identifier[key] , identifier[val] keyword[in] identifier[six] . identifier[iteritems] ( identifier[cmaps] ): identifier[cmaps] . identifier[setdefault] ( identifier[key] + literal[string] , identifier[val] [::- literal[int] ]) keyword[return] identifier[cmaps]
def validate_cmaps(cmaps): """Validate a dictionary of color lists Parameters ---------- cmaps: dict a mapping from a colormap name to a list of colors Raises ------ ValueError If one of the values in `cmaps` is not a color list Notes ----- For all items (listname, list) in `cmaps`, the reversed list is automatically inserted with the ``listname + '_r'`` key.""" cmaps = {validate_str(key): validate_colorlist(val) for (key, val) in cmaps} for (key, val) in six.iteritems(cmaps): cmaps.setdefault(key + '_r', val[::-1]) # depends on [control=['for'], data=[]] return cmaps
def list_inputs(self): """Lists all arguments and auxiliary states of this Symbol. Returns ------- inputs : list of str List of all inputs. Examples -------- >>> bn = mx.sym.BatchNorm(name='bn') >>> bn.list_arguments() ['bn_data', 'bn_gamma', 'bn_beta'] >>> bn.list_auxiliary_states() ['bn_moving_mean', 'bn_moving_var'] >>> bn.list_inputs() ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var'] """ size = ctypes.c_uint() sarr = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.NNSymbolListInputNames( self.handle, 0, ctypes.byref(size), ctypes.byref(sarr))) return [py_str(sarr[i]) for i in range(size.value)]
def function[list_inputs, parameter[self]]: constant[Lists all arguments and auxiliary states of this Symbol. Returns ------- inputs : list of str List of all inputs. Examples -------- >>> bn = mx.sym.BatchNorm(name='bn') >>> bn.list_arguments() ['bn_data', 'bn_gamma', 'bn_beta'] >>> bn.list_auxiliary_states() ['bn_moving_mean', 'bn_moving_var'] >>> bn.list_inputs() ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var'] ] variable[size] assign[=] call[name[ctypes].c_uint, parameter[]] variable[sarr] assign[=] call[call[name[ctypes].POINTER, parameter[name[ctypes].c_char_p]], parameter[]] call[name[check_call], parameter[call[name[_LIB].NNSymbolListInputNames, parameter[name[self].handle, constant[0], call[name[ctypes].byref, parameter[name[size]]], call[name[ctypes].byref, parameter[name[sarr]]]]]]] return[<ast.ListComp object at 0x7da1b2016c80>]
keyword[def] identifier[list_inputs] ( identifier[self] ): literal[string] identifier[size] = identifier[ctypes] . identifier[c_uint] () identifier[sarr] = identifier[ctypes] . identifier[POINTER] ( identifier[ctypes] . identifier[c_char_p] )() identifier[check_call] ( identifier[_LIB] . identifier[NNSymbolListInputNames] ( identifier[self] . identifier[handle] , literal[int] , identifier[ctypes] . identifier[byref] ( identifier[size] ), identifier[ctypes] . identifier[byref] ( identifier[sarr] ))) keyword[return] [ identifier[py_str] ( identifier[sarr] [ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[size] . identifier[value] )]
def list_inputs(self): """Lists all arguments and auxiliary states of this Symbol. Returns ------- inputs : list of str List of all inputs. Examples -------- >>> bn = mx.sym.BatchNorm(name='bn') >>> bn.list_arguments() ['bn_data', 'bn_gamma', 'bn_beta'] >>> bn.list_auxiliary_states() ['bn_moving_mean', 'bn_moving_var'] >>> bn.list_inputs() ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var'] """ size = ctypes.c_uint() sarr = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.NNSymbolListInputNames(self.handle, 0, ctypes.byref(size), ctypes.byref(sarr))) return [py_str(sarr[i]) for i in range(size.value)]
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query): """ Return the request params we would send to the api. """ url, params = self._prepare_request(command, query) return { "url": url, "params": params, "files": files, "stream": use_long_polling, "verify": True, # No self signed certificates. Telegram should be trustworthy anyway... "timeout": request_timeout }
def function[do, parameter[self, command, files, use_long_polling, request_timeout]]: constant[ Return the request params we would send to the api. ] <ast.Tuple object at 0x7da18f58c4f0> assign[=] call[name[self]._prepare_request, parameter[name[command], name[query]]] return[dictionary[[<ast.Constant object at 0x7da18f58d6c0>, <ast.Constant object at 0x7da18f58e8c0>, <ast.Constant object at 0x7da18f58e560>, <ast.Constant object at 0x7da18f58c850>, <ast.Constant object at 0x7da18f58f850>, <ast.Constant object at 0x7da1b0471780>], [<ast.Name object at 0x7da1b0470ee0>, <ast.Name object at 0x7da1b0472ef0>, <ast.Name object at 0x7da1b0472710>, <ast.Name object at 0x7da1b0470190>, <ast.Constant object at 0x7da1b0472200>, <ast.Name object at 0x7da1b04737c0>]]]
keyword[def] identifier[do] ( identifier[self] , identifier[command] , identifier[files] = keyword[None] , identifier[use_long_polling] = keyword[False] , identifier[request_timeout] = keyword[None] ,** identifier[query] ): literal[string] identifier[url] , identifier[params] = identifier[self] . identifier[_prepare_request] ( identifier[command] , identifier[query] ) keyword[return] { literal[string] : identifier[url] , literal[string] : identifier[params] , literal[string] : identifier[files] , literal[string] : identifier[use_long_polling] , literal[string] : keyword[True] , literal[string] : identifier[request_timeout] }
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query): """ Return the request params we would send to the api. """ (url, params) = self._prepare_request(command, query) # No self signed certificates. Telegram should be trustworthy anyway... return {'url': url, 'params': params, 'files': files, 'stream': use_long_polling, 'verify': True, 'timeout': request_timeout}
def _fast_memory_load_bytes(self, addr, length): """ Perform a fast memory loading of some data. :param int addr: Address to read from. :param int length: Size of the string to load. :return: A string or None if the address does not exist. :rtype: bytes or None """ try: return self.project.loader.memory.load(addr, length) except KeyError: return None
def function[_fast_memory_load_bytes, parameter[self, addr, length]]: constant[ Perform a fast memory loading of some data. :param int addr: Address to read from. :param int length: Size of the string to load. :return: A string or None if the address does not exist. :rtype: bytes or None ] <ast.Try object at 0x7da20c7cb760>
keyword[def] identifier[_fast_memory_load_bytes] ( identifier[self] , identifier[addr] , identifier[length] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[project] . identifier[loader] . identifier[memory] . identifier[load] ( identifier[addr] , identifier[length] ) keyword[except] identifier[KeyError] : keyword[return] keyword[None]
def _fast_memory_load_bytes(self, addr, length): """ Perform a fast memory loading of some data. :param int addr: Address to read from. :param int length: Size of the string to load. :return: A string or None if the address does not exist. :rtype: bytes or None """ try: return self.project.loader.memory.load(addr, length) # depends on [control=['try'], data=[]] except KeyError: return None # depends on [control=['except'], data=[]]
def propagate(self, *arg, **kw): """ Propagates activation through the network.""" output = Network.propagate(self, *arg, **kw) if self.interactive: self.updateGraphics() # IMPORTANT: convert results from numpy.floats to conventional floats if type(output) == dict: for layerName in output: output[layerName] = [float(x) for x in output[layerName]] return output else: return [float(x) for x in output]
def function[propagate, parameter[self]]: constant[ Propagates activation through the network.] variable[output] assign[=] call[name[Network].propagate, parameter[name[self], <ast.Starred object at 0x7da1b034a620>]] if name[self].interactive begin[:] call[name[self].updateGraphics, parameter[]] if compare[call[name[type], parameter[name[output]]] equal[==] name[dict]] begin[:] for taget[name[layerName]] in starred[name[output]] begin[:] call[name[output]][name[layerName]] assign[=] <ast.ListComp object at 0x7da1b0349180> return[name[output]]
keyword[def] identifier[propagate] ( identifier[self] ,* identifier[arg] ,** identifier[kw] ): literal[string] identifier[output] = identifier[Network] . identifier[propagate] ( identifier[self] ,* identifier[arg] ,** identifier[kw] ) keyword[if] identifier[self] . identifier[interactive] : identifier[self] . identifier[updateGraphics] () keyword[if] identifier[type] ( identifier[output] )== identifier[dict] : keyword[for] identifier[layerName] keyword[in] identifier[output] : identifier[output] [ identifier[layerName] ]=[ identifier[float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[output] [ identifier[layerName] ]] keyword[return] identifier[output] keyword[else] : keyword[return] [ identifier[float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[output] ]
def propagate(self, *arg, **kw): """ Propagates activation through the network.""" output = Network.propagate(self, *arg, **kw) if self.interactive: self.updateGraphics() # depends on [control=['if'], data=[]] # IMPORTANT: convert results from numpy.floats to conventional floats if type(output) == dict: for layerName in output: output[layerName] = [float(x) for x in output[layerName]] # depends on [control=['for'], data=['layerName']] return output # depends on [control=['if'], data=[]] else: return [float(x) for x in output]
def encode_safely(self, data): """Encode the data. """ encoder = self.base_encoder result = settings.null try: result = encoder(pickle.dumps(data)) except: warnings.warn("Data could not be serialized.", RuntimeWarning) return result
def function[encode_safely, parameter[self, data]]: constant[Encode the data. ] variable[encoder] assign[=] name[self].base_encoder variable[result] assign[=] name[settings].null <ast.Try object at 0x7da1b0c4dae0> return[name[result]]
keyword[def] identifier[encode_safely] ( identifier[self] , identifier[data] ): literal[string] identifier[encoder] = identifier[self] . identifier[base_encoder] identifier[result] = identifier[settings] . identifier[null] keyword[try] : identifier[result] = identifier[encoder] ( identifier[pickle] . identifier[dumps] ( identifier[data] )) keyword[except] : identifier[warnings] . identifier[warn] ( literal[string] , identifier[RuntimeWarning] ) keyword[return] identifier[result]
def encode_safely(self, data): """Encode the data. """ encoder = self.base_encoder result = settings.null try: result = encoder(pickle.dumps(data)) # depends on [control=['try'], data=[]] except: warnings.warn('Data could not be serialized.', RuntimeWarning) # depends on [control=['except'], data=[]] return result
def predict_expectation(self, X, ancillary_X=None): """ Predict the expectation of lifetimes, :math:`E[T | x]`. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- percentiles: DataFrame the median lifetimes for the individuals. If the survival curve of an individual does not cross 0.5, then the result is infinity. See Also -------- predict_median """ exp_mu_, sigma_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X) return pd.DataFrame(exp_mu_ * np.exp(sigma_ ** 2 / 2), index=_get_index(X))
def function[predict_expectation, parameter[self, X, ancillary_X]]: constant[ Predict the expectation of lifetimes, :math:`E[T | x]`. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- percentiles: DataFrame the median lifetimes for the individuals. If the survival curve of an individual does not cross 0.5, then the result is infinity. See Also -------- predict_median ] <ast.Tuple object at 0x7da20c6ab8e0> assign[=] call[name[self]._prep_inputs_for_prediction_and_return_scores, parameter[name[X], name[ancillary_X]]] return[call[name[pd].DataFrame, parameter[binary_operation[name[exp_mu_] * call[name[np].exp, parameter[binary_operation[binary_operation[name[sigma_] ** constant[2]] / constant[2]]]]]]]]
keyword[def] identifier[predict_expectation] ( identifier[self] , identifier[X] , identifier[ancillary_X] = keyword[None] ): literal[string] identifier[exp_mu_] , identifier[sigma_] = identifier[self] . identifier[_prep_inputs_for_prediction_and_return_scores] ( identifier[X] , identifier[ancillary_X] ) keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[exp_mu_] * identifier[np] . identifier[exp] ( identifier[sigma_] ** literal[int] / literal[int] ), identifier[index] = identifier[_get_index] ( identifier[X] ))
def predict_expectation(self, X, ancillary_X=None): """ Predict the expectation of lifetimes, :math:`E[T | x]`. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- percentiles: DataFrame the median lifetimes for the individuals. If the survival curve of an individual does not cross 0.5, then the result is infinity. See Also -------- predict_median """ (exp_mu_, sigma_) = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X) return pd.DataFrame(exp_mu_ * np.exp(sigma_ ** 2 / 2), index=_get_index(X))
def pre_request(self, response, exc=None): """Start the tunnel. This is a callback fired once a connection with upstream server is established. """ if response.request.method == 'CONNECT': self.start_response( '200 Connection established', [('content-length', '0')] ) # send empty byte so that headers are sent self.future.set_result([b'']) # proxy - server connection upstream = response.connection # client - proxy connection dostream = self.connection # Upgrade downstream connection dostream.upgrade(partial(StreamTunnel.create, upstream)) # # upstream upgrade upstream.upgrade(partial(StreamTunnel.create, dostream)) response.fire_event('post_request') # abort the event raise AbortEvent else: response.event('data_processed').bind(self.data_processed) response.event('post_request').bind(self.post_request)
def function[pre_request, parameter[self, response, exc]]: constant[Start the tunnel. This is a callback fired once a connection with upstream server is established. ] if compare[name[response].request.method equal[==] constant[CONNECT]] begin[:] call[name[self].start_response, parameter[constant[200 Connection established], list[[<ast.Tuple object at 0x7da18eb55cc0>]]]] call[name[self].future.set_result, parameter[list[[<ast.Constant object at 0x7da18eb542b0>]]]] variable[upstream] assign[=] name[response].connection variable[dostream] assign[=] name[self].connection call[name[dostream].upgrade, parameter[call[name[partial], parameter[name[StreamTunnel].create, name[upstream]]]]] call[name[upstream].upgrade, parameter[call[name[partial], parameter[name[StreamTunnel].create, name[dostream]]]]] call[name[response].fire_event, parameter[constant[post_request]]] <ast.Raise object at 0x7da18eb55780>
keyword[def] identifier[pre_request] ( identifier[self] , identifier[response] , identifier[exc] = keyword[None] ): literal[string] keyword[if] identifier[response] . identifier[request] . identifier[method] == literal[string] : identifier[self] . identifier[start_response] ( literal[string] , [( literal[string] , literal[string] )] ) identifier[self] . identifier[future] . identifier[set_result] ([ literal[string] ]) identifier[upstream] = identifier[response] . identifier[connection] identifier[dostream] = identifier[self] . identifier[connection] identifier[dostream] . identifier[upgrade] ( identifier[partial] ( identifier[StreamTunnel] . identifier[create] , identifier[upstream] )) identifier[upstream] . identifier[upgrade] ( identifier[partial] ( identifier[StreamTunnel] . identifier[create] , identifier[dostream] )) identifier[response] . identifier[fire_event] ( literal[string] ) keyword[raise] identifier[AbortEvent] keyword[else] : identifier[response] . identifier[event] ( literal[string] ). identifier[bind] ( identifier[self] . identifier[data_processed] ) identifier[response] . identifier[event] ( literal[string] ). identifier[bind] ( identifier[self] . identifier[post_request] )
def pre_request(self, response, exc=None): """Start the tunnel. This is a callback fired once a connection with upstream server is established. """ if response.request.method == 'CONNECT': self.start_response('200 Connection established', [('content-length', '0')]) # send empty byte so that headers are sent self.future.set_result([b'']) # proxy - server connection upstream = response.connection # client - proxy connection dostream = self.connection # Upgrade downstream connection dostream.upgrade(partial(StreamTunnel.create, upstream)) # # upstream upgrade upstream.upgrade(partial(StreamTunnel.create, dostream)) response.fire_event('post_request') # abort the event raise AbortEvent # depends on [control=['if'], data=[]] else: response.event('data_processed').bind(self.data_processed) response.event('post_request').bind(self.post_request)
def decrypt_assertions(self, encrypted_assertions, decr_txt, issuer=None, verified=False): """ Moves the decrypted assertion from the encrypted assertion to a list. :param encrypted_assertions: A list of encrypted assertions. :param decr_txt: The string representation containing the decrypted data. Used when verifying signatures. :param issuer: The issuer of the response. :param verified: If True do not verify signatures, otherwise verify the signature if it exists. :return: A list of decrypted assertions. """ res = [] for encrypted_assertion in encrypted_assertions: if encrypted_assertion.extension_elements: assertions = extension_elements_to_elements( encrypted_assertion.extension_elements, [saml, samlp]) for assertion in assertions: if assertion.signature and not verified: if not self.sec.check_signature( assertion, origdoc=decr_txt, node_name=class_name(assertion), issuer=issuer): logger.error("Failed to verify signature on '%s'", assertion) raise SignatureError() res.append(assertion) return res
def function[decrypt_assertions, parameter[self, encrypted_assertions, decr_txt, issuer, verified]]: constant[ Moves the decrypted assertion from the encrypted assertion to a list. :param encrypted_assertions: A list of encrypted assertions. :param decr_txt: The string representation containing the decrypted data. Used when verifying signatures. :param issuer: The issuer of the response. :param verified: If True do not verify signatures, otherwise verify the signature if it exists. :return: A list of decrypted assertions. ] variable[res] assign[=] list[[]] for taget[name[encrypted_assertion]] in starred[name[encrypted_assertions]] begin[:] if name[encrypted_assertion].extension_elements begin[:] variable[assertions] assign[=] call[name[extension_elements_to_elements], parameter[name[encrypted_assertion].extension_elements, list[[<ast.Name object at 0x7da1b1d558a0>, <ast.Name object at 0x7da1b1d56e00>]]]] for taget[name[assertion]] in starred[name[assertions]] begin[:] if <ast.BoolOp object at 0x7da1b1d54f70> begin[:] if <ast.UnaryOp object at 0x7da1b1d57ee0> begin[:] call[name[logger].error, parameter[constant[Failed to verify signature on '%s'], name[assertion]]] <ast.Raise object at 0x7da1b1d574f0> call[name[res].append, parameter[name[assertion]]] return[name[res]]
keyword[def] identifier[decrypt_assertions] ( identifier[self] , identifier[encrypted_assertions] , identifier[decr_txt] , identifier[issuer] = keyword[None] , identifier[verified] = keyword[False] ): literal[string] identifier[res] =[] keyword[for] identifier[encrypted_assertion] keyword[in] identifier[encrypted_assertions] : keyword[if] identifier[encrypted_assertion] . identifier[extension_elements] : identifier[assertions] = identifier[extension_elements_to_elements] ( identifier[encrypted_assertion] . identifier[extension_elements] ,[ identifier[saml] , identifier[samlp] ]) keyword[for] identifier[assertion] keyword[in] identifier[assertions] : keyword[if] identifier[assertion] . identifier[signature] keyword[and] keyword[not] identifier[verified] : keyword[if] keyword[not] identifier[self] . identifier[sec] . identifier[check_signature] ( identifier[assertion] , identifier[origdoc] = identifier[decr_txt] , identifier[node_name] = identifier[class_name] ( identifier[assertion] ), identifier[issuer] = identifier[issuer] ): identifier[logger] . identifier[error] ( literal[string] , identifier[assertion] ) keyword[raise] identifier[SignatureError] () identifier[res] . identifier[append] ( identifier[assertion] ) keyword[return] identifier[res]
def decrypt_assertions(self, encrypted_assertions, decr_txt, issuer=None, verified=False): """ Moves the decrypted assertion from the encrypted assertion to a list. :param encrypted_assertions: A list of encrypted assertions. :param decr_txt: The string representation containing the decrypted data. Used when verifying signatures. :param issuer: The issuer of the response. :param verified: If True do not verify signatures, otherwise verify the signature if it exists. :return: A list of decrypted assertions. """ res = [] for encrypted_assertion in encrypted_assertions: if encrypted_assertion.extension_elements: assertions = extension_elements_to_elements(encrypted_assertion.extension_elements, [saml, samlp]) for assertion in assertions: if assertion.signature and (not verified): if not self.sec.check_signature(assertion, origdoc=decr_txt, node_name=class_name(assertion), issuer=issuer): logger.error("Failed to verify signature on '%s'", assertion) raise SignatureError() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] res.append(assertion) # depends on [control=['for'], data=['assertion']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['encrypted_assertion']] return res
def from_scf_task(cls, scf_task, ngqpt, ph_tolerance=None, tolwfr=1.0e-22, nband=None, with_becs=False, ddk_tolerance=None, shiftq=(0, 0, 0), is_ngqpt=True, remove_wfkq=True, manager=None): """ Construct a `PhononWfkqWork` from a :class:`ScfTask` object. The input files for WFQ and phonons are automatically generated from the input of the ScfTask. Each phonon task depends on the WFK file produced by scf_task and the associated WFQ file. Args: scf_task: ScfTask object. ngqpt: three integers defining the q-mesh with_becs: Activate calculation of Electric field and Born effective charges. ph_tolerance: dict {"varname": value} with the tolerance for the phonon run. None to use AbiPy default. tolwfr: tolerance used to compute WFQ. ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs. None to use AbiPy default. shiftq: Q-mesh shift. Multiple shifts are not supported. is_ngqpt: the ngqpt is interpreted as a set of integers defining the q-mesh, otherwise is an explicit list of q-points remove_wfkq: Remove WKQ files when the children are completed. manager: :class:`TaskManager` object. .. note: Use k-meshes with one shift and q-meshes that are multiple of ngkpt to decrease the number of WFQ files to be computed. """ if not isinstance(scf_task, ScfTask): raise TypeError("task `%s` does not inherit from ScfTask" % scf_task) shiftq = np.reshape(shiftq, (3,)) if is_ngqpt: qpoints = scf_task.input.abiget_ibz(ngkpt=ngqpt, shiftk=shiftq, kptopt=1).points else: qpoints = ngqpt new = cls(manager=manager) new.remove_wfkq = remove_wfkq new.wfkq_tasks = [] new.wfkq_task_children = collections.defaultdict(list) if with_becs: # Add DDK and BECS. new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance) # Get ngkpt, shift for electrons from input. 
# Won't try to skip WFQ if multiple shifts or off-diagonal kptrlatt ngkpt, shiftk = scf_task.input.get_ngkpt_shiftk() try_to_skip_wfkq = True if ngkpt is None or len(shiftk) > 1 and is_ngqpt: try_to_skip_wfkq = True # TODO: One could avoid kptopt 3 by computing WFK in the IBZ and then rotating. # but this has to be done inside Abinit. for qpt in qpoints: is_gamma = np.sum(qpt ** 2) < 1e-12 if with_becs and is_gamma: continue # Avoid WFQ if k + q = k (requires ngkpt, multiple shifts are not supported) need_wfkq = True if is_gamma: need_wfkq = False elif try_to_skip_wfkq: # k = (i + shiftk) / ngkpt qinds = np.rint(qpt * ngqpt - shiftq) f = (qinds * ngkpt) % ngqpt need_wfkq = np.any(f != 0) if need_wfkq: nscf_inp = scf_task.input.new_with_vars(qpt=qpt, nqpt=1, iscf=-2, kptopt=3, tolwfr=tolwfr) if nband: nbdbuf = max(2,nband*0.1) nscf_inp.set_vars(nband=nband+nbdbuf, nbdbuf=nbdbuf) wfkq_task = new.register_nscf_task(nscf_inp, deps={scf_task: ["DEN", "WFK"]}) new.wfkq_tasks.append(wfkq_task) multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=ph_tolerance) for ph_inp in multi: deps = {scf_task: "WFK", wfkq_task: "WFQ"} if need_wfkq else {scf_task: "WFK"} #ph_inp["prtwf"] = -1 t = new.register_phonon_task(ph_inp, deps=deps) if need_wfkq: new.wfkq_task_children[wfkq_task].append(t) return new
def function[from_scf_task, parameter[cls, scf_task, ngqpt, ph_tolerance, tolwfr, nband, with_becs, ddk_tolerance, shiftq, is_ngqpt, remove_wfkq, manager]]: constant[ Construct a `PhononWfkqWork` from a :class:`ScfTask` object. The input files for WFQ and phonons are automatically generated from the input of the ScfTask. Each phonon task depends on the WFK file produced by scf_task and the associated WFQ file. Args: scf_task: ScfTask object. ngqpt: three integers defining the q-mesh with_becs: Activate calculation of Electric field and Born effective charges. ph_tolerance: dict {"varname": value} with the tolerance for the phonon run. None to use AbiPy default. tolwfr: tolerance used to compute WFQ. ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs. None to use AbiPy default. shiftq: Q-mesh shift. Multiple shifts are not supported. is_ngqpt: the ngqpt is interpreted as a set of integers defining the q-mesh, otherwise is an explicit list of q-points remove_wfkq: Remove WKQ files when the children are completed. manager: :class:`TaskManager` object. .. note: Use k-meshes with one shift and q-meshes that are multiple of ngkpt to decrease the number of WFQ files to be computed. 
] if <ast.UnaryOp object at 0x7da1b21a11b0> begin[:] <ast.Raise object at 0x7da1b21a1cc0> variable[shiftq] assign[=] call[name[np].reshape, parameter[name[shiftq], tuple[[<ast.Constant object at 0x7da1b21a07c0>]]]] if name[is_ngqpt] begin[:] variable[qpoints] assign[=] call[name[scf_task].input.abiget_ibz, parameter[]].points variable[new] assign[=] call[name[cls], parameter[]] name[new].remove_wfkq assign[=] name[remove_wfkq] name[new].wfkq_tasks assign[=] list[[]] name[new].wfkq_task_children assign[=] call[name[collections].defaultdict, parameter[name[list]]] if name[with_becs] begin[:] call[name[new].add_becs_from_scf_task, parameter[name[scf_task], name[ddk_tolerance], name[ph_tolerance]]] <ast.Tuple object at 0x7da1b21a0880> assign[=] call[name[scf_task].input.get_ngkpt_shiftk, parameter[]] variable[try_to_skip_wfkq] assign[=] constant[True] if <ast.BoolOp object at 0x7da1b21a2ad0> begin[:] variable[try_to_skip_wfkq] assign[=] constant[True] for taget[name[qpt]] in starred[name[qpoints]] begin[:] variable[is_gamma] assign[=] compare[call[name[np].sum, parameter[binary_operation[name[qpt] ** constant[2]]]] less[<] constant[1e-12]] if <ast.BoolOp object at 0x7da1b21a2140> begin[:] continue variable[need_wfkq] assign[=] constant[True] if name[is_gamma] begin[:] variable[need_wfkq] assign[=] constant[False] if name[need_wfkq] begin[:] variable[nscf_inp] assign[=] call[name[scf_task].input.new_with_vars, parameter[]] if name[nband] begin[:] variable[nbdbuf] assign[=] call[name[max], parameter[constant[2], binary_operation[name[nband] * constant[0.1]]]] call[name[nscf_inp].set_vars, parameter[]] variable[wfkq_task] assign[=] call[name[new].register_nscf_task, parameter[name[nscf_inp]]] call[name[new].wfkq_tasks.append, parameter[name[wfkq_task]]] variable[multi] assign[=] call[name[scf_task].input.make_ph_inputs_qpoint, parameter[name[qpt]]] for taget[name[ph_inp]] in starred[name[multi]] begin[:] variable[deps] assign[=] <ast.IfExp object at 0x7da20e962320> 
variable[t] assign[=] call[name[new].register_phonon_task, parameter[name[ph_inp]]] if name[need_wfkq] begin[:] call[call[name[new].wfkq_task_children][name[wfkq_task]].append, parameter[name[t]]] return[name[new]]
keyword[def] identifier[from_scf_task] ( identifier[cls] , identifier[scf_task] , identifier[ngqpt] , identifier[ph_tolerance] = keyword[None] , identifier[tolwfr] = literal[int] , identifier[nband] = keyword[None] , identifier[with_becs] = keyword[False] , identifier[ddk_tolerance] = keyword[None] , identifier[shiftq] =( literal[int] , literal[int] , literal[int] ), identifier[is_ngqpt] = keyword[True] , identifier[remove_wfkq] = keyword[True] , identifier[manager] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[scf_task] , identifier[ScfTask] ): keyword[raise] identifier[TypeError] ( literal[string] % identifier[scf_task] ) identifier[shiftq] = identifier[np] . identifier[reshape] ( identifier[shiftq] ,( literal[int] ,)) keyword[if] identifier[is_ngqpt] : identifier[qpoints] = identifier[scf_task] . identifier[input] . identifier[abiget_ibz] ( identifier[ngkpt] = identifier[ngqpt] , identifier[shiftk] = identifier[shiftq] , identifier[kptopt] = literal[int] ). identifier[points] keyword[else] : identifier[qpoints] = identifier[ngqpt] identifier[new] = identifier[cls] ( identifier[manager] = identifier[manager] ) identifier[new] . identifier[remove_wfkq] = identifier[remove_wfkq] identifier[new] . identifier[wfkq_tasks] =[] identifier[new] . identifier[wfkq_task_children] = identifier[collections] . identifier[defaultdict] ( identifier[list] ) keyword[if] identifier[with_becs] : identifier[new] . identifier[add_becs_from_scf_task] ( identifier[scf_task] , identifier[ddk_tolerance] , identifier[ph_tolerance] ) identifier[ngkpt] , identifier[shiftk] = identifier[scf_task] . identifier[input] . 
identifier[get_ngkpt_shiftk] () identifier[try_to_skip_wfkq] = keyword[True] keyword[if] identifier[ngkpt] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[shiftk] )> literal[int] keyword[and] identifier[is_ngqpt] : identifier[try_to_skip_wfkq] = keyword[True] keyword[for] identifier[qpt] keyword[in] identifier[qpoints] : identifier[is_gamma] = identifier[np] . identifier[sum] ( identifier[qpt] ** literal[int] )< literal[int] keyword[if] identifier[with_becs] keyword[and] identifier[is_gamma] : keyword[continue] identifier[need_wfkq] = keyword[True] keyword[if] identifier[is_gamma] : identifier[need_wfkq] = keyword[False] keyword[elif] identifier[try_to_skip_wfkq] : identifier[qinds] = identifier[np] . identifier[rint] ( identifier[qpt] * identifier[ngqpt] - identifier[shiftq] ) identifier[f] =( identifier[qinds] * identifier[ngkpt] )% identifier[ngqpt] identifier[need_wfkq] = identifier[np] . identifier[any] ( identifier[f] != literal[int] ) keyword[if] identifier[need_wfkq] : identifier[nscf_inp] = identifier[scf_task] . identifier[input] . identifier[new_with_vars] ( identifier[qpt] = identifier[qpt] , identifier[nqpt] = literal[int] , identifier[iscf] =- literal[int] , identifier[kptopt] = literal[int] , identifier[tolwfr] = identifier[tolwfr] ) keyword[if] identifier[nband] : identifier[nbdbuf] = identifier[max] ( literal[int] , identifier[nband] * literal[int] ) identifier[nscf_inp] . identifier[set_vars] ( identifier[nband] = identifier[nband] + identifier[nbdbuf] , identifier[nbdbuf] = identifier[nbdbuf] ) identifier[wfkq_task] = identifier[new] . identifier[register_nscf_task] ( identifier[nscf_inp] , identifier[deps] ={ identifier[scf_task] :[ literal[string] , literal[string] ]}) identifier[new] . identifier[wfkq_tasks] . identifier[append] ( identifier[wfkq_task] ) identifier[multi] = identifier[scf_task] . identifier[input] . 
identifier[make_ph_inputs_qpoint] ( identifier[qpt] , identifier[tolerance] = identifier[ph_tolerance] ) keyword[for] identifier[ph_inp] keyword[in] identifier[multi] : identifier[deps] ={ identifier[scf_task] : literal[string] , identifier[wfkq_task] : literal[string] } keyword[if] identifier[need_wfkq] keyword[else] { identifier[scf_task] : literal[string] } identifier[t] = identifier[new] . identifier[register_phonon_task] ( identifier[ph_inp] , identifier[deps] = identifier[deps] ) keyword[if] identifier[need_wfkq] : identifier[new] . identifier[wfkq_task_children] [ identifier[wfkq_task] ]. identifier[append] ( identifier[t] ) keyword[return] identifier[new]
def from_scf_task(cls, scf_task, ngqpt, ph_tolerance=None, tolwfr=1e-22, nband=None, with_becs=False, ddk_tolerance=None, shiftq=(0, 0, 0), is_ngqpt=True, remove_wfkq=True, manager=None): """ Construct a `PhononWfkqWork` from a :class:`ScfTask` object. The input files for WFQ and phonons are automatically generated from the input of the ScfTask. Each phonon task depends on the WFK file produced by scf_task and the associated WFQ file. Args: scf_task: ScfTask object. ngqpt: three integers defining the q-mesh with_becs: Activate calculation of Electric field and Born effective charges. ph_tolerance: dict {"varname": value} with the tolerance for the phonon run. None to use AbiPy default. tolwfr: tolerance used to compute WFQ. ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs. None to use AbiPy default. shiftq: Q-mesh shift. Multiple shifts are not supported. is_ngqpt: the ngqpt is interpreted as a set of integers defining the q-mesh, otherwise is an explicit list of q-points remove_wfkq: Remove WKQ files when the children are completed. manager: :class:`TaskManager` object. .. note: Use k-meshes with one shift and q-meshes that are multiple of ngkpt to decrease the number of WFQ files to be computed. """ if not isinstance(scf_task, ScfTask): raise TypeError('task `%s` does not inherit from ScfTask' % scf_task) # depends on [control=['if'], data=[]] shiftq = np.reshape(shiftq, (3,)) if is_ngqpt: qpoints = scf_task.input.abiget_ibz(ngkpt=ngqpt, shiftk=shiftq, kptopt=1).points # depends on [control=['if'], data=[]] else: qpoints = ngqpt new = cls(manager=manager) new.remove_wfkq = remove_wfkq new.wfkq_tasks = [] new.wfkq_task_children = collections.defaultdict(list) if with_becs: # Add DDK and BECS. new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance) # depends on [control=['if'], data=[]] # Get ngkpt, shift for electrons from input. 
# Won't try to skip WFQ if multiple shifts or off-diagonal kptrlatt (ngkpt, shiftk) = scf_task.input.get_ngkpt_shiftk() try_to_skip_wfkq = True if ngkpt is None or (len(shiftk) > 1 and is_ngqpt): try_to_skip_wfkq = True # depends on [control=['if'], data=[]] # TODO: One could avoid kptopt 3 by computing WFK in the IBZ and then rotating. # but this has to be done inside Abinit. for qpt in qpoints: is_gamma = np.sum(qpt ** 2) < 1e-12 if with_becs and is_gamma: continue # depends on [control=['if'], data=[]] # Avoid WFQ if k + q = k (requires ngkpt, multiple shifts are not supported) need_wfkq = True if is_gamma: need_wfkq = False # depends on [control=['if'], data=[]] elif try_to_skip_wfkq: # k = (i + shiftk) / ngkpt qinds = np.rint(qpt * ngqpt - shiftq) f = qinds * ngkpt % ngqpt need_wfkq = np.any(f != 0) # depends on [control=['if'], data=[]] if need_wfkq: nscf_inp = scf_task.input.new_with_vars(qpt=qpt, nqpt=1, iscf=-2, kptopt=3, tolwfr=tolwfr) if nband: nbdbuf = max(2, nband * 0.1) nscf_inp.set_vars(nband=nband + nbdbuf, nbdbuf=nbdbuf) # depends on [control=['if'], data=[]] wfkq_task = new.register_nscf_task(nscf_inp, deps={scf_task: ['DEN', 'WFK']}) new.wfkq_tasks.append(wfkq_task) # depends on [control=['if'], data=[]] multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=ph_tolerance) for ph_inp in multi: deps = {scf_task: 'WFK', wfkq_task: 'WFQ'} if need_wfkq else {scf_task: 'WFK'} #ph_inp["prtwf"] = -1 t = new.register_phonon_task(ph_inp, deps=deps) if need_wfkq: new.wfkq_task_children[wfkq_task].append(t) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ph_inp']] # depends on [control=['for'], data=['qpt']] return new
def is_dragon(host, timeout=1): """ Check if host is a dragon. Check if the specified host is a dragon based on simple heuristic. The code simply checks if particular strings are in the index page. It should work for DragonMint or Innosilicon branded miners. """ try: r = requests.get('http://{}/'.format(host), timeout=timeout) if r.status_code == 200: if '<title>DragonMint</title>' in r.text or \ '<title>AsicMiner</title>' in r.text: return True except requests.exceptions.RequestException: pass return False
def function[is_dragon, parameter[host, timeout]]: constant[ Check if host is a dragon. Check if the specified host is a dragon based on simple heuristic. The code simply checks if particular strings are in the index page. It should work for DragonMint or Innosilicon branded miners. ] <ast.Try object at 0x7da1b25d9210> return[constant[False]]
keyword[def] identifier[is_dragon] ( identifier[host] , identifier[timeout] = literal[int] ): literal[string] keyword[try] : identifier[r] = identifier[requests] . identifier[get] ( literal[string] . identifier[format] ( identifier[host] ), identifier[timeout] = identifier[timeout] ) keyword[if] identifier[r] . identifier[status_code] == literal[int] : keyword[if] literal[string] keyword[in] identifier[r] . identifier[text] keyword[or] literal[string] keyword[in] identifier[r] . identifier[text] : keyword[return] keyword[True] keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] : keyword[pass] keyword[return] keyword[False]
def is_dragon(host, timeout=1): """ Check if host is a dragon. Check if the specified host is a dragon based on simple heuristic. The code simply checks if particular strings are in the index page. It should work for DragonMint or Innosilicon branded miners. """ try: r = requests.get('http://{}/'.format(host), timeout=timeout) if r.status_code == 200: if '<title>DragonMint</title>' in r.text or '<title>AsicMiner</title>' in r.text: return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except requests.exceptions.RequestException: pass # depends on [control=['except'], data=[]] return False
def parse_uri(self, uri=None): ''' parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef Args: uri (rdflib.term.URIRef,str): input URI Returns: rdflib.term.URIRef ''' # no uri provided, assume root if not uri: return rdflib.term.URIRef(self.root) # string uri provided elif type(uri) == str: # assume "short" uri, expand with repo root if type(uri) == str and not uri.startswith('http'): return rdflib.term.URIRef("%s%s" % (self.root, uri)) # else, assume full uri else: return rdflib.term.URIRef(uri) # already rdflib.term.URIRef elif type(uri) == rdflib.term.URIRef: return uri # unknown input else: raise TypeError('invalid URI input')
def function[parse_uri, parameter[self, uri]]: constant[ parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef Args: uri (rdflib.term.URIRef,str): input URI Returns: rdflib.term.URIRef ] if <ast.UnaryOp object at 0x7da1b2257310> begin[:] return[call[name[rdflib].term.URIRef, parameter[name[self].root]]]
keyword[def] identifier[parse_uri] ( identifier[self] , identifier[uri] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[uri] : keyword[return] identifier[rdflib] . identifier[term] . identifier[URIRef] ( identifier[self] . identifier[root] ) keyword[elif] identifier[type] ( identifier[uri] )== identifier[str] : keyword[if] identifier[type] ( identifier[uri] )== identifier[str] keyword[and] keyword[not] identifier[uri] . identifier[startswith] ( literal[string] ): keyword[return] identifier[rdflib] . identifier[term] . identifier[URIRef] ( literal[string] %( identifier[self] . identifier[root] , identifier[uri] )) keyword[else] : keyword[return] identifier[rdflib] . identifier[term] . identifier[URIRef] ( identifier[uri] ) keyword[elif] identifier[type] ( identifier[uri] )== identifier[rdflib] . identifier[term] . identifier[URIRef] : keyword[return] identifier[uri] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] )
def parse_uri(self, uri=None): """ parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef Args: uri (rdflib.term.URIRef,str): input URI Returns: rdflib.term.URIRef """ # no uri provided, assume root if not uri: return rdflib.term.URIRef(self.root) # depends on [control=['if'], data=[]] # string uri provided elif type(uri) == str: # assume "short" uri, expand with repo root if type(uri) == str and (not uri.startswith('http')): return rdflib.term.URIRef('%s%s' % (self.root, uri)) # depends on [control=['if'], data=[]] else: # else, assume full uri return rdflib.term.URIRef(uri) # depends on [control=['if'], data=['str']] # already rdflib.term.URIRef elif type(uri) == rdflib.term.URIRef: return uri # depends on [control=['if'], data=[]] else: # unknown input raise TypeError('invalid URI input')
def add_parameter(self, parameter: Parameter): """ A parameter can have several scenarios. They are specified as a comma-separated list in a string. :param parameter: :return: """ # try reading the scenarios from the function arg or from the parameter attribute scenario_string = parameter.source_scenarios_string if scenario_string: _scenarios = [i.strip() for i in scenario_string.split(',')] self.fill_missing_attributes_from_default_parameter(parameter) else: _scenarios = [ParameterScenarioSet.default_scenario] for scenario in _scenarios: parameter.scenario = scenario self.parameter_sets[parameter.name][scenario] = parameter # record all tags for this parameter if parameter.tags: _tags = [i.strip() for i in parameter.tags.split(',')] for tag in _tags: self.tags[tag][parameter.name].add(parameter)
def function[add_parameter, parameter[self, parameter]]: constant[ A parameter can have several scenarios. They are specified as a comma-separated list in a string. :param parameter: :return: ] variable[scenario_string] assign[=] name[parameter].source_scenarios_string if name[scenario_string] begin[:] variable[_scenarios] assign[=] <ast.ListComp object at 0x7da20c6ab6a0> call[name[self].fill_missing_attributes_from_default_parameter, parameter[name[parameter]]] for taget[name[scenario]] in starred[name[_scenarios]] begin[:] name[parameter].scenario assign[=] name[scenario] call[call[name[self].parameter_sets][name[parameter].name]][name[scenario]] assign[=] name[parameter] if name[parameter].tags begin[:] variable[_tags] assign[=] <ast.ListComp object at 0x7da20c6aba60> for taget[name[tag]] in starred[name[_tags]] begin[:] call[call[call[name[self].tags][name[tag]]][name[parameter].name].add, parameter[name[parameter]]]
keyword[def] identifier[add_parameter] ( identifier[self] , identifier[parameter] : identifier[Parameter] ): literal[string] identifier[scenario_string] = identifier[parameter] . identifier[source_scenarios_string] keyword[if] identifier[scenario_string] : identifier[_scenarios] =[ identifier[i] . identifier[strip] () keyword[for] identifier[i] keyword[in] identifier[scenario_string] . identifier[split] ( literal[string] )] identifier[self] . identifier[fill_missing_attributes_from_default_parameter] ( identifier[parameter] ) keyword[else] : identifier[_scenarios] =[ identifier[ParameterScenarioSet] . identifier[default_scenario] ] keyword[for] identifier[scenario] keyword[in] identifier[_scenarios] : identifier[parameter] . identifier[scenario] = identifier[scenario] identifier[self] . identifier[parameter_sets] [ identifier[parameter] . identifier[name] ][ identifier[scenario] ]= identifier[parameter] keyword[if] identifier[parameter] . identifier[tags] : identifier[_tags] =[ identifier[i] . identifier[strip] () keyword[for] identifier[i] keyword[in] identifier[parameter] . identifier[tags] . identifier[split] ( literal[string] )] keyword[for] identifier[tag] keyword[in] identifier[_tags] : identifier[self] . identifier[tags] [ identifier[tag] ][ identifier[parameter] . identifier[name] ]. identifier[add] ( identifier[parameter] )
def add_parameter(self, parameter: Parameter): """ A parameter can have several scenarios. They are specified as a comma-separated list in a string. :param parameter: :return: """ # try reading the scenarios from the function arg or from the parameter attribute scenario_string = parameter.source_scenarios_string if scenario_string: _scenarios = [i.strip() for i in scenario_string.split(',')] self.fill_missing_attributes_from_default_parameter(parameter) # depends on [control=['if'], data=[]] else: _scenarios = [ParameterScenarioSet.default_scenario] for scenario in _scenarios: parameter.scenario = scenario self.parameter_sets[parameter.name][scenario] = parameter # depends on [control=['for'], data=['scenario']] # record all tags for this parameter if parameter.tags: _tags = [i.strip() for i in parameter.tags.split(',')] for tag in _tags: self.tags[tag][parameter.name].add(parameter) # depends on [control=['for'], data=['tag']] # depends on [control=['if'], data=[]]
def set_column_names(self, column_names): """ Setup the feature vector with some column names :param column_names: the column names we want :return: """ if len(self._rows): raise NotImplementedError("You can't manually set columns once data has been added") self._update_internal_column_state(column_names)
def function[set_column_names, parameter[self, column_names]]: constant[ Setup the feature vector with some column names :param column_names: the column names we want :return: ] if call[name[len], parameter[name[self]._rows]] begin[:] <ast.Raise object at 0x7da1b137f9a0> call[name[self]._update_internal_column_state, parameter[name[column_names]]]
keyword[def] identifier[set_column_names] ( identifier[self] , identifier[column_names] ): literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[_rows] ): keyword[raise] identifier[NotImplementedError] ( literal[string] ) identifier[self] . identifier[_update_internal_column_state] ( identifier[column_names] )
def set_column_names(self, column_names): """ Setup the feature vector with some column names :param column_names: the column names we want :return: """ if len(self._rows): raise NotImplementedError("You can't manually set columns once data has been added") # depends on [control=['if'], data=[]] self._update_internal_column_state(column_names)
def censor(input_text): """ Returns the input string with profanity replaced with a random string of characters plucked from the censor_characters pool. """ ret = input_text words = get_words() for word in words: curse_word = re.compile(re.escape(word), re.IGNORECASE) cen = "".join(get_censor_char() for i in list(word)) ret = curse_word.sub(cen, ret) return ret
def function[censor, parameter[input_text]]: constant[ Returns the input string with profanity replaced with a random string of characters plucked from the censor_characters pool. ] variable[ret] assign[=] name[input_text] variable[words] assign[=] call[name[get_words], parameter[]] for taget[name[word]] in starred[name[words]] begin[:] variable[curse_word] assign[=] call[name[re].compile, parameter[call[name[re].escape, parameter[name[word]]], name[re].IGNORECASE]] variable[cen] assign[=] call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b0f9c730>]] variable[ret] assign[=] call[name[curse_word].sub, parameter[name[cen], name[ret]]] return[name[ret]]
keyword[def] identifier[censor] ( identifier[input_text] ): literal[string] identifier[ret] = identifier[input_text] identifier[words] = identifier[get_words] () keyword[for] identifier[word] keyword[in] identifier[words] : identifier[curse_word] = identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( identifier[word] ), identifier[re] . identifier[IGNORECASE] ) identifier[cen] = literal[string] . identifier[join] ( identifier[get_censor_char] () keyword[for] identifier[i] keyword[in] identifier[list] ( identifier[word] )) identifier[ret] = identifier[curse_word] . identifier[sub] ( identifier[cen] , identifier[ret] ) keyword[return] identifier[ret]
def censor(input_text): """ Returns the input string with profanity replaced with a random string of characters plucked from the censor_characters pool. """ ret = input_text words = get_words() for word in words: curse_word = re.compile(re.escape(word), re.IGNORECASE) cen = ''.join((get_censor_char() for i in list(word))) ret = curse_word.sub(cen, ret) # depends on [control=['for'], data=['word']] return ret
def get_assessment_lookup_session(self, proxy): """Gets the ``OsidSession`` associated with the assessment lookup service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentLookupSession) - an ``AssessmentLookupSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_lookup()`` is ``true``.* """ if not self.supports_assessment_lookup(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.AssessmentLookupSession(proxy=proxy, runtime=self._runtime)
def function[get_assessment_lookup_session, parameter[self, proxy]]: constant[Gets the ``OsidSession`` associated with the assessment lookup service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentLookupSession) - an ``AssessmentLookupSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_lookup()`` is ``true``.* ] if <ast.UnaryOp object at 0x7da20c6e6e60> begin[:] <ast.Raise object at 0x7da20c6e53c0> return[call[name[sessions].AssessmentLookupSession, parameter[]]]
keyword[def] identifier[get_assessment_lookup_session] ( identifier[self] , identifier[proxy] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[supports_assessment_lookup] (): keyword[raise] identifier[errors] . identifier[Unimplemented] () keyword[return] identifier[sessions] . identifier[AssessmentLookupSession] ( identifier[proxy] = identifier[proxy] , identifier[runtime] = identifier[self] . identifier[_runtime] )
def get_assessment_lookup_session(self, proxy): """Gets the ``OsidSession`` associated with the assessment lookup service. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentLookupSession) - an ``AssessmentLookupSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_lookup()`` is ``true``.* """ if not self.supports_assessment_lookup(): raise errors.Unimplemented() # depends on [control=['if'], data=[]] # pylint: disable=no-member return sessions.AssessmentLookupSession(proxy=proxy, runtime=self._runtime)
def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""): """ Return the submit method and the metric name to use. The metric name is defined as follow: * If available, the normalized metric name alias * (Or) the normalized original metric name """ submit_method = ( metrics_to_collect[original_metric_name][0] if isinstance(metrics_to_collect[original_metric_name], tuple) else metrics_to_collect[original_metric_name] ) metric_name = ( metrics_to_collect[original_metric_name][1] if isinstance(metrics_to_collect[original_metric_name], tuple) else original_metric_name ) return submit_method, self._normalize(metric_name, submit_method, prefix)
def function[_resolve_metric, parameter[self, original_metric_name, metrics_to_collect, prefix]]: constant[ Return the submit method and the metric name to use. The metric name is defined as follow: * If available, the normalized metric name alias * (Or) the normalized original metric name ] variable[submit_method] assign[=] <ast.IfExp object at 0x7da20c9911e0> variable[metric_name] assign[=] <ast.IfExp object at 0x7da20c990730> return[tuple[[<ast.Name object at 0x7da20c9907c0>, <ast.Call object at 0x7da20c991ea0>]]]
keyword[def] identifier[_resolve_metric] ( identifier[self] , identifier[original_metric_name] , identifier[metrics_to_collect] , identifier[prefix] = literal[string] ): literal[string] identifier[submit_method] =( identifier[metrics_to_collect] [ identifier[original_metric_name] ][ literal[int] ] keyword[if] identifier[isinstance] ( identifier[metrics_to_collect] [ identifier[original_metric_name] ], identifier[tuple] ) keyword[else] identifier[metrics_to_collect] [ identifier[original_metric_name] ] ) identifier[metric_name] =( identifier[metrics_to_collect] [ identifier[original_metric_name] ][ literal[int] ] keyword[if] identifier[isinstance] ( identifier[metrics_to_collect] [ identifier[original_metric_name] ], identifier[tuple] ) keyword[else] identifier[original_metric_name] ) keyword[return] identifier[submit_method] , identifier[self] . identifier[_normalize] ( identifier[metric_name] , identifier[submit_method] , identifier[prefix] )
def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=''): """ Return the submit method and the metric name to use. The metric name is defined as follow: * If available, the normalized metric name alias * (Or) the normalized original metric name """ submit_method = metrics_to_collect[original_metric_name][0] if isinstance(metrics_to_collect[original_metric_name], tuple) else metrics_to_collect[original_metric_name] metric_name = metrics_to_collect[original_metric_name][1] if isinstance(metrics_to_collect[original_metric_name], tuple) else original_metric_name return (submit_method, self._normalize(metric_name, submit_method, prefix))
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None, author_date=None, commit_date=None): """Commit the given tree, creating a commit object. :param repo: Repo object the commit should be part of :param tree: Tree object or hex or bin sha the tree of the new commit :param message: Commit message. It may be an empty string if no message is provided. It will be converted to a string in any case. :param parent_commits: Optional Commit objects to use as parents for the new commit. If empty list, the commit will have no parents at all and become a root commit. If None , the current head commit will be the parent of the new commit object :param head: If True, the HEAD will be advanced to the new commit automatically. Else the HEAD will remain pointing on the previous commit. This could lead to undesired results when diffing files. :param author: The name of the author, optional. If unset, the repository configuration is used to obtain this value. :param committer: The name of the committer, optional. If unset, the repository configuration is used to obtain this value. 
:param author_date: The timestamp for the author field :param commit_date: The timestamp for the committer field :return: Commit object representing the new commit :note: Additional information about the committer and Author are taken from the environment or from the git configuration, see git-commit-tree for more information""" if parent_commits is None: try: parent_commits = [repo.head.commit] except ValueError: # empty repositories have no head commit parent_commits = [] # END handle parent commits else: for p in parent_commits: if not isinstance(p, cls): raise ValueError("Parent commit '%r' must be of type %s" % (p, cls)) # end check parent commit types # END if parent commits are unset # retrieve all additional information, create a commit object, and # serialize it # Generally: # * Environment variables override configuration values # * Sensible defaults are set according to the git documentation # COMMITER AND AUTHOR INFO cr = repo.config_reader() env = os.environ committer = committer or Actor.committer(cr) author = author or Actor.author(cr) # PARSE THE DATES unix_time = int(time()) is_dst = daylight and localtime().tm_isdst > 0 offset = altzone if is_dst else timezone author_date_str = env.get(cls.env_author_date, '') if author_date: author_time, author_offset = parse_date(author_date) elif author_date_str: author_time, author_offset = parse_date(author_date_str) else: author_time, author_offset = unix_time, offset # END set author time committer_date_str = env.get(cls.env_committer_date, '') if commit_date: committer_time, committer_offset = parse_date(commit_date) elif committer_date_str: committer_time, committer_offset = parse_date(committer_date_str) else: committer_time, committer_offset = unix_time, offset # END set committer time # assume utf8 encoding enc_section, enc_option = cls.conf_encoding.split('.') conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding) # if the tree is no object, make sure we create one - otherwise # 
the created commit object is invalid if isinstance(tree, str): tree = repo.tree(tree) # END tree conversion # CREATE NEW COMMIT new_commit = cls(repo, cls.NULL_BIN_SHA, tree, author, author_time, author_offset, committer, committer_time, committer_offset, message, parent_commits, conf_encoding) stream = BytesIO() new_commit._serialize(stream) streamlen = stream.tell() stream.seek(0) istream = repo.odb.store(IStream(cls.type, streamlen, stream)) new_commit.binsha = istream.binsha if head: # need late import here, importing git at the very beginning throws # as well ... import git.refs try: repo.head.set_commit(new_commit, logmsg=message) except ValueError: # head is not yet set to the ref our HEAD points to # Happens on first commit master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg="commit (initial): %s" % message) repo.head.set_reference(master, logmsg='commit: Switching to %s' % master) # END handle empty repositories # END advance head handling return new_commit
def function[create_from_tree, parameter[cls, repo, tree, message, parent_commits, head, author, committer, author_date, commit_date]]: constant[Commit the given tree, creating a commit object. :param repo: Repo object the commit should be part of :param tree: Tree object or hex or bin sha the tree of the new commit :param message: Commit message. It may be an empty string if no message is provided. It will be converted to a string in any case. :param parent_commits: Optional Commit objects to use as parents for the new commit. If empty list, the commit will have no parents at all and become a root commit. If None , the current head commit will be the parent of the new commit object :param head: If True, the HEAD will be advanced to the new commit automatically. Else the HEAD will remain pointing on the previous commit. This could lead to undesired results when diffing files. :param author: The name of the author, optional. If unset, the repository configuration is used to obtain this value. :param committer: The name of the committer, optional. If unset, the repository configuration is used to obtain this value. 
:param author_date: The timestamp for the author field :param commit_date: The timestamp for the committer field :return: Commit object representing the new commit :note: Additional information about the committer and Author are taken from the environment or from the git configuration, see git-commit-tree for more information] if compare[name[parent_commits] is constant[None]] begin[:] <ast.Try object at 0x7da1b23473a0> variable[cr] assign[=] call[name[repo].config_reader, parameter[]] variable[env] assign[=] name[os].environ variable[committer] assign[=] <ast.BoolOp object at 0x7da1b2347340> variable[author] assign[=] <ast.BoolOp object at 0x7da1b2344310> variable[unix_time] assign[=] call[name[int], parameter[call[name[time], parameter[]]]] variable[is_dst] assign[=] <ast.BoolOp object at 0x7da1b2345ba0> variable[offset] assign[=] <ast.IfExp object at 0x7da1b2346cb0> variable[author_date_str] assign[=] call[name[env].get, parameter[name[cls].env_author_date, constant[]]] if name[author_date] begin[:] <ast.Tuple object at 0x7da1b2345de0> assign[=] call[name[parse_date], parameter[name[author_date]]] variable[committer_date_str] assign[=] call[name[env].get, parameter[name[cls].env_committer_date, constant[]]] if name[commit_date] begin[:] <ast.Tuple object at 0x7da1b23465c0> assign[=] call[name[parse_date], parameter[name[commit_date]]] <ast.Tuple object at 0x7da1b23475b0> assign[=] call[name[cls].conf_encoding.split, parameter[constant[.]]] variable[conf_encoding] assign[=] call[name[cr].get_value, parameter[name[enc_section], name[enc_option], name[cls].default_encoding]] if call[name[isinstance], parameter[name[tree], name[str]]] begin[:] variable[tree] assign[=] call[name[repo].tree, parameter[name[tree]]] variable[new_commit] assign[=] call[name[cls], parameter[name[repo], name[cls].NULL_BIN_SHA, name[tree], name[author], name[author_time], name[author_offset], name[committer], name[committer_time], name[committer_offset], name[message], name[parent_commits], 
name[conf_encoding]]] variable[stream] assign[=] call[name[BytesIO], parameter[]] call[name[new_commit]._serialize, parameter[name[stream]]] variable[streamlen] assign[=] call[name[stream].tell, parameter[]] call[name[stream].seek, parameter[constant[0]]] variable[istream] assign[=] call[name[repo].odb.store, parameter[call[name[IStream], parameter[name[cls].type, name[streamlen], name[stream]]]]] name[new_commit].binsha assign[=] name[istream].binsha if name[head] begin[:] import module[git.refs] <ast.Try object at 0x7da1b23584f0> return[name[new_commit]]
keyword[def] identifier[create_from_tree] ( identifier[cls] , identifier[repo] , identifier[tree] , identifier[message] , identifier[parent_commits] = keyword[None] , identifier[head] = keyword[False] , identifier[author] = keyword[None] , identifier[committer] = keyword[None] , identifier[author_date] = keyword[None] , identifier[commit_date] = keyword[None] ): literal[string] keyword[if] identifier[parent_commits] keyword[is] keyword[None] : keyword[try] : identifier[parent_commits] =[ identifier[repo] . identifier[head] . identifier[commit] ] keyword[except] identifier[ValueError] : identifier[parent_commits] =[] keyword[else] : keyword[for] identifier[p] keyword[in] identifier[parent_commits] : keyword[if] keyword[not] identifier[isinstance] ( identifier[p] , identifier[cls] ): keyword[raise] identifier[ValueError] ( literal[string] %( identifier[p] , identifier[cls] )) identifier[cr] = identifier[repo] . identifier[config_reader] () identifier[env] = identifier[os] . identifier[environ] identifier[committer] = identifier[committer] keyword[or] identifier[Actor] . identifier[committer] ( identifier[cr] ) identifier[author] = identifier[author] keyword[or] identifier[Actor] . identifier[author] ( identifier[cr] ) identifier[unix_time] = identifier[int] ( identifier[time] ()) identifier[is_dst] = identifier[daylight] keyword[and] identifier[localtime] (). identifier[tm_isdst] > literal[int] identifier[offset] = identifier[altzone] keyword[if] identifier[is_dst] keyword[else] identifier[timezone] identifier[author_date_str] = identifier[env] . identifier[get] ( identifier[cls] . 
identifier[env_author_date] , literal[string] ) keyword[if] identifier[author_date] : identifier[author_time] , identifier[author_offset] = identifier[parse_date] ( identifier[author_date] ) keyword[elif] identifier[author_date_str] : identifier[author_time] , identifier[author_offset] = identifier[parse_date] ( identifier[author_date_str] ) keyword[else] : identifier[author_time] , identifier[author_offset] = identifier[unix_time] , identifier[offset] identifier[committer_date_str] = identifier[env] . identifier[get] ( identifier[cls] . identifier[env_committer_date] , literal[string] ) keyword[if] identifier[commit_date] : identifier[committer_time] , identifier[committer_offset] = identifier[parse_date] ( identifier[commit_date] ) keyword[elif] identifier[committer_date_str] : identifier[committer_time] , identifier[committer_offset] = identifier[parse_date] ( identifier[committer_date_str] ) keyword[else] : identifier[committer_time] , identifier[committer_offset] = identifier[unix_time] , identifier[offset] identifier[enc_section] , identifier[enc_option] = identifier[cls] . identifier[conf_encoding] . identifier[split] ( literal[string] ) identifier[conf_encoding] = identifier[cr] . identifier[get_value] ( identifier[enc_section] , identifier[enc_option] , identifier[cls] . identifier[default_encoding] ) keyword[if] identifier[isinstance] ( identifier[tree] , identifier[str] ): identifier[tree] = identifier[repo] . identifier[tree] ( identifier[tree] ) identifier[new_commit] = identifier[cls] ( identifier[repo] , identifier[cls] . identifier[NULL_BIN_SHA] , identifier[tree] , identifier[author] , identifier[author_time] , identifier[author_offset] , identifier[committer] , identifier[committer_time] , identifier[committer_offset] , identifier[message] , identifier[parent_commits] , identifier[conf_encoding] ) identifier[stream] = identifier[BytesIO] () identifier[new_commit] . 
identifier[_serialize] ( identifier[stream] ) identifier[streamlen] = identifier[stream] . identifier[tell] () identifier[stream] . identifier[seek] ( literal[int] ) identifier[istream] = identifier[repo] . identifier[odb] . identifier[store] ( identifier[IStream] ( identifier[cls] . identifier[type] , identifier[streamlen] , identifier[stream] )) identifier[new_commit] . identifier[binsha] = identifier[istream] . identifier[binsha] keyword[if] identifier[head] : keyword[import] identifier[git] . identifier[refs] keyword[try] : identifier[repo] . identifier[head] . identifier[set_commit] ( identifier[new_commit] , identifier[logmsg] = identifier[message] ) keyword[except] identifier[ValueError] : identifier[master] = identifier[git] . identifier[refs] . identifier[Head] . identifier[create] ( identifier[repo] , identifier[repo] . identifier[head] . identifier[ref] , identifier[new_commit] , identifier[logmsg] = literal[string] % identifier[message] ) identifier[repo] . identifier[head] . identifier[set_reference] ( identifier[master] , identifier[logmsg] = literal[string] % identifier[master] ) keyword[return] identifier[new_commit]
def create_from_tree(cls, repo, tree, message, parent_commits=None, head=False, author=None, committer=None, author_date=None, commit_date=None): """Commit the given tree, creating a commit object. :param repo: Repo object the commit should be part of :param tree: Tree object or hex or bin sha the tree of the new commit :param message: Commit message. It may be an empty string if no message is provided. It will be converted to a string in any case. :param parent_commits: Optional Commit objects to use as parents for the new commit. If empty list, the commit will have no parents at all and become a root commit. If None , the current head commit will be the parent of the new commit object :param head: If True, the HEAD will be advanced to the new commit automatically. Else the HEAD will remain pointing on the previous commit. This could lead to undesired results when diffing files. :param author: The name of the author, optional. If unset, the repository configuration is used to obtain this value. :param committer: The name of the committer, optional. If unset, the repository configuration is used to obtain this value. 
:param author_date: The timestamp for the author field :param commit_date: The timestamp for the committer field :return: Commit object representing the new commit :note: Additional information about the committer and Author are taken from the environment or from the git configuration, see git-commit-tree for more information""" if parent_commits is None: try: parent_commits = [repo.head.commit] # depends on [control=['try'], data=[]] except ValueError: # empty repositories have no head commit parent_commits = [] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['parent_commits']] else: # END handle parent commits for p in parent_commits: if not isinstance(p, cls): raise ValueError("Parent commit '%r' must be of type %s" % (p, cls)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] # end check parent commit types # END if parent commits are unset # retrieve all additional information, create a commit object, and # serialize it # Generally: # * Environment variables override configuration values # * Sensible defaults are set according to the git documentation # COMMITER AND AUTHOR INFO cr = repo.config_reader() env = os.environ committer = committer or Actor.committer(cr) author = author or Actor.author(cr) # PARSE THE DATES unix_time = int(time()) is_dst = daylight and localtime().tm_isdst > 0 offset = altzone if is_dst else timezone author_date_str = env.get(cls.env_author_date, '') if author_date: (author_time, author_offset) = parse_date(author_date) # depends on [control=['if'], data=[]] elif author_date_str: (author_time, author_offset) = parse_date(author_date_str) # depends on [control=['if'], data=[]] else: (author_time, author_offset) = (unix_time, offset) # END set author time committer_date_str = env.get(cls.env_committer_date, '') if commit_date: (committer_time, committer_offset) = parse_date(commit_date) # depends on [control=['if'], data=[]] elif committer_date_str: (committer_time, 
committer_offset) = parse_date(committer_date_str) # depends on [control=['if'], data=[]] else: (committer_time, committer_offset) = (unix_time, offset) # END set committer time # assume utf8 encoding (enc_section, enc_option) = cls.conf_encoding.split('.') conf_encoding = cr.get_value(enc_section, enc_option, cls.default_encoding) # if the tree is no object, make sure we create one - otherwise # the created commit object is invalid if isinstance(tree, str): tree = repo.tree(tree) # depends on [control=['if'], data=[]] # END tree conversion # CREATE NEW COMMIT new_commit = cls(repo, cls.NULL_BIN_SHA, tree, author, author_time, author_offset, committer, committer_time, committer_offset, message, parent_commits, conf_encoding) stream = BytesIO() new_commit._serialize(stream) streamlen = stream.tell() stream.seek(0) istream = repo.odb.store(IStream(cls.type, streamlen, stream)) new_commit.binsha = istream.binsha if head: # need late import here, importing git at the very beginning throws # as well ... import git.refs try: repo.head.set_commit(new_commit, logmsg=message) # depends on [control=['try'], data=[]] except ValueError: # head is not yet set to the ref our HEAD points to # Happens on first commit master = git.refs.Head.create(repo, repo.head.ref, new_commit, logmsg='commit (initial): %s' % message) repo.head.set_reference(master, logmsg='commit: Switching to %s' % master) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # END handle empty repositories # END advance head handling return new_commit
def make_event_filter(self, filter_key, filter_value): """Create a new event filter.""" event_filter = EventFilter( self.event_name, self.event, {filter_key: filter_value}, from_block=self.from_block, to_block=self.to_block ) event_filter.set_poll_interval(0.5) return event_filter
def function[make_event_filter, parameter[self, filter_key, filter_value]]: constant[Create a new event filter.] variable[event_filter] assign[=] call[name[EventFilter], parameter[name[self].event_name, name[self].event, dictionary[[<ast.Name object at 0x7da18bc706a0>], [<ast.Name object at 0x7da18bc73dc0>]]]] call[name[event_filter].set_poll_interval, parameter[constant[0.5]]] return[name[event_filter]]
keyword[def] identifier[make_event_filter] ( identifier[self] , identifier[filter_key] , identifier[filter_value] ): literal[string] identifier[event_filter] = identifier[EventFilter] ( identifier[self] . identifier[event_name] , identifier[self] . identifier[event] , { identifier[filter_key] : identifier[filter_value] }, identifier[from_block] = identifier[self] . identifier[from_block] , identifier[to_block] = identifier[self] . identifier[to_block] ) identifier[event_filter] . identifier[set_poll_interval] ( literal[int] ) keyword[return] identifier[event_filter]
def make_event_filter(self, filter_key, filter_value): """Create a new event filter.""" event_filter = EventFilter(self.event_name, self.event, {filter_key: filter_value}, from_block=self.from_block, to_block=self.to_block) event_filter.set_poll_interval(0.5) return event_filter
def get_mini_reviews(recid, ln=CFG_SITE_LANG): """ Returns the web controls to add reviews to a record from the detailed record pages mini-panel. :param recid: the id of the displayed record :param ln: the user's language """ if CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS: action = 'SUBMIT' else: action = 'DISPLAY' reviews = query_retrieve_comments_or_remarks(recid, ranking=1) return webcomment_templates.tmpl_mini_review( recid, ln, action=action, avg_score=calculate_avg_score(reviews), nb_comments_total=len(reviews))
def function[get_mini_reviews, parameter[recid, ln]]: constant[ Returns the web controls to add reviews to a record from the detailed record pages mini-panel. :param recid: the id of the displayed record :param ln: the user's language ] if name[CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS] begin[:] variable[action] assign[=] constant[SUBMIT] variable[reviews] assign[=] call[name[query_retrieve_comments_or_remarks], parameter[name[recid]]] return[call[name[webcomment_templates].tmpl_mini_review, parameter[name[recid], name[ln]]]]
keyword[def] identifier[get_mini_reviews] ( identifier[recid] , identifier[ln] = identifier[CFG_SITE_LANG] ): literal[string] keyword[if] identifier[CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS] : identifier[action] = literal[string] keyword[else] : identifier[action] = literal[string] identifier[reviews] = identifier[query_retrieve_comments_or_remarks] ( identifier[recid] , identifier[ranking] = literal[int] ) keyword[return] identifier[webcomment_templates] . identifier[tmpl_mini_review] ( identifier[recid] , identifier[ln] , identifier[action] = identifier[action] , identifier[avg_score] = identifier[calculate_avg_score] ( identifier[reviews] ), identifier[nb_comments_total] = identifier[len] ( identifier[reviews] ))
def get_mini_reviews(recid, ln=CFG_SITE_LANG): """ Returns the web controls to add reviews to a record from the detailed record pages mini-panel. :param recid: the id of the displayed record :param ln: the user's language """ if CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS: action = 'SUBMIT' # depends on [control=['if'], data=[]] else: action = 'DISPLAY' reviews = query_retrieve_comments_or_remarks(recid, ranking=1) return webcomment_templates.tmpl_mini_review(recid, ln, action=action, avg_score=calculate_avg_score(reviews), nb_comments_total=len(reviews))
def set_filters(self, can_filers=None): """Unsupported. See note on :class:`~can.interfaces.nican.NicanBus`. """ if self.__set_filters_has_been_called: logger.warn("using filters is not supported like this, see note on NicanBus") else: # allow the constructor to call this without causing a warning self.__set_filters_has_been_called = True
def function[set_filters, parameter[self, can_filers]]: constant[Unsupported. See note on :class:`~can.interfaces.nican.NicanBus`. ] if name[self].__set_filters_has_been_called begin[:] call[name[logger].warn, parameter[constant[using filters is not supported like this, see note on NicanBus]]]
keyword[def] identifier[set_filters] ( identifier[self] , identifier[can_filers] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[__set_filters_has_been_called] : identifier[logger] . identifier[warn] ( literal[string] ) keyword[else] : identifier[self] . identifier[__set_filters_has_been_called] = keyword[True]
def set_filters(self, can_filers=None): """Unsupported. See note on :class:`~can.interfaces.nican.NicanBus`. """ if self.__set_filters_has_been_called: logger.warn('using filters is not supported like this, see note on NicanBus') # depends on [control=['if'], data=[]] else: # allow the constructor to call this without causing a warning self.__set_filters_has_been_called = True
def _maxSizeCheck(cls, obj): """ Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE Args: obj (numbers.Number or collection): Raises: :class:`fileseq.exceptions.MaxSizeException`: """ fail = False size = 0 if isinstance(obj, numbers.Number): if obj > constants.MAX_FRAME_SIZE: fail = True size = obj elif hasattr(obj, '__len__'): size = len(obj) fail = size > constants.MAX_FRAME_SIZE if fail: raise MaxSizeException('Frame size %s > %s (MAX_FRAME_SIZE)' \ % (size, constants.MAX_FRAME_SIZE))
def function[_maxSizeCheck, parameter[cls, obj]]: constant[ Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE Args: obj (numbers.Number or collection): Raises: :class:`fileseq.exceptions.MaxSizeException`: ] variable[fail] assign[=] constant[False] variable[size] assign[=] constant[0] if call[name[isinstance], parameter[name[obj], name[numbers].Number]] begin[:] if compare[name[obj] greater[>] name[constants].MAX_FRAME_SIZE] begin[:] variable[fail] assign[=] constant[True] variable[size] assign[=] name[obj] if name[fail] begin[:] <ast.Raise object at 0x7da1b064f760>
keyword[def] identifier[_maxSizeCheck] ( identifier[cls] , identifier[obj] ): literal[string] identifier[fail] = keyword[False] identifier[size] = literal[int] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[numbers] . identifier[Number] ): keyword[if] identifier[obj] > identifier[constants] . identifier[MAX_FRAME_SIZE] : identifier[fail] = keyword[True] identifier[size] = identifier[obj] keyword[elif] identifier[hasattr] ( identifier[obj] , literal[string] ): identifier[size] = identifier[len] ( identifier[obj] ) identifier[fail] = identifier[size] > identifier[constants] . identifier[MAX_FRAME_SIZE] keyword[if] identifier[fail] : keyword[raise] identifier[MaxSizeException] ( literal[string] %( identifier[size] , identifier[constants] . identifier[MAX_FRAME_SIZE] ))
def _maxSizeCheck(cls, obj): """ Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE Args: obj (numbers.Number or collection): Raises: :class:`fileseq.exceptions.MaxSizeException`: """ fail = False size = 0 if isinstance(obj, numbers.Number): if obj > constants.MAX_FRAME_SIZE: fail = True size = obj # depends on [control=['if'], data=['obj']] # depends on [control=['if'], data=[]] elif hasattr(obj, '__len__'): size = len(obj) fail = size > constants.MAX_FRAME_SIZE # depends on [control=['if'], data=[]] if fail: raise MaxSizeException('Frame size %s > %s (MAX_FRAME_SIZE)' % (size, constants.MAX_FRAME_SIZE)) # depends on [control=['if'], data=[]]
def cminus(a, b): ''' cminus(a, b) returns the difference a - b as a numpy array object. Like numpy's subtract function or a - b syntax, minus will thread over the latest dimension possible. ''' # adding/subtracting a constant to/from a sparse array is an error... spa = sps.issparse(a) spb = sps.issparse(b) if not spa: a = np.asarray(a) if not spb: b = np.asarray(b) if spa: b = np.reshape(b, (1,1)) if len(np.shape(b)) == 0 else b elif spb: a = np.reshape(a, (1,1)) if len(np.shape(a)) == 0 else a return a - b
def function[cminus, parameter[a, b]]: constant[ cminus(a, b) returns the difference a - b as a numpy array object. Like numpy's subtract function or a - b syntax, minus will thread over the latest dimension possible. ] variable[spa] assign[=] call[name[sps].issparse, parameter[name[a]]] variable[spb] assign[=] call[name[sps].issparse, parameter[name[b]]] if <ast.UnaryOp object at 0x7da18eb57e50> begin[:] variable[a] assign[=] call[name[np].asarray, parameter[name[a]]] if <ast.UnaryOp object at 0x7da18eb57820> begin[:] variable[b] assign[=] call[name[np].asarray, parameter[name[b]]] if name[spa] begin[:] variable[b] assign[=] <ast.IfExp object at 0x7da1b0b448b0> return[binary_operation[name[a] - name[b]]]
keyword[def] identifier[cminus] ( identifier[a] , identifier[b] ): literal[string] identifier[spa] = identifier[sps] . identifier[issparse] ( identifier[a] ) identifier[spb] = identifier[sps] . identifier[issparse] ( identifier[b] ) keyword[if] keyword[not] identifier[spa] : identifier[a] = identifier[np] . identifier[asarray] ( identifier[a] ) keyword[if] keyword[not] identifier[spb] : identifier[b] = identifier[np] . identifier[asarray] ( identifier[b] ) keyword[if] identifier[spa] : identifier[b] = identifier[np] . identifier[reshape] ( identifier[b] ,( literal[int] , literal[int] )) keyword[if] identifier[len] ( identifier[np] . identifier[shape] ( identifier[b] ))== literal[int] keyword[else] identifier[b] keyword[elif] identifier[spb] : identifier[a] = identifier[np] . identifier[reshape] ( identifier[a] ,( literal[int] , literal[int] )) keyword[if] identifier[len] ( identifier[np] . identifier[shape] ( identifier[a] ))== literal[int] keyword[else] identifier[a] keyword[return] identifier[a] - identifier[b]
def cminus(a, b): """ cminus(a, b) returns the difference a - b as a numpy array object. Like numpy's subtract function or a - b syntax, minus will thread over the latest dimension possible. """ # adding/subtracting a constant to/from a sparse array is an error... spa = sps.issparse(a) spb = sps.issparse(b) if not spa: a = np.asarray(a) # depends on [control=['if'], data=[]] if not spb: b = np.asarray(b) # depends on [control=['if'], data=[]] if spa: b = np.reshape(b, (1, 1)) if len(np.shape(b)) == 0 else b # depends on [control=['if'], data=[]] elif spb: a = np.reshape(a, (1, 1)) if len(np.shape(a)) == 0 else a # depends on [control=['if'], data=[]] return a - b
def unpatch_locals(depth=3): """Restores the original values of module variables considered preferences if they are still PatchedLocal and not PrefProxy. """ for name, locals_dict in traverse_local_prefs(depth): if isinstance(locals_dict[name], PatchedLocal): locals_dict[name] = locals_dict[name].val del get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL]
def function[unpatch_locals, parameter[depth]]: constant[Restores the original values of module variables considered preferences if they are still PatchedLocal and not PrefProxy. ] for taget[tuple[[<ast.Name object at 0x7da1afe0e140>, <ast.Name object at 0x7da1afe0d6c0>]]] in starred[call[name[traverse_local_prefs], parameter[name[depth]]]] begin[:] if call[name[isinstance], parameter[call[name[locals_dict]][name[name]], name[PatchedLocal]]] begin[:] call[name[locals_dict]][name[name]] assign[=] call[name[locals_dict]][name[name]].val <ast.Delete object at 0x7da1afe19ab0>
keyword[def] identifier[unpatch_locals] ( identifier[depth] = literal[int] ): literal[string] keyword[for] identifier[name] , identifier[locals_dict] keyword[in] identifier[traverse_local_prefs] ( identifier[depth] ): keyword[if] identifier[isinstance] ( identifier[locals_dict] [ identifier[name] ], identifier[PatchedLocal] ): identifier[locals_dict] [ identifier[name] ]= identifier[locals_dict] [ identifier[name] ]. identifier[val] keyword[del] identifier[get_frame_locals] ( identifier[depth] )[ identifier[__PATCHED_LOCALS_SENTINEL] ]
def unpatch_locals(depth=3): """Restores the original values of module variables considered preferences if they are still PatchedLocal and not PrefProxy. """ for (name, locals_dict) in traverse_local_prefs(depth): if isinstance(locals_dict[name], PatchedLocal): locals_dict[name] = locals_dict[name].val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] del get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL]
def cache_key(self, request, method=None): """ the cache key is the absolute uri and the request method """ if method is None: method = request.method return "bettercache_page:%s:%s" %(request.build_absolute_uri(), method)
def function[cache_key, parameter[self, request, method]]: constant[ the cache key is the absolute uri and the request method ] if compare[name[method] is constant[None]] begin[:] variable[method] assign[=] name[request].method return[binary_operation[constant[bettercache_page:%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b14d5bd0>, <ast.Name object at 0x7da1b14d5b70>]]]]
keyword[def] identifier[cache_key] ( identifier[self] , identifier[request] , identifier[method] = keyword[None] ): literal[string] keyword[if] identifier[method] keyword[is] keyword[None] : identifier[method] = identifier[request] . identifier[method] keyword[return] literal[string] %( identifier[request] . identifier[build_absolute_uri] (), identifier[method] )
def cache_key(self, request, method=None): """ the cache key is the absolute uri and the request method """ if method is None: method = request.method # depends on [control=['if'], data=['method']] return 'bettercache_page:%s:%s' % (request.build_absolute_uri(), method)
def wr_txt(self, fout_txt="gos_depth01.txt", title=None): """write text table of depth-01 GO terms and their letter representation.""" with open(fout_txt, 'w') as prt: self.prt_header(prt, title) data_nts = self.prt_txt(prt) sys.stdout.write(" {N:>5} items WROTE: {TXT}\n".format( N=len(data_nts), TXT=fout_txt))
def function[wr_txt, parameter[self, fout_txt, title]]: constant[write text table of depth-01 GO terms and their letter representation.] with call[name[open], parameter[name[fout_txt], constant[w]]] begin[:] call[name[self].prt_header, parameter[name[prt], name[title]]] variable[data_nts] assign[=] call[name[self].prt_txt, parameter[name[prt]]] call[name[sys].stdout.write, parameter[call[constant[ {N:>5} items WROTE: {TXT} ].format, parameter[]]]]
keyword[def] identifier[wr_txt] ( identifier[self] , identifier[fout_txt] = literal[string] , identifier[title] = keyword[None] ): literal[string] keyword[with] identifier[open] ( identifier[fout_txt] , literal[string] ) keyword[as] identifier[prt] : identifier[self] . identifier[prt_header] ( identifier[prt] , identifier[title] ) identifier[data_nts] = identifier[self] . identifier[prt_txt] ( identifier[prt] ) identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[N] = identifier[len] ( identifier[data_nts] ), identifier[TXT] = identifier[fout_txt] ))
def wr_txt(self, fout_txt='gos_depth01.txt', title=None): """write text table of depth-01 GO terms and their letter representation.""" with open(fout_txt, 'w') as prt: self.prt_header(prt, title) data_nts = self.prt_txt(prt) sys.stdout.write(' {N:>5} items WROTE: {TXT}\n'.format(N=len(data_nts), TXT=fout_txt)) # depends on [control=['with'], data=['prt']]
def CallUDFUNS(f, x): """ We are given a UDF CFUNCTYPE and want to call it in python :param f: SpiceUDFUNS :type f: CFUNCTYPE :param x: some scalar :type x: float :return: value :rtype: float """ value = c_double() f(x, byref(value)) return value.value
def function[CallUDFUNS, parameter[f, x]]: constant[ We are given a UDF CFUNCTYPE and want to call it in python :param f: SpiceUDFUNS :type f: CFUNCTYPE :param x: some scalar :type x: float :return: value :rtype: float ] variable[value] assign[=] call[name[c_double], parameter[]] call[name[f], parameter[name[x], call[name[byref], parameter[name[value]]]]] return[name[value].value]
keyword[def] identifier[CallUDFUNS] ( identifier[f] , identifier[x] ): literal[string] identifier[value] = identifier[c_double] () identifier[f] ( identifier[x] , identifier[byref] ( identifier[value] )) keyword[return] identifier[value] . identifier[value]
def CallUDFUNS(f, x): """ We are given a UDF CFUNCTYPE and want to call it in python :param f: SpiceUDFUNS :type f: CFUNCTYPE :param x: some scalar :type x: float :return: value :rtype: float """ value = c_double() f(x, byref(value)) return value.value
def jupyter_notebook_skeleton(): """Returns a dictionary with the elements of a Jupyter notebook""" py_version = sys.version_info notebook_skeleton = { "cells": [], "metadata": { "kernelspec": { "display_name": "Python " + str(py_version[0]), "language": "python", "name": "python" + str(py_version[0]) }, "language_info": { "codemirror_mode": { "name": "ipython", "version": py_version[0] }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython" + str(py_version[0]), "version": '{0}.{1}.{2}'.format(*sys.version_info[:3]) } }, "nbformat": 4, "nbformat_minor": 0 } return notebook_skeleton
def function[jupyter_notebook_skeleton, parameter[]]: constant[Returns a dictionary with the elements of a Jupyter notebook] variable[py_version] assign[=] name[sys].version_info variable[notebook_skeleton] assign[=] dictionary[[<ast.Constant object at 0x7da1b2346a70>, <ast.Constant object at 0x7da1b23469e0>, <ast.Constant object at 0x7da1b2345c60>, <ast.Constant object at 0x7da1b2345810>], [<ast.List object at 0x7da1b2344be0>, <ast.Dict object at 0x7da1b23479d0>, <ast.Constant object at 0x7da1b26ad090>, <ast.Constant object at 0x7da1b26ae7d0>]] return[name[notebook_skeleton]]
keyword[def] identifier[jupyter_notebook_skeleton] (): literal[string] identifier[py_version] = identifier[sys] . identifier[version_info] identifier[notebook_skeleton] ={ literal[string] :[], literal[string] :{ literal[string] :{ literal[string] : literal[string] + identifier[str] ( identifier[py_version] [ literal[int] ]), literal[string] : literal[string] , literal[string] : literal[string] + identifier[str] ( identifier[py_version] [ literal[int] ]) }, literal[string] :{ literal[string] :{ literal[string] : literal[string] , literal[string] : identifier[py_version] [ literal[int] ] }, literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] + identifier[str] ( identifier[py_version] [ literal[int] ]), literal[string] : literal[string] . identifier[format] (* identifier[sys] . identifier[version_info] [: literal[int] ]) } }, literal[string] : literal[int] , literal[string] : literal[int] } keyword[return] identifier[notebook_skeleton]
def jupyter_notebook_skeleton(): """Returns a dictionary with the elements of a Jupyter notebook""" py_version = sys.version_info notebook_skeleton = {'cells': [], 'metadata': {'kernelspec': {'display_name': 'Python ' + str(py_version[0]), 'language': 'python', 'name': 'python' + str(py_version[0])}, 'language_info': {'codemirror_mode': {'name': 'ipython', 'version': py_version[0]}, 'file_extension': '.py', 'mimetype': 'text/x-python', 'name': 'python', 'nbconvert_exporter': 'python', 'pygments_lexer': 'ipython' + str(py_version[0]), 'version': '{0}.{1}.{2}'.format(*sys.version_info[:3])}}, 'nbformat': 4, 'nbformat_minor': 0} return notebook_skeleton
def values_above_sweep(abf,dataI,dataY,ylabel="",useFigure=None): """ To make plots like AP frequency over original trace. dataI=[i] #the i of the sweep dataY=[1.234] #something like inst freq """ xOffset = abf.currentSweep*abf.sweepInterval if not useFigure: #just passing the figure makes it persistant! pylab.figure(figsize=(8,6)) ax=pylab.subplot(221) pylab.grid(alpha=.5) if len(dataI): pylab.plot(abf.dataX[dataI],dataY,'.',ms=10,alpha=.5, color=abf.colormap[abf.currentSweep]) pylab.margins(0,.1) pylab.ylabel(ylabel) pylab.subplot(223,sharex=ax) pylab.grid(alpha=.5) pylab.plot(abf.dataX,abf.dataY,color=abf.colormap[abf.currentSweep],alpha=.5) pylab.ylabel("raw data (%s)"%abf.units) ax2=pylab.subplot(222) pylab.grid(alpha=.5) if len(dataI): pylab.plot(abf.dataX[dataI]+xOffset,dataY,'.',ms=10,alpha=.5, color=abf.colormap[abf.currentSweep]) pylab.margins(0,.1) pylab.ylabel(ylabel) pylab.subplot(224,sharex=ax2) pylab.grid(alpha=.5) pylab.plot(abf.dataX+xOffset,abf.dataY,color=abf.colormap[abf.currentSweep]) pylab.ylabel("raw data (%s)"%abf.units) pylab.tight_layout()
def function[values_above_sweep, parameter[abf, dataI, dataY, ylabel, useFigure]]: constant[ To make plots like AP frequency over original trace. dataI=[i] #the i of the sweep dataY=[1.234] #something like inst freq ] variable[xOffset] assign[=] binary_operation[name[abf].currentSweep * name[abf].sweepInterval] if <ast.UnaryOp object at 0x7da1afeefbb0> begin[:] call[name[pylab].figure, parameter[]] variable[ax] assign[=] call[name[pylab].subplot, parameter[constant[221]]] call[name[pylab].grid, parameter[]] if call[name[len], parameter[name[dataI]]] begin[:] call[name[pylab].plot, parameter[call[name[abf].dataX][name[dataI]], name[dataY], constant[.]]] call[name[pylab].margins, parameter[constant[0], constant[0.1]]] call[name[pylab].ylabel, parameter[name[ylabel]]] call[name[pylab].subplot, parameter[constant[223]]] call[name[pylab].grid, parameter[]] call[name[pylab].plot, parameter[name[abf].dataX, name[abf].dataY]] call[name[pylab].ylabel, parameter[binary_operation[constant[raw data (%s)] <ast.Mod object at 0x7da2590d6920> name[abf].units]]] variable[ax2] assign[=] call[name[pylab].subplot, parameter[constant[222]]] call[name[pylab].grid, parameter[]] if call[name[len], parameter[name[dataI]]] begin[:] call[name[pylab].plot, parameter[binary_operation[call[name[abf].dataX][name[dataI]] + name[xOffset]], name[dataY], constant[.]]] call[name[pylab].margins, parameter[constant[0], constant[0.1]]] call[name[pylab].ylabel, parameter[name[ylabel]]] call[name[pylab].subplot, parameter[constant[224]]] call[name[pylab].grid, parameter[]] call[name[pylab].plot, parameter[binary_operation[name[abf].dataX + name[xOffset]], name[abf].dataY]] call[name[pylab].ylabel, parameter[binary_operation[constant[raw data (%s)] <ast.Mod object at 0x7da2590d6920> name[abf].units]]] call[name[pylab].tight_layout, parameter[]]
keyword[def] identifier[values_above_sweep] ( identifier[abf] , identifier[dataI] , identifier[dataY] , identifier[ylabel] = literal[string] , identifier[useFigure] = keyword[None] ): literal[string] identifier[xOffset] = identifier[abf] . identifier[currentSweep] * identifier[abf] . identifier[sweepInterval] keyword[if] keyword[not] identifier[useFigure] : identifier[pylab] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] )) identifier[ax] = identifier[pylab] . identifier[subplot] ( literal[int] ) identifier[pylab] . identifier[grid] ( identifier[alpha] = literal[int] ) keyword[if] identifier[len] ( identifier[dataI] ): identifier[pylab] . identifier[plot] ( identifier[abf] . identifier[dataX] [ identifier[dataI] ], identifier[dataY] , literal[string] , identifier[ms] = literal[int] , identifier[alpha] = literal[int] , identifier[color] = identifier[abf] . identifier[colormap] [ identifier[abf] . identifier[currentSweep] ]) identifier[pylab] . identifier[margins] ( literal[int] , literal[int] ) identifier[pylab] . identifier[ylabel] ( identifier[ylabel] ) identifier[pylab] . identifier[subplot] ( literal[int] , identifier[sharex] = identifier[ax] ) identifier[pylab] . identifier[grid] ( identifier[alpha] = literal[int] ) identifier[pylab] . identifier[plot] ( identifier[abf] . identifier[dataX] , identifier[abf] . identifier[dataY] , identifier[color] = identifier[abf] . identifier[colormap] [ identifier[abf] . identifier[currentSweep] ], identifier[alpha] = literal[int] ) identifier[pylab] . identifier[ylabel] ( literal[string] % identifier[abf] . identifier[units] ) identifier[ax2] = identifier[pylab] . identifier[subplot] ( literal[int] ) identifier[pylab] . identifier[grid] ( identifier[alpha] = literal[int] ) keyword[if] identifier[len] ( identifier[dataI] ): identifier[pylab] . identifier[plot] ( identifier[abf] . 
identifier[dataX] [ identifier[dataI] ]+ identifier[xOffset] , identifier[dataY] , literal[string] , identifier[ms] = literal[int] , identifier[alpha] = literal[int] , identifier[color] = identifier[abf] . identifier[colormap] [ identifier[abf] . identifier[currentSweep] ]) identifier[pylab] . identifier[margins] ( literal[int] , literal[int] ) identifier[pylab] . identifier[ylabel] ( identifier[ylabel] ) identifier[pylab] . identifier[subplot] ( literal[int] , identifier[sharex] = identifier[ax2] ) identifier[pylab] . identifier[grid] ( identifier[alpha] = literal[int] ) identifier[pylab] . identifier[plot] ( identifier[abf] . identifier[dataX] + identifier[xOffset] , identifier[abf] . identifier[dataY] , identifier[color] = identifier[abf] . identifier[colormap] [ identifier[abf] . identifier[currentSweep] ]) identifier[pylab] . identifier[ylabel] ( literal[string] % identifier[abf] . identifier[units] ) identifier[pylab] . identifier[tight_layout] ()
def values_above_sweep(abf, dataI, dataY, ylabel='', useFigure=None): """ To make plots like AP frequency over original trace. dataI=[i] #the i of the sweep dataY=[1.234] #something like inst freq """ xOffset = abf.currentSweep * abf.sweepInterval if not useFigure: #just passing the figure makes it persistant! pylab.figure(figsize=(8, 6)) # depends on [control=['if'], data=[]] ax = pylab.subplot(221) pylab.grid(alpha=0.5) if len(dataI): pylab.plot(abf.dataX[dataI], dataY, '.', ms=10, alpha=0.5, color=abf.colormap[abf.currentSweep]) # depends on [control=['if'], data=[]] pylab.margins(0, 0.1) pylab.ylabel(ylabel) pylab.subplot(223, sharex=ax) pylab.grid(alpha=0.5) pylab.plot(abf.dataX, abf.dataY, color=abf.colormap[abf.currentSweep], alpha=0.5) pylab.ylabel('raw data (%s)' % abf.units) ax2 = pylab.subplot(222) pylab.grid(alpha=0.5) if len(dataI): pylab.plot(abf.dataX[dataI] + xOffset, dataY, '.', ms=10, alpha=0.5, color=abf.colormap[abf.currentSweep]) # depends on [control=['if'], data=[]] pylab.margins(0, 0.1) pylab.ylabel(ylabel) pylab.subplot(224, sharex=ax2) pylab.grid(alpha=0.5) pylab.plot(abf.dataX + xOffset, abf.dataY, color=abf.colormap[abf.currentSweep]) pylab.ylabel('raw data (%s)' % abf.units) pylab.tight_layout()
def set_default_keychain(keychain, domain="user", user=None): ''' Set the default keychain keychain The location of the keychain to set as default domain The domain to use valid values are user|system|common|dynamic, the default is user user The user to set the default keychain as CLI Example: .. code-block:: bash salt '*' keychain.set_keychain /Users/fred/Library/Keychains/login.keychain ''' cmd = "security default-keychain -d {0} -s {1}".format(domain, keychain) return __salt__['cmd.run'](cmd, runas=user)
def function[set_default_keychain, parameter[keychain, domain, user]]: constant[ Set the default keychain keychain The location of the keychain to set as default domain The domain to use valid values are user|system|common|dynamic, the default is user user The user to set the default keychain as CLI Example: .. code-block:: bash salt '*' keychain.set_keychain /Users/fred/Library/Keychains/login.keychain ] variable[cmd] assign[=] call[constant[security default-keychain -d {0} -s {1}].format, parameter[name[domain], name[keychain]]] return[call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]]]
keyword[def] identifier[set_default_keychain] ( identifier[keychain] , identifier[domain] = literal[string] , identifier[user] = keyword[None] ): literal[string] identifier[cmd] = literal[string] . identifier[format] ( identifier[domain] , identifier[keychain] ) keyword[return] identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[runas] = identifier[user] )
def set_default_keychain(keychain, domain='user', user=None): """ Set the default keychain keychain The location of the keychain to set as default domain The domain to use valid values are user|system|common|dynamic, the default is user user The user to set the default keychain as CLI Example: .. code-block:: bash salt '*' keychain.set_keychain /Users/fred/Library/Keychains/login.keychain """ cmd = 'security default-keychain -d {0} -s {1}'.format(domain, keychain) return __salt__['cmd.run'](cmd, runas=user)
def get_course_descriptor_content(self, courseid): """ :param courseid: the course id of the course :raise InvalidNameException, CourseNotFoundException, CourseUnreadableException :return: the content of the dict that describes the course """ path = self._get_course_descriptor_path(courseid) return loads_json_or_yaml(path, self._filesystem.get(path).decode("utf-8"))
def function[get_course_descriptor_content, parameter[self, courseid]]: constant[ :param courseid: the course id of the course :raise InvalidNameException, CourseNotFoundException, CourseUnreadableException :return: the content of the dict that describes the course ] variable[path] assign[=] call[name[self]._get_course_descriptor_path, parameter[name[courseid]]] return[call[name[loads_json_or_yaml], parameter[name[path], call[call[name[self]._filesystem.get, parameter[name[path]]].decode, parameter[constant[utf-8]]]]]]
keyword[def] identifier[get_course_descriptor_content] ( identifier[self] , identifier[courseid] ): literal[string] identifier[path] = identifier[self] . identifier[_get_course_descriptor_path] ( identifier[courseid] ) keyword[return] identifier[loads_json_or_yaml] ( identifier[path] , identifier[self] . identifier[_filesystem] . identifier[get] ( identifier[path] ). identifier[decode] ( literal[string] ))
def get_course_descriptor_content(self, courseid): """ :param courseid: the course id of the course :raise InvalidNameException, CourseNotFoundException, CourseUnreadableException :return: the content of the dict that describes the course """ path = self._get_course_descriptor_path(courseid) return loads_json_or_yaml(path, self._filesystem.get(path).decode('utf-8'))
def cutout_source(x_pos, y_pos, image, kernelsize, shift=True): """ cuts out point source (e.g. PSF estimate) out of image and shift it to the center of a pixel :param x_pos: :param y_pos: :param image: :param kernelsize: :return: """ if kernelsize % 2 == 0: raise ValueError("even pixel number kernel size not supported!") x_int = int(round(x_pos)) y_int = int(round(y_pos)) n = len(image) d = (kernelsize - 1)/2 x_max = int(np.minimum(x_int + d + 1, n)) x_min = int(np.maximum(x_int - d, 0)) y_max = int(np.minimum(y_int + d + 1, n)) y_min = int(np.maximum(y_int - d, 0)) image_cut = copy.deepcopy(image[y_min:y_max, x_min:x_max]) shift_x = x_int - x_pos shift_y = y_int - y_pos if shift is True: kernel_shift = de_shift_kernel(image_cut, shift_x, shift_y, iterations=50) else: kernel_shift = image_cut kernel_final = np.zeros((kernelsize, kernelsize)) k_l2_x = int((kernelsize - 1) / 2) k_l2_y = int((kernelsize - 1) / 2) xk_min = np.maximum(0, -x_int + k_l2_x) yk_min = np.maximum(0, -y_int + k_l2_y) xk_max = np.minimum(kernelsize, -x_int + k_l2_x + n) yk_max = np.minimum(kernelsize, -y_int + k_l2_y + n) kernel_final[yk_min:yk_max, xk_min:xk_max] = kernel_shift return kernel_final
def function[cutout_source, parameter[x_pos, y_pos, image, kernelsize, shift]]: constant[ cuts out point source (e.g. PSF estimate) out of image and shift it to the center of a pixel :param x_pos: :param y_pos: :param image: :param kernelsize: :return: ] if compare[binary_operation[name[kernelsize] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da20c6c4af0> variable[x_int] assign[=] call[name[int], parameter[call[name[round], parameter[name[x_pos]]]]] variable[y_int] assign[=] call[name[int], parameter[call[name[round], parameter[name[y_pos]]]]] variable[n] assign[=] call[name[len], parameter[name[image]]] variable[d] assign[=] binary_operation[binary_operation[name[kernelsize] - constant[1]] / constant[2]] variable[x_max] assign[=] call[name[int], parameter[call[name[np].minimum, parameter[binary_operation[binary_operation[name[x_int] + name[d]] + constant[1]], name[n]]]]] variable[x_min] assign[=] call[name[int], parameter[call[name[np].maximum, parameter[binary_operation[name[x_int] - name[d]], constant[0]]]]] variable[y_max] assign[=] call[name[int], parameter[call[name[np].minimum, parameter[binary_operation[binary_operation[name[y_int] + name[d]] + constant[1]], name[n]]]]] variable[y_min] assign[=] call[name[int], parameter[call[name[np].maximum, parameter[binary_operation[name[y_int] - name[d]], constant[0]]]]] variable[image_cut] assign[=] call[name[copy].deepcopy, parameter[call[name[image]][tuple[[<ast.Slice object at 0x7da20c6c4490>, <ast.Slice object at 0x7da20c6c5c90>]]]]] variable[shift_x] assign[=] binary_operation[name[x_int] - name[x_pos]] variable[shift_y] assign[=] binary_operation[name[y_int] - name[y_pos]] if compare[name[shift] is constant[True]] begin[:] variable[kernel_shift] assign[=] call[name[de_shift_kernel], parameter[name[image_cut], name[shift_x], name[shift_y]]] variable[kernel_final] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c6c4ca0>, 
<ast.Name object at 0x7da20c6c40a0>]]]] variable[k_l2_x] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[kernelsize] - constant[1]] / constant[2]]]] variable[k_l2_y] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[kernelsize] - constant[1]] / constant[2]]]] variable[xk_min] assign[=] call[name[np].maximum, parameter[constant[0], binary_operation[<ast.UnaryOp object at 0x7da20c6c4580> + name[k_l2_x]]]] variable[yk_min] assign[=] call[name[np].maximum, parameter[constant[0], binary_operation[<ast.UnaryOp object at 0x7da20c6c79d0> + name[k_l2_y]]]] variable[xk_max] assign[=] call[name[np].minimum, parameter[name[kernelsize], binary_operation[binary_operation[<ast.UnaryOp object at 0x7da20c6c5cf0> + name[k_l2_x]] + name[n]]]] variable[yk_max] assign[=] call[name[np].minimum, parameter[name[kernelsize], binary_operation[binary_operation[<ast.UnaryOp object at 0x7da20c6c4fd0> + name[k_l2_y]] + name[n]]]] call[name[kernel_final]][tuple[[<ast.Slice object at 0x7da20c6c7670>, <ast.Slice object at 0x7da20c6c6680>]]] assign[=] name[kernel_shift] return[name[kernel_final]]
keyword[def] identifier[cutout_source] ( identifier[x_pos] , identifier[y_pos] , identifier[image] , identifier[kernelsize] , identifier[shift] = keyword[True] ): literal[string] keyword[if] identifier[kernelsize] % literal[int] == literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[x_int] = identifier[int] ( identifier[round] ( identifier[x_pos] )) identifier[y_int] = identifier[int] ( identifier[round] ( identifier[y_pos] )) identifier[n] = identifier[len] ( identifier[image] ) identifier[d] =( identifier[kernelsize] - literal[int] )/ literal[int] identifier[x_max] = identifier[int] ( identifier[np] . identifier[minimum] ( identifier[x_int] + identifier[d] + literal[int] , identifier[n] )) identifier[x_min] = identifier[int] ( identifier[np] . identifier[maximum] ( identifier[x_int] - identifier[d] , literal[int] )) identifier[y_max] = identifier[int] ( identifier[np] . identifier[minimum] ( identifier[y_int] + identifier[d] + literal[int] , identifier[n] )) identifier[y_min] = identifier[int] ( identifier[np] . identifier[maximum] ( identifier[y_int] - identifier[d] , literal[int] )) identifier[image_cut] = identifier[copy] . identifier[deepcopy] ( identifier[image] [ identifier[y_min] : identifier[y_max] , identifier[x_min] : identifier[x_max] ]) identifier[shift_x] = identifier[x_int] - identifier[x_pos] identifier[shift_y] = identifier[y_int] - identifier[y_pos] keyword[if] identifier[shift] keyword[is] keyword[True] : identifier[kernel_shift] = identifier[de_shift_kernel] ( identifier[image_cut] , identifier[shift_x] , identifier[shift_y] , identifier[iterations] = literal[int] ) keyword[else] : identifier[kernel_shift] = identifier[image_cut] identifier[kernel_final] = identifier[np] . 
identifier[zeros] (( identifier[kernelsize] , identifier[kernelsize] )) identifier[k_l2_x] = identifier[int] (( identifier[kernelsize] - literal[int] )/ literal[int] ) identifier[k_l2_y] = identifier[int] (( identifier[kernelsize] - literal[int] )/ literal[int] ) identifier[xk_min] = identifier[np] . identifier[maximum] ( literal[int] ,- identifier[x_int] + identifier[k_l2_x] ) identifier[yk_min] = identifier[np] . identifier[maximum] ( literal[int] ,- identifier[y_int] + identifier[k_l2_y] ) identifier[xk_max] = identifier[np] . identifier[minimum] ( identifier[kernelsize] ,- identifier[x_int] + identifier[k_l2_x] + identifier[n] ) identifier[yk_max] = identifier[np] . identifier[minimum] ( identifier[kernelsize] ,- identifier[y_int] + identifier[k_l2_y] + identifier[n] ) identifier[kernel_final] [ identifier[yk_min] : identifier[yk_max] , identifier[xk_min] : identifier[xk_max] ]= identifier[kernel_shift] keyword[return] identifier[kernel_final]
def cutout_source(x_pos, y_pos, image, kernelsize, shift=True): """ cuts out point source (e.g. PSF estimate) out of image and shift it to the center of a pixel :param x_pos: :param y_pos: :param image: :param kernelsize: :return: """ if kernelsize % 2 == 0: raise ValueError('even pixel number kernel size not supported!') # depends on [control=['if'], data=[]] x_int = int(round(x_pos)) y_int = int(round(y_pos)) n = len(image) d = (kernelsize - 1) / 2 x_max = int(np.minimum(x_int + d + 1, n)) x_min = int(np.maximum(x_int - d, 0)) y_max = int(np.minimum(y_int + d + 1, n)) y_min = int(np.maximum(y_int - d, 0)) image_cut = copy.deepcopy(image[y_min:y_max, x_min:x_max]) shift_x = x_int - x_pos shift_y = y_int - y_pos if shift is True: kernel_shift = de_shift_kernel(image_cut, shift_x, shift_y, iterations=50) # depends on [control=['if'], data=[]] else: kernel_shift = image_cut kernel_final = np.zeros((kernelsize, kernelsize)) k_l2_x = int((kernelsize - 1) / 2) k_l2_y = int((kernelsize - 1) / 2) xk_min = np.maximum(0, -x_int + k_l2_x) yk_min = np.maximum(0, -y_int + k_l2_y) xk_max = np.minimum(kernelsize, -x_int + k_l2_x + n) yk_max = np.minimum(kernelsize, -y_int + k_l2_y + n) kernel_final[yk_min:yk_max, xk_min:xk_max] = kernel_shift return kernel_final
def execute_deposit(self, deposit_params, private_key):
    """
    Sign and broadcast a deposit transaction to the Switcheo Smart Contract.

    The deposit created by the create-deposit call is signed with the
    blockchain-specific signing function and then broadcast::

        execute_deposit(deposit_params=create_deposit, private_key=KeyPair)
        execute_deposit(deposit_params=create_deposit, private_key=eth_private_key)

    Expected return value::

        {
            'result': 'ok'
        }

    :param deposit_params: Parameters from the API to be signed and deposited
        to the Switcheo Smart Contract.
    :type deposit_params: dict
    :param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet
        being used to sign the deposit message.
    :type private_key: KeyPair or str
    :return: Dictionary with the result status of the deposit attempt.
    """
    # Pick the signing routine for the active blockchain (NEO or ETH).
    sign_deposit = self.sign_execute_deposit_function[self.blockchain]
    signed_payload = sign_deposit(deposit_params, private_key)
    broadcast_path = '/deposits/{}/broadcast'.format(deposit_params['id'])
    return self.request.post(path=broadcast_path, json_data=signed_payload)
def function[execute_deposit, parameter[self, deposit_params, private_key]]: constant[ Function to execute the deposit request by signing the transaction generated by the create deposit function. Execution of this function is as follows:: execute_deposit(deposit_params=create_deposit, private_key=KeyPair) execute_deposit(deposit_params=create_deposit, private_key=eth_private_key) The expected return result for this function is as follows:: { 'result': 'ok' } :param deposit_params: Parameters from the API to be signed and deposited to the Switcheo Smart Contract. :type deposit_params: dict :param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message. :type private_key: KeyPair or str :return: Dictionary with the result status of the deposit attempt. ] variable[deposit_id] assign[=] call[name[deposit_params]][constant[id]] variable[api_params] assign[=] call[call[name[self].sign_execute_deposit_function][name[self].blockchain], parameter[name[deposit_params], name[private_key]]] return[call[name[self].request.post, parameter[]]]
keyword[def] identifier[execute_deposit] ( identifier[self] , identifier[deposit_params] , identifier[private_key] ): literal[string] identifier[deposit_id] = identifier[deposit_params] [ literal[string] ] identifier[api_params] = identifier[self] . identifier[sign_execute_deposit_function] [ identifier[self] . identifier[blockchain] ]( identifier[deposit_params] , identifier[private_key] ) keyword[return] identifier[self] . identifier[request] . identifier[post] ( identifier[path] = literal[string] . identifier[format] ( identifier[deposit_id] ), identifier[json_data] = identifier[api_params] )
def execute_deposit(self, deposit_params, private_key): """ Function to execute the deposit request by signing the transaction generated by the create deposit function. Execution of this function is as follows:: execute_deposit(deposit_params=create_deposit, private_key=KeyPair) execute_deposit(deposit_params=create_deposit, private_key=eth_private_key) The expected return result for this function is as follows:: { 'result': 'ok' } :param deposit_params: Parameters from the API to be signed and deposited to the Switcheo Smart Contract. :type deposit_params: dict :param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message. :type private_key: KeyPair or str :return: Dictionary with the result status of the deposit attempt. """ deposit_id = deposit_params['id'] api_params = self.sign_execute_deposit_function[self.blockchain](deposit_params, private_key) return self.request.post(path='/deposits/{}/broadcast'.format(deposit_id), json_data=api_params)
def download(url, params=None, accept="xml", **kwds):
    """Helper function to download a file and return its content.

    Parameters
    ----------
    url : string
        The URL to be parsed.

    params : dict (optional)
        Dictionary containing query parameters.  For required keys
        and accepted values see e.g.
        https://api.elsevier.com/documentation/AuthorRetrievalAPI.wadl

    accept : str (optional, default=xml)
        mime type of the file to be downloaded.  Accepted values are json,
        atom+xml, xml.

    kwds : key-value parings, optional
        Keywords passed on to as query parameters.  Must contain fields
        and values specified in the respective API specification.

    Raises
    ------
    ScopusHtmlError
        If the status of the response is not ok.

    ValueError
        If the accept parameter is not one of the accepted values.

    Returns
    -------
    resp : byte-like object
        The content of the file, which needs to be serialized.
    """
    # Value check
    accepted = ("json", "xml", "atom+xml")
    if accept.lower() not in accepted:
        raise ValueError('accept parameter must be one of ' +
                         ', '.join(accepted))
    # Get credentials from the configuration; the institutional token is
    # only attached when it has been configured.
    key = config.get('Authentication', 'APIKey')
    header = {'X-ELS-APIKey': key}
    if config.has_option('Authentication', 'InstToken'):
        token = config.get('Authentication', 'InstToken')
        header.update({'X-ELS-APIKey': key, 'X-ELS-Insttoken': token})
    header.update({'Accept': 'application/{}'.format(accept)})
    # Perform request.  BUG FIX: params defaults to None, so calling
    # params.update(**kwds) unconditionally raised AttributeError whenever
    # the caller did not pass an explicit dict.
    if params is None:
        params = {}
    params.update(**kwds)
    resp = requests.get(url, headers=header, params=params)
    # Raise error if necessary.  `errors` maps known HTTP error codes to
    # exception classes; unknown codes fall through to raise_for_status(),
    # which is a no-op for successful responses.
    try:
        reason = resp.reason.upper() + " for url: " + url
        raise errors[resp.status_code](reason)
    except KeyError:  # Exception not specified in scopus
        resp.raise_for_status()  # Will pass when everything is ok
    return resp
def function[download, parameter[url, params, accept]]: constant[Helper function to download a file and return its content. Parameters ---------- url : string The URL to be parsed. params : dict (optional) Dictionary containing query parameters. For required keys and accepted values see e.g. https://api.elsevier.com/documentation/AuthorRetrievalAPI.wadl accept : str (optional, default=xml) mime type of the file to be downloaded. Accepted values are json, atom+xml, xml. kwds : key-value parings, optional Keywords passed on to as query parameters. Must contain fields and values specified in the respective API specification. Raises ------ ScopusHtmlError If the status of the response is not ok. ValueError If the accept parameter is not one of the accepted values. Returns ------- resp : byte-like object The content of the file, which needs to be serialized. ] variable[accepted] assign[=] tuple[[<ast.Constant object at 0x7da2047e8a30>, <ast.Constant object at 0x7da2047e95d0>, <ast.Constant object at 0x7da2047e9e70>]] if compare[call[name[accept].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[accepted]] begin[:] <ast.Raise object at 0x7da2047eb4c0> variable[key] assign[=] call[name[config].get, parameter[constant[Authentication], constant[APIKey]]] variable[header] assign[=] dictionary[[<ast.Constant object at 0x7da2047e85e0>], [<ast.Name object at 0x7da2047e9240>]] if call[name[config].has_option, parameter[constant[Authentication], constant[InstToken]]] begin[:] variable[token] assign[=] call[name[config].get, parameter[constant[Authentication], constant[InstToken]]] call[name[header].update, parameter[dictionary[[<ast.Constant object at 0x7da2047e8f40>, <ast.Constant object at 0x7da2047eb520>], [<ast.Name object at 0x7da2047e9840>, <ast.Name object at 0x7da2047e98d0>]]]] call[name[header].update, parameter[dictionary[[<ast.Constant object at 0x7da2047e8250>], [<ast.Call object at 0x7da2047e8a60>]]]] call[name[params].update, parameter[]] variable[resp] 
assign[=] call[name[requests].get, parameter[name[url]]] <ast.Try object at 0x7da2047e8fd0> return[name[resp]]
keyword[def] identifier[download] ( identifier[url] , identifier[params] = keyword[None] , identifier[accept] = literal[string] ,** identifier[kwds] ): literal[string] identifier[accepted] =( literal[string] , literal[string] , literal[string] ) keyword[if] identifier[accept] . identifier[lower] () keyword[not] keyword[in] identifier[accepted] : keyword[raise] identifier[ValueError] ( literal[string] + literal[string] . identifier[join] ( identifier[accepted] )) identifier[key] = identifier[config] . identifier[get] ( literal[string] , literal[string] ) identifier[header] ={ literal[string] : identifier[key] } keyword[if] identifier[config] . identifier[has_option] ( literal[string] , literal[string] ): identifier[token] = identifier[config] . identifier[get] ( literal[string] , literal[string] ) identifier[header] . identifier[update] ({ literal[string] : identifier[key] , literal[string] : identifier[token] }) identifier[header] . identifier[update] ({ literal[string] : literal[string] . identifier[format] ( identifier[accept] )}) identifier[params] . identifier[update] (** identifier[kwds] ) identifier[resp] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[header] , identifier[params] = identifier[params] ) keyword[try] : identifier[reason] = identifier[resp] . identifier[reason] . identifier[upper] ()+ literal[string] + identifier[url] keyword[raise] identifier[errors] [ identifier[resp] . identifier[status_code] ]( identifier[reason] ) keyword[except] identifier[KeyError] : identifier[resp] . identifier[raise_for_status] () keyword[return] identifier[resp]
def download(url, params=None, accept='xml', **kwds): """Helper function to download a file and return its content. Parameters ---------- url : string The URL to be parsed. params : dict (optional) Dictionary containing query parameters. For required keys and accepted values see e.g. https://api.elsevier.com/documentation/AuthorRetrievalAPI.wadl accept : str (optional, default=xml) mime type of the file to be downloaded. Accepted values are json, atom+xml, xml. kwds : key-value parings, optional Keywords passed on to as query parameters. Must contain fields and values specified in the respective API specification. Raises ------ ScopusHtmlError If the status of the response is not ok. ValueError If the accept parameter is not one of the accepted values. Returns ------- resp : byte-like object The content of the file, which needs to be serialized. """ # Value check accepted = ('json', 'xml', 'atom+xml') if accept.lower() not in accepted: raise ValueError('accept parameter must be one of ' + ', '.join(accepted)) # depends on [control=['if'], data=['accepted']] # Get credentials key = config.get('Authentication', 'APIKey') header = {'X-ELS-APIKey': key} if config.has_option('Authentication', 'InstToken'): token = config.get('Authentication', 'InstToken') header.update({'X-ELS-APIKey': key, 'X-ELS-Insttoken': token}) # depends on [control=['if'], data=[]] header.update({'Accept': 'application/{}'.format(accept)}) # Perform request params.update(**kwds) resp = requests.get(url, headers=header, params=params) # Raise error if necessary try: reason = resp.reason.upper() + ' for url: ' + url raise errors[resp.status_code](reason) # depends on [control=['try'], data=[]] except KeyError: # Exception not specified in scopus resp.raise_for_status() # Will pass when everything is ok # depends on [control=['except'], data=[]] return resp
def __serial_price(self, rows=6):
    """ Return one price series from the raw data, ordered oldest to newest.

    :param rows: column index of the price field within each raw-data row;
                 defaults to 6, the closing price
                 (i.e. ``self.__serial_price(6)``).
    :rtype: list
    :returns: the selected price series as a list of floats.
    """
    return [float(row[rows]) for row in self.__raw_data]
def function[__serial_price, parameter[self, rows]]: constant[ 取出某一價格序列 *(舊→新)* 預設序列收盤價 *(self.__serial_price(6))* :rtype: list :returns: 預設序列收盤價 *(self.__serial_price(6))* ] variable[result] assign[=] <ast.GeneratorExp object at 0x7da1b0717700> return[call[name[list], parameter[name[result]]]]
keyword[def] identifier[__serial_price] ( identifier[self] , identifier[rows] = literal[int] ): literal[string] identifier[result] =( identifier[float] ( identifier[i] [ identifier[rows] ]) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[__raw_data] ) keyword[return] identifier[list] ( identifier[result] )
def __serial_price(self, rows=6): """ 取出某一價格序列 *(舊→新)* 預設序列收盤價 *(self.__serial_price(6))* :rtype: list :returns: 預設序列收盤價 *(self.__serial_price(6))* """ result = (float(i[rows]) for i in self.__raw_data) return list(result)
async def setHiveKey(self, path, value):
    '''
    Set or change the value of a key in the cell default hive
    '''
    # Permission is the 'hive:set' prefix followed by the key path elements.
    self.user.allowed(('hive:set',) + path)
    return await self.cell.hive.set(path, value)
<ast.AsyncFunctionDef object at 0x7da18eb57670>
keyword[async] keyword[def] identifier[setHiveKey] ( identifier[self] , identifier[path] , identifier[value] ): literal[string] identifier[perm] =( literal[string] ,)+ identifier[path] identifier[self] . identifier[user] . identifier[allowed] ( identifier[perm] ) keyword[return] keyword[await] identifier[self] . identifier[cell] . identifier[hive] . identifier[set] ( identifier[path] , identifier[value] )
async def setHiveKey(self, path, value): """ Set or change the value of a key in the cell default hive """ perm = ('hive:set',) + path self.user.allowed(perm) return await self.cell.hive.set(path, value)
def describe(self, **kwargs):
    """
    Describe this field instance for purpose of self-documentation.

    Args:
        kwargs (dict): dictionary of additional description items for
          extending default description

    Returns:
        dict: dictionary of description items

    Suggested way for overriding description fields or extending it with
    additional items is calling super class method with new/overriden
    fields passed as keyword arguments like following:

    .. code-block:: python

        class DummyField(BaseField):
            def description(self, **kwargs):
                super().describe(is_dummy=True, **kwargs)
    """
    # Collection fields report "list of <type>", scalars just "<type>".
    type_label = "list of {}".format(self.type) if self.many else self.type
    description = dict(
        label=self.label,
        details=inspect.cleandoc(self.details),
        type=type_label,
        spec=self.spec,
        read_only=self.read_only,
        write_only=self.write_only,
        allow_null=self.allow_null,
    )
    # Caller-supplied items extend (and may override) the defaults.
    description.update(kwargs)
    return description
def function[describe, parameter[self]]: constant[ Describe this field instance for purpose of self-documentation. Args: kwargs (dict): dictionary of additional description items for extending default description Returns: dict: dictionary of description items Suggested way for overriding description fields or extending it with additional items is calling super class method with new/overriden fields passed as keyword arguments like following: .. code-block:: python class DummyField(BaseField): def description(self, **kwargs): super().describe(is_dummy=True, **kwargs) ] variable[description] assign[=] dictionary[[<ast.Constant object at 0x7da18f09e0e0>, <ast.Constant object at 0x7da18f09eaa0>, <ast.Constant object at 0x7da18f09d870>, <ast.Constant object at 0x7da18f09f8b0>, <ast.Constant object at 0x7da18f09f820>, <ast.Constant object at 0x7da18f09c2b0>, <ast.Constant object at 0x7da18f09dcf0>], [<ast.Attribute object at 0x7da18f09cc70>, <ast.Call object at 0x7da18f09e3b0>, <ast.IfExp object at 0x7da18f09c040>, <ast.Attribute object at 0x7da18f09ca30>, <ast.Attribute object at 0x7da18f09d4e0>, <ast.Attribute object at 0x7da18f09c940>, <ast.Attribute object at 0x7da18f09c0a0>]] call[name[description].update, parameter[name[kwargs]]] return[name[description]]
keyword[def] identifier[describe] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[description] ={ literal[string] : identifier[self] . identifier[label] , literal[string] : identifier[inspect] . identifier[cleandoc] ( identifier[self] . identifier[details] ), literal[string] : literal[string] . identifier[format] ( identifier[self] . identifier[type] ) keyword[if] identifier[self] . identifier[many] keyword[else] identifier[self] . identifier[type] , literal[string] : identifier[self] . identifier[spec] , literal[string] : identifier[self] . identifier[read_only] , literal[string] : identifier[self] . identifier[write_only] , literal[string] : identifier[self] . identifier[allow_null] , } identifier[description] . identifier[update] ( identifier[kwargs] ) keyword[return] identifier[description]
def describe(self, **kwargs): """ Describe this field instance for purpose of self-documentation. Args: kwargs (dict): dictionary of additional description items for extending default description Returns: dict: dictionary of description items Suggested way for overriding description fields or extending it with additional items is calling super class method with new/overriden fields passed as keyword arguments like following: .. code-block:: python class DummyField(BaseField): def description(self, **kwargs): super().describe(is_dummy=True, **kwargs) """ description = {'label': self.label, 'details': inspect.cleandoc(self.details), 'type': 'list of {}'.format(self.type) if self.many else self.type, 'spec': self.spec, 'read_only': self.read_only, 'write_only': self.write_only, 'allow_null': self.allow_null} description.update(kwargs) return description
def from_parfiles(cls,pst,parfile_names,real_names=None):
    """ create a parameter ensemble from parfiles. Accepts parfiles with fewer
    parameters than the control file (those get NaNs in the ensemble) or with
    extra parameters (those get dropped).

    Parameters:
        pst : pyemu.Pst or str
            control file instance, or a path to a control file (loaded here)

        parfile_names : list of str
            par file names

        real_names : list of str (optional)
            realization names, one per par file.  If None, a simple integer
            counter is used.

    Returns:
        pyemu.ParameterEnsemble

    NOTE(review): ``cls`` is unused; the return is built explicitly via
    ``ParameterEnsemble.from_dataframe`` -- confirm subclassing is not expected.
    """
    # accept either a Pst instance or a path to a control file
    if isinstance(pst,str):
        pst = pyemu.Pst(pst)
    dfs = {}
    if real_names is not None:
        # one realization name per par file
        assert len(real_names) == len(parfile_names)
    else:
        real_names = np.arange(len(parfile_names))

    for rname,pfile in zip(real_names,parfile_names):
        assert os.path.exists(pfile), "ParameterEnsemble.read_parfiles() error: " + \
                                      "file: {0} not found".format(pfile)
        df = read_parfile(pfile)
        # check for scale differences between the par file and the control file
        diff = df.scale - pst.parameter_data.scale
        if diff.apply(np.abs).sum() > 0.0:
            # NOTE(review): the warning text says the scale is being applied,
            # but the application line below is commented out -- confirm intent
            warnings.warn("differences in scale detected, applying scale in par file",
                          PyemuWarning)
            #df.loc[:,"parval1"] *= df.scale

        # keep only the parameter values for this realization
        dfs[rname] = df.parval1.values

    # realizations as rows, parameters as columns; the column labels come from
    # the last par file read (all par files are assumed to share an index)
    df_all = pd.DataFrame(data=dfs).T
    df_all.columns = df.index

    if len(pst.par_names) != df_all.shape[1]:
        # reconcile the ensemble columns with the control file parameter set
        pset = set(pst.par_names)
        dset = set(df_all.columns)
        diff = pset.difference(dset)
        if len(diff) > 0:
            # control-file parameters missing from the par files -> NaN columns
            warnings.warn("the following parameters are not in the par files (getting NaNs) :{0}".
                          format(','.join(diff)),PyemuWarning)
            blank_df = pd.DataFrame(index=df_all.index,columns=diff)

            df_all = pd.concat([df_all,blank_df],axis=1)

        diff = dset.difference(pset)
        if len(diff) > 0:
            # par-file parameters not present in the control file -> dropped by
            # reindexing to the control file's parameter order
            warnings.warn("the following par file parameters are not in the control (being dropped):{0}".
                          format(','.join(diff)),PyemuWarning)
            df_all = df_all.loc[:, pst.par_names]

    return ParameterEnsemble.from_dataframe(df=df_all,pst=pst)
def function[from_parfiles, parameter[cls, pst, parfile_names, real_names]]: constant[ create a parameter ensemble from parfiles. Accepts parfiles with less than the parameters in the control (get NaNs in the ensemble) or extra parameters in the parfiles (get dropped) Parameters: pst : pyemu.Pst parfile_names : list of str par file names real_names : str optional list of realization names. If None, a single integer counter is used Returns: pyemu.ParameterEnsemble ] if call[name[isinstance], parameter[name[pst], name[str]]] begin[:] variable[pst] assign[=] call[name[pyemu].Pst, parameter[name[pst]]] variable[dfs] assign[=] dictionary[[], []] if compare[name[real_names] is_not constant[None]] begin[:] assert[compare[call[name[len], parameter[name[real_names]]] equal[==] call[name[len], parameter[name[parfile_names]]]]] for taget[tuple[[<ast.Name object at 0x7da1b2393070>, <ast.Name object at 0x7da1b2393040>]]] in starred[call[name[zip], parameter[name[real_names], name[parfile_names]]]] begin[:] assert[call[name[os].path.exists, parameter[name[pfile]]]] variable[df] assign[=] call[name[read_parfile], parameter[name[pfile]]] variable[diff] assign[=] binary_operation[name[df].scale - name[pst].parameter_data.scale] if compare[call[call[name[diff].apply, parameter[name[np].abs]].sum, parameter[]] greater[>] constant[0.0]] begin[:] call[name[warnings].warn, parameter[constant[differences in scale detected, applying scale in par file], name[PyemuWarning]]] call[name[dfs]][name[rname]] assign[=] name[df].parval1.values variable[df_all] assign[=] call[name[pd].DataFrame, parameter[]].T name[df_all].columns assign[=] name[df].index if compare[call[name[len], parameter[name[pst].par_names]] not_equal[!=] call[name[df_all].shape][constant[1]]] begin[:] variable[pset] assign[=] call[name[set], parameter[name[pst].par_names]] variable[dset] assign[=] call[name[set], parameter[name[df_all].columns]] variable[diff] assign[=] call[name[pset].difference, parameter[name[dset]]] if 
compare[call[name[len], parameter[name[diff]]] greater[>] constant[0]] begin[:] call[name[warnings].warn, parameter[call[constant[the following parameters are not in the par files (getting NaNs) :{0}].format, parameter[call[constant[,].join, parameter[name[diff]]]]], name[PyemuWarning]]] variable[blank_df] assign[=] call[name[pd].DataFrame, parameter[]] variable[df_all] assign[=] call[name[pd].concat, parameter[list[[<ast.Name object at 0x7da1b2391780>, <ast.Name object at 0x7da1b2391750>]]]] variable[diff] assign[=] call[name[dset].difference, parameter[name[pset]]] if compare[call[name[len], parameter[name[diff]]] greater[>] constant[0]] begin[:] call[name[warnings].warn, parameter[call[constant[the following par file parameters are not in the control (being dropped):{0}].format, parameter[call[constant[,].join, parameter[name[diff]]]]], name[PyemuWarning]]] variable[df_all] assign[=] call[name[df_all].loc][tuple[[<ast.Slice object at 0x7da1b23910f0>, <ast.Attribute object at 0x7da1b23910c0>]]] return[call[name[ParameterEnsemble].from_dataframe, parameter[]]]
keyword[def] identifier[from_parfiles] ( identifier[cls] , identifier[pst] , identifier[parfile_names] , identifier[real_names] = keyword[None] ): literal[string] keyword[if] identifier[isinstance] ( identifier[pst] , identifier[str] ): identifier[pst] = identifier[pyemu] . identifier[Pst] ( identifier[pst] ) identifier[dfs] ={} keyword[if] identifier[real_names] keyword[is] keyword[not] keyword[None] : keyword[assert] identifier[len] ( identifier[real_names] )== identifier[len] ( identifier[parfile_names] ) keyword[else] : identifier[real_names] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[parfile_names] )) keyword[for] identifier[rname] , identifier[pfile] keyword[in] identifier[zip] ( identifier[real_names] , identifier[parfile_names] ): keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[pfile] ), literal[string] + literal[string] . identifier[format] ( identifier[pfile] ) identifier[df] = identifier[read_parfile] ( identifier[pfile] ) identifier[diff] = identifier[df] . identifier[scale] - identifier[pst] . identifier[parameter_data] . identifier[scale] keyword[if] identifier[diff] . identifier[apply] ( identifier[np] . identifier[abs] ). identifier[sum] ()> literal[int] : identifier[warnings] . identifier[warn] ( literal[string] , identifier[PyemuWarning] ) identifier[dfs] [ identifier[rname] ]= identifier[df] . identifier[parval1] . identifier[values] identifier[df_all] = identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[dfs] ). identifier[T] identifier[df_all] . identifier[columns] = identifier[df] . identifier[index] keyword[if] identifier[len] ( identifier[pst] . identifier[par_names] )!= identifier[df_all] . identifier[shape] [ literal[int] ]: identifier[pset] = identifier[set] ( identifier[pst] . identifier[par_names] ) identifier[dset] = identifier[set] ( identifier[df_all] . identifier[columns] ) identifier[diff] = identifier[pset] . 
identifier[difference] ( identifier[dset] ) keyword[if] identifier[len] ( identifier[diff] )> literal[int] : identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[diff] )), identifier[PyemuWarning] ) identifier[blank_df] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[df_all] . identifier[index] , identifier[columns] = identifier[diff] ) identifier[df_all] = identifier[pd] . identifier[concat] ([ identifier[df_all] , identifier[blank_df] ], identifier[axis] = literal[int] ) identifier[diff] = identifier[dset] . identifier[difference] ( identifier[pset] ) keyword[if] identifier[len] ( identifier[diff] )> literal[int] : identifier[warnings] . identifier[warn] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[diff] )), identifier[PyemuWarning] ) identifier[df_all] = identifier[df_all] . identifier[loc] [:, identifier[pst] . identifier[par_names] ] keyword[return] identifier[ParameterEnsemble] . identifier[from_dataframe] ( identifier[df] = identifier[df_all] , identifier[pst] = identifier[pst] )
def from_parfiles(cls, pst, parfile_names, real_names=None): """ create a parameter ensemble from parfiles. Accepts parfiles with less than the parameters in the control (get NaNs in the ensemble) or extra parameters in the parfiles (get dropped) Parameters: pst : pyemu.Pst parfile_names : list of str par file names real_names : str optional list of realization names. If None, a single integer counter is used Returns: pyemu.ParameterEnsemble """ if isinstance(pst, str): pst = pyemu.Pst(pst) # depends on [control=['if'], data=[]] dfs = {} if real_names is not None: assert len(real_names) == len(parfile_names) # depends on [control=['if'], data=['real_names']] else: real_names = np.arange(len(parfile_names)) for (rname, pfile) in zip(real_names, parfile_names): assert os.path.exists(pfile), 'ParameterEnsemble.read_parfiles() error: ' + 'file: {0} not found'.format(pfile) df = read_parfile(pfile) #check for scale differences - I don't who is dumb enough #to change scale between par files and pst... 
diff = df.scale - pst.parameter_data.scale if diff.apply(np.abs).sum() > 0.0: warnings.warn('differences in scale detected, applying scale in par file', PyemuWarning) # depends on [control=['if'], data=[]] #df.loc[:,"parval1"] *= df.scale dfs[rname] = df.parval1.values # depends on [control=['for'], data=[]] df_all = pd.DataFrame(data=dfs).T df_all.columns = df.index if len(pst.par_names) != df_all.shape[1]: #if len(pst.par_names) < df_all.shape[1]: # raise Exception("pst is not compatible with par files") pset = set(pst.par_names) dset = set(df_all.columns) diff = pset.difference(dset) if len(diff) > 0: warnings.warn('the following parameters are not in the par files (getting NaNs) :{0}'.format(','.join(diff)), PyemuWarning) blank_df = pd.DataFrame(index=df_all.index, columns=diff) df_all = pd.concat([df_all, blank_df], axis=1) # depends on [control=['if'], data=[]] diff = dset.difference(pset) if len(diff) > 0: warnings.warn('the following par file parameters are not in the control (being dropped):{0}'.format(','.join(diff)), PyemuWarning) df_all = df_all.loc[:, pst.par_names] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return ParameterEnsemble.from_dataframe(df=df_all, pst=pst)
def model_alias(vk, model):
    """Fill the model with alias since V1"""
    registry = vk['registry']
    aliases = {}
    # types: only handle types can carry an alias
    for type_node in registry['types']['type']:
        if type_node.get('@category', None) == 'handle' and type_node.get('@alias'):
            aliases[type_node['@alias']] = type_node['@name']
    # commands
    for command_node in registry['commands']['command']:
        if command_node.get('@alias'):
            aliases[command_node['@alias']] = command_node['@name']
    model['alias'] = aliases
def function[model_alias, parameter[vk, model]]: constant[Fill the model with alias since V1] call[name[model]][constant[alias]] assign[=] dictionary[[], []] for taget[name[s]] in starred[call[call[call[name[vk]][constant[registry]]][constant[types]]][constant[type]]] begin[:] if <ast.BoolOp object at 0x7da1b0792bc0> begin[:] call[call[name[model]][constant[alias]]][call[name[s]][constant[@alias]]] assign[=] call[name[s]][constant[@name]] for taget[name[c]] in starred[call[call[call[name[vk]][constant[registry]]][constant[commands]]][constant[command]]] begin[:] if call[name[c].get, parameter[constant[@alias]]] begin[:] call[call[name[model]][constant[alias]]][call[name[c]][constant[@alias]]] assign[=] call[name[c]][constant[@name]]
keyword[def] identifier[model_alias] ( identifier[vk] , identifier[model] ): literal[string] identifier[model] [ literal[string] ]={} keyword[for] identifier[s] keyword[in] identifier[vk] [ literal[string] ][ literal[string] ][ literal[string] ]: keyword[if] identifier[s] . identifier[get] ( literal[string] , keyword[None] )== literal[string] keyword[and] identifier[s] . identifier[get] ( literal[string] ): identifier[model] [ literal[string] ][ identifier[s] [ literal[string] ]]= identifier[s] [ literal[string] ] keyword[for] identifier[c] keyword[in] identifier[vk] [ literal[string] ][ literal[string] ][ literal[string] ]: keyword[if] identifier[c] . identifier[get] ( literal[string] ): identifier[model] [ literal[string] ][ identifier[c] [ literal[string] ]]= identifier[c] [ literal[string] ]
def model_alias(vk, model): """Fill the model with alias since V1""" model['alias'] = {} # types for s in vk['registry']['types']['type']: if s.get('@category', None) == 'handle' and s.get('@alias'): model['alias'][s['@alias']] = s['@name'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] # commands for c in vk['registry']['commands']['command']: if c.get('@alias'): model['alias'][c['@alias']] = c['@name'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
def new(self, base: pathlib.PurePath = pathlib.PurePath(), include_intermediates: bool = True) -> Iterator[str]:
    """
    Find the list of new paths in this comparison.

    :param base: The base directory to prepend to the right entity's name.
    :param include_intermediates: Whether to include new non-empty directories in the returned
                                  iterable. If you only care about files, or are using flat
                                  key-based storage system like S3 where directories are a made-up
                                  concept, this can be set to false.
                                  NOTE(review): unused in this method's body -- presumably
                                  consumed by overrides elsewhere; confirm.
    :return: An iterator of the new paths.
    """
    if not self.is_new:
        return
    yield str(base / self.right.name)
def function[new, parameter[self, base, include_intermediates]]: constant[ Find the list of new paths in this comparison. :param base: The base directory to prepend to the right entity's name. :param include_intermediates: Whether to include new non-empty directories in the returned iterable. If you only care about files, or are using flat key-based storage system like S3 where directories are a made-up concept, this can be set to false. :return: An iterator of the new paths. ] if name[self].is_new begin[:] <ast.Yield object at 0x7da1b22409d0>
keyword[def] identifier[new] ( identifier[self] , identifier[base] : identifier[pathlib] . identifier[PurePath] = identifier[pathlib] . identifier[PurePath] (), identifier[include_intermediates] : identifier[bool] = keyword[True] )-> identifier[Iterator] [ identifier[str] ]: literal[string] keyword[if] identifier[self] . identifier[is_new] : keyword[yield] identifier[str] ( identifier[base] / identifier[self] . identifier[right] . identifier[name] )
def new(self, base: pathlib.PurePath=pathlib.PurePath(), include_intermediates: bool=True) -> Iterator[str]: """ Find the list of new paths in this comparison. :param base: The base directory to prepend to the right entity's name. :param include_intermediates: Whether to include new non-empty directories in the returned iterable. If you only care about files, or are using flat key-based storage system like S3 where directories are a made-up concept, this can be set to false. :return: An iterator of the new paths. """ if self.is_new: yield str(base / self.right.name) # depends on [control=['if'], data=[]]
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, variance=None, Z_tilde=None): """ Returns a Posterior class containing essential quantities of the posterior """ if mean_function is None: m = 0 else: m = mean_function.f(X) if variance is None: variance = likelihood.gaussian_variance(Y_metadata) YYT_factor = Y-m if K is None: K = kern.K(X) Ky = K.copy() diag.add(Ky, variance+1e-8) Wi, LW, LWi, W_logdet = pdinv(Ky) alpha, _ = dpotrs(LW, YYT_factor, lower=1) log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor)) if Z_tilde is not None: # This is a correction term for the log marginal likelihood # In EP this is log Z_tilde, which is the difference between the # Gaussian marginal and Z_EP log_marginal += Z_tilde dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi) dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK), Y_metadata) return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}
def function[inference, parameter[self, kern, X, likelihood, Y, mean_function, Y_metadata, K, variance, Z_tilde]]: constant[ Returns a Posterior class containing essential quantities of the posterior ] if compare[name[mean_function] is constant[None]] begin[:] variable[m] assign[=] constant[0] if compare[name[variance] is constant[None]] begin[:] variable[variance] assign[=] call[name[likelihood].gaussian_variance, parameter[name[Y_metadata]]] variable[YYT_factor] assign[=] binary_operation[name[Y] - name[m]] if compare[name[K] is constant[None]] begin[:] variable[K] assign[=] call[name[kern].K, parameter[name[X]]] variable[Ky] assign[=] call[name[K].copy, parameter[]] call[name[diag].add, parameter[name[Ky], binary_operation[name[variance] + constant[1e-08]]]] <ast.Tuple object at 0x7da1b1c0e2f0> assign[=] call[name[pdinv], parameter[name[Ky]]] <ast.Tuple object at 0x7da1b1c0c550> assign[=] call[name[dpotrs], parameter[name[LW], name[YYT_factor]]] variable[log_marginal] assign[=] binary_operation[constant[0.5] * binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b1c0d7e0> * name[log_2_pi]] - binary_operation[call[name[Y].shape][constant[1]] * name[W_logdet]]] - call[name[np].sum, parameter[binary_operation[name[alpha] * name[YYT_factor]]]]]] if compare[name[Z_tilde] is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da1b1c0c040> variable[dL_dK] assign[=] binary_operation[constant[0.5] * binary_operation[call[name[tdot], parameter[name[alpha]]] - binary_operation[call[name[Y].shape][constant[1]] * name[Wi]]]] variable[dL_dthetaL] assign[=] call[name[likelihood].exact_inference_gradients, parameter[call[name[np].diag, parameter[name[dL_dK]]], name[Y_metadata]]] return[tuple[[<ast.Call object at 0x7da1b1c0c3a0>, <ast.Name object at 0x7da1b1c0cfa0>, <ast.Dict object at 0x7da1b1c0ec50>]]]
keyword[def] identifier[inference] ( identifier[self] , identifier[kern] , identifier[X] , identifier[likelihood] , identifier[Y] , identifier[mean_function] = keyword[None] , identifier[Y_metadata] = keyword[None] , identifier[K] = keyword[None] , identifier[variance] = keyword[None] , identifier[Z_tilde] = keyword[None] ): literal[string] keyword[if] identifier[mean_function] keyword[is] keyword[None] : identifier[m] = literal[int] keyword[else] : identifier[m] = identifier[mean_function] . identifier[f] ( identifier[X] ) keyword[if] identifier[variance] keyword[is] keyword[None] : identifier[variance] = identifier[likelihood] . identifier[gaussian_variance] ( identifier[Y_metadata] ) identifier[YYT_factor] = identifier[Y] - identifier[m] keyword[if] identifier[K] keyword[is] keyword[None] : identifier[K] = identifier[kern] . identifier[K] ( identifier[X] ) identifier[Ky] = identifier[K] . identifier[copy] () identifier[diag] . identifier[add] ( identifier[Ky] , identifier[variance] + literal[int] ) identifier[Wi] , identifier[LW] , identifier[LWi] , identifier[W_logdet] = identifier[pdinv] ( identifier[Ky] ) identifier[alpha] , identifier[_] = identifier[dpotrs] ( identifier[LW] , identifier[YYT_factor] , identifier[lower] = literal[int] ) identifier[log_marginal] = literal[int] *(- identifier[Y] . identifier[size] * identifier[log_2_pi] - identifier[Y] . identifier[shape] [ literal[int] ]* identifier[W_logdet] - identifier[np] . identifier[sum] ( identifier[alpha] * identifier[YYT_factor] )) keyword[if] identifier[Z_tilde] keyword[is] keyword[not] keyword[None] : identifier[log_marginal] += identifier[Z_tilde] identifier[dL_dK] = literal[int] *( identifier[tdot] ( identifier[alpha] )- identifier[Y] . identifier[shape] [ literal[int] ]* identifier[Wi] ) identifier[dL_dthetaL] = identifier[likelihood] . identifier[exact_inference_gradients] ( identifier[np] . 
identifier[diag] ( identifier[dL_dK] ), identifier[Y_metadata] ) keyword[return] identifier[Posterior] ( identifier[woodbury_chol] = identifier[LW] , identifier[woodbury_vector] = identifier[alpha] , identifier[K] = identifier[K] ), identifier[log_marginal] ,{ literal[string] : identifier[dL_dK] , literal[string] : identifier[dL_dthetaL] , literal[string] : identifier[alpha] }
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, variance=None, Z_tilde=None): """ Returns a Posterior class containing essential quantities of the posterior """ if mean_function is None: m = 0 # depends on [control=['if'], data=[]] else: m = mean_function.f(X) if variance is None: variance = likelihood.gaussian_variance(Y_metadata) # depends on [control=['if'], data=['variance']] YYT_factor = Y - m if K is None: K = kern.K(X) # depends on [control=['if'], data=['K']] Ky = K.copy() diag.add(Ky, variance + 1e-08) (Wi, LW, LWi, W_logdet) = pdinv(Ky) (alpha, _) = dpotrs(LW, YYT_factor, lower=1) log_marginal = 0.5 * (-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor)) if Z_tilde is not None: # This is a correction term for the log marginal likelihood # In EP this is log Z_tilde, which is the difference between the # Gaussian marginal and Z_EP log_marginal += Z_tilde # depends on [control=['if'], data=['Z_tilde']] dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi) dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK), Y_metadata) return (Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK': dL_dK, 'dL_dthetaL': dL_dthetaL, 'dL_dm': alpha})
def compile_low_chunks(self): ''' Compile the highstate but don't run it, return the low chunks to see exactly what the highstate will execute ''' top = self.get_top() matches = self.top_matches(top) high, errors = self.render_highstate(matches) # If there is extension data reconcile it high, ext_errors = self.state.reconcile_extend(high) errors += ext_errors # Verify that the high data is structurally sound errors += self.state.verify_high(high) high, req_in_errors = self.state.requisite_in(high) errors += req_in_errors high = self.state.apply_exclude(high) if errors: return errors # Compile and verify the raw chunks chunks = self.state.compile_high_data(high) return chunks
def function[compile_low_chunks, parameter[self]]: constant[ Compile the highstate but don't run it, return the low chunks to see exactly what the highstate will execute ] variable[top] assign[=] call[name[self].get_top, parameter[]] variable[matches] assign[=] call[name[self].top_matches, parameter[name[top]]] <ast.Tuple object at 0x7da18fe90220> assign[=] call[name[self].render_highstate, parameter[name[matches]]] <ast.Tuple object at 0x7da18fe93190> assign[=] call[name[self].state.reconcile_extend, parameter[name[high]]] <ast.AugAssign object at 0x7da18fe90a00> <ast.AugAssign object at 0x7da18fe92b30> <ast.Tuple object at 0x7da18fe93280> assign[=] call[name[self].state.requisite_in, parameter[name[high]]] <ast.AugAssign object at 0x7da18fe90d00> variable[high] assign[=] call[name[self].state.apply_exclude, parameter[name[high]]] if name[errors] begin[:] return[name[errors]] variable[chunks] assign[=] call[name[self].state.compile_high_data, parameter[name[high]]] return[name[chunks]]
keyword[def] identifier[compile_low_chunks] ( identifier[self] ): literal[string] identifier[top] = identifier[self] . identifier[get_top] () identifier[matches] = identifier[self] . identifier[top_matches] ( identifier[top] ) identifier[high] , identifier[errors] = identifier[self] . identifier[render_highstate] ( identifier[matches] ) identifier[high] , identifier[ext_errors] = identifier[self] . identifier[state] . identifier[reconcile_extend] ( identifier[high] ) identifier[errors] += identifier[ext_errors] identifier[errors] += identifier[self] . identifier[state] . identifier[verify_high] ( identifier[high] ) identifier[high] , identifier[req_in_errors] = identifier[self] . identifier[state] . identifier[requisite_in] ( identifier[high] ) identifier[errors] += identifier[req_in_errors] identifier[high] = identifier[self] . identifier[state] . identifier[apply_exclude] ( identifier[high] ) keyword[if] identifier[errors] : keyword[return] identifier[errors] identifier[chunks] = identifier[self] . identifier[state] . identifier[compile_high_data] ( identifier[high] ) keyword[return] identifier[chunks]
def compile_low_chunks(self): """ Compile the highstate but don't run it, return the low chunks to see exactly what the highstate will execute """ top = self.get_top() matches = self.top_matches(top) (high, errors) = self.render_highstate(matches) # If there is extension data reconcile it (high, ext_errors) = self.state.reconcile_extend(high) errors += ext_errors # Verify that the high data is structurally sound errors += self.state.verify_high(high) (high, req_in_errors) = self.state.requisite_in(high) errors += req_in_errors high = self.state.apply_exclude(high) if errors: return errors # depends on [control=['if'], data=[]] # Compile and verify the raw chunks chunks = self.state.compile_high_data(high) return chunks
def update_or_create(cls, external_gateway, name, with_status=False, **kw): """ Update or create external endpoints for the specified external gateway. An ExternalEndpoint is considered unique based on the IP address for the endpoint (you cannot add two external endpoints with the same IP). If the external endpoint is dynamic, then the name is the unique identifier. :param ExternalGateway external_gateway: external gateway reference :param str name: name of the ExternalEndpoint. This is only used as a direct match if the endpoint is dynamic. Otherwise the address field in the keyword arguments will be used as you cannot add multiple external endpoints with the same IP address. :param bool with_status: If set to True, returns a 3-tuple of (ExternalEndpoint, modified, created), where modified and created is the boolean status for operations performed. :param dict kw: keyword arguments to satisfy ExternalEndpoint.create constructor :raises CreateElementFailed: Failed to create external endpoint with reason :raises ElementNotFound: If specified ExternalGateway is not valid :return: if with_status=True, return tuple(ExternalEndpoint, created). Otherwise return only ExternalEndpoint. """ if 'address' in kw: external_endpoint = external_gateway.external_endpoint.get_contains( '({})'.format(kw['address'])) else: external_endpoint = external_gateway.external_endpoint.get_contains(name) updated = False created = False if external_endpoint: # Check for changes for name, value in kw.items(): # Check for differences before updating if getattr(external_endpoint, name, None) != value: external_endpoint.data[name] = value updated = True if updated: external_endpoint.update() else: external_endpoint = external_gateway.external_endpoint.create( name, **kw) created = True if with_status: return external_endpoint, updated, created return external_endpoint
def function[update_or_create, parameter[cls, external_gateway, name, with_status]]: constant[ Update or create external endpoints for the specified external gateway. An ExternalEndpoint is considered unique based on the IP address for the endpoint (you cannot add two external endpoints with the same IP). If the external endpoint is dynamic, then the name is the unique identifier. :param ExternalGateway external_gateway: external gateway reference :param str name: name of the ExternalEndpoint. This is only used as a direct match if the endpoint is dynamic. Otherwise the address field in the keyword arguments will be used as you cannot add multiple external endpoints with the same IP address. :param bool with_status: If set to True, returns a 3-tuple of (ExternalEndpoint, modified, created), where modified and created is the boolean status for operations performed. :param dict kw: keyword arguments to satisfy ExternalEndpoint.create constructor :raises CreateElementFailed: Failed to create external endpoint with reason :raises ElementNotFound: If specified ExternalGateway is not valid :return: if with_status=True, return tuple(ExternalEndpoint, created). Otherwise return only ExternalEndpoint. 
] if compare[constant[address] in name[kw]] begin[:] variable[external_endpoint] assign[=] call[name[external_gateway].external_endpoint.get_contains, parameter[call[constant[({})].format, parameter[call[name[kw]][constant[address]]]]]] variable[updated] assign[=] constant[False] variable[created] assign[=] constant[False] if name[external_endpoint] begin[:] for taget[tuple[[<ast.Name object at 0x7da2041d8670>, <ast.Name object at 0x7da1b1b1bcd0>]]] in starred[call[name[kw].items, parameter[]]] begin[:] if compare[call[name[getattr], parameter[name[external_endpoint], name[name], constant[None]]] not_equal[!=] name[value]] begin[:] call[name[external_endpoint].data][name[name]] assign[=] name[value] variable[updated] assign[=] constant[True] if name[updated] begin[:] call[name[external_endpoint].update, parameter[]] if name[with_status] begin[:] return[tuple[[<ast.Name object at 0x7da1b1c61240>, <ast.Name object at 0x7da1b1c61ba0>, <ast.Name object at 0x7da1b1c609d0>]]] return[name[external_endpoint]]
keyword[def] identifier[update_or_create] ( identifier[cls] , identifier[external_gateway] , identifier[name] , identifier[with_status] = keyword[False] ,** identifier[kw] ): literal[string] keyword[if] literal[string] keyword[in] identifier[kw] : identifier[external_endpoint] = identifier[external_gateway] . identifier[external_endpoint] . identifier[get_contains] ( literal[string] . identifier[format] ( identifier[kw] [ literal[string] ])) keyword[else] : identifier[external_endpoint] = identifier[external_gateway] . identifier[external_endpoint] . identifier[get_contains] ( identifier[name] ) identifier[updated] = keyword[False] identifier[created] = keyword[False] keyword[if] identifier[external_endpoint] : keyword[for] identifier[name] , identifier[value] keyword[in] identifier[kw] . identifier[items] (): keyword[if] identifier[getattr] ( identifier[external_endpoint] , identifier[name] , keyword[None] )!= identifier[value] : identifier[external_endpoint] . identifier[data] [ identifier[name] ]= identifier[value] identifier[updated] = keyword[True] keyword[if] identifier[updated] : identifier[external_endpoint] . identifier[update] () keyword[else] : identifier[external_endpoint] = identifier[external_gateway] . identifier[external_endpoint] . identifier[create] ( identifier[name] ,** identifier[kw] ) identifier[created] = keyword[True] keyword[if] identifier[with_status] : keyword[return] identifier[external_endpoint] , identifier[updated] , identifier[created] keyword[return] identifier[external_endpoint]
def update_or_create(cls, external_gateway, name, with_status=False, **kw): """ Update or create external endpoints for the specified external gateway. An ExternalEndpoint is considered unique based on the IP address for the endpoint (you cannot add two external endpoints with the same IP). If the external endpoint is dynamic, then the name is the unique identifier. :param ExternalGateway external_gateway: external gateway reference :param str name: name of the ExternalEndpoint. This is only used as a direct match if the endpoint is dynamic. Otherwise the address field in the keyword arguments will be used as you cannot add multiple external endpoints with the same IP address. :param bool with_status: If set to True, returns a 3-tuple of (ExternalEndpoint, modified, created), where modified and created is the boolean status for operations performed. :param dict kw: keyword arguments to satisfy ExternalEndpoint.create constructor :raises CreateElementFailed: Failed to create external endpoint with reason :raises ElementNotFound: If specified ExternalGateway is not valid :return: if with_status=True, return tuple(ExternalEndpoint, created). Otherwise return only ExternalEndpoint. 
""" if 'address' in kw: external_endpoint = external_gateway.external_endpoint.get_contains('({})'.format(kw['address'])) # depends on [control=['if'], data=['kw']] else: external_endpoint = external_gateway.external_endpoint.get_contains(name) updated = False created = False if external_endpoint: # Check for changes for (name, value) in kw.items(): # Check for differences before updating if getattr(external_endpoint, name, None) != value: external_endpoint.data[name] = value updated = True # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=[]] if updated: external_endpoint.update() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: external_endpoint = external_gateway.external_endpoint.create(name, **kw) created = True if with_status: return (external_endpoint, updated, created) # depends on [control=['if'], data=[]] return external_endpoint
def select_row(self, steps): """Select row in list widget based on a number of steps with direction. Steps can be positive (next rows) or negative (previous rows). """ row = self.current_row() + steps if 0 <= row < self.count(): self.set_current_row(row)
def function[select_row, parameter[self, steps]]: constant[Select row in list widget based on a number of steps with direction. Steps can be positive (next rows) or negative (previous rows). ] variable[row] assign[=] binary_operation[call[name[self].current_row, parameter[]] + name[steps]] if compare[constant[0] less_or_equal[<=] name[row]] begin[:] call[name[self].set_current_row, parameter[name[row]]]
keyword[def] identifier[select_row] ( identifier[self] , identifier[steps] ): literal[string] identifier[row] = identifier[self] . identifier[current_row] ()+ identifier[steps] keyword[if] literal[int] <= identifier[row] < identifier[self] . identifier[count] (): identifier[self] . identifier[set_current_row] ( identifier[row] )
def select_row(self, steps): """Select row in list widget based on a number of steps with direction. Steps can be positive (next rows) or negative (previous rows). """ row = self.current_row() + steps if 0 <= row < self.count(): self.set_current_row(row) # depends on [control=['if'], data=['row']]
def flatatt(attrs): """ Pilfered from `django.forms.utils`: Convert a dictionary of attributes to a single string. The returned string will contain a leading space followed by key="value", XML-style pairs. In the case of a boolean value, the key will appear without a value. Otherwise, the value is formatted through its own dict of `attrs`, which can be useful to parametrize Angular directives. It is assumed that the keys do not need to be XML-escaped. If the passed dictionary is empty, then return an empty string. The result is passed through 'mark_safe' (by way of 'format_html_join'). """ key_value_attrs = [] boolean_attrs = [] for attr, value in attrs.items(): if isinstance(value, bool): if value: boolean_attrs.append((attr,)) else: try: value = value.format(**attrs) except KeyError: pass key_value_attrs.append((attr, value)) return ( format_html_join('', ' {}="{}"', sorted(key_value_attrs)) + format_html_join('', ' {}', sorted(boolean_attrs)) )
def function[flatatt, parameter[attrs]]: constant[ Pilfered from `django.forms.utils`: Convert a dictionary of attributes to a single string. The returned string will contain a leading space followed by key="value", XML-style pairs. In the case of a boolean value, the key will appear without a value. Otherwise, the value is formatted through its own dict of `attrs`, which can be useful to parametrize Angular directives. It is assumed that the keys do not need to be XML-escaped. If the passed dictionary is empty, then return an empty string. The result is passed through 'mark_safe' (by way of 'format_html_join'). ] variable[key_value_attrs] assign[=] list[[]] variable[boolean_attrs] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da2047ea560>, <ast.Name object at 0x7da2047e9180>]]] in starred[call[name[attrs].items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[value], name[bool]]] begin[:] if name[value] begin[:] call[name[boolean_attrs].append, parameter[tuple[[<ast.Name object at 0x7da2047e8c70>]]]] return[binary_operation[call[name[format_html_join], parameter[constant[], constant[ {}="{}"], call[name[sorted], parameter[name[key_value_attrs]]]]] + call[name[format_html_join], parameter[constant[], constant[ {}], call[name[sorted], parameter[name[boolean_attrs]]]]]]]
keyword[def] identifier[flatatt] ( identifier[attrs] ): literal[string] identifier[key_value_attrs] =[] identifier[boolean_attrs] =[] keyword[for] identifier[attr] , identifier[value] keyword[in] identifier[attrs] . identifier[items] (): keyword[if] identifier[isinstance] ( identifier[value] , identifier[bool] ): keyword[if] identifier[value] : identifier[boolean_attrs] . identifier[append] (( identifier[attr] ,)) keyword[else] : keyword[try] : identifier[value] = identifier[value] . identifier[format] (** identifier[attrs] ) keyword[except] identifier[KeyError] : keyword[pass] identifier[key_value_attrs] . identifier[append] (( identifier[attr] , identifier[value] )) keyword[return] ( identifier[format_html_join] ( literal[string] , literal[string] , identifier[sorted] ( identifier[key_value_attrs] ))+ identifier[format_html_join] ( literal[string] , literal[string] , identifier[sorted] ( identifier[boolean_attrs] )) )
def flatatt(attrs): """ Pilfered from `django.forms.utils`: Convert a dictionary of attributes to a single string. The returned string will contain a leading space followed by key="value", XML-style pairs. In the case of a boolean value, the key will appear without a value. Otherwise, the value is formatted through its own dict of `attrs`, which can be useful to parametrize Angular directives. It is assumed that the keys do not need to be XML-escaped. If the passed dictionary is empty, then return an empty string. The result is passed through 'mark_safe' (by way of 'format_html_join'). """ key_value_attrs = [] boolean_attrs = [] for (attr, value) in attrs.items(): if isinstance(value, bool): if value: boolean_attrs.append((attr,)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: try: value = value.format(**attrs) # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] key_value_attrs.append((attr, value)) # depends on [control=['for'], data=[]] return format_html_join('', ' {}="{}"', sorted(key_value_attrs)) + format_html_join('', ' {}', sorted(boolean_attrs))
def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT, compat='no_conflicts', preprocess=None, engine=None, lock=None, data_vars='all', coords='different', autoclose=None, parallel=False, **kwargs): """Open multiple files as a single dataset. Requires dask to be installed. See documentation for details on dask [1]. Attributes from the first dataset file are used for the combined dataset. Parameters ---------- paths : str or sequence Either a string glob in the form "path/to/my/files/*.nc" or an explicit list of files to open. Paths can be given as strings or as pathlib Paths. chunks : int or dict, optional Dictionary with keys given by dimension names and values given by chunk sizes. In general, these should divide the dimensions of each dataset. If int, chunk each dimension by ``chunks``. By default, chunks will be chosen to load entire input files into memory at once. This has a major impact on performance: please see the full documentation for more details [2]. concat_dim : None, str, DataArray or Index, optional Dimension to concatenate files along. This argument is passed on to :py:func:`xarray.auto_combine` along with the dataset objects. You only need to provide this argument if the dimension along which you want to concatenate is not a dimension in the original datasets, e.g., if you want to stack a collection of 2D arrays along a third dimension. By default, xarray attempts to infer this argument by examining component files. Set ``concat_dim=None`` explicitly to disable concatenation. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional String indicating how to compare variables of the same name for potential conflicts when merging: * 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. * 'equals': all values and dimensions must be the same. * 'identical': all values, dimensions and attributes must be the same. 
* 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable, optional If provided, call this function on each dataset prior to concatenation. You can find the file-name from which each dataset was loaded in ``ds.encoding['source']``. engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \ optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4'. lock : False or duck threading.Lock, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. data_vars : {'minimal', 'different', 'all' or list of str}, optional These data variables will be concatenated together: * 'minimal': Only data variables in which the dimension already appears are included. * 'different': Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * 'all': All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the 'minimal' data variables. coords : {'minimal', 'different', 'all' o list of str}, optional These coordinate variables will be concatenated together: * 'minimal': Only coordinates in which the dimension already appears are included. * 'different': Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. 
* 'all': All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, in addition the 'minimal' coordinates. parallel : bool, optional If True, the open and preprocess steps of this function will be performed in parallel using ``dask.delayed``. Default is False. **kwargs : optional Additional arguments passed on to :py:func:`xarray.open_dataset`. Returns ------- xarray.Dataset Notes ----- ``open_mfdataset`` opens files with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- auto_combine open_dataset References ---------- .. [1] http://xarray.pydata.org/en/stable/dask.html .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance """ # noqa if isinstance(paths, str): if is_remote_uri(paths): raise ValueError( 'cannot do wild-card matching for paths that are remote URLs: ' '{!r}. Instead, supply paths as an explicit list of strings.' .format(paths)) paths = sorted(glob(paths)) else: paths = [str(p) if isinstance(p, Path) else p for p in paths] if not paths: raise IOError('no files to open') # Coerce 1D input into ND to maintain backwards-compatible API until API # for N-D combine decided # (see https://github.com/pydata/xarray/pull/2553/#issuecomment-445892746) if concat_dim is None or concat_dim is _CONCAT_DIM_DEFAULT: concat_dims = concat_dim elif not isinstance(concat_dim, list): concat_dims = [concat_dim] else: concat_dims = concat_dim infer_order_from_coords = False # If infer_order_from_coords=True then this is unnecessary, but quick. 
# If infer_order_from_coords=False then this creates a flat list which is # easier to iterate over, while saving the originally-supplied structure combined_ids_paths, concat_dims = _infer_concat_order_from_positions( paths, concat_dims) ids, paths = ( list(combined_ids_paths.keys()), list(combined_ids_paths.values())) open_kwargs = dict(engine=engine, chunks=chunks or {}, lock=lock, autoclose=autoclose, **kwargs) if parallel: import dask # wrap the open_dataset, getattr, and preprocess with delayed open_ = dask.delayed(open_dataset) getattr_ = dask.delayed(getattr) if preprocess is not None: preprocess = dask.delayed(preprocess) else: open_ = open_dataset getattr_ = getattr datasets = [open_(p, **open_kwargs) for p in paths] file_objs = [getattr_(ds, '_file_obj') for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] if parallel: # calling compute here will return the datasets/file_objs lists, # the underlying datasets will still be stored as dask arrays datasets, file_objs = dask.compute(datasets, file_objs) # Close datasets in case of a ValueError try: if infer_order_from_coords: # Discard ordering because it should be redone from coordinates ids = False combined = _auto_combine( datasets, concat_dims=concat_dims, compat=compat, data_vars=data_vars, coords=coords, infer_order_from_coords=infer_order_from_coords, ids=ids) except ValueError: for ds in datasets: ds.close() raise combined._file_obj = _MultiFileCloser(file_objs) combined.attrs = datasets[0].attrs return combined
def function[open_mfdataset, parameter[paths, chunks, concat_dim, compat, preprocess, engine, lock, data_vars, coords, autoclose, parallel]]: constant[Open multiple files as a single dataset. Requires dask to be installed. See documentation for details on dask [1]. Attributes from the first dataset file are used for the combined dataset. Parameters ---------- paths : str or sequence Either a string glob in the form "path/to/my/files/*.nc" or an explicit list of files to open. Paths can be given as strings or as pathlib Paths. chunks : int or dict, optional Dictionary with keys given by dimension names and values given by chunk sizes. In general, these should divide the dimensions of each dataset. If int, chunk each dimension by ``chunks``. By default, chunks will be chosen to load entire input files into memory at once. This has a major impact on performance: please see the full documentation for more details [2]. concat_dim : None, str, DataArray or Index, optional Dimension to concatenate files along. This argument is passed on to :py:func:`xarray.auto_combine` along with the dataset objects. You only need to provide this argument if the dimension along which you want to concatenate is not a dimension in the original datasets, e.g., if you want to stack a collection of 2D arrays along a third dimension. By default, xarray attempts to infer this argument by examining component files. Set ``concat_dim=None`` explicitly to disable concatenation. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional String indicating how to compare variables of the same name for potential conflicts when merging: * 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. * 'equals': all values and dimensions must be the same. * 'identical': all values, dimensions and attributes must be the same. * 'no_conflicts': only values which are not null in both datasets must be equal. 
The returned dataset then contains the combination of all non-null values. preprocess : callable, optional If provided, call this function on each dataset prior to concatenation. You can find the file-name from which each dataset was loaded in ``ds.encoding['source']``. engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4'. lock : False or duck threading.Lock, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. data_vars : {'minimal', 'different', 'all' or list of str}, optional These data variables will be concatenated together: * 'minimal': Only data variables in which the dimension already appears are included. * 'different': Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * 'all': All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the 'minimal' data variables. coords : {'minimal', 'different', 'all' o list of str}, optional These coordinate variables will be concatenated together: * 'minimal': Only coordinates in which the dimension already appears are included. * 'different': Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * 'all': All coordinate variables will be concatenated, except those corresponding to other dimensions. 
* list of str: The listed coordinate variables will be concatenated, in addition the 'minimal' coordinates. parallel : bool, optional If True, the open and preprocess steps of this function will be performed in parallel using ``dask.delayed``. Default is False. **kwargs : optional Additional arguments passed on to :py:func:`xarray.open_dataset`. Returns ------- xarray.Dataset Notes ----- ``open_mfdataset`` opens files with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- auto_combine open_dataset References ---------- .. [1] http://xarray.pydata.org/en/stable/dask.html .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance ] if call[name[isinstance], parameter[name[paths], name[str]]] begin[:] if call[name[is_remote_uri], parameter[name[paths]]] begin[:] <ast.Raise object at 0x7da1b1f96e90> variable[paths] assign[=] call[name[sorted], parameter[call[name[glob], parameter[name[paths]]]]] if <ast.UnaryOp object at 0x7da1b1f97d90> begin[:] <ast.Raise object at 0x7da1b1f94eb0> if <ast.BoolOp object at 0x7da1b1f94fa0> begin[:] variable[concat_dims] assign[=] name[concat_dim] variable[infer_order_from_coords] assign[=] constant[False] <ast.Tuple object at 0x7da1b1f94d90> assign[=] call[name[_infer_concat_order_from_positions], parameter[name[paths], name[concat_dims]]] <ast.Tuple object at 0x7da1b1f97670> assign[=] tuple[[<ast.Call object at 0x7da1b1cca0b0>, <ast.Call object at 0x7da1b1cc9150>]] variable[open_kwargs] assign[=] call[name[dict], parameter[]] if name[parallel] begin[:] import module[dask] variable[open_] assign[=] call[name[dask].delayed, parameter[name[open_dataset]]] variable[getattr_] assign[=] call[name[dask].delayed, parameter[name[getattr]]] if compare[name[preprocess] is_not constant[None]] begin[:] variable[preprocess] assign[=] call[name[dask].delayed, 
parameter[name[preprocess]]] variable[datasets] assign[=] <ast.ListComp object at 0x7da1b1ccb3a0> variable[file_objs] assign[=] <ast.ListComp object at 0x7da1b1ccb460> if compare[name[preprocess] is_not constant[None]] begin[:] variable[datasets] assign[=] <ast.ListComp object at 0x7da18c4cf9a0> if name[parallel] begin[:] <ast.Tuple object at 0x7da18c4ce0e0> assign[=] call[name[dask].compute, parameter[name[datasets], name[file_objs]]] <ast.Try object at 0x7da18c4cd330> name[combined]._file_obj assign[=] call[name[_MultiFileCloser], parameter[name[file_objs]]] name[combined].attrs assign[=] call[name[datasets]][constant[0]].attrs return[name[combined]]
keyword[def] identifier[open_mfdataset] ( identifier[paths] , identifier[chunks] = keyword[None] , identifier[concat_dim] = identifier[_CONCAT_DIM_DEFAULT] , identifier[compat] = literal[string] , identifier[preprocess] = keyword[None] , identifier[engine] = keyword[None] , identifier[lock] = keyword[None] , identifier[data_vars] = literal[string] , identifier[coords] = literal[string] , identifier[autoclose] = keyword[None] , identifier[parallel] = keyword[False] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[isinstance] ( identifier[paths] , identifier[str] ): keyword[if] identifier[is_remote_uri] ( identifier[paths] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[paths] )) identifier[paths] = identifier[sorted] ( identifier[glob] ( identifier[paths] )) keyword[else] : identifier[paths] =[ identifier[str] ( identifier[p] ) keyword[if] identifier[isinstance] ( identifier[p] , identifier[Path] ) keyword[else] identifier[p] keyword[for] identifier[p] keyword[in] identifier[paths] ] keyword[if] keyword[not] identifier[paths] : keyword[raise] identifier[IOError] ( literal[string] ) keyword[if] identifier[concat_dim] keyword[is] keyword[None] keyword[or] identifier[concat_dim] keyword[is] identifier[_CONCAT_DIM_DEFAULT] : identifier[concat_dims] = identifier[concat_dim] keyword[elif] keyword[not] identifier[isinstance] ( identifier[concat_dim] , identifier[list] ): identifier[concat_dims] =[ identifier[concat_dim] ] keyword[else] : identifier[concat_dims] = identifier[concat_dim] identifier[infer_order_from_coords] = keyword[False] identifier[combined_ids_paths] , identifier[concat_dims] = identifier[_infer_concat_order_from_positions] ( identifier[paths] , identifier[concat_dims] ) identifier[ids] , identifier[paths] =( identifier[list] ( identifier[combined_ids_paths] . identifier[keys] ()), identifier[list] ( identifier[combined_ids_paths] . 
identifier[values] ())) identifier[open_kwargs] = identifier[dict] ( identifier[engine] = identifier[engine] , identifier[chunks] = identifier[chunks] keyword[or] {}, identifier[lock] = identifier[lock] , identifier[autoclose] = identifier[autoclose] ,** identifier[kwargs] ) keyword[if] identifier[parallel] : keyword[import] identifier[dask] identifier[open_] = identifier[dask] . identifier[delayed] ( identifier[open_dataset] ) identifier[getattr_] = identifier[dask] . identifier[delayed] ( identifier[getattr] ) keyword[if] identifier[preprocess] keyword[is] keyword[not] keyword[None] : identifier[preprocess] = identifier[dask] . identifier[delayed] ( identifier[preprocess] ) keyword[else] : identifier[open_] = identifier[open_dataset] identifier[getattr_] = identifier[getattr] identifier[datasets] =[ identifier[open_] ( identifier[p] ,** identifier[open_kwargs] ) keyword[for] identifier[p] keyword[in] identifier[paths] ] identifier[file_objs] =[ identifier[getattr_] ( identifier[ds] , literal[string] ) keyword[for] identifier[ds] keyword[in] identifier[datasets] ] keyword[if] identifier[preprocess] keyword[is] keyword[not] keyword[None] : identifier[datasets] =[ identifier[preprocess] ( identifier[ds] ) keyword[for] identifier[ds] keyword[in] identifier[datasets] ] keyword[if] identifier[parallel] : identifier[datasets] , identifier[file_objs] = identifier[dask] . 
identifier[compute] ( identifier[datasets] , identifier[file_objs] ) keyword[try] : keyword[if] identifier[infer_order_from_coords] : identifier[ids] = keyword[False] identifier[combined] = identifier[_auto_combine] ( identifier[datasets] , identifier[concat_dims] = identifier[concat_dims] , identifier[compat] = identifier[compat] , identifier[data_vars] = identifier[data_vars] , identifier[coords] = identifier[coords] , identifier[infer_order_from_coords] = identifier[infer_order_from_coords] , identifier[ids] = identifier[ids] ) keyword[except] identifier[ValueError] : keyword[for] identifier[ds] keyword[in] identifier[datasets] : identifier[ds] . identifier[close] () keyword[raise] identifier[combined] . identifier[_file_obj] = identifier[_MultiFileCloser] ( identifier[file_objs] ) identifier[combined] . identifier[attrs] = identifier[datasets] [ literal[int] ]. identifier[attrs] keyword[return] identifier[combined]
def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT, compat='no_conflicts', preprocess=None, engine=None, lock=None, data_vars='all', coords='different', autoclose=None, parallel=False, **kwargs): """Open multiple files as a single dataset. Requires dask to be installed. See documentation for details on dask [1]. Attributes from the first dataset file are used for the combined dataset. Parameters ---------- paths : str or sequence Either a string glob in the form "path/to/my/files/*.nc" or an explicit list of files to open. Paths can be given as strings or as pathlib Paths. chunks : int or dict, optional Dictionary with keys given by dimension names and values given by chunk sizes. In general, these should divide the dimensions of each dataset. If int, chunk each dimension by ``chunks``. By default, chunks will be chosen to load entire input files into memory at once. This has a major impact on performance: please see the full documentation for more details [2]. concat_dim : None, str, DataArray or Index, optional Dimension to concatenate files along. This argument is passed on to :py:func:`xarray.auto_combine` along with the dataset objects. You only need to provide this argument if the dimension along which you want to concatenate is not a dimension in the original datasets, e.g., if you want to stack a collection of 2D arrays along a third dimension. By default, xarray attempts to infer this argument by examining component files. Set ``concat_dim=None`` explicitly to disable concatenation. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional String indicating how to compare variables of the same name for potential conflicts when merging: * 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. * 'equals': all values and dimensions must be the same. * 'identical': all values, dimensions and attributes must be the same. 
* 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. preprocess : callable, optional If provided, call this function on each dataset prior to concatenation. You can find the file-name from which each dataset was loaded in ``ds.encoding['source']``. engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4'. lock : False or duck threading.Lock, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. data_vars : {'minimal', 'different', 'all' or list of str}, optional These data variables will be concatenated together: * 'minimal': Only data variables in which the dimension already appears are included. * 'different': Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * 'all': All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the 'minimal' data variables. coords : {'minimal', 'different', 'all' o list of str}, optional These coordinate variables will be concatenated together: * 'minimal': Only coordinates in which the dimension already appears are included. * 'different': Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. 
* 'all': All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, in addition the 'minimal' coordinates. parallel : bool, optional If True, the open and preprocess steps of this function will be performed in parallel using ``dask.delayed``. Default is False. **kwargs : optional Additional arguments passed on to :py:func:`xarray.open_dataset`. Returns ------- xarray.Dataset Notes ----- ``open_mfdataset`` opens files with read-only access. When you modify values of a Dataset, even one linked to files on disk, only the in-memory copy you are manipulating in xarray is modified: the original file on disk is never touched. See Also -------- auto_combine open_dataset References ---------- .. [1] http://xarray.pydata.org/en/stable/dask.html .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance """ # noqa if isinstance(paths, str): if is_remote_uri(paths): raise ValueError('cannot do wild-card matching for paths that are remote URLs: {!r}. Instead, supply paths as an explicit list of strings.'.format(paths)) # depends on [control=['if'], data=[]] paths = sorted(glob(paths)) # depends on [control=['if'], data=[]] else: paths = [str(p) if isinstance(p, Path) else p for p in paths] if not paths: raise IOError('no files to open') # depends on [control=['if'], data=[]] # Coerce 1D input into ND to maintain backwards-compatible API until API # for N-D combine decided # (see https://github.com/pydata/xarray/pull/2553/#issuecomment-445892746) if concat_dim is None or concat_dim is _CONCAT_DIM_DEFAULT: concat_dims = concat_dim # depends on [control=['if'], data=[]] elif not isinstance(concat_dim, list): concat_dims = [concat_dim] # depends on [control=['if'], data=[]] else: concat_dims = concat_dim infer_order_from_coords = False # If infer_order_from_coords=True then this is unnecessary, but quick. 
# If infer_order_from_coords=False then this creates a flat list which is # easier to iterate over, while saving the originally-supplied structure (combined_ids_paths, concat_dims) = _infer_concat_order_from_positions(paths, concat_dims) (ids, paths) = (list(combined_ids_paths.keys()), list(combined_ids_paths.values())) open_kwargs = dict(engine=engine, chunks=chunks or {}, lock=lock, autoclose=autoclose, **kwargs) if parallel: import dask # wrap the open_dataset, getattr, and preprocess with delayed open_ = dask.delayed(open_dataset) getattr_ = dask.delayed(getattr) if preprocess is not None: preprocess = dask.delayed(preprocess) # depends on [control=['if'], data=['preprocess']] # depends on [control=['if'], data=[]] else: open_ = open_dataset getattr_ = getattr datasets = [open_(p, **open_kwargs) for p in paths] file_objs = [getattr_(ds, '_file_obj') for ds in datasets] if preprocess is not None: datasets = [preprocess(ds) for ds in datasets] # depends on [control=['if'], data=['preprocess']] if parallel: # calling compute here will return the datasets/file_objs lists, # the underlying datasets will still be stored as dask arrays (datasets, file_objs) = dask.compute(datasets, file_objs) # depends on [control=['if'], data=[]] # Close datasets in case of a ValueError try: if infer_order_from_coords: # Discard ordering because it should be redone from coordinates ids = False # depends on [control=['if'], data=[]] combined = _auto_combine(datasets, concat_dims=concat_dims, compat=compat, data_vars=data_vars, coords=coords, infer_order_from_coords=infer_order_from_coords, ids=ids) # depends on [control=['try'], data=[]] except ValueError: for ds in datasets: ds.close() # depends on [control=['for'], data=['ds']] raise # depends on [control=['except'], data=[]] combined._file_obj = _MultiFileCloser(file_objs) combined.attrs = datasets[0].attrs return combined
def run(self, cell, is_full_fc=False, parse_fc=True):
    """Translate QE q2r output into phonopy-readable force constants.

    Note
    ----
    The dielectric constant tensor and Born effective charges are read
    from the QE output file when they exist. In that case dipole-dipole
    contributions have been removed from the force constants, so the
    resulting force constants matrix is not usable in phonopy.

    Parameters
    ----------
    cell : PhonopyAtoms
        Primitive cell used for the QE/PH calculation.
    is_full_fc : bool, optional, default=False
        Whether to create full or compact force constants.
    parse_fc : bool, optional, default=True
        When False the QE force constants file is not parsed; useful
        when only epsilon and born are wanted.

    """
    with open(self._filename) as stream:
        parsed = self._parse_q2r(stream)

    self.dimension = parsed['dimension']
    self.epsilon = parsed['dielectric']
    self.borns = parsed['born']

    if not parse_fc:
        return

    fc, primitive, supercell = self._arrange_supercell_fc(
        cell, parsed['fc'], is_full_fc=is_full_fc)
    self.fc = fc
    self.primitive = primitive
    self.supercell = supercell
def function[run, parameter[self, cell, is_full_fc, parse_fc]]: constant[Make supercell force constants readable for phonopy Note ---- Born effective charges and dielectric constant tensor are read from QE output file if they exist. But this means dipole-dipole contributions are removed from force constants and this force constants matrix is not usable in phonopy. Arguments --------- cell : PhonopyAtoms Primitive cell used for QE/PH calculation. is_full_fc : Bool, optional, default=False Whether to create full or compact force constants. parse_fc : Bool, optional, default=True Force constants file of QE is not parsed when this is False. False may be used when expected to parse only epsilon and born. ] with call[name[open], parameter[name[self]._filename]] begin[:] variable[fc_dct] assign[=] call[name[self]._parse_q2r, parameter[name[f]]] name[self].dimension assign[=] call[name[fc_dct]][constant[dimension]] name[self].epsilon assign[=] call[name[fc_dct]][constant[dielectric]] name[self].borns assign[=] call[name[fc_dct]][constant[born]] if name[parse_fc] begin[:] <ast.Tuple object at 0x7da18ede6320> assign[=] call[name[self]._arrange_supercell_fc, parameter[name[cell], call[name[fc_dct]][constant[fc]]]]
keyword[def] identifier[run] ( identifier[self] , identifier[cell] , identifier[is_full_fc] = keyword[False] , identifier[parse_fc] = keyword[True] ): literal[string] keyword[with] identifier[open] ( identifier[self] . identifier[_filename] ) keyword[as] identifier[f] : identifier[fc_dct] = identifier[self] . identifier[_parse_q2r] ( identifier[f] ) identifier[self] . identifier[dimension] = identifier[fc_dct] [ literal[string] ] identifier[self] . identifier[epsilon] = identifier[fc_dct] [ literal[string] ] identifier[self] . identifier[borns] = identifier[fc_dct] [ literal[string] ] keyword[if] identifier[parse_fc] : ( identifier[self] . identifier[fc] , identifier[self] . identifier[primitive] , identifier[self] . identifier[supercell] )= identifier[self] . identifier[_arrange_supercell_fc] ( identifier[cell] , identifier[fc_dct] [ literal[string] ], identifier[is_full_fc] = identifier[is_full_fc] )
def run(self, cell, is_full_fc=False, parse_fc=True): """Make supercell force constants readable for phonopy Note ---- Born effective charges and dielectric constant tensor are read from QE output file if they exist. But this means dipole-dipole contributions are removed from force constants and this force constants matrix is not usable in phonopy. Arguments --------- cell : PhonopyAtoms Primitive cell used for QE/PH calculation. is_full_fc : Bool, optional, default=False Whether to create full or compact force constants. parse_fc : Bool, optional, default=True Force constants file of QE is not parsed when this is False. False may be used when expected to parse only epsilon and born. """ with open(self._filename) as f: fc_dct = self._parse_q2r(f) self.dimension = fc_dct['dimension'] self.epsilon = fc_dct['dielectric'] self.borns = fc_dct['born'] if parse_fc: (self.fc, self.primitive, self.supercell) = self._arrange_supercell_fc(cell, fc_dct['fc'], is_full_fc=is_full_fc) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']]
def build_bell_circuit():
    """Return a 2-qubit circuit that prepares and measures a Bell pair."""
    qubits = QuantumRegister(2)
    clbits = ClassicalRegister(2)
    circuit = QuantumCircuit(qubits, clbits)
    # Hadamard on qubit 0 followed by CNOT entangles the two qubits.
    circuit.h(qubits[0])
    circuit.cx(qubits[0], qubits[1])
    # Read both qubits out into the classical register.
    circuit.measure(qubits, clbits)
    return circuit
def function[build_bell_circuit, parameter[]]: constant[Returns a circuit putting 2 qubits in the Bell state.] variable[q] assign[=] call[name[QuantumRegister], parameter[constant[2]]] variable[c] assign[=] call[name[ClassicalRegister], parameter[constant[2]]] variable[qc] assign[=] call[name[QuantumCircuit], parameter[name[q], name[c]]] call[name[qc].h, parameter[call[name[q]][constant[0]]]] call[name[qc].cx, parameter[call[name[q]][constant[0]], call[name[q]][constant[1]]]] call[name[qc].measure, parameter[name[q], name[c]]] return[name[qc]]
keyword[def] identifier[build_bell_circuit] (): literal[string] identifier[q] = identifier[QuantumRegister] ( literal[int] ) identifier[c] = identifier[ClassicalRegister] ( literal[int] ) identifier[qc] = identifier[QuantumCircuit] ( identifier[q] , identifier[c] ) identifier[qc] . identifier[h] ( identifier[q] [ literal[int] ]) identifier[qc] . identifier[cx] ( identifier[q] [ literal[int] ], identifier[q] [ literal[int] ]) identifier[qc] . identifier[measure] ( identifier[q] , identifier[c] ) keyword[return] identifier[qc]
def build_bell_circuit(): """Returns a circuit putting 2 qubits in the Bell state.""" q = QuantumRegister(2) c = ClassicalRegister(2) qc = QuantumCircuit(q, c) qc.h(q[0]) qc.cx(q[0], q[1]) qc.measure(q, c) return qc
def machines(self):
    """Return a Machines helper for this server, or None if unavailable."""
    # Lazily populate the resource listing on first access.
    if self._resources is None:
        self.__init()
    # Only expose the endpoint when the service advertises it.
    if "machines" not in self._resources:
        return None
    machines_url = self._url + "/machines"
    return _machines.Machines(machines_url,
                              securityHandler=self._securityHandler,
                              initialize=False,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port)
def function[machines, parameter[self]]: constant[gets a reference to the machines object] if compare[name[self]._resources is constant[None]] begin[:] call[name[self].__init, parameter[]] if compare[constant[machines] in name[self]._resources] begin[:] variable[url] assign[=] binary_operation[name[self]._url + constant[/machines]] return[call[name[_machines].Machines, parameter[name[url]]]]
keyword[def] identifier[machines] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_resources] keyword[is] keyword[None] : identifier[self] . identifier[__init] () keyword[if] literal[string] keyword[in] identifier[self] . identifier[_resources] : identifier[url] = identifier[self] . identifier[_url] + literal[string] keyword[return] identifier[_machines] . identifier[Machines] ( identifier[url] , identifier[securityHandler] = identifier[self] . identifier[_securityHandler] , identifier[initialize] = keyword[False] , identifier[proxy_url] = identifier[self] . identifier[_proxy_url] , identifier[proxy_port] = identifier[self] . identifier[_proxy_port] ) keyword[else] : keyword[return] keyword[None]
def machines(self): """gets a reference to the machines object""" if self._resources is None: self.__init() # depends on [control=['if'], data=[]] if 'machines' in self._resources: url = self._url + '/machines' return _machines.Machines(url, securityHandler=self._securityHandler, initialize=False, proxy_url=self._proxy_url, proxy_port=self._proxy_port) # depends on [control=['if'], data=[]] else: return None
def get_next_base26(prev=None):
    """Increment letter-based IDs.

    Generates IDs like ['a', 'b', ..., 'z', 'aa', 'ab', ..., 'az', 'ba', ...]

    Args:
        prev (str, optional): Current base-26 ID. Falsy values (None, '')
            yield the first ID, 'a'.

    Returns:
        str: Next base-26 ID.

    Raises:
        ValueError: If ``prev`` contains characters outside a-z.
    """
    if not prev:
        return 'a'
    # fullmatch anchors both ends; anything outside a-z is not a valid ID.
    if not re.fullmatch(r"[a-z]*", prev):
        raise ValueError("Invalid base26")
    # Iterative carry propagation instead of the previous recursive
    # implementation: the recursion re-validated (and re-compiled a regex
    # for) the shrinking prefix on every step, and could hit the recursion
    # limit on long all-'z' inputs.
    chars = list(prev)
    i = len(chars) - 1
    while i >= 0 and chars[i] == 'z':
        chars[i] = 'a'  # 'z' rolls over to 'a' and carries left
        i -= 1
    if i < 0:
        # Every position carried over ('z', 'zz', ...): grow by one digit.
        return 'a' + ''.join(chars)
    chars[i] = chr(ord(chars[i]) + 1)
    return ''.join(chars)
def function[get_next_base26, parameter[prev]]: constant[Increment letter-based IDs. Generates IDs like ['a', 'b', ..., 'z', 'aa', ab', ..., 'az', 'ba', ...] Returns: str: Next base-26 ID. ] if <ast.UnaryOp object at 0x7da204564ac0> begin[:] return[constant[a]] variable[r] assign[=] call[name[re].compile, parameter[constant[^[a-z]*$]]] if <ast.UnaryOp object at 0x7da204564970> begin[:] <ast.Raise object at 0x7da2045667d0> if <ast.UnaryOp object at 0x7da204565b70> begin[:] return[binary_operation[call[name[prev]][<ast.Slice object at 0x7da2045674c0>] + call[name[chr], parameter[binary_operation[call[name[ord], parameter[call[name[prev]][<ast.UnaryOp object at 0x7da18c4ccf70>]]] + constant[1]]]]]] return[binary_operation[call[name[get_next_base26], parameter[call[name[prev]][<ast.Slice object at 0x7da18c4cfd30>]]] + constant[a]]]
keyword[def] identifier[get_next_base26] ( identifier[prev] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[prev] : keyword[return] literal[string] identifier[r] = identifier[re] . identifier[compile] ( literal[string] ) keyword[if] keyword[not] identifier[r] . identifier[match] ( identifier[prev] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] keyword[not] identifier[prev] . identifier[endswith] ( literal[string] ): keyword[return] identifier[prev] [:- literal[int] ]+ identifier[chr] ( identifier[ord] ( identifier[prev] [- literal[int] ])+ literal[int] ) keyword[return] identifier[get_next_base26] ( identifier[prev] [:- literal[int] ])+ literal[string]
def get_next_base26(prev=None): """Increment letter-based IDs. Generates IDs like ['a', 'b', ..., 'z', 'aa', ab', ..., 'az', 'ba', ...] Returns: str: Next base-26 ID. """ if not prev: return 'a' # depends on [control=['if'], data=[]] r = re.compile('^[a-z]*$') if not r.match(prev): raise ValueError('Invalid base26') # depends on [control=['if'], data=[]] if not prev.endswith('z'): return prev[:-1] + chr(ord(prev[-1]) + 1) # depends on [control=['if'], data=[]] return get_next_base26(prev[:-1]) + 'a'
def pull(self, images, file_name=None, save=True, **kwargs):
    '''pull an image from a singularity registry

       Parameters
       ==========
       images: refers to the uri given by the user to pull in the format
       <collection>/<namespace>. You should have an API that is able to
       retrieve a container based on parsing this uri.
       file_name: the user's requested name for the file. It can
                  optionally be None if the user wants a default.
       save: if True, you should save the container to the database
             using self.add()

       Returns
       =======
       finished: a single container path, or list of paths
    '''
    if not isinstance(images, list):
        images = [images]

    # Interaction with a registry requires secrets
    self.require_secrets()

    bot.debug('Execution of PULL for %s images' % len(images))

    finished = []
    for image in images:

        q = parse_image_name(remove_uri(image))

        # If a custom registry is not set, use default base
        if q['registry'] is None:
            q['registry'] = self.base

        # Ensure https is added back to the registry uri
        q = self._add_https(q)

        # All custom registries need api appended
        if not q['registry'].endswith('api'):
            q['registry'] = '%s/api' % q['registry']

        # Verify image existence, and obtain id
        url = "%s/container/%s/%s:%s" % (q['registry'], q['collection'],
                                         q['image'], q['tag'])
        bot.debug('Retrieving manifest at %s' % url)

        try:
            manifest = self._get(url)
        except SSLError:
            bot.exit('Issue with %s, try exporting SREGISTRY_REGISTRY_NOHTTPS.' % url)

        # Private container collection: a 403 response means we must
        # authorize and retry once with a fresh token.
        if isinstance(manifest, Response):

            # Requires token
            if manifest.status_code == 403:
                SREGISTRY_EVENT = self.authorize(request_type="pull", names=q)
                headers = {'Authorization': SREGISTRY_EVENT}
                self._update_headers(headers)
                manifest = self._get(url)

                # Still denied
                if isinstance(manifest, Response):
                    if manifest.status_code == 403:
                        manifest = 403

        # Integer manifests encode HTTP error states; report and bail out.
        if isinstance(manifest, int):
            if manifest == 400:
                bot.error('Bad request (400). Is this a private container?')
            elif manifest == 404:
                bot.error('Container not found (404)')
            elif manifest == 403:
                bot.error('Unauthorized (403)')
            sys.exit(1)

        # Successful pull
        if "image" in manifest:

            # Add self link to manifest
            manifest['selfLink'] = url

            # Derive the download name per image. Previously the default
            # computed for the first image was written back into file_name
            # and reused for every following image, so multi-image pulls
            # with the default name overwrote one another.
            if file_name is None:
                download_name = q['storage'].replace('/', '-')
            else:
                download_name = file_name

            # Show progress if not quiet
            image_file = self.download(url=manifest['image'],
                                       file_name=download_name,
                                       show_progress=not self.quiet)

            # If the user is saving to local storage
            if save is True:
                image_uri = "%s/%s:%s" % (manifest['collection'],
                                          manifest['name'],
                                          manifest['tag'])
                container = self.add(image_path=image_file,
                                     image_uri=image_uri,
                                     metadata=manifest,
                                     url=manifest['image'])
                image_file = container.image

            if os.path.exists(image_file):
                bot.debug('Retrieved image file %s' % image_file)
                bot.custom(prefix="Success!", message=image_file)
                finished.append(image_file)

    # A single pull returns a bare path rather than a one-element list.
    if len(finished) == 1:
        finished = finished[0]
    return finished
def function[pull, parameter[self, images, file_name, save]]: constant[pull an image from a singularity registry Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths ] if <ast.UnaryOp object at 0x7da1b03b9870> begin[:] variable[images] assign[=] list[[<ast.Name object at 0x7da1b03ba2f0>]] call[name[self].require_secrets, parameter[]] call[name[bot].debug, parameter[binary_operation[constant[Execution of PULL for %s images] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[images]]]]]] variable[finished] assign[=] list[[]] for taget[name[image]] in starred[name[images]] begin[:] variable[q] assign[=] call[name[parse_image_name], parameter[call[name[remove_uri], parameter[name[image]]]]] if compare[call[name[q]][constant[registry]] equal[==] constant[None]] begin[:] call[name[q]][constant[registry]] assign[=] name[self].base variable[q] assign[=] call[name[self]._add_https, parameter[name[q]]] if <ast.UnaryOp object at 0x7da1b03b8850> begin[:] call[name[q]][constant[registry]] assign[=] binary_operation[constant[%s/api] <ast.Mod object at 0x7da2590d6920> call[name[q]][constant[registry]]] variable[url] assign[=] binary_operation[constant[%s/container/%s/%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b03b9030>, <ast.Subscript object at 0x7da1b03ba740>, <ast.Subscript object at 0x7da1b03bb880>, <ast.Subscript object at 0x7da1b03bbc70>]]] call[name[bot].debug, parameter[binary_operation[constant[Retrieving manifest at %s] <ast.Mod object at 0x7da2590d6920> name[url]]]] <ast.Try object at 0x7da1b03b9480> if call[name[isinstance], 
parameter[name[manifest], name[Response]]] begin[:] if compare[name[manifest].status_code equal[==] constant[403]] begin[:] variable[SREGISTRY_EVENT] assign[=] call[name[self].authorize, parameter[]] variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b03ba950>], [<ast.Name object at 0x7da1b03bb250>]] call[name[self]._update_headers, parameter[name[headers]]] variable[manifest] assign[=] call[name[self]._get, parameter[name[url]]] if call[name[isinstance], parameter[name[manifest], name[Response]]] begin[:] if compare[name[manifest].status_code equal[==] constant[403]] begin[:] variable[manifest] assign[=] constant[403] if call[name[isinstance], parameter[name[manifest], name[int]]] begin[:] if compare[name[manifest] equal[==] constant[400]] begin[:] call[name[bot].error, parameter[constant[Bad request (400). Is this a private container?]]] call[name[sys].exit, parameter[constant[1]]] if compare[constant[image] in name[manifest]] begin[:] call[name[manifest]][constant[selfLink]] assign[=] name[url] if compare[name[file_name] is constant[None]] begin[:] variable[file_name] assign[=] call[call[name[q]][constant[storage]].replace, parameter[constant[/], constant[-]]] variable[image_file] assign[=] call[name[self].download, parameter[]] if compare[name[save] is constant[True]] begin[:] variable[image_uri] assign[=] binary_operation[constant[%s/%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b038bbb0>, <ast.Subscript object at 0x7da1b038b5e0>, <ast.Subscript object at 0x7da1b03896c0>]]] variable[container] assign[=] call[name[self].add, parameter[]] variable[image_file] assign[=] name[container].image if call[name[os].path.exists, parameter[name[image_file]]] begin[:] call[name[bot].debug, parameter[binary_operation[constant[Retrieved image file %s] <ast.Mod object at 0x7da2590d6920> name[image_file]]]] call[name[bot].custom, parameter[]] call[name[finished].append, parameter[name[image_file]]] if compare[call[name[len], 
parameter[name[finished]]] equal[==] constant[1]] begin[:] variable[finished] assign[=] call[name[finished]][constant[0]] return[name[finished]]
keyword[def] identifier[pull] ( identifier[self] , identifier[images] , identifier[file_name] = keyword[None] , identifier[save] = keyword[True] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[images] , identifier[list] ): identifier[images] =[ identifier[images] ] identifier[self] . identifier[require_secrets] () identifier[bot] . identifier[debug] ( literal[string] % identifier[len] ( identifier[images] )) identifier[finished] =[] keyword[for] identifier[image] keyword[in] identifier[images] : identifier[q] = identifier[parse_image_name] ( identifier[remove_uri] ( identifier[image] )) keyword[if] identifier[q] [ literal[string] ]== keyword[None] : identifier[q] [ literal[string] ]= identifier[self] . identifier[base] identifier[q] = identifier[self] . identifier[_add_https] ( identifier[q] ) keyword[if] keyword[not] identifier[q] [ literal[string] ]. identifier[endswith] ( literal[string] ): identifier[q] [ literal[string] ]= literal[string] % identifier[q] [ literal[string] ] identifier[url] = literal[string] %( identifier[q] [ literal[string] ], identifier[q] [ literal[string] ], identifier[q] [ literal[string] ], identifier[q] [ literal[string] ]) identifier[bot] . identifier[debug] ( literal[string] % identifier[url] ) keyword[try] : identifier[manifest] = identifier[self] . identifier[_get] ( identifier[url] ) keyword[except] identifier[SSLError] : identifier[bot] . identifier[exit] ( literal[string] % identifier[url] ) keyword[if] identifier[isinstance] ( identifier[manifest] , identifier[Response] ): keyword[if] identifier[manifest] . identifier[status_code] == literal[int] : identifier[SREGISTRY_EVENT] = identifier[self] . identifier[authorize] ( identifier[request_type] = literal[string] , identifier[names] = identifier[q] ) identifier[headers] ={ literal[string] : identifier[SREGISTRY_EVENT] } identifier[self] . 
identifier[_update_headers] ( identifier[headers] ) identifier[manifest] = identifier[self] . identifier[_get] ( identifier[url] ) keyword[if] identifier[isinstance] ( identifier[manifest] , identifier[Response] ): keyword[if] identifier[manifest] . identifier[status_code] == literal[int] : identifier[manifest] = literal[int] keyword[if] identifier[isinstance] ( identifier[manifest] , identifier[int] ): keyword[if] identifier[manifest] == literal[int] : identifier[bot] . identifier[error] ( literal[string] ) keyword[elif] identifier[manifest] == literal[int] : identifier[bot] . identifier[error] ( literal[string] ) keyword[elif] identifier[manifest] == literal[int] : identifier[bot] . identifier[error] ( literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[if] literal[string] keyword[in] identifier[manifest] : identifier[manifest] [ literal[string] ]= identifier[url] keyword[if] identifier[file_name] keyword[is] keyword[None] : identifier[file_name] = identifier[q] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ) identifier[image_file] = identifier[self] . identifier[download] ( identifier[url] = identifier[manifest] [ literal[string] ], identifier[file_name] = identifier[file_name] , identifier[show_progress] = keyword[not] identifier[self] . identifier[quiet] ) keyword[if] identifier[save] keyword[is] keyword[True] : identifier[image_uri] = literal[string] %( identifier[manifest] [ literal[string] ], identifier[manifest] [ literal[string] ], identifier[manifest] [ literal[string] ]) identifier[container] = identifier[self] . identifier[add] ( identifier[image_path] = identifier[image_file] , identifier[image_uri] = identifier[image_uri] , identifier[metadata] = identifier[manifest] , identifier[url] = identifier[manifest] [ literal[string] ]) identifier[image_file] = identifier[container] . identifier[image] keyword[if] identifier[os] . identifier[path] . 
identifier[exists] ( identifier[image_file] ): identifier[bot] . identifier[debug] ( literal[string] % identifier[image_file] ) identifier[bot] . identifier[custom] ( identifier[prefix] = literal[string] , identifier[message] = identifier[image_file] ) identifier[finished] . identifier[append] ( identifier[image_file] ) keyword[if] identifier[len] ( identifier[finished] )== literal[int] : identifier[finished] = identifier[finished] [ literal[int] ] keyword[return] identifier[finished]
def pull(self, images, file_name=None, save=True, **kwargs): """pull an image from a singularity registry Parameters ========== images: refers to the uri given by the user to pull in the format <collection>/<namespace>. You should have an API that is able to retrieve a container based on parsing this uri. file_name: the user's requested name for the file. It can optionally be None if the user wants a default. save: if True, you should save the container to the database using self.add() Returns ======= finished: a single container path, or list of paths """ if not isinstance(images, list): images = [images] # depends on [control=['if'], data=[]] # Interaction with a registry requires secrets self.require_secrets() bot.debug('Execution of PULL for %s images' % len(images)) finished = [] for image in images: q = parse_image_name(remove_uri(image)) # If a custom registry is not set, use default base if q['registry'] == None: q['registry'] = self.base # depends on [control=['if'], data=[]] # Ensure https is added back to the registry uri q = self._add_https(q) # All custom registries need api appended if not q['registry'].endswith('api'): q['registry'] = '%s/api' % q['registry'] # depends on [control=['if'], data=[]] # Verify image existence, and obtain id url = '%s/container/%s/%s:%s' % (q['registry'], q['collection'], q['image'], q['tag']) bot.debug('Retrieving manifest at %s' % url) try: manifest = self._get(url) # depends on [control=['try'], data=[]] except SSLError: bot.exit('Issue with %s, try exporting SREGISTRY_REGISTRY_NOHTTPS.' 
% url) # depends on [control=['except'], data=[]] # Private container collection if isinstance(manifest, Response): # Requires token if manifest.status_code == 403: SREGISTRY_EVENT = self.authorize(request_type='pull', names=q) headers = {'Authorization': SREGISTRY_EVENT} self._update_headers(headers) manifest = self._get(url) # Still denied if isinstance(manifest, Response): if manifest.status_code == 403: manifest = 403 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if isinstance(manifest, int): if manifest == 400: bot.error('Bad request (400). Is this a private container?') # depends on [control=['if'], data=[]] elif manifest == 404: bot.error('Container not found (404)') # depends on [control=['if'], data=[]] elif manifest == 403: bot.error('Unauthorized (403)') # depends on [control=['if'], data=[]] sys.exit(1) # depends on [control=['if'], data=[]] # Successful pull if 'image' in manifest: # Add self link to manifest manifest['selfLink'] = url if file_name is None: file_name = q['storage'].replace('/', '-') # depends on [control=['if'], data=['file_name']] # Show progress if not quiet image_file = self.download(url=manifest['image'], file_name=file_name, show_progress=not self.quiet) # If the user is saving to local storage if save is True: image_uri = '%s/%s:%s' % (manifest['collection'], manifest['name'], manifest['tag']) container = self.add(image_path=image_file, image_uri=image_uri, metadata=manifest, url=manifest['image']) image_file = container.image # depends on [control=['if'], data=[]] if os.path.exists(image_file): bot.debug('Retrieved image file %s' % image_file) bot.custom(prefix='Success!', message=image_file) finished.append(image_file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['manifest']] if len(finished) == 1: finished = finished[0] # depends on [control=['if'], data=[]] return finished # depends on 
[control=['for'], data=['image']]
def zSetDefaultMeritFunctionSEQ(self, ofType=0, ofData=0, ofRef=0, pupilInteg=0, rings=0, arms=0, obscuration=0, grid=0, delVignetted=False, useGlass=False, glassMin=0, glassMax=1000, glassEdge=0, useAir=False, airMin=0, airMax=1000, airEdge=0, axialSymm=True, ignoreLatCol=False, addFavOper=False, startAt=1, relativeXWgt=1.0, overallWgt=1.0, configNum=0): """Sets the default merit function for Sequential Merit Function Editor Parameters ---------- ofType : integer optimization function type (0=RMS, ...) ofData : integer optimization function data (0=Wavefront, 1=Spot Radius, ...) ofRef : integer optimization function reference (0=Centroid, ...) pupilInteg : integer pupil integration method (0=Gaussian Quadrature, 1=Rectangular Array) rings : integer rings (0=1, 1=2, 2=3, 3=4, ...) arms : integer arms (0=6, 1=8, 2=10, 3=12) obscuration : real obscuration delVignetted : boolean delete vignetted ? useGlass : boolean whether to use Glass settings for thickness boundary glassMin : real glass mininum thickness glassMax : real glass maximum thickness glassEdge : real glass edge thickness useAir : boolean whether to use Air settings for thickness boundary airMin : real air minimum thickness airMax : real air maximum thickness airEdge : real air edge thickness axialSymm : boolean assume axial symmetry ignoreLatCol : boolean ignore latent color addFavOper : boolean add favorite color configNum : integer configuration number (0=All) startAt : integer start at relativeXWgt : real relative X weight overallWgt : real overall weight """ mfe = self.pMFE wizard = mfe.pSEQOptimizationWizard wizard.pType = ofType wizard.pData = ofData wizard.pReference = ofRef wizard.pPupilIntegrationMethod = pupilInteg wizard.pRing = rings wizard.pArm = arms wizard.pObscuration = obscuration wizard.pGrid = grid wizard.pIsDeleteVignetteUsed = delVignetted wizard.pIsGlassUsed = useGlass wizard.pGlassMin = glassMin wizard.pGlassMax = glassMax wizard.pGlassEdge = glassEdge wizard.pIsAirUsed = useAir 
wizard.pAirMin = airMin wizard.pAirMax = airMax wizard.pAirEdge = airEdge wizard.pIsAssumeAxialSymmetryUsed = axialSymm wizard.pIsIgnoreLateralColorUsed = ignoreLatCol wizard.pConfiguration = configNum wizard.pIsAddFavoriteOperandsUsed = addFavOper wizard.pStartAt = startAt wizard.pRelativeXWeight = relativeXWgt wizard.pOverallWeight = overallWgt wizard.CommonSettings.OK()
def function[zSetDefaultMeritFunctionSEQ, parameter[self, ofType, ofData, ofRef, pupilInteg, rings, arms, obscuration, grid, delVignetted, useGlass, glassMin, glassMax, glassEdge, useAir, airMin, airMax, airEdge, axialSymm, ignoreLatCol, addFavOper, startAt, relativeXWgt, overallWgt, configNum]]: constant[Sets the default merit function for Sequential Merit Function Editor Parameters ---------- ofType : integer optimization function type (0=RMS, ...) ofData : integer optimization function data (0=Wavefront, 1=Spot Radius, ...) ofRef : integer optimization function reference (0=Centroid, ...) pupilInteg : integer pupil integration method (0=Gaussian Quadrature, 1=Rectangular Array) rings : integer rings (0=1, 1=2, 2=3, 3=4, ...) arms : integer arms (0=6, 1=8, 2=10, 3=12) obscuration : real obscuration delVignetted : boolean delete vignetted ? useGlass : boolean whether to use Glass settings for thickness boundary glassMin : real glass mininum thickness glassMax : real glass maximum thickness glassEdge : real glass edge thickness useAir : boolean whether to use Air settings for thickness boundary airMin : real air minimum thickness airMax : real air maximum thickness airEdge : real air edge thickness axialSymm : boolean assume axial symmetry ignoreLatCol : boolean ignore latent color addFavOper : boolean add favorite color configNum : integer configuration number (0=All) startAt : integer start at relativeXWgt : real relative X weight overallWgt : real overall weight ] variable[mfe] assign[=] name[self].pMFE variable[wizard] assign[=] name[mfe].pSEQOptimizationWizard name[wizard].pType assign[=] name[ofType] name[wizard].pData assign[=] name[ofData] name[wizard].pReference assign[=] name[ofRef] name[wizard].pPupilIntegrationMethod assign[=] name[pupilInteg] name[wizard].pRing assign[=] name[rings] name[wizard].pArm assign[=] name[arms] name[wizard].pObscuration assign[=] name[obscuration] name[wizard].pGrid assign[=] name[grid] name[wizard].pIsDeleteVignetteUsed 
assign[=] name[delVignetted] name[wizard].pIsGlassUsed assign[=] name[useGlass] name[wizard].pGlassMin assign[=] name[glassMin] name[wizard].pGlassMax assign[=] name[glassMax] name[wizard].pGlassEdge assign[=] name[glassEdge] name[wizard].pIsAirUsed assign[=] name[useAir] name[wizard].pAirMin assign[=] name[airMin] name[wizard].pAirMax assign[=] name[airMax] name[wizard].pAirEdge assign[=] name[airEdge] name[wizard].pIsAssumeAxialSymmetryUsed assign[=] name[axialSymm] name[wizard].pIsIgnoreLateralColorUsed assign[=] name[ignoreLatCol] name[wizard].pConfiguration assign[=] name[configNum] name[wizard].pIsAddFavoriteOperandsUsed assign[=] name[addFavOper] name[wizard].pStartAt assign[=] name[startAt] name[wizard].pRelativeXWeight assign[=] name[relativeXWgt] name[wizard].pOverallWeight assign[=] name[overallWgt] call[name[wizard].CommonSettings.OK, parameter[]]
keyword[def] identifier[zSetDefaultMeritFunctionSEQ] ( identifier[self] , identifier[ofType] = literal[int] , identifier[ofData] = literal[int] , identifier[ofRef] = literal[int] , identifier[pupilInteg] = literal[int] , identifier[rings] = literal[int] , identifier[arms] = literal[int] , identifier[obscuration] = literal[int] , identifier[grid] = literal[int] , identifier[delVignetted] = keyword[False] , identifier[useGlass] = keyword[False] , identifier[glassMin] = literal[int] , identifier[glassMax] = literal[int] , identifier[glassEdge] = literal[int] , identifier[useAir] = keyword[False] , identifier[airMin] = literal[int] , identifier[airMax] = literal[int] , identifier[airEdge] = literal[int] , identifier[axialSymm] = keyword[True] , identifier[ignoreLatCol] = keyword[False] , identifier[addFavOper] = keyword[False] , identifier[startAt] = literal[int] , identifier[relativeXWgt] = literal[int] , identifier[overallWgt] = literal[int] , identifier[configNum] = literal[int] ): literal[string] identifier[mfe] = identifier[self] . identifier[pMFE] identifier[wizard] = identifier[mfe] . identifier[pSEQOptimizationWizard] identifier[wizard] . identifier[pType] = identifier[ofType] identifier[wizard] . identifier[pData] = identifier[ofData] identifier[wizard] . identifier[pReference] = identifier[ofRef] identifier[wizard] . identifier[pPupilIntegrationMethod] = identifier[pupilInteg] identifier[wizard] . identifier[pRing] = identifier[rings] identifier[wizard] . identifier[pArm] = identifier[arms] identifier[wizard] . identifier[pObscuration] = identifier[obscuration] identifier[wizard] . identifier[pGrid] = identifier[grid] identifier[wizard] . identifier[pIsDeleteVignetteUsed] = identifier[delVignetted] identifier[wizard] . identifier[pIsGlassUsed] = identifier[useGlass] identifier[wizard] . identifier[pGlassMin] = identifier[glassMin] identifier[wizard] . identifier[pGlassMax] = identifier[glassMax] identifier[wizard] . 
identifier[pGlassEdge] = identifier[glassEdge] identifier[wizard] . identifier[pIsAirUsed] = identifier[useAir] identifier[wizard] . identifier[pAirMin] = identifier[airMin] identifier[wizard] . identifier[pAirMax] = identifier[airMax] identifier[wizard] . identifier[pAirEdge] = identifier[airEdge] identifier[wizard] . identifier[pIsAssumeAxialSymmetryUsed] = identifier[axialSymm] identifier[wizard] . identifier[pIsIgnoreLateralColorUsed] = identifier[ignoreLatCol] identifier[wizard] . identifier[pConfiguration] = identifier[configNum] identifier[wizard] . identifier[pIsAddFavoriteOperandsUsed] = identifier[addFavOper] identifier[wizard] . identifier[pStartAt] = identifier[startAt] identifier[wizard] . identifier[pRelativeXWeight] = identifier[relativeXWgt] identifier[wizard] . identifier[pOverallWeight] = identifier[overallWgt] identifier[wizard] . identifier[CommonSettings] . identifier[OK] ()
def zSetDefaultMeritFunctionSEQ(self, ofType=0, ofData=0, ofRef=0, pupilInteg=0, rings=0, arms=0, obscuration=0, grid=0, delVignetted=False, useGlass=False, glassMin=0, glassMax=1000, glassEdge=0, useAir=False, airMin=0, airMax=1000, airEdge=0, axialSymm=True, ignoreLatCol=False, addFavOper=False, startAt=1, relativeXWgt=1.0, overallWgt=1.0, configNum=0): """Sets the default merit function for Sequential Merit Function Editor Parameters ---------- ofType : integer optimization function type (0=RMS, ...) ofData : integer optimization function data (0=Wavefront, 1=Spot Radius, ...) ofRef : integer optimization function reference (0=Centroid, ...) pupilInteg : integer pupil integration method (0=Gaussian Quadrature, 1=Rectangular Array) rings : integer rings (0=1, 1=2, 2=3, 3=4, ...) arms : integer arms (0=6, 1=8, 2=10, 3=12) obscuration : real obscuration delVignetted : boolean delete vignetted ? useGlass : boolean whether to use Glass settings for thickness boundary glassMin : real glass mininum thickness glassMax : real glass maximum thickness glassEdge : real glass edge thickness useAir : boolean whether to use Air settings for thickness boundary airMin : real air minimum thickness airMax : real air maximum thickness airEdge : real air edge thickness axialSymm : boolean assume axial symmetry ignoreLatCol : boolean ignore latent color addFavOper : boolean add favorite color configNum : integer configuration number (0=All) startAt : integer start at relativeXWgt : real relative X weight overallWgt : real overall weight """ mfe = self.pMFE wizard = mfe.pSEQOptimizationWizard wizard.pType = ofType wizard.pData = ofData wizard.pReference = ofRef wizard.pPupilIntegrationMethod = pupilInteg wizard.pRing = rings wizard.pArm = arms wizard.pObscuration = obscuration wizard.pGrid = grid wizard.pIsDeleteVignetteUsed = delVignetted wizard.pIsGlassUsed = useGlass wizard.pGlassMin = glassMin wizard.pGlassMax = glassMax wizard.pGlassEdge = glassEdge wizard.pIsAirUsed = useAir 
wizard.pAirMin = airMin wizard.pAirMax = airMax wizard.pAirEdge = airEdge wizard.pIsAssumeAxialSymmetryUsed = axialSymm wizard.pIsIgnoreLateralColorUsed = ignoreLatCol wizard.pConfiguration = configNum wizard.pIsAddFavoriteOperandsUsed = addFavOper wizard.pStartAt = startAt wizard.pRelativeXWeight = relativeXWgt wizard.pOverallWeight = overallWgt wizard.CommonSettings.OK()
def revdocs2reverts(rev_docs, radius=defaults.RADIUS, use_sha1=False, resort=False, verbose=False): """ Converts a sequence of page-partitioned revision documents into a sequence of reverts. :Params: rev_docs : `iterable` ( `dict` ) a page-partitioned sequence of revision documents radius : `int` The maximum number of revisions that a revert can reference. use_sha1 : `bool` Use the sha1 field as the checksum for comparison. resort : `bool` If True, re-sort the revisions of each page. verbose : `bool` Print dots and stuff """ page_rev_docs = groupby(rev_docs, lambda rd: rd.get('page')) for page_doc, rev_docs in page_rev_docs: if verbose: sys.stderr.write(page_doc.get('title') + ": ") sys.stderr.flush() if resort: if verbose: sys.stderr.write("(sorting) ") sys.stderr.flush() rev_docs = sorted( rev_docs, key=lambda r: (r.get('timestamp'), r.get('id'))) detector = Detector(radius=radius) for rev_doc in rev_docs: if not use_sha1 and 'text' not in rev_doc: logger.warn("Skipping {0}: 'text' field not found in {0}" .format(rev_doc['id'], rev_doc)) continue if use_sha1: checksum = rev_doc.get('sha1') or DummyChecksum() elif 'text' in rev_doc: text_bytes = bytes(rev_doc['text'], 'utf8', 'replace') checksum = hashlib.sha1(text_bytes).digest() revert = detector.process(checksum, rev_doc) if revert: yield revert.to_json() if verbose: sys.stderr.write("r") sys.stderr.flush() else: if verbose: sys.stderr.write(".") sys.stderr.flush() if verbose: sys.stderr.write("\n") sys.stderr.flush()
def function[revdocs2reverts, parameter[rev_docs, radius, use_sha1, resort, verbose]]: constant[ Converts a sequence of page-partitioned revision documents into a sequence of reverts. :Params: rev_docs : `iterable` ( `dict` ) a page-partitioned sequence of revision documents radius : `int` The maximum number of revisions that a revert can reference. use_sha1 : `bool` Use the sha1 field as the checksum for comparison. resort : `bool` If True, re-sort the revisions of each page. verbose : `bool` Print dots and stuff ] variable[page_rev_docs] assign[=] call[name[groupby], parameter[name[rev_docs], <ast.Lambda object at 0x7da1b10e5120>]] for taget[tuple[[<ast.Name object at 0x7da1b10e52d0>, <ast.Name object at 0x7da1b1078f40>]]] in starred[name[page_rev_docs]] begin[:] if name[verbose] begin[:] call[name[sys].stderr.write, parameter[binary_operation[call[name[page_doc].get, parameter[constant[title]]] + constant[: ]]]] call[name[sys].stderr.flush, parameter[]] if name[resort] begin[:] if name[verbose] begin[:] call[name[sys].stderr.write, parameter[constant[(sorting) ]]] call[name[sys].stderr.flush, parameter[]] variable[rev_docs] assign[=] call[name[sorted], parameter[name[rev_docs]]] variable[detector] assign[=] call[name[Detector], parameter[]] for taget[name[rev_doc]] in starred[name[rev_docs]] begin[:] if <ast.BoolOp object at 0x7da1b107bc10> begin[:] call[name[logger].warn, parameter[call[constant[Skipping {0}: 'text' field not found in {0}].format, parameter[call[name[rev_doc]][constant[id]], name[rev_doc]]]]] continue if name[use_sha1] begin[:] variable[checksum] assign[=] <ast.BoolOp object at 0x7da1b1078820> variable[revert] assign[=] call[name[detector].process, parameter[name[checksum], name[rev_doc]]] if name[revert] begin[:] <ast.Yield object at 0x7da1b1078ca0> if name[verbose] begin[:] call[name[sys].stderr.write, parameter[constant[r]]] call[name[sys].stderr.flush, parameter[]] if name[verbose] begin[:] call[name[sys].stderr.write, parameter[constant[ 
]]] call[name[sys].stderr.flush, parameter[]]
keyword[def] identifier[revdocs2reverts] ( identifier[rev_docs] , identifier[radius] = identifier[defaults] . identifier[RADIUS] , identifier[use_sha1] = keyword[False] , identifier[resort] = keyword[False] , identifier[verbose] = keyword[False] ): literal[string] identifier[page_rev_docs] = identifier[groupby] ( identifier[rev_docs] , keyword[lambda] identifier[rd] : identifier[rd] . identifier[get] ( literal[string] )) keyword[for] identifier[page_doc] , identifier[rev_docs] keyword[in] identifier[page_rev_docs] : keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( identifier[page_doc] . identifier[get] ( literal[string] )+ literal[string] ) identifier[sys] . identifier[stderr] . identifier[flush] () keyword[if] identifier[resort] : keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) identifier[sys] . identifier[stderr] . identifier[flush] () identifier[rev_docs] = identifier[sorted] ( identifier[rev_docs] , identifier[key] = keyword[lambda] identifier[r] :( identifier[r] . identifier[get] ( literal[string] ), identifier[r] . identifier[get] ( literal[string] ))) identifier[detector] = identifier[Detector] ( identifier[radius] = identifier[radius] ) keyword[for] identifier[rev_doc] keyword[in] identifier[rev_docs] : keyword[if] keyword[not] identifier[use_sha1] keyword[and] literal[string] keyword[not] keyword[in] identifier[rev_doc] : identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[rev_doc] [ literal[string] ], identifier[rev_doc] )) keyword[continue] keyword[if] identifier[use_sha1] : identifier[checksum] = identifier[rev_doc] . 
identifier[get] ( literal[string] ) keyword[or] identifier[DummyChecksum] () keyword[elif] literal[string] keyword[in] identifier[rev_doc] : identifier[text_bytes] = identifier[bytes] ( identifier[rev_doc] [ literal[string] ], literal[string] , literal[string] ) identifier[checksum] = identifier[hashlib] . identifier[sha1] ( identifier[text_bytes] ). identifier[digest] () identifier[revert] = identifier[detector] . identifier[process] ( identifier[checksum] , identifier[rev_doc] ) keyword[if] identifier[revert] : keyword[yield] identifier[revert] . identifier[to_json] () keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) identifier[sys] . identifier[stderr] . identifier[flush] () keyword[else] : keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) identifier[sys] . identifier[stderr] . identifier[flush] () keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] ) identifier[sys] . identifier[stderr] . identifier[flush] ()
def revdocs2reverts(rev_docs, radius=defaults.RADIUS, use_sha1=False, resort=False, verbose=False): """ Converts a sequence of page-partitioned revision documents into a sequence of reverts. :Params: rev_docs : `iterable` ( `dict` ) a page-partitioned sequence of revision documents radius : `int` The maximum number of revisions that a revert can reference. use_sha1 : `bool` Use the sha1 field as the checksum for comparison. resort : `bool` If True, re-sort the revisions of each page. verbose : `bool` Print dots and stuff """ page_rev_docs = groupby(rev_docs, lambda rd: rd.get('page')) for (page_doc, rev_docs) in page_rev_docs: if verbose: sys.stderr.write(page_doc.get('title') + ': ') sys.stderr.flush() # depends on [control=['if'], data=[]] if resort: if verbose: sys.stderr.write('(sorting) ') sys.stderr.flush() # depends on [control=['if'], data=[]] rev_docs = sorted(rev_docs, key=lambda r: (r.get('timestamp'), r.get('id'))) # depends on [control=['if'], data=[]] detector = Detector(radius=radius) for rev_doc in rev_docs: if not use_sha1 and 'text' not in rev_doc: logger.warn("Skipping {0}: 'text' field not found in {0}".format(rev_doc['id'], rev_doc)) continue # depends on [control=['if'], data=[]] if use_sha1: checksum = rev_doc.get('sha1') or DummyChecksum() # depends on [control=['if'], data=[]] elif 'text' in rev_doc: text_bytes = bytes(rev_doc['text'], 'utf8', 'replace') checksum = hashlib.sha1(text_bytes).digest() # depends on [control=['if'], data=['rev_doc']] revert = detector.process(checksum, rev_doc) if revert: yield revert.to_json() if verbose: sys.stderr.write('r') sys.stderr.flush() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif verbose: sys.stderr.write('.') sys.stderr.flush() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rev_doc']] if verbose: sys.stderr.write('\n') sys.stderr.flush() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def unregister(self, handler): """ Remove handler :param handler: callback :return: """ for handler_obj in self.handlers: registered = handler_obj.handler if handler is registered: self.handlers.remove(handler_obj) return True raise ValueError('This handler is not registered!')
def function[unregister, parameter[self, handler]]: constant[ Remove handler :param handler: callback :return: ] for taget[name[handler_obj]] in starred[name[self].handlers] begin[:] variable[registered] assign[=] name[handler_obj].handler if compare[name[handler] is name[registered]] begin[:] call[name[self].handlers.remove, parameter[name[handler_obj]]] return[constant[True]] <ast.Raise object at 0x7da1b1846b00>
keyword[def] identifier[unregister] ( identifier[self] , identifier[handler] ): literal[string] keyword[for] identifier[handler_obj] keyword[in] identifier[self] . identifier[handlers] : identifier[registered] = identifier[handler_obj] . identifier[handler] keyword[if] identifier[handler] keyword[is] identifier[registered] : identifier[self] . identifier[handlers] . identifier[remove] ( identifier[handler_obj] ) keyword[return] keyword[True] keyword[raise] identifier[ValueError] ( literal[string] )
def unregister(self, handler): """ Remove handler :param handler: callback :return: """ for handler_obj in self.handlers: registered = handler_obj.handler if handler is registered: self.handlers.remove(handler_obj) return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['handler_obj']] raise ValueError('This handler is not registered!')
def extend_selection_to_complete_lines(self):
    """Grow the current selection so it covers whole text lines."""
    sel = self.textCursor()
    begin = sel.selectionStart()
    end = sel.selectionEnd()
    # Re-anchor at the selection start and stretch to the selection end.
    sel.setPosition(begin)
    sel.setPosition(end, QTextCursor.KeepAnchor)
    if sel.atBlockStart():
        # Selection ends exactly at a block start: pull the end back to the
        # end of the previous block so no empty trailing line is included.
        sel.movePosition(QTextCursor.PreviousBlock, QTextCursor.KeepAnchor)
        sel.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
    self.setTextCursor(sel)
def function[extend_selection_to_complete_lines, parameter[self]]: constant[Extend current selection to complete lines] variable[cursor] assign[=] call[name[self].textCursor, parameter[]] <ast.Tuple object at 0x7da20e960610> assign[=] tuple[[<ast.Call object at 0x7da20e961660>, <ast.Call object at 0x7da20e9627a0>]] call[name[cursor].setPosition, parameter[name[start_pos]]] call[name[cursor].setPosition, parameter[name[end_pos], name[QTextCursor].KeepAnchor]] if call[name[cursor].atBlockStart, parameter[]] begin[:] call[name[cursor].movePosition, parameter[name[QTextCursor].PreviousBlock, name[QTextCursor].KeepAnchor]] call[name[cursor].movePosition, parameter[name[QTextCursor].EndOfBlock, name[QTextCursor].KeepAnchor]] call[name[self].setTextCursor, parameter[name[cursor]]]
keyword[def] identifier[extend_selection_to_complete_lines] ( identifier[self] ): literal[string] identifier[cursor] = identifier[self] . identifier[textCursor] () identifier[start_pos] , identifier[end_pos] = identifier[cursor] . identifier[selectionStart] (), identifier[cursor] . identifier[selectionEnd] () identifier[cursor] . identifier[setPosition] ( identifier[start_pos] ) identifier[cursor] . identifier[setPosition] ( identifier[end_pos] , identifier[QTextCursor] . identifier[KeepAnchor] ) keyword[if] identifier[cursor] . identifier[atBlockStart] (): identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[PreviousBlock] , identifier[QTextCursor] . identifier[KeepAnchor] ) identifier[cursor] . identifier[movePosition] ( identifier[QTextCursor] . identifier[EndOfBlock] , identifier[QTextCursor] . identifier[KeepAnchor] ) identifier[self] . identifier[setTextCursor] ( identifier[cursor] )
def extend_selection_to_complete_lines(self): """Extend current selection to complete lines""" cursor = self.textCursor() (start_pos, end_pos) = (cursor.selectionStart(), cursor.selectionEnd()) cursor.setPosition(start_pos) cursor.setPosition(end_pos, QTextCursor.KeepAnchor) if cursor.atBlockStart(): cursor.movePosition(QTextCursor.PreviousBlock, QTextCursor.KeepAnchor) cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor) # depends on [control=['if'], data=[]] self.setTextCursor(cursor)
def connect_to_networks(self, si, logger, vm_uuid, vm_network_mappings, default_network_name,
                        reserved_networks, dv_switch_name, promiscuous_mode):
    """
    Connect VM to Network
    :param si: VmWare Service Instance - defined connection to vCenter
    :param logger:
    :param vm_uuid: <str> UUID for VM
    :param vm_network_mappings: <collection of 'VmNetworkMapping'>
    :param default_network_name: <str> Full Network name - likes 'DataCenterName/NetworkName'
    :param reserved_networks:
    :param dv_switch_name: <str> Default dvSwitch name
    :param promiscuous_mode <str> 'True' or 'False' turn on/off promiscuous mode for the port group
    :return: None
    """
    # Resolve the VM and the fallback network; both must exist before we touch vNICs.
    vm = self.pv_service.find_by_uuid(si, vm_uuid)
    if not vm:
        raise ValueError('VM having UUID {0} not found'.format(vm_uuid))

    default_network_instance = self.pv_service.get_network_by_full_name(si, default_network_name)
    if not default_network_instance:
        raise ValueError('Default Network {0} not found'.format(default_network_name))

    if vm_has_no_vnics(vm):
        raise ValueError('Trying to connect VM (uuid: {0}) but it has no vNics'.format(vm_uuid))

    mappings = self._prepare_mappings(dv_switch_name=dv_switch_name,
                                      vm_network_mappings=vm_network_mappings)

    updated_mappings = self.virtual_switch_to_machine_connector.connect_by_mapping(
        si, vm, mappings, default_network_instance, reserved_networks, logger, promiscuous_mode)

    # One result record per vNIC that was (re)connected.
    return [
        ConnectionResult(mac_address=mapping.vnic.macAddress,
                         vnic_name=mapping.vnic.deviceInfo.label,
                         requested_vnic=mapping.requested_vnic,
                         vm_uuid=vm_uuid,
                         network_name=mapping.network.name,
                         network_key=mapping.network.key)
        for mapping in updated_mappings
    ]
def function[connect_to_networks, parameter[self, si, logger, vm_uuid, vm_network_mappings, default_network_name, reserved_networks, dv_switch_name, promiscuous_mode]]: constant[ Connect VM to Network :param si: VmWare Service Instance - defined connection to vCenter :param logger: :param vm_uuid: <str> UUID for VM :param vm_network_mappings: <collection of 'VmNetworkMapping'> :param default_network_name: <str> Full Network name - likes 'DataCenterName/NetworkName' :param reserved_networks: :param dv_switch_name: <str> Default dvSwitch name :param promiscuous_mode <str> 'True' or 'False' turn on/off promiscuous mode for the port group :return: None ] variable[vm] assign[=] call[name[self].pv_service.find_by_uuid, parameter[name[si], name[vm_uuid]]] if <ast.UnaryOp object at 0x7da204623640> begin[:] <ast.Raise object at 0x7da204623c40> variable[default_network_instance] assign[=] call[name[self].pv_service.get_network_by_full_name, parameter[name[si], name[default_network_name]]] if <ast.UnaryOp object at 0x7da204621450> begin[:] <ast.Raise object at 0x7da204620700> if call[name[vm_has_no_vnics], parameter[name[vm]]] begin[:] <ast.Raise object at 0x7da204620af0> variable[mappings] assign[=] call[name[self]._prepare_mappings, parameter[]] variable[updated_mappings] assign[=] call[name[self].virtual_switch_to_machine_connector.connect_by_mapping, parameter[name[si], name[vm], name[mappings], name[default_network_instance], name[reserved_networks], name[logger], name[promiscuous_mode]]] variable[connection_results] assign[=] list[[]] for taget[name[updated_mapping]] in starred[name[updated_mappings]] begin[:] variable[connection_result] assign[=] call[name[ConnectionResult], parameter[]] call[name[connection_results].append, parameter[name[connection_result]]] return[name[connection_results]]
keyword[def] identifier[connect_to_networks] ( identifier[self] , identifier[si] , identifier[logger] , identifier[vm_uuid] , identifier[vm_network_mappings] , identifier[default_network_name] , identifier[reserved_networks] , identifier[dv_switch_name] , identifier[promiscuous_mode] ): literal[string] identifier[vm] = identifier[self] . identifier[pv_service] . identifier[find_by_uuid] ( identifier[si] , identifier[vm_uuid] ) keyword[if] keyword[not] identifier[vm] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[vm_uuid] )) identifier[default_network_instance] = identifier[self] . identifier[pv_service] . identifier[get_network_by_full_name] ( identifier[si] , identifier[default_network_name] ) keyword[if] keyword[not] identifier[default_network_instance] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[default_network_name] )) keyword[if] identifier[vm_has_no_vnics] ( identifier[vm] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[vm_uuid] )) identifier[mappings] = identifier[self] . identifier[_prepare_mappings] ( identifier[dv_switch_name] = identifier[dv_switch_name] , identifier[vm_network_mappings] = identifier[vm_network_mappings] ) identifier[updated_mappings] = identifier[self] . identifier[virtual_switch_to_machine_connector] . identifier[connect_by_mapping] ( identifier[si] , identifier[vm] , identifier[mappings] , identifier[default_network_instance] , identifier[reserved_networks] , identifier[logger] , identifier[promiscuous_mode] ) identifier[connection_results] =[] keyword[for] identifier[updated_mapping] keyword[in] identifier[updated_mappings] : identifier[connection_result] = identifier[ConnectionResult] ( identifier[mac_address] = identifier[updated_mapping] . identifier[vnic] . identifier[macAddress] , identifier[vnic_name] = identifier[updated_mapping] . identifier[vnic] . identifier[deviceInfo] . 
identifier[label] , identifier[requested_vnic] = identifier[updated_mapping] . identifier[requested_vnic] , identifier[vm_uuid] = identifier[vm_uuid] , identifier[network_name] = identifier[updated_mapping] . identifier[network] . identifier[name] , identifier[network_key] = identifier[updated_mapping] . identifier[network] . identifier[key] ) identifier[connection_results] . identifier[append] ( identifier[connection_result] ) keyword[return] identifier[connection_results]
def connect_to_networks(self, si, logger, vm_uuid, vm_network_mappings, default_network_name, reserved_networks, dv_switch_name, promiscuous_mode): """ Connect VM to Network :param si: VmWare Service Instance - defined connection to vCenter :param logger: :param vm_uuid: <str> UUID for VM :param vm_network_mappings: <collection of 'VmNetworkMapping'> :param default_network_name: <str> Full Network name - likes 'DataCenterName/NetworkName' :param reserved_networks: :param dv_switch_name: <str> Default dvSwitch name :param promiscuous_mode <str> 'True' or 'False' turn on/off promiscuous mode for the port group :return: None """ vm = self.pv_service.find_by_uuid(si, vm_uuid) if not vm: raise ValueError('VM having UUID {0} not found'.format(vm_uuid)) # depends on [control=['if'], data=[]] default_network_instance = self.pv_service.get_network_by_full_name(si, default_network_name) if not default_network_instance: raise ValueError('Default Network {0} not found'.format(default_network_name)) # depends on [control=['if'], data=[]] if vm_has_no_vnics(vm): raise ValueError('Trying to connect VM (uuid: {0}) but it has no vNics'.format(vm_uuid)) # depends on [control=['if'], data=[]] mappings = self._prepare_mappings(dv_switch_name=dv_switch_name, vm_network_mappings=vm_network_mappings) updated_mappings = self.virtual_switch_to_machine_connector.connect_by_mapping(si, vm, mappings, default_network_instance, reserved_networks, logger, promiscuous_mode) connection_results = [] for updated_mapping in updated_mappings: connection_result = ConnectionResult(mac_address=updated_mapping.vnic.macAddress, vnic_name=updated_mapping.vnic.deviceInfo.label, requested_vnic=updated_mapping.requested_vnic, vm_uuid=vm_uuid, network_name=updated_mapping.network.name, network_key=updated_mapping.network.key) connection_results.append(connection_result) # depends on [control=['for'], data=['updated_mapping']] return connection_results
def _api_key_patch_remove(conn, apiKey, pvlist):
    '''
    the remove patch operation for a list of (path, value) tuples on an
    ApiKey resource list path
    '''
    # Translate the (path, value) pairs into 'remove' patch operations and
    # apply them in a single update call.
    return conn.update_api_key(
        apiKey=apiKey,
        patchOperations=_api_key_patchops('remove', pvlist),
    )
def function[_api_key_patch_remove, parameter[conn, apiKey, pvlist]]: constant[ the remove patch operation for a list of (path, value) tuples on an ApiKey resource list path ] variable[response] assign[=] call[name[conn].update_api_key, parameter[]] return[name[response]]
keyword[def] identifier[_api_key_patch_remove] ( identifier[conn] , identifier[apiKey] , identifier[pvlist] ): literal[string] identifier[response] = identifier[conn] . identifier[update_api_key] ( identifier[apiKey] = identifier[apiKey] , identifier[patchOperations] = identifier[_api_key_patchops] ( literal[string] , identifier[pvlist] )) keyword[return] identifier[response]
def _api_key_patch_remove(conn, apiKey, pvlist): """ the remove patch operation for a list of (path, value) tuples on an ApiKey resource list path """ response = conn.update_api_key(apiKey=apiKey, patchOperations=_api_key_patchops('remove', pvlist)) return response
def send(self, obj_id):
    """
    Send email to the assigned lists

    :param obj_id: int
    :return: dict|str
    """
    # POST to the resource's /send action and hand the raw response to the
    # shared response processor.
    url = '{url}/{id}/send'.format(url=self.endpoint_url, id=obj_id)
    response = self._client.session.post(url)
    return self.process_response(response)
def function[send, parameter[self, obj_id]]: constant[ Send email to the assigned lists :param obj_id: int :return: dict|str ] variable[response] assign[=] call[name[self]._client.session.post, parameter[call[constant[{url}/{id}/send].format, parameter[]]]] return[call[name[self].process_response, parameter[name[response]]]]
keyword[def] identifier[send] ( identifier[self] , identifier[obj_id] ): literal[string] identifier[response] = identifier[self] . identifier[_client] . identifier[session] . identifier[post] ( literal[string] . identifier[format] ( identifier[url] = identifier[self] . identifier[endpoint_url] , identifier[id] = identifier[obj_id] ) ) keyword[return] identifier[self] . identifier[process_response] ( identifier[response] )
def send(self, obj_id): """ Send email to the assigned lists :param obj_id: int :return: dict|str """ response = self._client.session.post('{url}/{id}/send'.format(url=self.endpoint_url, id=obj_id)) return self.process_response(response)
def create(source,
           requirement_files=None,
           force=False,
           keep_wheels=False,
           archive_destination_dir='.',
           python_versions=None,
           validate_archive=False,
           wheel_args='',
           archive_format='zip',
           build_tag=''):
    """Create a Wagon archive and returns its path.

    Package name and version are extracted from the setup.py file of the
    `source` or from the PACKAGE_NAME==PACKAGE_VERSION if the source is a
    PyPI package.

    Supported `python_versions` must be in the format e.g [33, 27, 2, 3]..

    `force` will remove any excess dirs or archives before creation.

    `requirement_files` can be either a link/local path to a
    requirements.txt file or just `.`, in which case requirement files
    will be automatically extracted from either the GitHub archive URL
    or the local path provided provided in `source`.
    """
    # Fail fast: validation later requires virtualenv, so check it up front.
    if validate_archive:
        _assert_virtualenv_is_installed()

    logger.info('Creating archive for %s...', source)
    # Normalize the source (URL/PyPI spec/local path) into a local directory.
    processed_source = get_source(source)
    if os.path.isdir(processed_source) and not \
            os.path.isfile(os.path.join(processed_source, 'setup.py')):
        raise WagonError(
            'Source directory must contain a setup.py file')
    package_name, package_version = get_source_name_and_version(
        processed_source)

    # Build wheels into a temp work tree: <tempdir>/<package>/<wheels dir>.
    tempdir = tempfile.mkdtemp()
    workdir = os.path.join(tempdir, package_name)
    wheels_path = os.path.join(workdir, DEFAULT_WHEELS_PATH)
    try:
        wheels = wheel(
            processed_source,
            requirement_files,
            wheels_path,
            wheel_args)
    finally:
        # If get_source produced a temporary copy (path differs from the
        # caller's `source`), clean it up even when wheel building fails.
        if processed_source != source:
            shutil.rmtree(processed_source, ignore_errors=True)

    # Derive the platform tag from the wheels actually produced.
    platform = _get_platform_for_set_of_wheels(wheels_path)
    if is_verbose():
        logger.debug('Platform is: %s', platform)
    python_versions = _set_python_versions(python_versions)

    if not os.path.isdir(archive_destination_dir):
        os.makedirs(archive_destination_dir)
    archive_name = _set_archive_name(
        package_name, package_version, python_versions, platform, build_tag)
    archive_path = os.path.join(archive_destination_dir, archive_name)

    # Remove any pre-existing output (honoring `force`), write metadata,
    # then pack the work tree into the requested archive format.
    _handle_output_file(archive_path, force)
    _generate_metadata_file(
        workdir,
        archive_name,
        platform,
        python_versions,
        package_name,
        package_version,
        build_tag,
        source,
        wheels)

    _create_wagon_archive(workdir, archive_path, archive_format)
    if not keep_wheels:
        logger.debug('Removing work directory...')
        shutil.rmtree(tempdir, ignore_errors=True)

    if validate_archive:
        validate(archive_path)
    logger.info('Wagon created successfully at: %s', archive_path)
    return archive_path
def function[create, parameter[source, requirement_files, force, keep_wheels, archive_destination_dir, python_versions, validate_archive, wheel_args, archive_format, build_tag]]: constant[Create a Wagon archive and returns its path. Package name and version are extracted from the setup.py file of the `source` or from the PACKAGE_NAME==PACKAGE_VERSION if the source is a PyPI package. Supported `python_versions` must be in the format e.g [33, 27, 2, 3].. `force` will remove any excess dirs or archives before creation. `requirement_files` can be either a link/local path to a requirements.txt file or just `.`, in which case requirement files will be automatically extracted from either the GitHub archive URL or the local path provided provided in `source`. ] if name[validate_archive] begin[:] call[name[_assert_virtualenv_is_installed], parameter[]] call[name[logger].info, parameter[constant[Creating archive for %s...], name[source]]] variable[processed_source] assign[=] call[name[get_source], parameter[name[source]]] if <ast.BoolOp object at 0x7da1b0efac50> begin[:] <ast.Raise object at 0x7da1b0ef9e10> <ast.Tuple object at 0x7da1b0dbee00> assign[=] call[name[get_source_name_and_version], parameter[name[processed_source]]] variable[tempdir] assign[=] call[name[tempfile].mkdtemp, parameter[]] variable[workdir] assign[=] call[name[os].path.join, parameter[name[tempdir], name[package_name]]] variable[wheels_path] assign[=] call[name[os].path.join, parameter[name[workdir], name[DEFAULT_WHEELS_PATH]]] <ast.Try object at 0x7da1b0dbebc0> variable[platform] assign[=] call[name[_get_platform_for_set_of_wheels], parameter[name[wheels_path]]] if call[name[is_verbose], parameter[]] begin[:] call[name[logger].debug, parameter[constant[Platform is: %s], name[platform]]] variable[python_versions] assign[=] call[name[_set_python_versions], parameter[name[python_versions]]] if <ast.UnaryOp object at 0x7da1b0dbe6b0> begin[:] call[name[os].makedirs, 
parameter[name[archive_destination_dir]]] variable[archive_name] assign[=] call[name[_set_archive_name], parameter[name[package_name], name[package_version], name[python_versions], name[platform], name[build_tag]]] variable[archive_path] assign[=] call[name[os].path.join, parameter[name[archive_destination_dir], name[archive_name]]] call[name[_handle_output_file], parameter[name[archive_path], name[force]]] call[name[_generate_metadata_file], parameter[name[workdir], name[archive_name], name[platform], name[python_versions], name[package_name], name[package_version], name[build_tag], name[source], name[wheels]]] call[name[_create_wagon_archive], parameter[name[workdir], name[archive_path], name[archive_format]]] if <ast.UnaryOp object at 0x7da1b0e26860> begin[:] call[name[logger].debug, parameter[constant[Removing work directory...]]] call[name[shutil].rmtree, parameter[name[tempdir]]] if name[validate_archive] begin[:] call[name[validate], parameter[name[archive_path]]] call[name[logger].info, parameter[constant[Wagon created successfully at: %s], name[archive_path]]] return[name[archive_path]]
keyword[def] identifier[create] ( identifier[source] , identifier[requirement_files] = keyword[None] , identifier[force] = keyword[False] , identifier[keep_wheels] = keyword[False] , identifier[archive_destination_dir] = literal[string] , identifier[python_versions] = keyword[None] , identifier[validate_archive] = keyword[False] , identifier[wheel_args] = literal[string] , identifier[archive_format] = literal[string] , identifier[build_tag] = literal[string] ): literal[string] keyword[if] identifier[validate_archive] : identifier[_assert_virtualenv_is_installed] () identifier[logger] . identifier[info] ( literal[string] , identifier[source] ) identifier[processed_source] = identifier[get_source] ( identifier[source] ) keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[processed_source] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[processed_source] , literal[string] )): keyword[raise] identifier[WagonError] ( literal[string] ) identifier[package_name] , identifier[package_version] = identifier[get_source_name_and_version] ( identifier[processed_source] ) identifier[tempdir] = identifier[tempfile] . identifier[mkdtemp] () identifier[workdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[tempdir] , identifier[package_name] ) identifier[wheels_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[workdir] , identifier[DEFAULT_WHEELS_PATH] ) keyword[try] : identifier[wheels] = identifier[wheel] ( identifier[processed_source] , identifier[requirement_files] , identifier[wheels_path] , identifier[wheel_args] ) keyword[finally] : keyword[if] identifier[processed_source] != identifier[source] : identifier[shutil] . 
identifier[rmtree] ( identifier[processed_source] , identifier[ignore_errors] = keyword[True] ) identifier[platform] = identifier[_get_platform_for_set_of_wheels] ( identifier[wheels_path] ) keyword[if] identifier[is_verbose] (): identifier[logger] . identifier[debug] ( literal[string] , identifier[platform] ) identifier[python_versions] = identifier[_set_python_versions] ( identifier[python_versions] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[archive_destination_dir] ): identifier[os] . identifier[makedirs] ( identifier[archive_destination_dir] ) identifier[archive_name] = identifier[_set_archive_name] ( identifier[package_name] , identifier[package_version] , identifier[python_versions] , identifier[platform] , identifier[build_tag] ) identifier[archive_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[archive_destination_dir] , identifier[archive_name] ) identifier[_handle_output_file] ( identifier[archive_path] , identifier[force] ) identifier[_generate_metadata_file] ( identifier[workdir] , identifier[archive_name] , identifier[platform] , identifier[python_versions] , identifier[package_name] , identifier[package_version] , identifier[build_tag] , identifier[source] , identifier[wheels] ) identifier[_create_wagon_archive] ( identifier[workdir] , identifier[archive_path] , identifier[archive_format] ) keyword[if] keyword[not] identifier[keep_wheels] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[shutil] . identifier[rmtree] ( identifier[tempdir] , identifier[ignore_errors] = keyword[True] ) keyword[if] identifier[validate_archive] : identifier[validate] ( identifier[archive_path] ) identifier[logger] . identifier[info] ( literal[string] , identifier[archive_path] ) keyword[return] identifier[archive_path]
def create(source, requirement_files=None, force=False, keep_wheels=False, archive_destination_dir='.', python_versions=None, validate_archive=False, wheel_args='', archive_format='zip', build_tag=''): """Create a Wagon archive and returns its path. Package name and version are extracted from the setup.py file of the `source` or from the PACKAGE_NAME==PACKAGE_VERSION if the source is a PyPI package. Supported `python_versions` must be in the format e.g [33, 27, 2, 3].. `force` will remove any excess dirs or archives before creation. `requirement_files` can be either a link/local path to a requirements.txt file or just `.`, in which case requirement files will be automatically extracted from either the GitHub archive URL or the local path provided provided in `source`. """ if validate_archive: _assert_virtualenv_is_installed() # depends on [control=['if'], data=[]] logger.info('Creating archive for %s...', source) processed_source = get_source(source) if os.path.isdir(processed_source) and (not os.path.isfile(os.path.join(processed_source, 'setup.py'))): raise WagonError('Source directory must contain a setup.py file') # depends on [control=['if'], data=[]] (package_name, package_version) = get_source_name_and_version(processed_source) tempdir = tempfile.mkdtemp() workdir = os.path.join(tempdir, package_name) wheels_path = os.path.join(workdir, DEFAULT_WHEELS_PATH) try: wheels = wheel(processed_source, requirement_files, wheels_path, wheel_args) # depends on [control=['try'], data=[]] finally: if processed_source != source: shutil.rmtree(processed_source, ignore_errors=True) # depends on [control=['if'], data=['processed_source']] platform = _get_platform_for_set_of_wheels(wheels_path) if is_verbose(): logger.debug('Platform is: %s', platform) # depends on [control=['if'], data=[]] python_versions = _set_python_versions(python_versions) if not os.path.isdir(archive_destination_dir): os.makedirs(archive_destination_dir) # depends on [control=['if'], data=[]] 
archive_name = _set_archive_name(package_name, package_version, python_versions, platform, build_tag) archive_path = os.path.join(archive_destination_dir, archive_name) _handle_output_file(archive_path, force) _generate_metadata_file(workdir, archive_name, platform, python_versions, package_name, package_version, build_tag, source, wheels) _create_wagon_archive(workdir, archive_path, archive_format) if not keep_wheels: logger.debug('Removing work directory...') shutil.rmtree(tempdir, ignore_errors=True) # depends on [control=['if'], data=[]] if validate_archive: validate(archive_path) # depends on [control=['if'], data=[]] logger.info('Wagon created successfully at: %s', archive_path) return archive_path
def validate_config(raise_=True):
    """
    Verifies that all configuration values have a valid setting
    """
    ELIBConfig.check()
    seen_paths = set()
    duplicates = set()
    missing = set()
    for value in ConfigValue.config_values:
        # A path we have already seen means two config values collide.
        if value.path in seen_paths:
            duplicates.add(value.name)
        else:
            seen_paths.add(value.path)
        # Resolving the value raises MissingValueError when it is unset.
        try:
            value()
        except MissingValueError:
            missing.add(value.name)
    if raise_:
        if duplicates:
            raise DuplicateConfigValueError(str(duplicates))
        if missing:
            raise MissingValueError(str(missing), 'missing config value(s)')
    return duplicates, missing
def function[validate_config, parameter[raise_]]: constant[ Verifies that all configuration values have a valid setting ] call[name[ELIBConfig].check, parameter[]] variable[known_paths] assign[=] call[name[set], parameter[]] variable[duplicate_values] assign[=] call[name[set], parameter[]] variable[missing_values] assign[=] call[name[set], parameter[]] for taget[name[config_value]] in starred[name[ConfigValue].config_values] begin[:] if compare[name[config_value].path <ast.NotIn object at 0x7da2590d7190> name[known_paths]] begin[:] call[name[known_paths].add, parameter[name[config_value].path]] <ast.Try object at 0x7da2041d92a0> if <ast.BoolOp object at 0x7da2041da1a0> begin[:] <ast.Raise object at 0x7da2041d9840> if <ast.BoolOp object at 0x7da2041d85b0> begin[:] <ast.Raise object at 0x7da2041d8d60> return[tuple[[<ast.Name object at 0x7da2041da560>, <ast.Name object at 0x7da2041da800>]]]
keyword[def] identifier[validate_config] ( identifier[raise_] = keyword[True] ): literal[string] identifier[ELIBConfig] . identifier[check] () identifier[known_paths] = identifier[set] () identifier[duplicate_values] = identifier[set] () identifier[missing_values] = identifier[set] () keyword[for] identifier[config_value] keyword[in] identifier[ConfigValue] . identifier[config_values] : keyword[if] identifier[config_value] . identifier[path] keyword[not] keyword[in] identifier[known_paths] : identifier[known_paths] . identifier[add] ( identifier[config_value] . identifier[path] ) keyword[else] : identifier[duplicate_values] . identifier[add] ( identifier[config_value] . identifier[name] ) keyword[try] : identifier[config_value] () keyword[except] identifier[MissingValueError] : identifier[missing_values] . identifier[add] ( identifier[config_value] . identifier[name] ) keyword[if] identifier[raise_] keyword[and] identifier[duplicate_values] : keyword[raise] identifier[DuplicateConfigValueError] ( identifier[str] ( identifier[duplicate_values] )) keyword[if] identifier[raise_] keyword[and] identifier[missing_values] : keyword[raise] identifier[MissingValueError] ( identifier[str] ( identifier[missing_values] ), literal[string] ) keyword[return] identifier[duplicate_values] , identifier[missing_values]
def validate_config(raise_=True): """ Verifies that all configuration values have a valid setting """ ELIBConfig.check() known_paths = set() duplicate_values = set() missing_values = set() for config_value in ConfigValue.config_values: if config_value.path not in known_paths: known_paths.add(config_value.path) # depends on [control=['if'], data=['known_paths']] else: duplicate_values.add(config_value.name) try: config_value() # depends on [control=['try'], data=[]] except MissingValueError: missing_values.add(config_value.name) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['config_value']] if raise_ and duplicate_values: raise DuplicateConfigValueError(str(duplicate_values)) # depends on [control=['if'], data=[]] if raise_ and missing_values: raise MissingValueError(str(missing_values), 'missing config value(s)') # depends on [control=['if'], data=[]] return (duplicate_values, missing_values)
def hpoterms():
    """Search for HPO terms."""
    query = request.args.get('query')
    if query is None:
        return abort(500)
    # Order matches by HPO number and keep only the first seven suggestions.
    ordered = sorted(store.hpo_terms(query=query), key=itemgetter('hpo_number'))
    suggestions = []
    for term in ordered[:7]:
        suggestions.append({
            'name': '{} | {}'.format(term['_id'], term['description']),
            'id': term['_id'],
        })
    return jsonify(suggestions)
def function[hpoterms, parameter[]]: constant[Search for HPO terms.] variable[query] assign[=] call[name[request].args.get, parameter[constant[query]]] if compare[name[query] is constant[None]] begin[:] return[call[name[abort], parameter[constant[500]]]] variable[terms] assign[=] call[name[sorted], parameter[call[name[store].hpo_terms, parameter[]]]] variable[json_terms] assign[=] <ast.ListComp object at 0x7da2041dada0> return[call[name[jsonify], parameter[name[json_terms]]]]
keyword[def] identifier[hpoterms] (): literal[string] identifier[query] = identifier[request] . identifier[args] . identifier[get] ( literal[string] ) keyword[if] identifier[query] keyword[is] keyword[None] : keyword[return] identifier[abort] ( literal[int] ) identifier[terms] = identifier[sorted] ( identifier[store] . identifier[hpo_terms] ( identifier[query] = identifier[query] ), identifier[key] = identifier[itemgetter] ( literal[string] )) identifier[json_terms] =[ { literal[string] : literal[string] . identifier[format] ( identifier[term] [ literal[string] ], identifier[term] [ literal[string] ]), literal[string] : identifier[term] [ literal[string] ] } keyword[for] identifier[term] keyword[in] identifier[terms] [: literal[int] ]] keyword[return] identifier[jsonify] ( identifier[json_terms] )
def hpoterms(): """Search for HPO terms.""" query = request.args.get('query') if query is None: return abort(500) # depends on [control=['if'], data=[]] terms = sorted(store.hpo_terms(query=query), key=itemgetter('hpo_number')) json_terms = [{'name': '{} | {}'.format(term['_id'], term['description']), 'id': term['_id']} for term in terms[:7]] return jsonify(json_terms)
def insert_row(self, index, row):
    """Insert a row before index in the table.

    Parameters
    ----------
    index : int
        List index rules apply
    row : iterable
        Any iterable of appropriate length.

    Raises
    ------
    TypeError:
        If `row` is not an iterable.
    ValueError:
        If size of `row` is inconsistent with the current number of columns.
    """
    # Validation normalizes the iterable and enforces the column count.
    validated = self._validate_row(row)
    self._table.insert(index, RowData(self, validated))
def function[insert_row, parameter[self, index, row]]: constant[Insert a row before index in the table. Parameters ---------- index : int List index rules apply row : iterable Any iterable of appropriate length. Raises ------ TypeError: If `row` is not an iterable. ValueError: If size of `row` is inconsistent with the current number of columns. ] variable[row] assign[=] call[name[self]._validate_row, parameter[name[row]]] variable[row_obj] assign[=] call[name[RowData], parameter[name[self], name[row]]] call[name[self]._table.insert, parameter[name[index], name[row_obj]]]
keyword[def] identifier[insert_row] ( identifier[self] , identifier[index] , identifier[row] ): literal[string] identifier[row] = identifier[self] . identifier[_validate_row] ( identifier[row] ) identifier[row_obj] = identifier[RowData] ( identifier[self] , identifier[row] ) identifier[self] . identifier[_table] . identifier[insert] ( identifier[index] , identifier[row_obj] )
def insert_row(self, index, row): """Insert a row before index in the table. Parameters ---------- index : int List index rules apply row : iterable Any iterable of appropriate length. Raises ------ TypeError: If `row` is not an iterable. ValueError: If size of `row` is inconsistent with the current number of columns. """ row = self._validate_row(row) row_obj = RowData(self, row) self._table.insert(index, row_obj)
def detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, omega_levelu, iu0, ju0): r"""Rewrite a symbolic expression in terms of allowed transition detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> combs = detunings_combinations(pairs) >>> symb_omega_levelu, omega, gamma = define_frequencies(Neu) >>> E0, omega_laser = define_laser_variables(Nl) Most times it is possible to express these combinations of optical frequencies in terms of allowed transition detunings. >>> expr = +(omega_laser[0]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[1]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 + varpi_1 - varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '+delta1_2_1-delta2_4_1' But some times it is not possible: >>> expr = +(omega_laser[1]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[0]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 - varpi_1 + varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '300.000000000000-detuning_knob[0]+detuning_knob[1]' """ Nl = len(omega_laser) Neu = len(symb_omega_levelu) # We find the coefficients a_i of the field frequencies. a = [diff(expr, omega_laser[l]) for l in range(Nl)] # We look for a combination of the detunings obtained with the # function detunings_code. For each combination we sum the # detunings weighed by a_i. 
success = False for comb in combs: expr_try = 0 for l in range(Nl): expr_try += a[l]*(omega_laser[l] - symb_omega_levelu[comb[l][0]] + symb_omega_levelu[comb[l][1]]) if expr-expr_try == 0: success = True break assign = "" if success: for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += "+" elif a[l] == -1: assign += "-" assign += "delta"+str(l+1) assign += "_"+str(comb[l][0]+1) assign += "_"+str(comb[l][1]+1) else: # We get the code for Hii using detuning knobs. # We find out the remainder terms. _remainder = expr - sum([a[l]*omega_laser[l] for l in range(Nl)]) # We find the coefficients of the remainder. b = [diff(_remainder, symb_omega_levelu[j]) for j in range(Neu)] # We calculate the remainder numerically. remainder = sum([b[j]*omega_levelu[j] for j in range(Neu)]) # We add the contributions from the detuning knobs. remainder += sum([a[l]*(omega_levelu[iu0[l]] - omega_levelu[ju0[l]]) for l in range(Nl)]) assign = str(remainder) # We get the code for Hii using detuning knobs. for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += "+" elif a[l] == -1: assign += "-" assign += "detuning_knob["+str(l)+"]" return assign
def function[detunings_rewrite, parameter[expr, combs, omega_laser, symb_omega_levelu, omega_levelu, iu0, ju0]]: constant[Rewrite a symbolic expression in terms of allowed transition detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> combs = detunings_combinations(pairs) >>> symb_omega_levelu, omega, gamma = define_frequencies(Neu) >>> E0, omega_laser = define_laser_variables(Nl) Most times it is possible to express these combinations of optical frequencies in terms of allowed transition detunings. >>> expr = +(omega_laser[0]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[1]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 + varpi_1 - varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '+delta1_2_1-delta2_4_1' But some times it is not possible: >>> expr = +(omega_laser[1]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[0]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 - varpi_1 + varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... 
omega_levelu, iu0, ju0) '300.000000000000-detuning_knob[0]+detuning_knob[1]' ] variable[Nl] assign[=] call[name[len], parameter[name[omega_laser]]] variable[Neu] assign[=] call[name[len], parameter[name[symb_omega_levelu]]] variable[a] assign[=] <ast.ListComp object at 0x7da1b196eaa0> variable[success] assign[=] constant[False] for taget[name[comb]] in starred[name[combs]] begin[:] variable[expr_try] assign[=] constant[0] for taget[name[l]] in starred[call[name[range], parameter[name[Nl]]]] begin[:] <ast.AugAssign object at 0x7da1b196eb60> if compare[binary_operation[name[expr] - name[expr_try]] equal[==] constant[0]] begin[:] variable[success] assign[=] constant[True] break variable[assign] assign[=] constant[] if name[success] begin[:] for taget[name[l]] in starred[call[name[range], parameter[name[Nl]]]] begin[:] if compare[call[name[a]][name[l]] not_equal[!=] constant[0]] begin[:] if compare[call[name[a]][name[l]] equal[==] constant[1]] begin[:] <ast.AugAssign object at 0x7da18f58d570> <ast.AugAssign object at 0x7da18f58cb50> <ast.AugAssign object at 0x7da18f58d930> <ast.AugAssign object at 0x7da18f58e620> return[name[assign]]
keyword[def] identifier[detunings_rewrite] ( identifier[expr] , identifier[combs] , identifier[omega_laser] , identifier[symb_omega_levelu] , identifier[omega_levelu] , identifier[iu0] , identifier[ju0] ): literal[string] identifier[Nl] = identifier[len] ( identifier[omega_laser] ) identifier[Neu] = identifier[len] ( identifier[symb_omega_levelu] ) identifier[a] =[ identifier[diff] ( identifier[expr] , identifier[omega_laser] [ identifier[l] ]) keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] )] identifier[success] = keyword[False] keyword[for] identifier[comb] keyword[in] identifier[combs] : identifier[expr_try] = literal[int] keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] ): identifier[expr_try] += identifier[a] [ identifier[l] ]*( identifier[omega_laser] [ identifier[l] ]- identifier[symb_omega_levelu] [ identifier[comb] [ identifier[l] ][ literal[int] ]]+ identifier[symb_omega_levelu] [ identifier[comb] [ identifier[l] ][ literal[int] ]]) keyword[if] identifier[expr] - identifier[expr_try] == literal[int] : identifier[success] = keyword[True] keyword[break] identifier[assign] = literal[string] keyword[if] identifier[success] : keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] ): keyword[if] identifier[a] [ identifier[l] ]!= literal[int] : keyword[if] identifier[a] [ identifier[l] ]== literal[int] : identifier[assign] += literal[string] keyword[elif] identifier[a] [ identifier[l] ]==- literal[int] : identifier[assign] += literal[string] identifier[assign] += literal[string] + identifier[str] ( identifier[l] + literal[int] ) identifier[assign] += literal[string] + identifier[str] ( identifier[comb] [ identifier[l] ][ literal[int] ]+ literal[int] ) identifier[assign] += literal[string] + identifier[str] ( identifier[comb] [ identifier[l] ][ literal[int] ]+ literal[int] ) keyword[else] : identifier[_remainder] = identifier[expr] - identifier[sum] ([ identifier[a] [ identifier[l] ]* 
identifier[omega_laser] [ identifier[l] ] keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] )]) identifier[b] =[ identifier[diff] ( identifier[_remainder] , identifier[symb_omega_levelu] [ identifier[j] ]) keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[Neu] )] identifier[remainder] = identifier[sum] ([ identifier[b] [ identifier[j] ]* identifier[omega_levelu] [ identifier[j] ] keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[Neu] )]) identifier[remainder] += identifier[sum] ([ identifier[a] [ identifier[l] ]*( identifier[omega_levelu] [ identifier[iu0] [ identifier[l] ]]- identifier[omega_levelu] [ identifier[ju0] [ identifier[l] ]]) keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] )]) identifier[assign] = identifier[str] ( identifier[remainder] ) keyword[for] identifier[l] keyword[in] identifier[range] ( identifier[Nl] ): keyword[if] identifier[a] [ identifier[l] ]!= literal[int] : keyword[if] identifier[a] [ identifier[l] ]== literal[int] : identifier[assign] += literal[string] keyword[elif] identifier[a] [ identifier[l] ]==- literal[int] : identifier[assign] += literal[string] identifier[assign] += literal[string] + identifier[str] ( identifier[l] )+ literal[string] keyword[return] identifier[assign]
def detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, omega_levelu, iu0, ju0): """Rewrite a symbolic expression in terms of allowed transition detunings. >>> Ne = 6 >>> Nl = 2 >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0] >>> xi = np.zeros((Nl, Ne, Ne)) >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]] >>> for l in range(Nl): ... for pair in coup[l]: ... xi[l, pair[0], pair[1]] = 1.0 ... xi[l, pair[1], pair[0]] = 1.0 >>> aux = define_simplification(omega_level, xi, Nl) >>> u, invu, omega_levelu, Neu, xiu = aux >>> omega_min, iu0, ju0 = find_omega_min(omega_levelu, Neu, Nl, xiu) >>> pairs = detunings_indices(Neu, Nl, xiu) >>> combs = detunings_combinations(pairs) >>> symb_omega_levelu, omega, gamma = define_frequencies(Neu) >>> E0, omega_laser = define_laser_variables(Nl) Most times it is possible to express these combinations of optical frequencies in terms of allowed transition detunings. >>> expr = +(omega_laser[0]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[1]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 + varpi_1 - varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '+delta1_2_1-delta2_4_1' But some times it is not possible: >>> expr = +(omega_laser[1]-(symb_omega_levelu[1]-symb_omega_levelu[0])) >>> expr += -(omega_laser[0]-(symb_omega_levelu[3]-symb_omega_levelu[0])) >>> expr -omega_2 + omega_4 - varpi_1 + varpi_2 >>> detunings_rewrite(expr, combs, omega_laser, symb_omega_levelu, ... omega_levelu, iu0, ju0) '300.000000000000-detuning_knob[0]+detuning_knob[1]' """ Nl = len(omega_laser) Neu = len(symb_omega_levelu) # We find the coefficients a_i of the field frequencies. a = [diff(expr, omega_laser[l]) for l in range(Nl)] # We look for a combination of the detunings obtained with the # function detunings_code. For each combination we sum the # detunings weighed by a_i. 
success = False for comb in combs: expr_try = 0 for l in range(Nl): expr_try += a[l] * (omega_laser[l] - symb_omega_levelu[comb[l][0]] + symb_omega_levelu[comb[l][1]]) # depends on [control=['for'], data=['l']] if expr - expr_try == 0: success = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['comb']] assign = '' if success: for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += '+' # depends on [control=['if'], data=[]] elif a[l] == -1: assign += '-' # depends on [control=['if'], data=[]] assign += 'delta' + str(l + 1) assign += '_' + str(comb[l][0] + 1) assign += '_' + str(comb[l][1] + 1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['l']] # depends on [control=['if'], data=[]] else: # We get the code for Hii using detuning knobs. # We find out the remainder terms. _remainder = expr - sum([a[l] * omega_laser[l] for l in range(Nl)]) # We find the coefficients of the remainder. b = [diff(_remainder, symb_omega_levelu[j]) for j in range(Neu)] # We calculate the remainder numerically. remainder = sum([b[j] * omega_levelu[j] for j in range(Neu)]) # We add the contributions from the detuning knobs. remainder += sum([a[l] * (omega_levelu[iu0[l]] - omega_levelu[ju0[l]]) for l in range(Nl)]) assign = str(remainder) # We get the code for Hii using detuning knobs. for l in range(Nl): if a[l] != 0: if a[l] == 1: assign += '+' # depends on [control=['if'], data=[]] elif a[l] == -1: assign += '-' # depends on [control=['if'], data=[]] assign += 'detuning_knob[' + str(l) + ']' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['l']] return assign
def has_api_scopes(self, *api_scopes): """ Test if all given API scopes are authorized. :type api_scopes: list[str] :param api_scopes: The API scopes to test :rtype: bool|None :return: True or False, if the API Token has the API scopes field set, otherwise None """ if self._authorized_api_scopes is None: return None return all((x in self._authorized_api_scopes) for x in api_scopes)
def function[has_api_scopes, parameter[self]]: constant[ Test if all given API scopes are authorized. :type api_scopes: list[str] :param api_scopes: The API scopes to test :rtype: bool|None :return: True or False, if the API Token has the API scopes field set, otherwise None ] if compare[name[self]._authorized_api_scopes is constant[None]] begin[:] return[constant[None]] return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da2047e9ae0>]]]
keyword[def] identifier[has_api_scopes] ( identifier[self] ,* identifier[api_scopes] ): literal[string] keyword[if] identifier[self] . identifier[_authorized_api_scopes] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[return] identifier[all] (( identifier[x] keyword[in] identifier[self] . identifier[_authorized_api_scopes] ) keyword[for] identifier[x] keyword[in] identifier[api_scopes] )
def has_api_scopes(self, *api_scopes): """ Test if all given API scopes are authorized. :type api_scopes: list[str] :param api_scopes: The API scopes to test :rtype: bool|None :return: True or False, if the API Token has the API scopes field set, otherwise None """ if self._authorized_api_scopes is None: return None # depends on [control=['if'], data=[]] return all((x in self._authorized_api_scopes for x in api_scopes))
def get_subscriptions(self, fetch=False): """Return this Wallet's subscriptions object, populating it if fetch is True.""" return Subscriptions( self.resource.subscriptions, self.client, populate=fetch)
def function[get_subscriptions, parameter[self, fetch]]: constant[Return this Wallet's subscriptions object, populating it if fetch is True.] return[call[name[Subscriptions], parameter[name[self].resource.subscriptions, name[self].client]]]
keyword[def] identifier[get_subscriptions] ( identifier[self] , identifier[fetch] = keyword[False] ): literal[string] keyword[return] identifier[Subscriptions] ( identifier[self] . identifier[resource] . identifier[subscriptions] , identifier[self] . identifier[client] , identifier[populate] = identifier[fetch] )
def get_subscriptions(self, fetch=False): """Return this Wallet's subscriptions object, populating it if fetch is True.""" return Subscriptions(self.resource.subscriptions, self.client, populate=fetch)
def swap_channels(self, channel_swap): """ Swaps the two channels specified in the tuple. Parameters ---------- channel_swap : :obj:`tuple` of int the two channels to swap Returns ------- :obj:`ColorImage` color image with cols swapped """ if len(channel_swap) != 2: raise ValueError('Illegal value for channel swap') ci = channel_swap[0] cj = channel_swap[1] if ci < 0 or ci > 2 or cj < 0 or cj > 2: raise ValueError('Channels must be between 0 and 1') new_data = self.data.copy() new_data[:, :, ci] = self.data[:, :, cj] new_data[:, :, cj] = self.data[:, :, ci] return ColorImage(new_data, frame=self._frame)
def function[swap_channels, parameter[self, channel_swap]]: constant[ Swaps the two channels specified in the tuple. Parameters ---------- channel_swap : :obj:`tuple` of int the two channels to swap Returns ------- :obj:`ColorImage` color image with cols swapped ] if compare[call[name[len], parameter[name[channel_swap]]] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da20c990c10> variable[ci] assign[=] call[name[channel_swap]][constant[0]] variable[cj] assign[=] call[name[channel_swap]][constant[1]] if <ast.BoolOp object at 0x7da20c991060> begin[:] <ast.Raise object at 0x7da20c9907f0> variable[new_data] assign[=] call[name[self].data.copy, parameter[]] call[name[new_data]][tuple[[<ast.Slice object at 0x7da20c991540>, <ast.Slice object at 0x7da20c992530>, <ast.Name object at 0x7da20c990ac0>]]] assign[=] call[name[self].data][tuple[[<ast.Slice object at 0x7da20c993a60>, <ast.Slice object at 0x7da20c9901f0>, <ast.Name object at 0x7da20c990b80>]]] call[name[new_data]][tuple[[<ast.Slice object at 0x7da20c992230>, <ast.Slice object at 0x7da20c992560>, <ast.Name object at 0x7da20c993b20>]]] assign[=] call[name[self].data][tuple[[<ast.Slice object at 0x7da20c991e10>, <ast.Slice object at 0x7da20c9929e0>, <ast.Name object at 0x7da20c991cf0>]]] return[call[name[ColorImage], parameter[name[new_data]]]]
keyword[def] identifier[swap_channels] ( identifier[self] , identifier[channel_swap] ): literal[string] keyword[if] identifier[len] ( identifier[channel_swap] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[ci] = identifier[channel_swap] [ literal[int] ] identifier[cj] = identifier[channel_swap] [ literal[int] ] keyword[if] identifier[ci] < literal[int] keyword[or] identifier[ci] > literal[int] keyword[or] identifier[cj] < literal[int] keyword[or] identifier[cj] > literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[new_data] = identifier[self] . identifier[data] . identifier[copy] () identifier[new_data] [:,:, identifier[ci] ]= identifier[self] . identifier[data] [:,:, identifier[cj] ] identifier[new_data] [:,:, identifier[cj] ]= identifier[self] . identifier[data] [:,:, identifier[ci] ] keyword[return] identifier[ColorImage] ( identifier[new_data] , identifier[frame] = identifier[self] . identifier[_frame] )
def swap_channels(self, channel_swap): """ Swaps the two channels specified in the tuple. Parameters ---------- channel_swap : :obj:`tuple` of int the two channels to swap Returns ------- :obj:`ColorImage` color image with cols swapped """ if len(channel_swap) != 2: raise ValueError('Illegal value for channel swap') # depends on [control=['if'], data=[]] ci = channel_swap[0] cj = channel_swap[1] if ci < 0 or ci > 2 or cj < 0 or (cj > 2): raise ValueError('Channels must be between 0 and 1') # depends on [control=['if'], data=[]] new_data = self.data.copy() new_data[:, :, ci] = self.data[:, :, cj] new_data[:, :, cj] = self.data[:, :, ci] return ColorImage(new_data, frame=self._frame)