Dataset preview: four string columns per example (lengths are min to max).

  code             string, 75 to 104k chars:  the original Python source
  code_sememe      string, 47 to 309k chars:  an AST-style sememe rendering of the source
  token_type       string, 215 to 214k chars: the source as a token stream tagged with keyword/identifier/literal markers
  code_dependency  string, 75 to 155k chars:  the source annotated with "# depends on [control=...]" comments
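The rows below cycle through the four columns for each sample: code, then code_sememe, then token_type, then code_dependency. As a hedged sketch, assuming the dataset is published on the Hugging Face Hub (the identifier 'user/code-sememe' is a placeholder, not the dataset's real name), one row could be inspected like this:

from datasets import load_dataset

ds = load_dataset('user/code-sememe', split='train')  # placeholder identifier
row = ds[0]
print(row['code'][:200])             # original Python source
print(row['code_sememe'][:200])      # AST-style sememe rendering
print(row['token_type'][:200])       # keyword/identifier/literal token stream
print(row['code_dependency'][:200])  # source with "# depends on" annotations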
def parse_line(string):
    """Parse a single string as traceback line"""
    match = line_regexp().match(string)
    if match:
        matches = match.groupdict()
        line_number = matches['line_number']
        path_to_python = matches['path_to_python']
        spaceless_path_to_python = matches['spaceless_path_to_python']
        if path_to_python:
            return path_to_python, line_number
        elif spaceless_path_to_python:
            return spaceless_path_to_python, line_number
def function[parse_line, parameter[string]]: constant[Parse a single string as traceback line] variable[match] assign[=] call[call[name[line_regexp], parameter[]].match, parameter[name[string]]] if name[match] begin[:] variable[matches] assign[=] call[name[match].groupdict, parameter[]] variable[line_number] assign[=] call[name[matches]][constant[line_number]] variable[path_to_python] assign[=] call[name[matches]][constant[path_to_python]] variable[spaceless_path_to_python] assign[=] call[name[matches]][constant[spaceless_path_to_python]] if name[path_to_python] begin[:] return[tuple[[<ast.Name object at 0x7da1b1435f60>, <ast.Name object at 0x7da1b1470eb0>]]]
keyword[def] identifier[parse_line] ( identifier[string] ): literal[string] identifier[match] = identifier[line_regexp] (). identifier[match] ( identifier[string] ) keyword[if] identifier[match] : identifier[matches] = identifier[match] . identifier[groupdict] () identifier[line_number] = identifier[matches] [ literal[string] ] identifier[path_to_python] = identifier[matches] [ literal[string] ] identifier[spaceless_path_to_python] = identifier[matches] [ literal[string] ] keyword[if] identifier[path_to_python] : keyword[return] identifier[path_to_python] , identifier[line_number] keyword[elif] identifier[spaceless_path_to_python] : keyword[return] identifier[spaceless_path_to_python] , identifier[line_number]
def parse_line(string): """Parse a single string as traceback line""" match = line_regexp().match(string) if match: matches = match.groupdict() line_number = matches['line_number'] path_to_python = matches['path_to_python'] spaceless_path_to_python = matches['spaceless_path_to_python'] if path_to_python: return (path_to_python, line_number) # depends on [control=['if'], data=[]] elif spaceless_path_to_python: return (spaceless_path_to_python, line_number) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
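line_regexp() itself is not part of this sample; a hypothetical pattern that would satisfy parse_line's three named groups might look like the sketch below (the real pattern may well differ):

import re

def line_regexp():
    # Hypothetical: one alternation per path style, sharing a single
    # line_number group, e.g. matching '  File "/tmp/app.py", line 7'.
    return re.compile(
        r'\s*(?:File "(?P<path_to_python>[^"]+)"'
        r'|(?P<spaceless_path_to_python>\S+)), line (?P<line_number>\d+)')

print(parse_line('  File "/tmp/app.py", line 7, in main'))  # ('/tmp/app.py', '7')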
def parse_second_row(row, url):
    """
    Static method that parses a given table row element by using helper methods
    `Parser.parse_category_subcategory_and_or_quality`, `Parser.parse_torrent_link`
    and scraping the torrent's category, subcategory, quality, language, user,
    user url, torrent link, size, comments, times completed, seeders and leechers.

    Used specifically with a torrent's second table row.

    :param lxml.HtmlElement row: row to parse
    :param urls.Url url: Url used to combine base urls with scraped links from the tr
    :return: scraped category, subcategory, quality, language, user, user url,
             torrent link, size, comments, times completed, seeders and leechers
    :rtype: list
    """
    tags = row.findall('./td')

    category, subcategory, quality, language = Parser.parse_torrent_properties(tags[0])
    user_info = tags[1].find('./a')
    user = user_info.text_content()
    user_url = url.combine(user_info.get('href'))

    # Two urls - one is spam, the second is the torrent url.
    # Don't combine it with BASE_URL, since it's an absolute url.
    torrent_link = Parser.parse_torrent_link(tags[2])

    size = tags[3].text  # as 10.5 GB
    comments = tags[4].text
    times_completed = tags[5].text
    seeders = tags[6].text
    leechers = tags[7].text

    return [category, subcategory, quality, language, user, user_url,
            torrent_link, size, comments, times_completed, seeders, leechers]
def function[parse_second_row, parameter[row, url]]: constant[ Static method that parses a given table row element by using helper methods `Parser.parse_category_subcategory_and_or_quality`, `Parser.parse_torrent_link` and scrapping torrent's category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers. Used specifically with a torrent's second table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers :rtype: list ] variable[tags] assign[=] call[name[row].findall, parameter[constant[./td]]] <ast.Tuple object at 0x7da20e9b2020> assign[=] call[name[Parser].parse_torrent_properties, parameter[call[name[tags]][constant[0]]]] variable[user_info] assign[=] call[call[name[tags]][constant[1]].find, parameter[constant[./a]]] variable[user] assign[=] call[name[user_info].text_content, parameter[]] variable[user_url] assign[=] call[name[url].combine, parameter[call[name[user_info].get, parameter[constant[href]]]]] variable[torrent_link] assign[=] call[name[Parser].parse_torrent_link, parameter[call[name[tags]][constant[2]]]] variable[size] assign[=] call[name[tags]][constant[3]].text variable[comments] assign[=] call[name[tags]][constant[4]].text variable[times_completed] assign[=] call[name[tags]][constant[5]].text variable[seeders] assign[=] call[name[tags]][constant[6]].text variable[leechers] assign[=] call[name[tags]][constant[7]].text return[list[[<ast.Name object at 0x7da20e9b27d0>, <ast.Name object at 0x7da20e9b3550>, <ast.Name object at 0x7da20e9b1d20>, <ast.Name object at 0x7da20e9b07c0>, <ast.Name object at 0x7da20e9b1360>, <ast.Name object at 0x7da20e9b1ba0>, <ast.Name object at 0x7da20e9b30d0>, <ast.Name object at 0x7da20e9b2110>, <ast.Name object at 0x7da20e9b3a90>, <ast.Name object at 0x7da20e9b35e0>, <ast.Name object at 0x7da20e9b0d30>, <ast.Name object at 0x7da20e9b0040>]]]
keyword[def] identifier[parse_second_row] ( identifier[row] , identifier[url] ): literal[string] identifier[tags] = identifier[row] . identifier[findall] ( literal[string] ) identifier[category] , identifier[subcategory] , identifier[quality] , identifier[language] = identifier[Parser] . identifier[parse_torrent_properties] ( identifier[tags] [ literal[int] ]) identifier[user_info] = identifier[tags] [ literal[int] ]. identifier[find] ( literal[string] ) identifier[user] = identifier[user_info] . identifier[text_content] () identifier[user_url] = identifier[url] . identifier[combine] ( identifier[user_info] . identifier[get] ( literal[string] )) identifier[torrent_link] = identifier[Parser] . identifier[parse_torrent_link] ( identifier[tags] [ literal[int] ]) identifier[size] = identifier[tags] [ literal[int] ]. identifier[text] identifier[comments] = identifier[tags] [ literal[int] ]. identifier[text] identifier[times_completed] = identifier[tags] [ literal[int] ]. identifier[text] identifier[seeders] = identifier[tags] [ literal[int] ]. identifier[text] identifier[leechers] = identifier[tags] [ literal[int] ]. identifier[text] keyword[return] [ identifier[category] , identifier[subcategory] , identifier[quality] , identifier[language] , identifier[user] , identifier[user_url] , identifier[torrent_link] , identifier[size] , identifier[comments] , identifier[times_completed] , identifier[seeders] , identifier[leechers] ]
def parse_second_row(row, url): """ Static method that parses a given table row element by using helper methods `Parser.parse_category_subcategory_and_or_quality`, `Parser.parse_torrent_link` and scrapping torrent's category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers. Used specifically with a torrent's second table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers :rtype: list """ tags = row.findall('./td') (category, subcategory, quality, language) = Parser.parse_torrent_properties(tags[0]) user_info = tags[1].find('./a') user = user_info.text_content() user_url = url.combine(user_info.get('href')) # Two urls - one is spam, second is torrent url. # Don't combine it with BASE_URL, since it's an absolute url. torrent_link = Parser.parse_torrent_link(tags[2]) size = tags[3].text # as 10.5 GB comments = tags[4].text times_completed = tags[5].text seeders = tags[6].text leechers = tags[7].text return [category, subcategory, quality, language, user, user_url, torrent_link, size, comments, times_completed, seeders, leechers]
def create_calc_dh_d_shape(estimator):
    """
    Return the function that can be used in the various gradient and hessian
    calculations to calculate the derivative of the transformation with respect
    to the shape parameters.

    Parameters
    ----------
    estimator : an instance of the estimation.LogitTypeEstimator class.
        Should contain a `rows_to_alts` attribute that is a 2D scipy sparse
        matrix that maps the rows of the `design` matrix to the alternatives
        available in this dataset.

    Returns
    -------
    Callable.
        Will accept a 1D array of systematic utility values, a 1D array of
        alternative IDs, (shape parameters if there are any) and miscellaneous
        args and kwargs. Should return a 2D array whose elements contain the
        derivative of the transformed utility vector with respect to the vector
        of shape parameters. The dimensions of the returned vector should
        be `(design.shape[0], num_alternatives)`.
    """
    dh_d_shape = estimator.rows_to_alts.copy()
    # Create a function that will take in the pre-formed matrix, replace its
    # data in-place with the new data, and return the correct dh_dshape on each
    # iteration of the minimizer
    calc_dh_d_shape = partial(_uneven_transform_deriv_shape,
                              output_array=dh_d_shape)
    return calc_dh_d_shape
def function[create_calc_dh_d_shape, parameter[estimator]]: constant[ Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the shape parameters. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `rows_to_alts` attribute that is a 2D scipy sparse matrix that maps the rows of the `design` matrix to the alternatives available in this dataset. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the tranformed utility vector with respect to the vector of shape parameters. The dimensions of the returned vector should be `(design.shape[0], num_alternatives)`. ] variable[dh_d_shape] assign[=] call[name[estimator].rows_to_alts.copy, parameter[]] variable[calc_dh_d_shape] assign[=] call[name[partial], parameter[name[_uneven_transform_deriv_shape]]] return[name[calc_dh_d_shape]]
keyword[def] identifier[create_calc_dh_d_shape] ( identifier[estimator] ): literal[string] identifier[dh_d_shape] = identifier[estimator] . identifier[rows_to_alts] . identifier[copy] () identifier[calc_dh_d_shape] = identifier[partial] ( identifier[_uneven_transform_deriv_shape] , identifier[output_array] = identifier[dh_d_shape] ) keyword[return] identifier[calc_dh_d_shape]
def create_calc_dh_d_shape(estimator): """ Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the shape parameters. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `rows_to_alts` attribute that is a 2D scipy sparse matrix that maps the rows of the `design` matrix to the alternatives available in this dataset. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the tranformed utility vector with respect to the vector of shape parameters. The dimensions of the returned vector should be `(design.shape[0], num_alternatives)`. """ dh_d_shape = estimator.rows_to_alts.copy() # Create a function that will take in the pre-formed matrix, replace its # data in-place with the new data, and return the correct dh_dshape on each # iteration of the minimizer calc_dh_d_shape = partial(_uneven_transform_deriv_shape, output_array=dh_d_shape) return calc_dh_d_shape
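The returned callable is built with functools.partial, which freezes keyword arguments onto an existing function; a minimal standalone illustration of the same pattern:

from functools import partial

def scale(x, factor=1.0):
    return x * factor

double = partial(scale, factor=2.0)  # factor is pre-bound, like output_array above
print(double(21))  # 42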
def access_vlan(self, inter_type, inter, vlan_id):
    """
    Add an L2 interface to a specific VLAN.

    Args:
        inter_type: The type of interface you want to configure. Ex.
            tengigabitethernet, gigabitethernet, fortygigabitethernet.
        inter: The ID for the interface you want to configure. Ex. 1/0/1
        vlan_id: ID for the VLAN interface being modified. Value of 2-4096.

    Returns:
        True if command completes successfully or False if not.

    Raises:
        None
    """
    config = ET.Element('config')
    interface = ET.SubElement(config, 'interface',
                              xmlns="urn:brocade.com:mgmt:brocade-interface")
    int_type = ET.SubElement(interface, inter_type)
    name = ET.SubElement(int_type, 'name')
    name.text = inter
    switchport = ET.SubElement(int_type, 'switchport')
    access = ET.SubElement(switchport, 'access')
    accessvlan = ET.SubElement(access, 'accessvlan')
    accessvlan.text = vlan_id
    try:
        self._callback(config)
        return True
    # TODO add logging and narrow exception window.
    except Exception as error:
        logging.error(error)
        return False
def function[access_vlan, parameter[self, inter_type, inter, vlan_id]]: constant[ Add a L2 Interface to a specific VLAN. Args: inter_type: The type of interface you want to configure. Ex. tengigabitethernet, gigabitethernet, fortygigabitethernet. inter: The ID for the interface you want to configure. Ex. 1/0/1 vlan_id: ID for the VLAN interface being modified. Value of 2-4096. Returns: True if command completes successfully or False if not. Raises: None ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[interface] assign[=] call[name[ET].SubElement, parameter[name[config], constant[interface]]] variable[int_type] assign[=] call[name[ET].SubElement, parameter[name[interface], name[inter_type]]] variable[name] assign[=] call[name[ET].SubElement, parameter[name[int_type], constant[name]]] name[name].text assign[=] name[inter] variable[switchport] assign[=] call[name[ET].SubElement, parameter[name[int_type], constant[switchport]]] variable[access] assign[=] call[name[ET].SubElement, parameter[name[switchport], constant[access]]] variable[accessvlan] assign[=] call[name[ET].SubElement, parameter[name[access], constant[accessvlan]]] name[accessvlan].text assign[=] name[vlan_id] <ast.Try object at 0x7da20c992a40>
keyword[def] identifier[access_vlan] ( identifier[self] , identifier[inter_type] , identifier[inter] , identifier[vlan_id] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[interface] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] =( literal[string] literal[string] )) identifier[int_type] = identifier[ET] . identifier[SubElement] ( identifier[interface] , identifier[inter_type] ) identifier[name] = identifier[ET] . identifier[SubElement] ( identifier[int_type] , literal[string] ) identifier[name] . identifier[text] = identifier[inter] identifier[switchport] = identifier[ET] . identifier[SubElement] ( identifier[int_type] , literal[string] ) identifier[access] = identifier[ET] . identifier[SubElement] ( identifier[switchport] , literal[string] ) identifier[accessvlan] = identifier[ET] . identifier[SubElement] ( identifier[access] , literal[string] ) identifier[accessvlan] . identifier[text] = identifier[vlan_id] keyword[try] : identifier[self] . identifier[_callback] ( identifier[config] ) keyword[return] keyword[True] keyword[except] identifier[Exception] keyword[as] identifier[error] : identifier[logging] . identifier[error] ( identifier[error] ) keyword[return] keyword[False]
def access_vlan(self, inter_type, inter, vlan_id): """ Add a L2 Interface to a specific VLAN. Args: inter_type: The type of interface you want to configure. Ex. tengigabitethernet, gigabitethernet, fortygigabitethernet. inter: The ID for the interface you want to configure. Ex. 1/0/1 vlan_id: ID for the VLAN interface being modified. Value of 2-4096. Returns: True if command completes successfully or False if not. Raises: None """ config = ET.Element('config') interface = ET.SubElement(config, 'interface', xmlns='urn:brocade.com:mgmt:brocade-interface') int_type = ET.SubElement(interface, inter_type) name = ET.SubElement(int_type, 'name') name.text = inter switchport = ET.SubElement(int_type, 'switchport') access = ET.SubElement(switchport, 'access') accessvlan = ET.SubElement(access, 'accessvlan') accessvlan.text = vlan_id try: self._callback(config) return True # depends on [control=['try'], data=[]] # TODO add logging and narrow exception window. except Exception as error: logging.error(error) return False # depends on [control=['except'], data=['error']]
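For reference, the payload the method assembles can be reproduced standalone; this sketch only serializes the element tree access_vlan builds (the interface and VLAN values are illustrative):

import xml.etree.ElementTree as ET

config = ET.Element('config')
interface = ET.SubElement(config, 'interface',
                          xmlns='urn:brocade.com:mgmt:brocade-interface')
int_type = ET.SubElement(interface, 'tengigabitethernet')
ET.SubElement(int_type, 'name').text = '1/0/1'
switchport = ET.SubElement(int_type, 'switchport')
access = ET.SubElement(switchport, 'access')
ET.SubElement(access, 'accessvlan').text = '100'

print(ET.tostring(config, encoding='unicode'))
# <config><interface xmlns="urn:brocade.com:mgmt:brocade-interface">...</interface></config>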
def translate(self, dx, dy):
    """
    Move the polygons from one place to another

    Parameters
    ----------
    dx : number
        distance to move in the x-direction
    dy : number
        distance to move in the y-direction

    Returns
    -------
    out : ``PolygonSet``
        This object.
    """
    vec = numpy.array((dx, dy))
    self.polygons = [points + vec for points in self.polygons]
    return self
def function[translate, parameter[self, dx, dy]]: constant[ Move the polygons from one place to another Parameters ---------- dx : number distance to move in the x-direction dy : number distance to move in the y-direction Returns ------- out : ``PolygonSet`` This object. ] variable[vec] assign[=] call[name[numpy].array, parameter[tuple[[<ast.Name object at 0x7da1b08634f0>, <ast.Name object at 0x7da1b0862c50>]]]] name[self].polygons assign[=] <ast.ListComp object at 0x7da1b0861300> return[name[self]]
keyword[def] identifier[translate] ( identifier[self] , identifier[dx] , identifier[dy] ): literal[string] identifier[vec] = identifier[numpy] . identifier[array] (( identifier[dx] , identifier[dy] )) identifier[self] . identifier[polygons] =[ identifier[points] + identifier[vec] keyword[for] identifier[points] keyword[in] identifier[self] . identifier[polygons] ] keyword[return] identifier[self]
def translate(self, dx, dy): """ Move the polygons from one place to another Parameters ---------- dx : number distance to move in the x-direction dy : number distance to move in the y-direction Returns ------- out : ``PolygonSet`` This object. """ vec = numpy.array((dx, dy)) self.polygons = [points + vec for points in self.polygons] return self
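The list comprehension in translate leans on numpy broadcasting: adding a (2,) offset vector to an (N, 2) array of vertices shifts every vertex at once. A minimal illustration:

import numpy

points = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])  # one polygon's vertices
vec = numpy.array((2.0, 3.0))                               # (dx, dy)
print(points + vec)
# [[2. 3.]
#  [3. 3.]
#  [4. 4.]]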
def create_from_response_pdu(resp_pdu):
    """Create instance from response PDU.

    :param resp_pdu: Byte array with response PDU.
    :return: Instance of :class:`WriteSingleRegister`.
    """
    write_single_register = WriteSingleRegister()
    address, value = struct.unpack('>H' + conf.TYPE_CHAR, resp_pdu[1:5])
    write_single_register.address = address
    write_single_register.data = value
    return write_single_register
def function[create_from_response_pdu, parameter[resp_pdu]]: constant[ Create instance from response PDU. :param resp_pdu: Byte array with request PDU. :return: Instance of :class:`WriteSingleRegister`. ] variable[write_single_register] assign[=] call[name[WriteSingleRegister], parameter[]] <ast.Tuple object at 0x7da204347f70> assign[=] call[name[struct].unpack, parameter[binary_operation[constant[>H] + name[conf].TYPE_CHAR], call[name[resp_pdu]][<ast.Slice object at 0x7da204344550>]]] name[write_single_register].address assign[=] name[address] name[write_single_register].data assign[=] name[value] return[name[write_single_register]]
keyword[def] identifier[create_from_response_pdu] ( identifier[resp_pdu] ): literal[string] identifier[write_single_register] = identifier[WriteSingleRegister] () identifier[address] , identifier[value] = identifier[struct] . identifier[unpack] ( literal[string] + identifier[conf] . identifier[TYPE_CHAR] , identifier[resp_pdu] [ literal[int] : literal[int] ]) identifier[write_single_register] . identifier[address] = identifier[address] identifier[write_single_register] . identifier[data] = identifier[value] keyword[return] identifier[write_single_register]
def create_from_response_pdu(resp_pdu): """ Create instance from response PDU. :param resp_pdu: Byte array with request PDU. :return: Instance of :class:`WriteSingleRegister`. """ write_single_register = WriteSingleRegister() (address, value) = struct.unpack('>H' + conf.TYPE_CHAR, resp_pdu[1:5]) write_single_register.address = address write_single_register.data = value return write_single_register
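The slice resp_pdu[1:5] skips the function-code byte and unpacks two big-endian 16-bit fields; assuming conf.TYPE_CHAR is 'H' (unsigned 16-bit), the layout can be checked standalone:

import struct

# function code 0x06, register address 10, register value 1234
resp_pdu = b'\x06\x00\x0a\x04\xd2'
address, value = struct.unpack('>HH', resp_pdu[1:5])
print(address, value)  # 10 1234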
def json_numpy_obj_hook(dct):
    """Decode a previously encoded numpy ndarray with proper shape and dtype,
    and decompress the data with blosc.

    :param dct: (dict) json encoded ndarray
    :return: (ndarray) if input was an encoded ndarray
    """
    if isinstance(dct, dict) and '__ndarray__' in dct:
        array = dct['__ndarray__']
        # http://stackoverflow.com/questions/24369666/typeerror-b1-is-not-json-serializable
        if sys.version_info >= (3, 0):
            array = array.encode('utf-8')
        data = base64.b64decode(array)
        if has_blosc:
            data = blosc.decompress(data)
        try:
            dtype = np.dtype(ast.literal_eval(dct['dtype']))
        except ValueError:
            # If the array is not a recarray
            dtype = dct['dtype']
        return np.frombuffer(data, dtype).reshape(dct['shape'])
    return dct
def function[json_numpy_obj_hook, parameter[dct]]: constant[Decodes a previously encoded numpy ndarray with proper shape and dtype. And decompresses the data with blosc :param dct: (dict) json encoded ndarray :return: (ndarray) if input was an encoded ndarray ] if <ast.BoolOp object at 0x7da1b0913310> begin[:] variable[array] assign[=] call[name[dct]][constant[__ndarray__]] if compare[name[sys].version_info greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da1b0910a30>, <ast.Constant object at 0x7da1b09116c0>]]] begin[:] variable[array] assign[=] call[name[array].encode, parameter[constant[utf-8]]] variable[data] assign[=] call[name[base64].b64decode, parameter[name[array]]] if name[has_blosc] begin[:] variable[data] assign[=] call[name[blosc].decompress, parameter[name[data]]] <ast.Try object at 0x7da1b09303a0> return[call[call[name[np].frombuffer, parameter[name[data], name[dtype]]].reshape, parameter[call[name[dct]][constant[shape]]]]] return[name[dct]]
keyword[def] identifier[json_numpy_obj_hook] ( identifier[dct] ): literal[string] keyword[if] identifier[isinstance] ( identifier[dct] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[dct] : identifier[array] = identifier[dct] [ literal[string] ] keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] , literal[int] ): identifier[array] = identifier[array] . identifier[encode] ( literal[string] ) identifier[data] = identifier[base64] . identifier[b64decode] ( identifier[array] ) keyword[if] identifier[has_blosc] : identifier[data] = identifier[blosc] . identifier[decompress] ( identifier[data] ) keyword[try] : identifier[dtype] = identifier[np] . identifier[dtype] ( identifier[ast] . identifier[literal_eval] ( identifier[dct] [ literal[string] ])) keyword[except] identifier[ValueError] : identifier[dtype] = identifier[dct] [ literal[string] ] keyword[return] identifier[np] . identifier[frombuffer] ( identifier[data] , identifier[dtype] ). identifier[reshape] ( identifier[dct] [ literal[string] ]) keyword[return] identifier[dct]
def json_numpy_obj_hook(dct): """Decodes a previously encoded numpy ndarray with proper shape and dtype. And decompresses the data with blosc :param dct: (dict) json encoded ndarray :return: (ndarray) if input was an encoded ndarray """ if isinstance(dct, dict) and '__ndarray__' in dct: array = dct['__ndarray__'] # http://stackoverflow.com/questions/24369666/typeerror-b1-is-not-json-serializable if sys.version_info >= (3, 0): array = array.encode('utf-8') # depends on [control=['if'], data=[]] data = base64.b64decode(array) if has_blosc: data = blosc.decompress(data) # depends on [control=['if'], data=[]] try: dtype = np.dtype(ast.literal_eval(dct['dtype'])) # depends on [control=['try'], data=[]] except ValueError: # If the array is not a recarray dtype = dct['dtype'] # depends on [control=['except'], data=[]] return np.frombuffer(data, dtype).reshape(dct['shape']) # depends on [control=['if'], data=[]] return dct
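The matching encoder is not part of this sample; a minimal counterpart that produces the dict shape the hook expects (skipping the optional blosc compression) might look like this:

import base64
import json
import numpy as np

def json_numpy_obj_encoder(obj):
    if isinstance(obj, np.ndarray):
        data = base64.b64encode(np.ascontiguousarray(obj).data).decode('utf-8')
        return {'__ndarray__': data, 'dtype': str(obj.dtype), 'shape': obj.shape}
    raise TypeError(repr(obj) + ' is not JSON serializable')

payload = json.dumps(np.arange(6).reshape(2, 3), default=json_numpy_obj_encoder)
# round-trip: json.loads(payload, object_hook=json_numpy_obj_hook)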
def __appendActivities(self, periodicActivities):
    """
    periodicActivities: A sequence of PeriodicActivityRequest elements
    """
    for req in periodicActivities:
        act = self.Activity(repeating=req.repeating,
                            period=req.period,
                            cb=req.cb,
                            iteratorHolder=[iter(xrange(req.period - 1))])
        self.__activities.append(act)
    return
def function[__appendActivities, parameter[self, periodicActivities]]: constant[ periodicActivities: A sequence of PeriodicActivityRequest elements ] for taget[name[req]] in starred[name[periodicActivities]] begin[:] variable[act] assign[=] call[name[self].Activity, parameter[]] call[name[self].__activities.append, parameter[name[act]]] return[None]
keyword[def] identifier[__appendActivities] ( identifier[self] , identifier[periodicActivities] ): literal[string] keyword[for] identifier[req] keyword[in] identifier[periodicActivities] : identifier[act] = identifier[self] . identifier[Activity] ( identifier[repeating] = identifier[req] . identifier[repeating] , identifier[period] = identifier[req] . identifier[period] , identifier[cb] = identifier[req] . identifier[cb] , identifier[iteratorHolder] =[ identifier[iter] ( identifier[xrange] ( identifier[req] . identifier[period] - literal[int] ))]) identifier[self] . identifier[__activities] . identifier[append] ( identifier[act] ) keyword[return]
def __appendActivities(self, periodicActivities): """ periodicActivities: A sequence of PeriodicActivityRequest elements """ for req in periodicActivities: act = self.Activity(repeating=req.repeating, period=req.period, cb=req.cb, iteratorHolder=[iter(xrange(req.period - 1))]) self.__activities.append(act) # depends on [control=['for'], data=['req']] return
def main(arguments=None):
    '''Runs thumbor server with the specified arguments.'''
    if arguments is None:
        arguments = sys.argv[1:]

    server_parameters = get_server_parameters(arguments)
    config = get_config(server_parameters.config_path,
                        server_parameters.use_environment)
    configure_log(config, server_parameters.log_level.upper())

    validate_config(config, server_parameters)

    importer = get_importer(config)

    with get_context(server_parameters, config, importer) as context:
        application = get_application(context)
        server = run_server(application, context)
        setup_signal_handler(server, config)
        logging.debug('thumbor running at %s:%d' % (context.server.ip, context.server.port))
        tornado.ioloop.IOLoop.instance().start()
def function[main, parameter[arguments]]: constant[Runs thumbor server with the specified arguments.] if compare[name[arguments] is constant[None]] begin[:] variable[arguments] assign[=] call[name[sys].argv][<ast.Slice object at 0x7da1b1cd4f70>] variable[server_parameters] assign[=] call[name[get_server_parameters], parameter[name[arguments]]] variable[config] assign[=] call[name[get_config], parameter[name[server_parameters].config_path, name[server_parameters].use_environment]] call[name[configure_log], parameter[name[config], call[name[server_parameters].log_level.upper, parameter[]]]] call[name[validate_config], parameter[name[config], name[server_parameters]]] variable[importer] assign[=] call[name[get_importer], parameter[name[config]]] with call[name[get_context], parameter[name[server_parameters], name[config], name[importer]]] begin[:] variable[application] assign[=] call[name[get_application], parameter[name[context]]] variable[server] assign[=] call[name[run_server], parameter[name[application], name[context]]] call[name[setup_signal_handler], parameter[name[server], name[config]]] call[name[logging].debug, parameter[binary_operation[constant[thumbor running at %s:%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1b1a440>, <ast.Attribute object at 0x7da1b1b1ab90>]]]]] call[call[name[tornado].ioloop.IOLoop.instance, parameter[]].start, parameter[]]
keyword[def] identifier[main] ( identifier[arguments] = keyword[None] ): literal[string] keyword[if] identifier[arguments] keyword[is] keyword[None] : identifier[arguments] = identifier[sys] . identifier[argv] [ literal[int] :] identifier[server_parameters] = identifier[get_server_parameters] ( identifier[arguments] ) identifier[config] = identifier[get_config] ( identifier[server_parameters] . identifier[config_path] , identifier[server_parameters] . identifier[use_environment] ) identifier[configure_log] ( identifier[config] , identifier[server_parameters] . identifier[log_level] . identifier[upper] ()) identifier[validate_config] ( identifier[config] , identifier[server_parameters] ) identifier[importer] = identifier[get_importer] ( identifier[config] ) keyword[with] identifier[get_context] ( identifier[server_parameters] , identifier[config] , identifier[importer] ) keyword[as] identifier[context] : identifier[application] = identifier[get_application] ( identifier[context] ) identifier[server] = identifier[run_server] ( identifier[application] , identifier[context] ) identifier[setup_signal_handler] ( identifier[server] , identifier[config] ) identifier[logging] . identifier[debug] ( literal[string] %( identifier[context] . identifier[server] . identifier[ip] , identifier[context] . identifier[server] . identifier[port] )) identifier[tornado] . identifier[ioloop] . identifier[IOLoop] . identifier[instance] (). identifier[start] ()
def main(arguments=None): """Runs thumbor server with the specified arguments.""" if arguments is None: arguments = sys.argv[1:] # depends on [control=['if'], data=['arguments']] server_parameters = get_server_parameters(arguments) config = get_config(server_parameters.config_path, server_parameters.use_environment) configure_log(config, server_parameters.log_level.upper()) validate_config(config, server_parameters) importer = get_importer(config) with get_context(server_parameters, config, importer) as context: application = get_application(context) server = run_server(application, context) setup_signal_handler(server, config) logging.debug('thumbor running at %s:%d' % (context.server.ip, context.server.port)) tornado.ioloop.IOLoop.instance().start() # depends on [control=['with'], data=['context']]
def gpr(data, xseq, **params):
    """
    Fit a Gaussian process
    """
    try:
        from sklearn import gaussian_process
    except ImportError:
        raise PlotnineError(
            "To use gaussian process smoothing, "
            "you need to install scikit-learn.")

    kwargs = params['method_args']
    if not kwargs:
        warnings.warn(
            "See sklearn.gaussian_process.GaussianProcessRegressor "
            "for parameters to pass in as 'method_args'", PlotnineWarning)

    regressor = gaussian_process.GaussianProcessRegressor(**kwargs)
    X = np.atleast_2d(data['x']).T
    n = len(data)
    Xseq = np.atleast_2d(xseq).T
    regressor.fit(X, data['y'])

    data = pd.DataFrame({'x': xseq})
    if params['se']:
        y, stderr = regressor.predict(Xseq, return_std=True)
        data['y'] = y
        data['se'] = stderr
        data['ymin'], data['ymax'] = tdist_ci(
            y, n - 1, stderr, params['level'])
    else:
        # mean prediction only; return_std=True here would assign a
        # (mean, std) tuple to the 'y' column
        data['y'] = regressor.predict(Xseq)

    return data
def function[gpr, parameter[data, xseq]]: constant[ Fit gaussian process ] <ast.Try object at 0x7da204564fa0> variable[kwargs] assign[=] call[name[params]][constant[method_args]] if <ast.UnaryOp object at 0x7da204566560> begin[:] call[name[warnings].warn, parameter[constant[See sklearn.gaussian_process.GaussianProcessRegressor for parameters to pass in as 'method_args'], name[PlotnineWarning]]] variable[regressor] assign[=] call[name[gaussian_process].GaussianProcessRegressor, parameter[]] variable[X] assign[=] call[name[np].atleast_2d, parameter[call[name[data]][constant[x]]]].T variable[n] assign[=] call[name[len], parameter[name[data]]] variable[Xseq] assign[=] call[name[np].atleast_2d, parameter[name[xseq]]].T call[name[regressor].fit, parameter[name[X], call[name[data]][constant[y]]]] variable[data] assign[=] call[name[pd].DataFrame, parameter[dictionary[[<ast.Constant object at 0x7da204566680>], [<ast.Name object at 0x7da204564130>]]]] if call[name[params]][constant[se]] begin[:] <ast.Tuple object at 0x7da204565a50> assign[=] call[name[regressor].predict, parameter[name[Xseq]]] call[name[data]][constant[y]] assign[=] name[y] call[name[data]][constant[se]] assign[=] name[stderr] <ast.Tuple object at 0x7da1b184bf10> assign[=] call[name[tdist_ci], parameter[name[y], binary_operation[name[n] - constant[1]], name[stderr], call[name[params]][constant[level]]]] return[name[data]]
keyword[def] identifier[gpr] ( identifier[data] , identifier[xseq] ,** identifier[params] ): literal[string] keyword[try] : keyword[from] identifier[sklearn] keyword[import] identifier[gaussian_process] keyword[except] identifier[ImportError] : keyword[raise] identifier[PlotnineError] ( literal[string] literal[string] ) identifier[kwargs] = identifier[params] [ literal[string] ] keyword[if] keyword[not] identifier[kwargs] : identifier[warnings] . identifier[warn] ( literal[string] literal[string] , identifier[PlotnineWarning] ) identifier[regressor] = identifier[gaussian_process] . identifier[GaussianProcessRegressor] (** identifier[kwargs] ) identifier[X] = identifier[np] . identifier[atleast_2d] ( identifier[data] [ literal[string] ]). identifier[T] identifier[n] = identifier[len] ( identifier[data] ) identifier[Xseq] = identifier[np] . identifier[atleast_2d] ( identifier[xseq] ). identifier[T] identifier[regressor] . identifier[fit] ( identifier[X] , identifier[data] [ literal[string] ]) identifier[data] = identifier[pd] . identifier[DataFrame] ({ literal[string] : identifier[xseq] }) keyword[if] identifier[params] [ literal[string] ]: identifier[y] , identifier[stderr] = identifier[regressor] . identifier[predict] ( identifier[Xseq] , identifier[return_std] = keyword[True] ) identifier[data] [ literal[string] ]= identifier[y] identifier[data] [ literal[string] ]= identifier[stderr] identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]= identifier[tdist_ci] ( identifier[y] , identifier[n] - literal[int] , identifier[stderr] , identifier[params] [ literal[string] ]) keyword[else] : identifier[data] [ literal[string] ]= identifier[regressor] . identifier[predict] ( identifier[Xseq] , identifier[return_std] = keyword[True] ) keyword[return] identifier[data]
def gpr(data, xseq, **params): """ Fit gaussian process """ try: from sklearn import gaussian_process # depends on [control=['try'], data=[]] except ImportError: raise PlotnineError('To use gaussian process smoothing, You need to install scikit-learn.') # depends on [control=['except'], data=[]] kwargs = params['method_args'] if not kwargs: warnings.warn("See sklearn.gaussian_process.GaussianProcessRegressor for parameters to pass in as 'method_args'", PlotnineWarning) # depends on [control=['if'], data=[]] regressor = gaussian_process.GaussianProcessRegressor(**kwargs) X = np.atleast_2d(data['x']).T n = len(data) Xseq = np.atleast_2d(xseq).T regressor.fit(X, data['y']) data = pd.DataFrame({'x': xseq}) if params['se']: (y, stderr) = regressor.predict(Xseq, return_std=True) data['y'] = y data['se'] = stderr (data['ymin'], data['ymax']) = tdist_ci(y, n - 1, stderr, params['level']) # depends on [control=['if'], data=[]] else: data['y'] = regressor.predict(Xseq, return_std=True) return data
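The core of gpr() is sklearn's GaussianProcessRegressor; a standalone sketch of the same fit/predict calls, leaving out the plotnine plumbing (tdist_ci, the warnings) and assuming only numpy and scikit-learn:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 30)
y = np.sin(x) + rng.normal(scale=0.1, size=x.size)

regressor = GaussianProcessRegressor()
regressor.fit(np.atleast_2d(x).T, y)  # X must be 2D: (n_samples, 1)

xseq = np.linspace(0, 10, 100)
mean, stderr = regressor.predict(np.atleast_2d(xseq).T, return_std=True)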
def Send(self, message):
    """Send one message.

    Deprecated; users should migrate to calling
    self.outgoing.InsertMessage directly.
    """
    if not self.outgoing:
        raise NotConfigured("Send address not provided.")
    self.outgoing.InsertMessage(message)
def function[Send, parameter[self, message]]: constant[Send one message. Deprecated, users should migrate to call self.outgoing.InsertMessage directly. ] if <ast.UnaryOp object at 0x7da2054a6410> begin[:] <ast.Raise object at 0x7da1b1389930> call[name[self].outgoing.InsertMessage, parameter[name[message]]]
keyword[def] identifier[Send] ( identifier[self] , identifier[message] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[outgoing] : keyword[raise] identifier[NotConfigured] ( literal[string] ) identifier[self] . identifier[outgoing] . identifier[InsertMessage] ( identifier[message] )
def Send(self, message): """Send one message. Deprecated, users should migrate to call self.outgoing.InsertMessage directly. """ if not self.outgoing: raise NotConfigured('Send address not provided.') # depends on [control=['if'], data=[]] self.outgoing.InsertMessage(message)
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
         allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
         allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
         mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
         multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None,
         dropFieldIfAllNull=None, encoding=None, locale=None):
    """
    Loads JSON files and returns the results as a :class:`DataFrame`.

    `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
    For JSON (one record per file), set the ``multiLine`` parameter to ``true``.

    If the ``schema`` parameter is not specified, this function goes
    through the input once to determine the input schema.

    :param path: string represents path to the JSON dataset, or a list of paths,
                 or RDD of Strings storing JSON objects.
    :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
                   or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
    :param primitivesAsString: infers all primitive values as a string type. If None is set,
                               it uses the default value, ``false``.
    :param prefersDecimal: infers all floating-point values as a decimal type. If the values
                           do not fit in decimal, then it infers them as doubles. If None is
                           set, it uses the default value, ``false``.
    :param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
                          it uses the default value, ``false``.
    :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
                                    it uses the default value, ``false``.
    :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
                              set, it uses the default value, ``true``.
    :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
                                    set, it uses the default value, ``false``.
    :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
                                               using backslash quoting mechanism. If None is
                                               set, it uses the default value, ``false``.
    :param mode: allows a mode for dealing with corrupt records during parsing. If None is
                 set, it uses the default value, ``PERMISSIVE``.

            * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
              into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
              fields to ``null``. To keep corrupt records, an user can set a string type \
              field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \
              schema does not have the field, it drops corrupt records during parsing. \
              When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \
              field in an output schema.
            * ``DROPMALFORMED`` : ignores the whole corrupted records.
            * ``FAILFAST`` : throws an exception when it meets corrupted records.

    :param columnNameOfCorruptRecord: allows renaming the new field having malformed string
                                      created by ``PERMISSIVE`` mode. This overrides
                                      ``spark.sql.columnNameOfCorruptRecord``. If None is set,
                                      it uses the value specified in
                                      ``spark.sql.columnNameOfCorruptRecord``.
    :param dateFormat: sets the string that indicates a date format. Custom date formats
                       follow the formats at ``java.time.format.DateTimeFormatter``. This
                       applies to date type. If None is set, it uses the
                       default value, ``yyyy-MM-dd``.
    :param timestampFormat: sets the string that indicates a timestamp format.
                            Custom date formats follow the formats at
                            ``java.time.format.DateTimeFormatter``.
                            This applies to timestamp type. If None is set, it uses the
                            default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
    :param multiLine: parse one record, which may span multiple lines, per file. If None is
                      set, it uses the default value, ``false``.
    :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
                                      characters (ASCII characters with value less than 32,
                                      including tab and line feed characters) or not.
    :param encoding: allows to forcibly set one of standard basic or extended encoding for
                     the JSON files. For example UTF-16BE, UTF-32LE. If None is set,
                     the encoding of input JSON will be detected automatically
                     when the multiLine option is set to ``true``.
    :param lineSep: defines the line separator that should be used for parsing. If None is
                    set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
    :param samplingRatio: defines fraction of input JSON objects used for schema inferring.
                          If None is set, it uses the default value, ``1.0``.
    :param dropFieldIfAllNull: whether to ignore column of all null values or empty
                               array/struct during schema inference. If None is set, it
                               uses the default value, ``false``.
    :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
                   it uses the default value, ``en-US``. For instance, ``locale`` is used
                   while parsing dates and timestamps.

    >>> df1 = spark.read.json('python/test_support/sql/people.json')
    >>> df1.dtypes
    [('age', 'bigint'), ('name', 'string')]
    >>> rdd = sc.textFile('python/test_support/sql/people.json')
    >>> df2 = spark.read.json(rdd)
    >>> df2.dtypes
    [('age', 'bigint'), ('name', 'string')]
    """
    self._set_opts(
        schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
        allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
        allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
        allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
        mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
        timestampFormat=timestampFormat, multiLine=multiLine,
        allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep,
        samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding,
        locale=locale)
    if isinstance(path, basestring):
        path = [path]
    if type(path) == list:
        return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
    elif isinstance(path, RDD):
        def func(iterator):
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = path.mapPartitions(func)
        keyed._bypass_serializer = True
        jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
        return self._df(self._jreader.json(jrdd))
    else:
        raise TypeError("path can be only string, list or RDD")
def function[json, parameter[self, path, schema, primitivesAsString, prefersDecimal, allowComments, allowUnquotedFieldNames, allowSingleQuotes, allowNumericLeadingZero, allowBackslashEscapingAnyCharacter, mode, columnNameOfCorruptRecord, dateFormat, timestampFormat, multiLine, allowUnquotedControlChars, lineSep, samplingRatio, dropFieldIfAllNull, encoding, locale]]: constant[ Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string into a field configured by ``columnNameOfCorruptRecord``, and sets malformed fields to ``null``. To keep corrupt records, an user can set a string type field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a schema does not have the field, it drops corrupt records during parsing. When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\r``, ``\r\n`` and ``\n``. :param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')] ] call[name[self]._set_opts, parameter[]] if call[name[isinstance], parameter[name[path], name[basestring]]] begin[:] variable[path] assign[=] list[[<ast.Name object at 0x7da1b20ab1c0>]] if compare[call[name[type], parameter[name[path]]] equal[==] name[list]] begin[:] return[call[name[self]._df, parameter[call[name[self]._jreader.json, parameter[call[name[self]._spark._sc._jvm.PythonUtils.toSeq, parameter[name[path]]]]]]]]
keyword[def] identifier[json] ( identifier[self] , identifier[path] , identifier[schema] = keyword[None] , identifier[primitivesAsString] = keyword[None] , identifier[prefersDecimal] = keyword[None] , identifier[allowComments] = keyword[None] , identifier[allowUnquotedFieldNames] = keyword[None] , identifier[allowSingleQuotes] = keyword[None] , identifier[allowNumericLeadingZero] = keyword[None] , identifier[allowBackslashEscapingAnyCharacter] = keyword[None] , identifier[mode] = keyword[None] , identifier[columnNameOfCorruptRecord] = keyword[None] , identifier[dateFormat] = keyword[None] , identifier[timestampFormat] = keyword[None] , identifier[multiLine] = keyword[None] , identifier[allowUnquotedControlChars] = keyword[None] , identifier[lineSep] = keyword[None] , identifier[samplingRatio] = keyword[None] , identifier[dropFieldIfAllNull] = keyword[None] , identifier[encoding] = keyword[None] , identifier[locale] = keyword[None] ): literal[string] identifier[self] . identifier[_set_opts] ( identifier[schema] = identifier[schema] , identifier[primitivesAsString] = identifier[primitivesAsString] , identifier[prefersDecimal] = identifier[prefersDecimal] , identifier[allowComments] = identifier[allowComments] , identifier[allowUnquotedFieldNames] = identifier[allowUnquotedFieldNames] , identifier[allowSingleQuotes] = identifier[allowSingleQuotes] , identifier[allowNumericLeadingZero] = identifier[allowNumericLeadingZero] , identifier[allowBackslashEscapingAnyCharacter] = identifier[allowBackslashEscapingAnyCharacter] , identifier[mode] = identifier[mode] , identifier[columnNameOfCorruptRecord] = identifier[columnNameOfCorruptRecord] , identifier[dateFormat] = identifier[dateFormat] , identifier[timestampFormat] = identifier[timestampFormat] , identifier[multiLine] = identifier[multiLine] , identifier[allowUnquotedControlChars] = identifier[allowUnquotedControlChars] , identifier[lineSep] = identifier[lineSep] , identifier[samplingRatio] = identifier[samplingRatio] , identifier[dropFieldIfAllNull] = identifier[dropFieldIfAllNull] , identifier[encoding] = identifier[encoding] , identifier[locale] = identifier[locale] ) keyword[if] identifier[isinstance] ( identifier[path] , identifier[basestring] ): identifier[path] =[ identifier[path] ] keyword[if] identifier[type] ( identifier[path] )== identifier[list] : keyword[return] identifier[self] . identifier[_df] ( identifier[self] . identifier[_jreader] . identifier[json] ( identifier[self] . identifier[_spark] . identifier[_sc] . identifier[_jvm] . identifier[PythonUtils] . identifier[toSeq] ( identifier[path] ))) keyword[elif] identifier[isinstance] ( identifier[path] , identifier[RDD] ): keyword[def] identifier[func] ( identifier[iterator] ): keyword[for] identifier[x] keyword[in] identifier[iterator] : keyword[if] keyword[not] identifier[isinstance] ( identifier[x] , identifier[basestring] ): identifier[x] = identifier[unicode] ( identifier[x] ) keyword[if] identifier[isinstance] ( identifier[x] , identifier[unicode] ): identifier[x] = identifier[x] . identifier[encode] ( literal[string] ) keyword[yield] identifier[x] identifier[keyed] = identifier[path] . identifier[mapPartitions] ( identifier[func] ) identifier[keyed] . identifier[_bypass_serializer] = keyword[True] identifier[jrdd] = identifier[keyed] . identifier[_jrdd] . identifier[map] ( identifier[self] . identifier[_spark] . identifier[_jvm] . identifier[BytesToString] ()) keyword[return] identifier[self] . identifier[_df] ( identifier[self] . identifier[_jreader] . identifier[json] ( identifier[jrdd] )) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] )
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None, dropFieldIfAllNull=None, encoding=None, locale=None): """ Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string into a field configured by ``columnNameOfCorruptRecord``, and sets malformed fields to ``null``. To keep corrupt records, an user can set a string type field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a schema does not have the field, it drops corrupt records during parsing. When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')] """ self._set_opts(schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding, locale=locale) if isinstance(path, basestring): path = [path] # depends on [control=['if'], data=['path']] if type(path) == list: return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path))) # depends on [control=['if'], data=[]] elif isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) # depends on [control=['if'], data=[]] if isinstance(x, unicode): x = x.encode('utf-8') # depends on [control=['if'], data=[]] yield x # depends on [control=['for'], data=['x']] keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) return self._df(self._jreader.json(jrdd)) # depends on [control=['if'], data=[]] else: raise TypeError('path can be only string, list or RDD')
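A hedged usage sketch of the multiLine option described in the docstring, assuming an active SparkSession named spark and a hypothetical file people_multiline.json holding one JSON record that spans several lines:

df = spark.read.json('people_multiline.json', multiLine=True)
df.printSchema()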
def safe_group_name(group_name, group_max_length=100, ellipsis=True): """Truncate group name to match limit breaking on space and optionally add an ellipsis. .. note:: Currently the ThreatConnect group name limit is 100 characters. Args: group_name (string): The raw group name to be truncated. group_max_length (int): The max length of the group name. ellipsis (boolean): If true the truncated name will have '...' appended. Returns: (string): The truncated group name with optional ellipsis. """ ellipsis_value = '' if ellipsis: ellipsis_value = ' ...' if group_name is not None and len(group_name) > group_max_length: # split name by spaces and reset group_name group_name_array = group_name.split(' ') group_name = '' for word in group_name_array: word = u'{}'.format(word) if (len(group_name) + len(word) + len(ellipsis_value)) >= group_max_length: group_name = '{}{}'.format(group_name, ellipsis_value) group_name = group_name.lstrip(' ') break group_name += ' {}'.format(word) return group_name
def function[safe_group_name, parameter[group_name, group_max_length, ellipsis]]: constant[Truncate group name to match limit breaking on space and optionally add an ellipsis. .. note:: Currently the ThreatConnect group name limit is 100 characters. Args: group_name (string): The raw group name to be truncated. group_max_length (int): The max length of the group name. ellipsis (boolean): If true the truncated name will have '...' appended. Returns: (string): The truncated group name with optional ellipsis. ] variable[ellipsis_value] assign[=] constant[] if name[ellipsis] begin[:] variable[ellipsis_value] assign[=] constant[ ...] if <ast.BoolOp object at 0x7da18c4cd0f0> begin[:] variable[group_name_array] assign[=] call[name[group_name].split, parameter[constant[ ]]] variable[group_name] assign[=] constant[] for taget[name[word]] in starred[name[group_name_array]] begin[:] variable[word] assign[=] call[constant[{}].format, parameter[name[word]]] if compare[binary_operation[binary_operation[call[name[len], parameter[name[group_name]]] + call[name[len], parameter[name[word]]]] + call[name[len], parameter[name[ellipsis_value]]]] greater_or_equal[>=] name[group_max_length]] begin[:] variable[group_name] assign[=] call[constant[{}{}].format, parameter[name[group_name], name[ellipsis_value]]] variable[group_name] assign[=] call[name[group_name].lstrip, parameter[constant[ ]]] break <ast.AugAssign object at 0x7da18f812560> return[name[group_name]]
keyword[def] identifier[safe_group_name] ( identifier[group_name] , identifier[group_max_length] = literal[int] , identifier[ellipsis] = keyword[True] ): literal[string] identifier[ellipsis_value] = literal[string] keyword[if] identifier[ellipsis] : identifier[ellipsis_value] = literal[string] keyword[if] identifier[group_name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[group_name] )> identifier[group_max_length] : identifier[group_name_array] = identifier[group_name] . identifier[split] ( literal[string] ) identifier[group_name] = literal[string] keyword[for] identifier[word] keyword[in] identifier[group_name_array] : identifier[word] = literal[string] . identifier[format] ( identifier[word] ) keyword[if] ( identifier[len] ( identifier[group_name] )+ identifier[len] ( identifier[word] )+ identifier[len] ( identifier[ellipsis_value] ))>= identifier[group_max_length] : identifier[group_name] = literal[string] . identifier[format] ( identifier[group_name] , identifier[ellipsis_value] ) identifier[group_name] = identifier[group_name] . identifier[lstrip] ( literal[string] ) keyword[break] identifier[group_name] += literal[string] . identifier[format] ( identifier[word] ) keyword[return] identifier[group_name]
def safe_group_name(group_name, group_max_length=100, ellipsis=True): """Truncate group name to match limit breaking on space and optionally add an ellipsis. .. note:: Currently the ThreatConnect group name limit is 100 characters. Args: group_name (string): The raw group name to be truncated. group_max_length (int): The max length of the group name. ellipsis (boolean): If true the truncated name will have '...' appended. Returns: (string): The truncated group name with optional ellipsis. """ ellipsis_value = '' if ellipsis: ellipsis_value = ' ...' # depends on [control=['if'], data=[]] if group_name is not None and len(group_name) > group_max_length: # split name by spaces and reset group_name group_name_array = group_name.split(' ') group_name = '' for word in group_name_array: word = u'{}'.format(word) if len(group_name) + len(word) + len(ellipsis_value) >= group_max_length: group_name = '{}{}'.format(group_name, ellipsis_value) group_name = group_name.lstrip(' ') break # depends on [control=['if'], data=[]] group_name += ' {}'.format(word) # depends on [control=['for'], data=['word']] # depends on [control=['if'], data=[]] return group_name
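A hypothetical call illustrating the truncation behaviour; the input name is made up.

name = " ".join(["alpha"] * 30)     # 179-character group name
short = safe_group_name(name, group_max_length=40)
print(short)                        # ends with ' ...'
print(len(short) <= 40)             # True: truncated on a word break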
def get(self, attr, value=None, resolve=True): """Get the value of an attribute from submit description file. Args: attr (str): The name of the attribute whose value should be returned. value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None. resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If False then return the raw value of 'attr'. Defaults to True. Returns: str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'. """ try: if resolve: value = self._resolve_attribute(attr) else: value = self.attributes[attr] except KeyError: pass return value
def function[get, parameter[self, attr, value, resolve]]: constant[Get the value of an attribute from submit description file. Args: attr (str): The name of the attribute whose value should be returned. value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None. resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If False then return the raw value of 'attr'. Defaults to True. Returns: str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'. ] <ast.Try object at 0x7da1b1e90580> return[name[value]]
keyword[def] identifier[get] ( identifier[self] , identifier[attr] , identifier[value] = keyword[None] , identifier[resolve] = keyword[True] ): literal[string] keyword[try] : keyword[if] identifier[resolve] : identifier[value] = identifier[self] . identifier[_resolve_attribute] ( identifier[attr] ) keyword[else] : identifier[value] = identifier[self] . identifier[attributes] [ identifier[attr] ] keyword[except] identifier[KeyError] : keyword[pass] keyword[return] identifier[value]
def get(self, attr, value=None, resolve=True): """Get the value of an attribute from submit description file. Args: attr (str): The name of the attribute whose value should be returned. value (str, optional): A default value to return if 'attr' doesn't exist. Defaults to None. resolve (bool, optional): If True then resolve references to other attributes in the value of 'attr'. If False then return the raw value of 'attr'. Defaults to True. Returns: str: The value assigned to 'attr' if 'attr' exists, otherwise 'value'. """ try: if resolve: value = self._resolve_attribute(attr) # depends on [control=['if'], data=[]] else: value = self.attributes[attr] # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] return value
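A toy harness for the accessor above; ToySubmit and its naive $(name) substitution are assumptions made for the sketch, not the real class, and `get` is assumed to be available as the top-level function defined above.

class ToySubmit:
    def __init__(self, attributes):
        self.attributes = attributes

    def _resolve_attribute(self, attr):
        # naive single-pass $(name) substitution, assumed for illustration
        value = self.attributes[attr]
        for key, other in self.attributes.items():
            value = value.replace("$(%s)" % key, other)
        return value

    get = get  # reuse the top-level function above as a method

job = ToySubmit({"logdir": "/tmp", "log": "$(logdir)/job.log"})
print(job.get("log"))                   # /tmp/job.log (references resolved)
print(job.get("log", resolve=False))    # $(logdir)/job.log (raw value)
print(job.get("missing", value="n/a"))  # n/a (default returned on KeyError)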
def OnDoubleClick(self, event): """Double click on a given square in the map""" node = HotMapNavigator.findNodeAtPosition(self.hot_map, event.GetPosition()) if node: wx.PostEvent( self, SquareActivationEvent( node=node, point=event.GetPosition(), map=self ) )
def function[OnDoubleClick, parameter[self, event]]: constant[Double click on a given square in the map] variable[node] assign[=] call[name[HotMapNavigator].findNodeAtPosition, parameter[name[self].hot_map, call[name[event].GetPosition, parameter[]]]] if name[node] begin[:] call[name[wx].PostEvent, parameter[name[self], call[name[SquareActivationEvent], parameter[]]]]
keyword[def] identifier[OnDoubleClick] ( identifier[self] , identifier[event] ): literal[string] identifier[node] = identifier[HotMapNavigator] . identifier[findNodeAtPosition] ( identifier[self] . identifier[hot_map] , identifier[event] . identifier[GetPosition] ()) keyword[if] identifier[node] : identifier[wx] . identifier[PostEvent] ( identifier[self] , identifier[SquareActivationEvent] ( identifier[node] = identifier[node] , identifier[point] = identifier[event] . identifier[GetPosition] (), identifier[map] = identifier[self] ))
def OnDoubleClick(self, event): """Double click on a given square in the map""" node = HotMapNavigator.findNodeAtPosition(self.hot_map, event.GetPosition()) if node: wx.PostEvent(self, SquareActivationEvent(node=node, point=event.GetPosition(), map=self)) # depends on [control=['if'], data=[]]
def expect_re(regexp, buf, pos): """Require a regular expression at the current buffer position.""" match = regexp.match(buf, pos) if not match: return None, len(buf) return buf[match.start(1):match.end(1)], match.end(0)
def function[expect_re, parameter[regexp, buf, pos]]: constant[Require a regular expression at the current buffer position.] variable[match] assign[=] call[name[regexp].match, parameter[name[buf], name[pos]]] if <ast.UnaryOp object at 0x7da1b0274760> begin[:] return[tuple[[<ast.Constant object at 0x7da1b02745b0>, <ast.Call object at 0x7da1b0277fd0>]]] return[tuple[[<ast.Subscript object at 0x7da1b02749d0>, <ast.Call object at 0x7da1b0274af0>]]]
keyword[def] identifier[expect_re] ( identifier[regexp] , identifier[buf] , identifier[pos] ): literal[string] identifier[match] = identifier[regexp] . identifier[match] ( identifier[buf] , identifier[pos] ) keyword[if] keyword[not] identifier[match] : keyword[return] keyword[None] , identifier[len] ( identifier[buf] ) keyword[return] identifier[buf] [ identifier[match] . identifier[start] ( literal[int] ): identifier[match] . identifier[end] ( literal[int] )], identifier[match] . identifier[end] ( literal[int] )
def expect_re(regexp, buf, pos): """Require a regular expression at the current buffer position.""" match = regexp.match(buf, pos) if not match: return (None, len(buf)) # depends on [control=['if'], data=[]] return (buf[match.start(1):match.end(1)], match.end(0))
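An illustrative call; note that the function returns group(1), so the supplied pattern must define exactly one capturing group. The pattern and buffer here are made up.

import re

key_re = re.compile(r'\s*([A-Za-z_]+)\s*=')
print(expect_re(key_re, "  width = 80", 0))   # ('width', 9)
print(expect_re(key_re, "80", 0))             # (None, 2): no match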
def applyCommand(self): """ Applies the current line of code as an interactive python command. """ # generate the command information cursor = self.textCursor() cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) at_end = cursor.atEnd() modifiers = QApplication.instance().keyboardModifiers() mod_mode = at_end or modifiers == Qt.ShiftModifier # test the line for information if mod_mode and line.endswith(':'): cursor.movePosition(cursor.EndOfLine) line = re.sub('^>>> ', '', line) line = re.sub('^\.\.\. ', '', line) count = len(line) - len(line.lstrip()) + 4 self.insertPlainText('\n... ' + count * ' ') return False elif mod_mode and line.startswith('...') and \ (line.strip() != '...' or not at_end): cursor.movePosition(cursor.EndOfLine) line = re.sub('^\.\.\. ', '', line) count = len(line) - len(line.lstrip()) self.insertPlainText('\n... ' + count * ' ') return False # if we're not at the end of the console, then add it to the end elif line.startswith('>>>') or line.startswith('...'): # move to the top of the command structure line = projex.text.nativestring(cursor.block().text()) while line.startswith('...'): cursor.movePosition(cursor.PreviousBlock) line = projex.text.nativestring(cursor.block().text()) # calculate the command cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) ended = False lines = [] while True: # add the new block lines.append(line) if cursor.atEnd(): ended = True break # move to the next line cursor.movePosition(cursor.NextBlock) cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) # check for a new command or the end of the command if not line.startswith('...'): break command = '\n'.join(lines) # if we did not end up at the end of the command block, then # copy it for modification if not (ended and command): self.waitForInput() self.insertPlainText(command.replace('>>> ', '')) cursor.movePosition(cursor.End) return False else: self.waitForInput() return False self.executeCommand(command) return True
def function[applyCommand, parameter[self]]: constant[ Applies the current line of code as an interactive python command. ] variable[cursor] assign[=] call[name[self].textCursor, parameter[]] call[name[cursor].movePosition, parameter[name[cursor].EndOfLine]] variable[line] assign[=] call[name[projex].text.nativestring, parameter[call[call[name[cursor].block, parameter[]].text, parameter[]]]] variable[at_end] assign[=] call[name[cursor].atEnd, parameter[]] variable[modifiers] assign[=] call[call[name[QApplication].instance, parameter[]].keyboardModifiers, parameter[]] variable[mod_mode] assign[=] <ast.BoolOp object at 0x7da2041d9570> if <ast.BoolOp object at 0x7da2041da8c0> begin[:] call[name[cursor].movePosition, parameter[name[cursor].EndOfLine]] variable[line] assign[=] call[name[re].sub, parameter[constant[^>>> ], constant[], name[line]]] variable[line] assign[=] call[name[re].sub, parameter[constant[^\.\.\. ], constant[], name[line]]] variable[count] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[line]]] - call[name[len], parameter[call[name[line].lstrip, parameter[]]]]] + constant[4]] call[name[self].insertPlainText, parameter[binary_operation[constant[ ... ] + binary_operation[name[count] * constant[ ]]]]] return[constant[False]] call[name[self].executeCommand, parameter[name[command]]] return[constant[True]]
keyword[def] identifier[applyCommand] ( identifier[self] ): literal[string] identifier[cursor] = identifier[self] . identifier[textCursor] () identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[EndOfLine] ) identifier[line] = identifier[projex] . identifier[text] . identifier[nativestring] ( identifier[cursor] . identifier[block] (). identifier[text] ()) identifier[at_end] = identifier[cursor] . identifier[atEnd] () identifier[modifiers] = identifier[QApplication] . identifier[instance] (). identifier[keyboardModifiers] () identifier[mod_mode] = identifier[at_end] keyword[or] identifier[modifiers] == identifier[Qt] . identifier[ShiftModifier] keyword[if] identifier[mod_mode] keyword[and] identifier[line] . identifier[endswith] ( literal[string] ): identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[EndOfLine] ) identifier[line] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[line] ) identifier[line] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[line] ) identifier[count] = identifier[len] ( identifier[line] )- identifier[len] ( identifier[line] . identifier[lstrip] ())+ literal[int] identifier[self] . identifier[insertPlainText] ( literal[string] + identifier[count] * literal[string] ) keyword[return] keyword[False] keyword[elif] identifier[mod_mode] keyword[and] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] ( identifier[line] . identifier[strip] ()!= literal[string] keyword[or] keyword[not] identifier[at_end] ): identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[EndOfLine] ) identifier[line] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[line] ) identifier[count] = identifier[len] ( identifier[line] )- identifier[len] ( identifier[line] . identifier[lstrip] ()) identifier[self] . identifier[insertPlainText] ( literal[string] + identifier[count] * literal[string] ) keyword[return] keyword[False] keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ) keyword[or] identifier[line] . identifier[startswith] ( literal[string] ): identifier[line] = identifier[projex] . identifier[text] . identifier[nativestring] ( identifier[cursor] . identifier[block] (). identifier[text] ()) keyword[while] identifier[line] . identifier[startswith] ( literal[string] ): identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[PreviousBlock] ) identifier[line] = identifier[projex] . identifier[text] . identifier[nativestring] ( identifier[cursor] . identifier[block] (). identifier[text] ()) identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[EndOfLine] ) identifier[line] = identifier[projex] . identifier[text] . identifier[nativestring] ( identifier[cursor] . identifier[block] (). identifier[text] ()) identifier[ended] = keyword[False] identifier[lines] =[] keyword[while] keyword[True] : identifier[lines] . identifier[append] ( identifier[line] ) keyword[if] identifier[cursor] . identifier[atEnd] (): identifier[ended] = keyword[True] keyword[break] identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[NextBlock] ) identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[EndOfLine] ) identifier[line] = identifier[projex] . identifier[text] . identifier[nativestring] ( identifier[cursor] . identifier[block] (). identifier[text] ()) keyword[if] keyword[not] identifier[line] . 
identifier[startswith] ( literal[string] ): keyword[break] identifier[command] = literal[string] . identifier[join] ( identifier[lines] ) keyword[if] keyword[not] ( identifier[ended] keyword[and] identifier[command] ): identifier[self] . identifier[waitForInput] () identifier[self] . identifier[insertPlainText] ( identifier[command] . identifier[replace] ( literal[string] , literal[string] )) identifier[cursor] . identifier[movePosition] ( identifier[cursor] . identifier[End] ) keyword[return] keyword[False] keyword[else] : identifier[self] . identifier[waitForInput] () keyword[return] keyword[False] identifier[self] . identifier[executeCommand] ( identifier[command] ) keyword[return] keyword[True]
def applyCommand(self): """ Applies the current line of code as an interactive python command. """ # generate the command information cursor = self.textCursor() cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) at_end = cursor.atEnd() modifiers = QApplication.instance().keyboardModifiers() mod_mode = at_end or modifiers == Qt.ShiftModifier # test the line for information if mod_mode and line.endswith(':'): cursor.movePosition(cursor.EndOfLine) line = re.sub('^>>> ', '', line) line = re.sub('^\\.\\.\\. ', '', line) count = len(line) - len(line.lstrip()) + 4 self.insertPlainText('\n... ' + count * ' ') return False # depends on [control=['if'], data=[]] elif mod_mode and line.startswith('...') and (line.strip() != '...' or not at_end): cursor.movePosition(cursor.EndOfLine) line = re.sub('^\\.\\.\\. ', '', line) count = len(line) - len(line.lstrip()) self.insertPlainText('\n... ' + count * ' ') return False # depends on [control=['if'], data=[]] # if we're not at the end of the console, then add it to the end elif line.startswith('>>>') or line.startswith('...'): # move to the top of the command structure line = projex.text.nativestring(cursor.block().text()) while line.startswith('...'): cursor.movePosition(cursor.PreviousBlock) line = projex.text.nativestring(cursor.block().text()) # depends on [control=['while'], data=[]] # calculate the command cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) ended = False lines = [] while True: # add the new block lines.append(line) if cursor.atEnd(): ended = True break # depends on [control=['if'], data=[]] # move to the next line cursor.movePosition(cursor.NextBlock) cursor.movePosition(cursor.EndOfLine) line = projex.text.nativestring(cursor.block().text()) # check for a new command or the end of the command if not line.startswith('...'): break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] command = '\n'.join(lines) # if we did not end up at the end of the command block, then # copy it for modification if not (ended and command): self.waitForInput() self.insertPlainText(command.replace('>>> ', '')) cursor.movePosition(cursor.End) return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: self.waitForInput() return False self.executeCommand(command) return True
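The prompt-stripping step at the heart of the method can be sketched without any Qt machinery; the transcript below is invented.

transcript = [">>> for i in range(2):", "...     print(i)"]
command = "\n".join(line[4:] for line in transcript)   # drop '>>> ' / '... '
print(command)
# for i in range(2):
#     print(i)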
def is_able_to_parse_detailed(self, desired_type: Type[Any], desired_ext: str, strict: bool) -> Tuple[bool, bool]: """ Utility method to check if a parser is able to parse a given type, either in * strict mode (desired_type must be one of the supported ones, or the parser should be generic) * inference mode (non-strict) : desired_type may be a parent class of one the parser is able to produce :param desired_type: the type of the object that should be parsed, :param desired_ext: the file extension that should be parsed :param strict: a boolean indicating whether to evaluate in strict mode or not :return: a first boolean indicating if there was a match, and a second boolean indicating if that match was strict (None if no match) """ # (1) first handle the easy joker+joker case if desired_ext is JOKER and desired_type is JOKER: return True, None # (2) if ext is not a joker we can quickly check if it is supported if desired_ext is not JOKER: check_var(desired_ext, var_types=str, var_name='desired_ext') if desired_ext not in self.supported_exts: # ** no match on extension - no need to go further return False, None # (3) if type=joker and ext is supported => easy if desired_type is JOKER: # ** only extension match is required - ok. return True, None # (4) at this point, ext is JOKER OR supported and type is not JOKER. Check type match check_var(desired_type, var_types=type, var_name='desired_type_of_output') check_var(strict, var_types=bool, var_name='strict') # -- first call custom checker if provided if self.is_able_to_parse_func is not None and not self.is_able_to_parse_func(strict, desired_type): return False, None # -- strict match : either the parser is able to parse Anything, or the type is in the list of supported types if self.is_generic() or (desired_type in self.supported_types): return True, True # exact match # -- non-strict match : if the parser is able to parse a subclass of the desired type, it is ok elif (not strict) \ and any(issubclass(supported, desired_type) for supported in self.supported_types): return True, False # approx match # -- no match at all else: return False, None
def function[is_able_to_parse_detailed, parameter[self, desired_type, desired_ext, strict]]: constant[ Utility method to check if a parser is able to parse a given type, either in * strict mode (desired_type must be one of the supported ones, or the parser should be generic) * inference mode (non-strict) : desired_type may be a parent class of one the parser is able to produce :param desired_type: the type of the object that should be parsed, :param desired_ext: the file extension that should be parsed :param strict: a boolean indicating whether to evaluate in strict mode or not :return: a first boolean indicating if there was a match, and a second boolean indicating if that match was strict (None if no match) ] if <ast.BoolOp object at 0x7da18ede5810> begin[:] return[tuple[[<ast.Constant object at 0x7da207f012d0>, <ast.Constant object at 0x7da207f03c70>]]] if compare[name[desired_ext] is_not name[JOKER]] begin[:] call[name[check_var], parameter[name[desired_ext]]] if compare[name[desired_ext] <ast.NotIn object at 0x7da2590d7190> name[self].supported_exts] begin[:] return[tuple[[<ast.Constant object at 0x7da207f02200>, <ast.Constant object at 0x7da207f03d00>]]] if compare[name[desired_type] is name[JOKER]] begin[:] return[tuple[[<ast.Constant object at 0x7da207f00880>, <ast.Constant object at 0x7da207f03fd0>]]] call[name[check_var], parameter[name[desired_type]]] call[name[check_var], parameter[name[strict]]] if <ast.BoolOp object at 0x7da207f01690> begin[:] return[tuple[[<ast.Constant object at 0x7da207f024d0>, <ast.Constant object at 0x7da207f01180>]]] if <ast.BoolOp object at 0x7da207f01210> begin[:] return[tuple[[<ast.Constant object at 0x7da207f000a0>, <ast.Constant object at 0x7da207f03e50>]]]
keyword[def] identifier[is_able_to_parse_detailed] ( identifier[self] , identifier[desired_type] : identifier[Type] [ identifier[Any] ], identifier[desired_ext] : identifier[str] , identifier[strict] : identifier[bool] )-> identifier[Tuple] [ identifier[bool] , identifier[bool] ]: literal[string] keyword[if] identifier[desired_ext] keyword[is] identifier[JOKER] keyword[and] identifier[desired_type] keyword[is] identifier[JOKER] : keyword[return] keyword[True] , keyword[None] keyword[if] identifier[desired_ext] keyword[is] keyword[not] identifier[JOKER] : identifier[check_var] ( identifier[desired_ext] , identifier[var_types] = identifier[str] , identifier[var_name] = literal[string] ) keyword[if] identifier[desired_ext] keyword[not] keyword[in] identifier[self] . identifier[supported_exts] : keyword[return] keyword[False] , keyword[None] keyword[if] identifier[desired_type] keyword[is] identifier[JOKER] : keyword[return] keyword[True] , keyword[None] identifier[check_var] ( identifier[desired_type] , identifier[var_types] = identifier[type] , identifier[var_name] = literal[string] ) identifier[check_var] ( identifier[strict] , identifier[var_types] = identifier[bool] , identifier[var_name] = literal[string] ) keyword[if] identifier[self] . identifier[is_able_to_parse_func] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[is_able_to_parse_func] ( identifier[strict] , identifier[desired_type] ): keyword[return] keyword[False] , keyword[None] keyword[if] identifier[self] . identifier[is_generic] () keyword[or] ( identifier[desired_type] keyword[in] identifier[self] . identifier[supported_types] ): keyword[return] keyword[True] , keyword[True] keyword[elif] ( keyword[not] identifier[strict] ) keyword[and] identifier[any] ( identifier[issubclass] ( identifier[supported] , identifier[desired_type] ) keyword[for] identifier[supported] keyword[in] identifier[self] . identifier[supported_types] ): keyword[return] keyword[True] , keyword[False] keyword[else] : keyword[return] keyword[False] , keyword[None]
def is_able_to_parse_detailed(self, desired_type: Type[Any], desired_ext: str, strict: bool) -> Tuple[bool, bool]: """ Utility method to check if a parser is able to parse a given type, either in * strict mode (desired_type must be one of the supported ones, or the parser should be generic) * inference mode (non-strict) : desired_type may be a parent class of one the parser is able to produce :param desired_type: the type of the object that should be parsed, :param desired_ext: the file extension that should be parsed :param strict: a boolean indicating whether to evaluate in strict mode or not :return: a first boolean indicating if there was a match, and a second boolean indicating if that match was strict (None if no match) """ # (1) first handle the easy joker+joker case if desired_ext is JOKER and desired_type is JOKER: return (True, None) # depends on [control=['if'], data=[]] # (2) if ext is not a joker we can quickly check if it is supported if desired_ext is not JOKER: check_var(desired_ext, var_types=str, var_name='desired_ext') if desired_ext not in self.supported_exts: # ** no match on extension - no need to go further return (False, None) # depends on [control=['if'], data=[]] # (3) if type=joker and ext is supported => easy if desired_type is JOKER: # ** only extension match is required - ok. return (True, None) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['desired_ext', 'JOKER']] # (4) at this point, ext is JOKER OR supported and type is not JOKER. Check type match check_var(desired_type, var_types=type, var_name='desired_type_of_output') check_var(strict, var_types=bool, var_name='strict') # -- first call custom checker if provided if self.is_able_to_parse_func is not None and (not self.is_able_to_parse_func(strict, desired_type)): return (False, None) # depends on [control=['if'], data=[]] # -- strict match : either the parser is able to parse Anything, or the type is in the list of supported types if self.is_generic() or desired_type in self.supported_types: return (True, True) # exact match # depends on [control=['if'], data=[]] # -- non-strict match : if the parser is able to parse a subclass of the desired type, it is ok elif not strict and any((issubclass(supported, desired_type) for supported in self.supported_types)): return (True, False) # approx match # depends on [control=['if'], data=[]] else: # -- no match at all return (False, None)
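A stand-alone sketch of the strict versus inference matching rule above, detached from the parser class for clarity; the class names are invented.

class Animal: pass
class Dog(Animal): pass

supported_types = {Dog}

def matches(desired_type, strict):
    if desired_type in supported_types:
        return True, True          # exact match, reported as strict
    if not strict and any(issubclass(s, desired_type) for s in supported_types):
        return True, False         # approximate match via a subclass
    return False, None

print(matches(Dog, strict=True))      # (True, True)
print(matches(Animal, strict=True))   # (False, None)
print(matches(Animal, strict=False))  # (True, False): Dog is a subclass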
def _calculate_fnr_fdr(group): """Calculate the false negative rate (1 - sensitivity) and false discovery rate (1 - precision). """ data = {k: d["value"] for k, d in group.set_index("metric").T.to_dict().items()} return pd.DataFrame([{"fnr": data["fn"] / float(data["tp"] + data["fn"]) * 100.0 if data["tp"] > 0 else 0.0, "fdr": data["fp"] / float(data["tp"] + data["fp"]) * 100.0 if data["tp"] > 0 else 0.0, "tpr": "TP: %s FN: %s" % (data["tp"], data["fn"]), "spc": "FP: %s" % (data["fp"])}])
def function[_calculate_fnr_fdr, parameter[group]]: constant[Calculate the false negative rate (1 - sensitivity) and false discovery rate (1 - precision). ] variable[data] assign[=] <ast.DictComp object at 0x7da20c6a9c30> return[call[name[pd].DataFrame, parameter[list[[<ast.Dict object at 0x7da20c6a8250>]]]]]
keyword[def] identifier[_calculate_fnr_fdr] ( identifier[group] ): literal[string] identifier[data] ={ identifier[k] : identifier[d] [ literal[string] ] keyword[for] identifier[k] , identifier[d] keyword[in] identifier[group] . identifier[set_index] ( literal[string] ). identifier[T] . identifier[to_dict] (). identifier[items] ()} keyword[return] identifier[pd] . identifier[DataFrame] ([{ literal[string] : identifier[data] [ literal[string] ]/ identifier[float] ( identifier[data] [ literal[string] ]+ identifier[data] [ literal[string] ])* literal[int] keyword[if] identifier[data] [ literal[string] ]> literal[int] keyword[else] literal[int] , literal[string] : identifier[data] [ literal[string] ]/ identifier[float] ( identifier[data] [ literal[string] ]+ identifier[data] [ literal[string] ])* literal[int] keyword[if] identifier[data] [ literal[string] ]> literal[int] keyword[else] literal[int] , literal[string] : literal[string] %( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]), literal[string] : literal[string] %( identifier[data] [ literal[string] ])}])
def _calculate_fnr_fdr(group): """Calculate the false negative rate (1 - sensitivity) and false discovery rate (1 - precision). """ data = {k: d['value'] for (k, d) in group.set_index('metric').T.to_dict().items()} return pd.DataFrame([{'fnr': data['fn'] / float(data['tp'] + data['fn']) * 100.0 if data['tp'] > 0 else 0.0, 'fdr': data['fp'] / float(data['tp'] + data['fp']) * 100.0 if data['tp'] > 0 else 0.0, 'tpr': 'TP: %s FN: %s' % (data['tp'], data['fn']), 'spc': 'FP: %s' % data['fp']}])
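A small worked example with made-up counts, in the long metric/value layout the function expects:

import pandas as pd

group = pd.DataFrame({"metric": ["tp", "fn", "fp"],
                      "value": [90, 10, 5]})
print(_calculate_fnr_fdr(group)[["fnr", "fdr"]])
# fnr = 10 / (90 + 10) * 100 = 10.0
# fdr = 5 / (90 + 5) * 100 ~= 5.26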
def copytree(self, target, symlinks=False): """Recursively copies this directory to the `target` location. The permissions and times are copied (like :meth:`~rpaths.Path.copystat`). If the optional `symlinks` flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. """ shutil.copytree(self.path, self._to_backend(target), symlinks)
def function[copytree, parameter[self, target, symlinks]]: constant[Recursively copies this directory to the `target` location. The permissions and times are copied (like :meth:`~rpaths.Path.copystat`). If the optional `symlinks` flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. ] call[name[shutil].copytree, parameter[name[self].path, call[name[self]._to_backend, parameter[name[target]]], name[symlinks]]]
keyword[def] identifier[copytree] ( identifier[self] , identifier[target] , identifier[symlinks] = keyword[False] ): literal[string] identifier[shutil] . identifier[copytree] ( identifier[self] . identifier[path] , identifier[self] . identifier[_to_backend] ( identifier[target] ), identifier[symlinks] )
def copytree(self, target, symlinks=False): """Recursively copies this directory to the `target` location. The permissions and times are copied (like :meth:`~rpaths.Path.copystat`). If the optional `symlinks` flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. """ shutil.copytree(self.path, self._to_backend(target), symlinks)
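Assumed usage against the rpaths-style Path object this method belongs to; the directory names are placeholders.

from rpaths import Path

src = Path('project_assets')
src.copytree('backup')                        # follow symlinks, copy contents
src.copytree('backup_links', symlinks=True)   # recreate symlinks instead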
def _generate_message_error(cls, response_code, messages, response_id): """ :type response_code: int :type messages: list[str] :type response_id: str :rtype: str """ line_response_code = cls._FORMAT_RESPONSE_CODE_LINE \ .format(response_code) line_response_id = cls._FORMAT_RESPONSE_ID_LINE.format(response_id) line_error_message = cls._FORMAT_ERROR_MESSAGE_LINE.format( cls._GLUE_ERROR_MESSAGE_STRING_EMPTY.join(messages) ) return cls._glue_all_error_message( [line_response_code, line_response_id, line_error_message] )
def function[_generate_message_error, parameter[cls, response_code, messages, response_id]]: constant[ :type response_code: int :type messages: list[str] :type response_id: str :rtype: str ] variable[line_response_code] assign[=] call[name[cls]._FORMAT_RESPONSE_CODE_LINE.format, parameter[name[response_code]]] variable[line_response_id] assign[=] call[name[cls]._FORMAT_RESPONSE_ID_LINE.format, parameter[name[response_id]]] variable[line_error_message] assign[=] call[name[cls]._FORMAT_ERROR_MESSAGE_LINE.format, parameter[call[name[cls]._GLUE_ERROR_MESSAGE_STRING_EMPTY.join, parameter[name[messages]]]]] return[call[name[cls]._glue_all_error_message, parameter[list[[<ast.Name object at 0x7da1b071af80>, <ast.Name object at 0x7da1b0718b50>, <ast.Name object at 0x7da1b07183a0>]]]]]
keyword[def] identifier[_generate_message_error] ( identifier[cls] , identifier[response_code] , identifier[messages] , identifier[response_id] ): literal[string] identifier[line_response_code] = identifier[cls] . identifier[_FORMAT_RESPONSE_CODE_LINE] . identifier[format] ( identifier[response_code] ) identifier[line_response_id] = identifier[cls] . identifier[_FORMAT_RESPONSE_ID_LINE] . identifier[format] ( identifier[response_id] ) identifier[line_error_message] = identifier[cls] . identifier[_FORMAT_ERROR_MESSAGE_LINE] . identifier[format] ( identifier[cls] . identifier[_GLUE_ERROR_MESSAGE_STRING_EMPTY] . identifier[join] ( identifier[messages] ) ) keyword[return] identifier[cls] . identifier[_glue_all_error_message] ( [ identifier[line_response_code] , identifier[line_response_id] , identifier[line_error_message] ] )
def _generate_message_error(cls, response_code, messages, response_id): """ :type response_code: int :type messages: list[str] :type response_id: str :rtype: str """ line_response_code = cls._FORMAT_RESPONSE_CODE_LINE.format(response_code) line_response_id = cls._FORMAT_RESPONSE_ID_LINE.format(response_id) line_error_message = cls._FORMAT_ERROR_MESSAGE_LINE.format(cls._GLUE_ERROR_MESSAGE_STRING_EMPTY.join(messages)) return cls._glue_all_error_message([line_response_code, line_response_id, line_error_message])
def unroll_auth_headers(self, authheaders, exclude_signature=False, sep=",", quote=True): """Converts an authorization header dict-like object into a string representing the authorization. Keyword arguments: authheaders -- A string-indexable object which contains the headers appropriate for this signature version. """ res = "" ordered = collections.OrderedDict(sorted(authheaders.items())) form = '{0}=\"{1}\"' if quote else '{0}={1}' if exclude_signature: return sep.join([form.format(k, urlquote(str(v), safe='')) for k, v in ordered.items() if k != 'signature']) else: return sep.join([form.format(k, urlquote(str(v), safe='') if k != 'signature' else str(v)) for k, v in ordered.items()])
def function[unroll_auth_headers, parameter[self, authheaders, exclude_signature, sep, quote]]: constant[Converts an authorization header dict-like object into a string representing the authorization. Keyword arguments: authheaders -- A string-indexable object which contains the headers appropriate for this signature version. ] variable[res] assign[=] constant[] variable[ordered] assign[=] call[name[collections].OrderedDict, parameter[call[name[sorted], parameter[call[name[authheaders].items, parameter[]]]]]] variable[form] assign[=] <ast.IfExp object at 0x7da1b14d69e0> if name[exclude_signature] begin[:] return[call[name[sep].join, parameter[<ast.ListComp object at 0x7da1b14d7940>]]]
keyword[def] identifier[unroll_auth_headers] ( identifier[self] , identifier[authheaders] , identifier[exclude_signature] = keyword[False] , identifier[sep] = literal[string] , identifier[quote] = keyword[True] ): literal[string] identifier[res] = literal[string] identifier[ordered] = identifier[collections] . identifier[OrderedDict] ( identifier[sorted] ( identifier[authheaders] . identifier[items] ())) identifier[form] = literal[string] keyword[if] identifier[quote] keyword[else] literal[string] keyword[if] identifier[exclude_signature] : keyword[return] identifier[sep] . identifier[join] ([ identifier[form] . identifier[format] ( identifier[k] , identifier[urlquote] ( identifier[str] ( identifier[v] ), identifier[safe] = literal[string] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[ordered] . identifier[items] () keyword[if] identifier[k] != literal[string] ]) keyword[else] : keyword[return] identifier[sep] . identifier[join] ([ identifier[form] . identifier[format] ( identifier[k] , identifier[urlquote] ( identifier[str] ( identifier[v] ), identifier[safe] = literal[string] ) keyword[if] identifier[k] != literal[string] keyword[else] identifier[str] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[ordered] . identifier[items] ()])
def unroll_auth_headers(self, authheaders, exclude_signature=False, sep=',', quote=True): """Converts an authorization header dict-like object into a string representing the authorization. Keyword arguments: authheaders -- A string-indexable object which contains the headers appropriate for this signature version. """ res = '' ordered = collections.OrderedDict(sorted(authheaders.items())) form = '{0}="{1}"' if quote else '{0}={1}' if exclude_signature: return sep.join([form.format(k, urlquote(str(v), safe='')) for (k, v) in ordered.items() if k != 'signature']) # depends on [control=['if'], data=[]] else: return sep.join([form.format(k, urlquote(str(v), safe='') if k != 'signature' else str(v)) for (k, v) in ordered.items()])
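A toy invocation; the Signer shell and the urlquote import are assumptions (the method relies on module-level collections and urlquote bindings that are not shown in the row above).

import collections
from urllib.parse import quote as urlquote   # assumed source of urlquote

class Signer:
    unroll_auth_headers = unroll_auth_headers   # reuse the function above

headers = {"key_id": "abc", "nonce": "4 2", "signature": "sig+/="}
s = Signer()
print(s.unroll_auth_headers(headers, exclude_signature=True))
# key_id="abc",nonce="4%202"                      (sorted, quoted, signature dropped)
print(s.unroll_auth_headers(headers))
# key_id="abc",nonce="4%202",signature="sig+/="   (signature left unquoted)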
def _get_from_riak(self, key): """ Args: key (str): riak key Returns: (tuple): riak obj json data and riak key """ obj = self.bucket.get(key) if obj.exists: return obj.data, obj.key raise ObjectDoesNotExist("%s %s" % (key, self.compiled_query))
def function[_get_from_riak, parameter[self, key]]: constant[ Args: key (str): riak key Returns: (tuple): riak obj json data and riak key ] variable[obj] assign[=] call[name[self].bucket.get, parameter[name[key]]] if name[obj].exists begin[:] return[tuple[[<ast.Attribute object at 0x7da18f723610>, <ast.Attribute object at 0x7da18f7213c0>]]] <ast.Raise object at 0x7da18f723100>
keyword[def] identifier[_get_from_riak] ( identifier[self] , identifier[key] ): literal[string] identifier[obj] = identifier[self] . identifier[bucket] . identifier[get] ( identifier[key] ) keyword[if] identifier[obj] . identifier[exists] : keyword[return] identifier[obj] . identifier[data] , identifier[obj] . identifier[key] keyword[raise] identifier[ObjectDoesNotExist] ( literal[string] %( identifier[key] , identifier[self] . identifier[compiled_query] ))
def _get_from_riak(self, key): """ Args: key (str): riak key Returns: (tuple): riak obj json data and riak key """ obj = self.bucket.get(key) if obj.exists: return (obj.data, obj.key) # depends on [control=['if'], data=[]] raise ObjectDoesNotExist('%s %s' % (key, self.compiled_query))
def UpdateUser(username, password=None, is_admin=False): """Updates the password or privilege-level for a user.""" user_type, password = _GetUserTypeAndPassword( username, password=password, is_admin=is_admin) grr_api = maintenance_utils.InitGRRRootAPI() grr_user = grr_api.GrrUser(username).Get() grr_user.Modify(user_type=user_type, password=password)
def function[UpdateUser, parameter[username, password, is_admin]]: constant[Updates the password or privilege-level for a user.] <ast.Tuple object at 0x7da1b1c0dd80> assign[=] call[name[_GetUserTypeAndPassword], parameter[name[username]]] variable[grr_api] assign[=] call[name[maintenance_utils].InitGRRRootAPI, parameter[]] variable[grr_user] assign[=] call[call[name[grr_api].GrrUser, parameter[name[username]]].Get, parameter[]] call[name[grr_user].Modify, parameter[]]
keyword[def] identifier[UpdateUser] ( identifier[username] , identifier[password] = keyword[None] , identifier[is_admin] = keyword[False] ): literal[string] identifier[user_type] , identifier[password] = identifier[_GetUserTypeAndPassword] ( identifier[username] , identifier[password] = identifier[password] , identifier[is_admin] = identifier[is_admin] ) identifier[grr_api] = identifier[maintenance_utils] . identifier[InitGRRRootAPI] () identifier[grr_user] = identifier[grr_api] . identifier[GrrUser] ( identifier[username] ). identifier[Get] () identifier[grr_user] . identifier[Modify] ( identifier[user_type] = identifier[user_type] , identifier[password] = identifier[password] )
def UpdateUser(username, password=None, is_admin=False): """Updates the password or privilege-level for a user.""" (user_type, password) = _GetUserTypeAndPassword(username, password=password, is_admin=is_admin) grr_api = maintenance_utils.InitGRRRootAPI() grr_user = grr_api.GrrUser(username).Get() grr_user.Modify(user_type=user_type, password=password)
def DbPutDeviceAttributeProperty2(self, argin): """ Put device attribute property. This command adds the possibility to have attribute properties which are arrays. Not possible with the old DbPutDeviceAttributeProperty command. This old command is not deleted for compatibility reasons. :param argin: Str[0] = Device name Str[1] = Attribute number Str[2] = Attribute name Str[3] = Property number Str[4] = Property name Str[5] = Property value number (array case) Str[6] = Property value 1 Str[n] = Property value n (array case) ..... :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid """ self._log.debug("In DbPutDeviceAttributeProperty2()") device_name = argin[0] nb_attributes = int(argin[1]) self.db.put_device_attribute_property2(device_name, nb_attributes, argin[2:])
def function[DbPutDeviceAttributeProperty2, parameter[self, argin]]: constant[ Put device attribute property. This command adds the possibility to have attribute properties which are arrays. Not possible with the old DbPutDeviceAttributeProperty command. This old command is not deleted for compatibility reasons. :param argin: Str[0] = Device name Str[1] = Attribute number Str[2] = Attribute name Str[3] = Property number Str[4] = Property name Str[5] = Property value number (array case) Str[6] = Property value 1 Str[n] = Property value n (array case) ..... :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid ] call[name[self]._log.debug, parameter[constant[In DbPutDeviceAttributeProperty2()]]] variable[device_name] assign[=] call[name[argin]][constant[0]] variable[nb_attributes] assign[=] call[name[int], parameter[call[name[argin]][constant[1]]]] call[name[self].db.put_device_attribute_property2, parameter[name[device_name], name[nb_attributes], call[name[argin]][<ast.Slice object at 0x7da2041d9cf0>]]]
keyword[def] identifier[DbPutDeviceAttributeProperty2] ( identifier[self] , identifier[argin] ): literal[string] identifier[self] . identifier[_log] . identifier[debug] ( literal[string] ) identifier[device_name] = identifier[argin] [ literal[int] ] identifier[nb_attributes] = identifier[int] ( identifier[argin] [ literal[int] ]) identifier[self] . identifier[db] . identifier[put_device_attribute_property2] ( identifier[device_name] , identifier[nb_attributes] , identifier[argin] [ literal[int] :])
def DbPutDeviceAttributeProperty2(self, argin): """ Put device attribute property. This command adds the possibility to have attribute properties which are arrays. Not possible with the old DbPutDeviceAttributeProperty command. This old command is not deleted for compatibility reasons. :param argin: Str[0] = Device name Str[1] = Attribute number Str[2] = Attribute name Str[3] = Property number Str[4] = Property name Str[5] = Property value number (array case) Str[6] = Property value 1 Str[n] = Property value n (array case) ..... :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid """ self._log.debug('In DbPutDeviceAttributeProperty2()') device_name = argin[0] nb_attributes = int(argin[1]) self.db.put_device_attribute_property2(device_name, nb_attributes, argin[2:])
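An example of the flattened argin layout the docstring describes, with made-up values: one device carrying one attribute that has a single two-element array property.

argin = [
    "sys/tg_test/1",    # Str[0]: device name
    "1",                # Str[1]: number of attributes
    "double_scalar",    # Str[2]: attribute name
    "1",                # Str[3]: number of properties for this attribute
    "archive_period",   # Str[4]: property name
    "2",                # Str[5]: number of property values (array case)
    "1000",             # Str[6]: property value 1
    "2000",             # Str[7]: property value 2
]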
def interactive_output(f, controls): """Connect widget controls to a function. This function does not generate a user interface for the widgets (unlike `interact`). This enables customisation of the widget user interface layout. The user interface layout must be defined and displayed manually. """ out = Output() def observer(change): kwargs = {k:v.value for k,v in controls.items()} show_inline_matplotlib_plots() with out: clear_output(wait=True) f(**kwargs) show_inline_matplotlib_plots() for k,w in controls.items(): w.observe(observer, 'value') show_inline_matplotlib_plots() observer(None) return out
def function[interactive_output, parameter[f, controls]]: constant[Connect widget controls to a function. This function does not generate a user interface for the widgets (unlike `interact`). This enables customisation of the widget user interface layout. The user interface layout must be defined and displayed manually. ] variable[out] assign[=] call[name[Output], parameter[]] def function[observer, parameter[change]]: variable[kwargs] assign[=] <ast.DictComp object at 0x7da18f722da0> call[name[show_inline_matplotlib_plots], parameter[]] with name[out] begin[:] call[name[clear_output], parameter[]] call[name[f], parameter[]] call[name[show_inline_matplotlib_plots], parameter[]] for taget[tuple[[<ast.Name object at 0x7da18f723010>, <ast.Name object at 0x7da18f723490>]]] in starred[call[name[controls].items, parameter[]]] begin[:] call[name[w].observe, parameter[name[observer], constant[value]]] call[name[show_inline_matplotlib_plots], parameter[]] call[name[observer], parameter[constant[None]]] return[name[out]]
keyword[def] identifier[interactive_output] ( identifier[f] , identifier[controls] ): literal[string] identifier[out] = identifier[Output] () keyword[def] identifier[observer] ( identifier[change] ): identifier[kwargs] ={ identifier[k] : identifier[v] . identifier[value] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[controls] . identifier[items] ()} identifier[show_inline_matplotlib_plots] () keyword[with] identifier[out] : identifier[clear_output] ( identifier[wait] = keyword[True] ) identifier[f] (** identifier[kwargs] ) identifier[show_inline_matplotlib_plots] () keyword[for] identifier[k] , identifier[w] keyword[in] identifier[controls] . identifier[items] (): identifier[w] . identifier[observe] ( identifier[observer] , literal[string] ) identifier[show_inline_matplotlib_plots] () identifier[observer] ( keyword[None] ) keyword[return] identifier[out]
def interactive_output(f, controls): """Connect widget controls to a function. This function does not generate a user interface for the widgets (unlike `interact`). This enables customisation of the widget user interface layout. The user interface layout must be defined and displayed manually. """ out = Output() def observer(change): kwargs = {k: v.value for (k, v) in controls.items()} show_inline_matplotlib_plots() with out: clear_output(wait=True) f(**kwargs) show_inline_matplotlib_plots() # depends on [control=['with'], data=[]] for (k, w) in controls.items(): w.observe(observer, 'value') # depends on [control=['for'], data=[]] show_inline_matplotlib_plots() observer(None) return out
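Typical notebook usage of this function, which is ipywidgets' public interactive_output: two sliders are wired to a callback while the widget layout stays under manual control.

import ipywidgets as widgets
from IPython.display import display

a = widgets.IntSlider(description='a')
b = widgets.IntSlider(description='b')

def show(a, b):
    print('a + b =', a + b)

out = widgets.interactive_output(show, {'a': a, 'b': b})
display(widgets.HBox([a, b]), out)   # layout defined by hand, not generated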
def _clean_freebayes_output(line): """Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on. """ if line.startswith("#"): line = line.replace("Type=Int,D", "Type=Integer,D") return line else: parts = line.split("\t") alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()] if len(alleles) == len(set(alleles)): return line return None
def function[_clean_freebayes_output, parameter[line]]: constant[Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on. ] if call[name[line].startswith, parameter[constant[#]]] begin[:] variable[line] assign[=] call[name[line].replace, parameter[constant[Type=Int,D], constant[Type=Integer,D]]] return[name[line]] return[constant[None]]
keyword[def] identifier[_clean_freebayes_output] ( identifier[line] ): literal[string] keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[line] = identifier[line] . identifier[replace] ( literal[string] , literal[string] ) keyword[return] identifier[line] keyword[else] : identifier[parts] = identifier[line] . identifier[split] ( literal[string] ) identifier[alleles] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[parts] [ literal[int] ]. identifier[split] ( literal[string] )]+[ identifier[parts] [ literal[int] ]. identifier[strip] ()] keyword[if] identifier[len] ( identifier[alleles] )== identifier[len] ( identifier[set] ( identifier[alleles] )): keyword[return] identifier[line] keyword[return] keyword[None]
def _clean_freebayes_output(line): """Clean FreeBayes output to make post-processing with GATK happy. XXX Not applied on recent versions which fix issues to be more compatible with bgzip output, but retained in case of need. - Remove lines from FreeBayes outputs where REF/ALT are identical: 2 22816178 . G G 0.0339196 or there are multiple duplicate alleles: 4 60594753 . TGAAA T,T - Remove Type=Int specifications which are not valid VCF and GATK chokes on. """ if line.startswith('#'): line = line.replace('Type=Int,D', 'Type=Integer,D') return line # depends on [control=['if'], data=[]] else: parts = line.split('\t') alleles = [x.strip() for x in parts[4].split(',')] + [parts[3].strip()] if len(alleles) == len(set(alleles)): return line # depends on [control=['if'], data=[]] return None
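Three made-up VCF-style lines exercising the filters described in the docstring:

header = '##INFO=<ID=DP,Number=1,Type=Int,Description="Depth">'
dup = "4\t60594753\t.\tTGAAA\tT,T\t50\t.\t."
ok = "2\t22816178\t.\tG\tA\t99\t.\t."

print(_clean_freebayes_output(header))  # Type=Int rewritten to Type=Integer
print(_clean_freebayes_output(dup))     # None: duplicate ALT alleles dropped
print(_clean_freebayes_output(ok))      # returned unchanged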
def setup_auth_paths(app, auth, prefix, params): """Add URL rules for auth paths.""" base = urljoin('/', prefix + '/') # Must end in slash app.add_url_rule(base + 'login', prefix + 'login_handler', auth.login_handler, defaults=params) app.add_url_rule(base + 'logout', prefix + 'logout_handler', auth.logout_handler, defaults=params) if (auth.client_id_handler): app.add_url_rule(base + 'client', prefix + 'client_id_handler', auth.client_id_handler, defaults=params) app.add_url_rule(base + 'token', prefix + 'access_token_handler', auth.access_token_handler, defaults=params) if (auth.home_handler): app.add_url_rule(base + 'home', prefix + 'home_handler', auth.home_handler, defaults=params)
def function[setup_auth_paths, parameter[app, auth, prefix, params]]: constant[Add URL rules for auth paths.] variable[base] assign[=] call[name[urljoin], parameter[constant[/], binary_operation[name[prefix] + constant[/]]]] call[name[app].add_url_rule, parameter[binary_operation[name[base] + constant[login]], binary_operation[name[prefix] + constant[login_handler]], name[auth].login_handler]] call[name[app].add_url_rule, parameter[binary_operation[name[base] + constant[logout]], binary_operation[name[prefix] + constant[logout_handler]], name[auth].logout_handler]] if name[auth].client_id_handler begin[:] call[name[app].add_url_rule, parameter[binary_operation[name[base] + constant[client]], binary_operation[name[prefix] + constant[client_id_handler]], name[auth].client_id_handler]] call[name[app].add_url_rule, parameter[binary_operation[name[base] + constant[token]], binary_operation[name[prefix] + constant[access_token_handler]], name[auth].access_token_handler]] if name[auth].home_handler begin[:] call[name[app].add_url_rule, parameter[binary_operation[name[base] + constant[home]], binary_operation[name[prefix] + constant[home_handler]], name[auth].home_handler]]
keyword[def] identifier[setup_auth_paths] ( identifier[app] , identifier[auth] , identifier[prefix] , identifier[params] ): literal[string] identifier[base] = identifier[urljoin] ( literal[string] , identifier[prefix] + literal[string] ) identifier[app] . identifier[add_url_rule] ( identifier[base] + literal[string] , identifier[prefix] + literal[string] , identifier[auth] . identifier[login_handler] , identifier[defaults] = identifier[params] ) identifier[app] . identifier[add_url_rule] ( identifier[base] + literal[string] , identifier[prefix] + literal[string] , identifier[auth] . identifier[logout_handler] , identifier[defaults] = identifier[params] ) keyword[if] ( identifier[auth] . identifier[client_id_handler] ): identifier[app] . identifier[add_url_rule] ( identifier[base] + literal[string] , identifier[prefix] + literal[string] , identifier[auth] . identifier[client_id_handler] , identifier[defaults] = identifier[params] ) identifier[app] . identifier[add_url_rule] ( identifier[base] + literal[string] , identifier[prefix] + literal[string] , identifier[auth] . identifier[access_token_handler] , identifier[defaults] = identifier[params] ) keyword[if] ( identifier[auth] . identifier[home_handler] ): identifier[app] . identifier[add_url_rule] ( identifier[base] + literal[string] , identifier[prefix] + literal[string] , identifier[auth] . identifier[home_handler] , identifier[defaults] = identifier[params] )
def setup_auth_paths(app, auth, prefix, params): """Add URL rules for auth paths.""" base = urljoin('/', prefix + '/') # Must end in slash app.add_url_rule(base + 'login', prefix + 'login_handler', auth.login_handler, defaults=params) app.add_url_rule(base + 'logout', prefix + 'logout_handler', auth.logout_handler, defaults=params) if auth.client_id_handler: app.add_url_rule(base + 'client', prefix + 'client_id_handler', auth.client_id_handler, defaults=params) # depends on [control=['if'], data=[]] app.add_url_rule(base + 'token', prefix + 'access_token_handler', auth.access_token_handler, defaults=params) if auth.home_handler: app.add_url_rule(base + 'home', prefix + 'home_handler', auth.home_handler, defaults=params) # depends on [control=['if'], data=[]]
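A minimal Flask harness for the wiring above; DummyAuth and its handlers are placeholders for the real auth object, and the function plus its urljoin dependency are assumed to be in scope. The optional client/home handlers are left unset, so those rules are skipped.

from flask import Flask
from urllib.parse import urljoin   # assumed source of urljoin

class DummyAuth:
    client_id_handler = None
    home_handler = None
    def login_handler(self, **params): return "login"
    def logout_handler(self, **params): return "logout"
    def access_token_handler(self, **params): return "token"

app = Flask(__name__)
setup_auth_paths(app, DummyAuth(), "auth", {})
# Registers /auth/login, /auth/logout and /auth/token on the app.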
def date_created(self): """Date the Scopus record was created.""" date_created = self.xml.find('institution-profile/date-created') if date_created is not None: date_created = (int(date_created.attrib['year']), int(date_created.attrib['month']), int(date_created.attrib['day'])) else: date_created = (None, None, None) return date_created
def function[date_created, parameter[self]]: constant[Date the Scopus record was created.] variable[date_created] assign[=] call[name[self].xml.find, parameter[constant[institution-profile/date-created]]] if compare[name[date_created] is_not constant[None]] begin[:] variable[date_created] assign[=] tuple[[<ast.Call object at 0x7da2047ea8f0>, <ast.Call object at 0x7da2047eb3a0>, <ast.Call object at 0x7da2047e80a0>]] return[name[date_created]]
keyword[def] identifier[date_created] ( identifier[self] ): literal[string] identifier[date_created] = identifier[self] . identifier[xml] . identifier[find] ( literal[string] ) keyword[if] identifier[date_created] keyword[is] keyword[not] keyword[None] : identifier[date_created] =( identifier[int] ( identifier[date_created] . identifier[attrib] [ literal[string] ]), identifier[int] ( identifier[date_created] . identifier[attrib] [ literal[string] ]), identifier[int] ( identifier[date_created] . identifier[attrib] [ literal[string] ])) keyword[else] : identifier[date_created] =( keyword[None] , keyword[None] , keyword[None] ) keyword[return] identifier[date_created]
def date_created(self): """Date the Scopus record was created.""" date_created = self.xml.find('institution-profile/date-created') if date_created is not None: date_created = (int(date_created.attrib['year']), int(date_created.attrib['month']), int(date_created.attrib['day'])) # depends on [control=['if'], data=['date_created']] else: date_created = (None, None, None) return date_created
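date_created above expects an institution-profile/date-created node with year/month/day attributes; a standalone check of that parsing with ElementTree and made-up values:

import xml.etree.ElementTree as ET

xml = ET.fromstring('<r><institution-profile>'
                    '<date-created year="2008" month="5" day="21"/>'
                    '</institution-profile></r>')
node = xml.find('institution-profile/date-created')
print(tuple(int(node.attrib[k]) for k in ('year', 'month', 'day')))  # (2008, 5, 21)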
def validatePopElement(self, ctxt, elem, qname): """Pop the element end from the validation stack. """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidatePopElement(ctxt__o, self._o, elem__o, qname) return ret
def function[validatePopElement, parameter[self, ctxt, elem, qname]]: constant[Pop the element end from the validation stack. ] if compare[name[ctxt] is constant[None]] begin[:] variable[ctxt__o] assign[=] constant[None] if compare[name[elem] is constant[None]] begin[:] variable[elem__o] assign[=] constant[None] variable[ret] assign[=] call[name[libxml2mod].xmlValidatePopElement, parameter[name[ctxt__o], name[self]._o, name[elem__o], name[qname]]] return[name[ret]]
keyword[def] identifier[validatePopElement] ( identifier[self] , identifier[ctxt] , identifier[elem] , identifier[qname] ): literal[string] keyword[if] identifier[ctxt] keyword[is] keyword[None] : identifier[ctxt__o] = keyword[None] keyword[else] : identifier[ctxt__o] = identifier[ctxt] . identifier[_o] keyword[if] identifier[elem] keyword[is] keyword[None] : identifier[elem__o] = keyword[None] keyword[else] : identifier[elem__o] = identifier[elem] . identifier[_o] identifier[ret] = identifier[libxml2mod] . identifier[xmlValidatePopElement] ( identifier[ctxt__o] , identifier[self] . identifier[_o] , identifier[elem__o] , identifier[qname] ) keyword[return] identifier[ret]
def validatePopElement(self, ctxt, elem, qname): """Pop the element end from the validation stack. """ if ctxt is None: ctxt__o = None # depends on [control=['if'], data=[]] else: ctxt__o = ctxt._o if elem is None: elem__o = None # depends on [control=['if'], data=[]] else: elem__o = elem._o ret = libxml2mod.xmlValidatePopElement(ctxt__o, self._o, elem__o, qname) return ret
def copy_resources_to_log_dir(log_dir): """Copies the necessary static assets to the log_dir and returns the path of the main css file.""" css_path = resource_filename(Requirement.parse("egat"), "/egat/data/default.css") header_path = resource_filename(Requirement.parse("egat"), "/egat/data/egat_header.png") shutil.copyfile(css_path, log_dir + "/style.css") shutil.copyfile(header_path, log_dir + "/egat_header.png") return log_dir + os.sep + "style.css"
def function[copy_resources_to_log_dir, parameter[log_dir]]: constant[Copies the necessary static assets to the log_dir and returns the path of the main css file.] variable[css_path] assign[=] call[name[resource_filename], parameter[call[name[Requirement].parse, parameter[constant[egat]]], constant[/egat/data/default.css]]] variable[header_path] assign[=] call[name[resource_filename], parameter[call[name[Requirement].parse, parameter[constant[egat]]], constant[/egat/data/egat_header.png]]] call[name[shutil].copyfile, parameter[name[css_path], binary_operation[name[log_dir] + constant[/style.css]]]] call[name[shutil].copyfile, parameter[name[header_path], binary_operation[name[log_dir] + constant[/egat_header.png]]]] return[binary_operation[binary_operation[name[log_dir] + name[os].sep] + constant[style.css]]]
keyword[def] identifier[copy_resources_to_log_dir] ( identifier[log_dir] ): literal[string] identifier[css_path] = identifier[resource_filename] ( identifier[Requirement] . identifier[parse] ( literal[string] ), literal[string] ) identifier[header_path] = identifier[resource_filename] ( identifier[Requirement] . identifier[parse] ( literal[string] ), literal[string] ) identifier[shutil] . identifier[copyfile] ( identifier[css_path] , identifier[log_dir] + literal[string] ) identifier[shutil] . identifier[copyfile] ( identifier[header_path] , identifier[log_dir] + literal[string] ) keyword[return] identifier[log_dir] + identifier[os] . identifier[sep] + literal[string]
def copy_resources_to_log_dir(log_dir): """Copies the necessary static assets to the log_dir and returns the path of the main css file.""" css_path = resource_filename(Requirement.parse('egat'), '/egat/data/default.css') header_path = resource_filename(Requirement.parse('egat'), '/egat/data/egat_header.png') shutil.copyfile(css_path, log_dir + '/style.css') shutil.copyfile(header_path, log_dir + '/egat_header.png') return log_dir + os.sep + 'style.css'
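A hedged call of copy_resources_to_log_dir; it only works where the egat package is installed, since pkg_resources must resolve the bundled data files:

import tempfile

log_dir = tempfile.mkdtemp()
css_path = copy_resources_to_log_dir(log_dir)  # copies style.css and egat_header.png
print(css_path)                                # <log_dir>/style.css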
def UnregisterMessageHandler(self, timeout=None): """Unregisters any registered message handler.""" if self.handler_thread: self.handler_stop = True self.handler_thread.join(timeout) if self.handler_thread.isAlive(): raise RuntimeError("Message handler thread did not join in time.") self.handler_thread = None
def function[UnregisterMessageHandler, parameter[self, timeout]]: constant[Unregisters any registered message handler.] if name[self].handler_thread begin[:] name[self].handler_stop assign[=] constant[True] call[name[self].handler_thread.join, parameter[name[timeout]]] if call[name[self].handler_thread.isAlive, parameter[]] begin[:] <ast.Raise object at 0x7da1b1d914e0> name[self].handler_thread assign[=] constant[None]
keyword[def] identifier[UnregisterMessageHandler] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[handler_thread] : identifier[self] . identifier[handler_stop] = keyword[True] identifier[self] . identifier[handler_thread] . identifier[join] ( identifier[timeout] ) keyword[if] identifier[self] . identifier[handler_thread] . identifier[isAlive] (): keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[self] . identifier[handler_thread] = keyword[None]
def UnregisterMessageHandler(self, timeout=None): """Unregisters any registered message handler.""" if self.handler_thread: self.handler_stop = True self.handler_thread.join(timeout) if self.handler_thread.isAlive(): raise RuntimeError('Message handler thread did not join in time.') # depends on [control=['if'], data=[]] self.handler_thread = None # depends on [control=['if'], data=[]]
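The stop-flag-then-join pattern UnregisterMessageHandler relies on, reduced to a standalone sketch (names here are illustrative, not from the original class):

import threading
import time

stop = threading.Event()

def worker():                      # stands in for the handler thread's loop
    while not stop.is_set():
        time.sleep(0.1)

t = threading.Thread(target=worker)
t.start()
stop.set()                         # equivalent of handler_stop = True
t.join(timeout=5)
if t.is_alive():
    raise RuntimeError('Message handler thread did not join in time.')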
def bbox(img): """Find the bounding box around nonzero elements in the given array Copied from https://stackoverflow.com/a/31402351/5703449 . Returns: rowmin, rowmax, colmin, colmax """ rows = np.any(img, axis=1) cols = np.any(img, axis=0) rmin, rmax = np.where(rows)[0][[0, -1]] cmin, cmax = np.where(cols)[0][[0, -1]] return rmin, rmax, cmin, cmax
def function[bbox, parameter[img]]: constant[Find the bounding box around nonzero elements in the given array Copied from https://stackoverflow.com/a/31402351/5703449 . Returns: rowmin, rowmax, colmin, colmax ] variable[rows] assign[=] call[name[np].any, parameter[name[img]]] variable[cols] assign[=] call[name[np].any, parameter[name[img]]] <ast.Tuple object at 0x7da1b22a59f0> assign[=] call[call[call[name[np].where, parameter[name[rows]]]][constant[0]]][list[[<ast.Constant object at 0x7da1b22a4400>, <ast.UnaryOp object at 0x7da1b22a6500>]]] <ast.Tuple object at 0x7da1b22a66b0> assign[=] call[call[call[name[np].where, parameter[name[cols]]]][constant[0]]][list[[<ast.Constant object at 0x7da1b22f99c0>, <ast.UnaryOp object at 0x7da1b22f8520>]]] return[tuple[[<ast.Name object at 0x7da1b22f8dc0>, <ast.Name object at 0x7da1b22f8460>, <ast.Name object at 0x7da1b22f9750>, <ast.Name object at 0x7da1b22fa710>]]]
keyword[def] identifier[bbox] ( identifier[img] ): literal[string] identifier[rows] = identifier[np] . identifier[any] ( identifier[img] , identifier[axis] = literal[int] ) identifier[cols] = identifier[np] . identifier[any] ( identifier[img] , identifier[axis] = literal[int] ) identifier[rmin] , identifier[rmax] = identifier[np] . identifier[where] ( identifier[rows] )[ literal[int] ][[ literal[int] ,- literal[int] ]] identifier[cmin] , identifier[cmax] = identifier[np] . identifier[where] ( identifier[cols] )[ literal[int] ][[ literal[int] ,- literal[int] ]] keyword[return] identifier[rmin] , identifier[rmax] , identifier[cmin] , identifier[cmax]
def bbox(img): """Find the bounding box around nonzero elements in the given array Copied from https://stackoverflow.com/a/31402351/5703449 . Returns: rowmin, rowmax, colmin, colmax """ rows = np.any(img, axis=1) cols = np.any(img, axis=0) (rmin, rmax) = np.where(rows)[0][[0, -1]] (cmin, cmax) = np.where(cols)[0][[0, -1]] return (rmin, rmax, cmin, cmax)
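A quick check of bbox on a toy array (np is numpy, as in the function body):

import numpy as np

img = np.zeros((5, 5), dtype=int)
img[1:3, 2:4] = 1
print(bbox(img))  # (1, 2, 2, 3): rmin, rmax, cmin, cmax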
def offset(self, num_to_skip): """Skip to an offset in a query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.offset` for more information on this method. Args: num_to_skip (int): The number of results to skip at the beginning of query results. (Must be non-negative.) Returns: ~.firestore_v1beta1.query.Query: An offset query. """ query = query_mod.Query(self) return query.offset(num_to_skip)
def function[offset, parameter[self, num_to_skip]]: constant[Skip to an offset in a query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.offset` for more information on this method. Args: num_to_skip (int): The number of results to skip at the beginning of query results. (Must be non-negative.) Returns: ~.firestore_v1beta1.query.Query: An offset query. ] variable[query] assign[=] call[name[query_mod].Query, parameter[name[self]]] return[call[name[query].offset, parameter[name[num_to_skip]]]]
keyword[def] identifier[offset] ( identifier[self] , identifier[num_to_skip] ): literal[string] identifier[query] = identifier[query_mod] . identifier[Query] ( identifier[self] ) keyword[return] identifier[query] . identifier[offset] ( identifier[num_to_skip] )
def offset(self, num_to_skip): """Skip to an offset in a query with this collection as parent. See :meth:`~.firestore_v1beta1.query.Query.offset` for more information on this method. Args: num_to_skip (int): The number of results to skip at the beginning of query results. (Must be non-negative.) Returns: ~.firestore_v1beta1.query.Query: An offset query. """ query = query_mod.Query(self) return query.offset(num_to_skip)
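A hedged sketch with google-cloud-firestore, where this method lives on a collection reference; credentials are assumed to be configured, and stream() is the modern Query method (v1beta1-era clients used get()):

from google.cloud import firestore

db = firestore.Client()
query = db.collection('users').offset(10).limit(5)  # skip 10, then take 5
for snap in query.stream():
    print(snap.id)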
def _Complete(self): """Marks the hunt as completed.""" self._RemoveForemanRule() if "w" in self.hunt_obj.mode: self.hunt_obj.Set(self.hunt_obj.Schema.STATE("COMPLETED")) self.hunt_obj.Flush()
def function[_Complete, parameter[self]]: constant[Marks the hunt as completed.] call[name[self]._RemoveForemanRule, parameter[]] if compare[constant[w] in name[self].hunt_obj.mode] begin[:] call[name[self].hunt_obj.Set, parameter[call[name[self].hunt_obj.Schema.STATE, parameter[constant[COMPLETED]]]]] call[name[self].hunt_obj.Flush, parameter[]]
keyword[def] identifier[_Complete] ( identifier[self] ): literal[string] identifier[self] . identifier[_RemoveForemanRule] () keyword[if] literal[string] keyword[in] identifier[self] . identifier[hunt_obj] . identifier[mode] : identifier[self] . identifier[hunt_obj] . identifier[Set] ( identifier[self] . identifier[hunt_obj] . identifier[Schema] . identifier[STATE] ( literal[string] )) identifier[self] . identifier[hunt_obj] . identifier[Flush] ()
def _Complete(self): """Marks the hunt as completed.""" self._RemoveForemanRule() if 'w' in self.hunt_obj.mode: self.hunt_obj.Set(self.hunt_obj.Schema.STATE('COMPLETED')) self.hunt_obj.Flush() # depends on [control=['if'], data=[]]
def find_definition(project, code, offset, resource=None, maxfixes=1): """Return the definition location of the python name at `offset` A `Location` object is returned if the definition location can be determined, otherwise ``None`` is returned. """ fixer = fixsyntax.FixSyntax(project, code, resource, maxfixes) pyname = fixer.pyname_at(offset) if pyname is not None: module, lineno = pyname.get_definition_location() name = rope.base.worder.Worder(code).get_word_at(offset) if lineno is not None: start = module.lines.get_line_start(lineno) def check_offset(occurrence): if occurrence.offset < start: return False pyname_filter = occurrences.PyNameFilter(pyname) finder = occurrences.Finder(project, name, [check_offset, pyname_filter]) for occurrence in finder.find_occurrences(pymodule=module): return Location(occurrence)
def function[find_definition, parameter[project, code, offset, resource, maxfixes]]: constant[Return the definition location of the python name at `offset` A `Location` object is returned if the definition location can be determined, otherwise ``None`` is returned. ] variable[fixer] assign[=] call[name[fixsyntax].FixSyntax, parameter[name[project], name[code], name[resource], name[maxfixes]]] variable[pyname] assign[=] call[name[fixer].pyname_at, parameter[name[offset]]] if compare[name[pyname] is_not constant[None]] begin[:] <ast.Tuple object at 0x7da18fe90370> assign[=] call[name[pyname].get_definition_location, parameter[]] variable[name] assign[=] call[call[name[rope].base.worder.Worder, parameter[name[code]]].get_word_at, parameter[name[offset]]] if compare[name[lineno] is_not constant[None]] begin[:] variable[start] assign[=] call[name[module].lines.get_line_start, parameter[name[lineno]]] def function[check_offset, parameter[occurrence]]: if compare[name[occurrence].offset less[<] name[start]] begin[:] return[constant[False]] variable[pyname_filter] assign[=] call[name[occurrences].PyNameFilter, parameter[name[pyname]]] variable[finder] assign[=] call[name[occurrences].Finder, parameter[name[project], name[name], list[[<ast.Name object at 0x7da18fe92ec0>, <ast.Name object at 0x7da18fe92d70>]]]] for taget[name[occurrence]] in starred[call[name[finder].find_occurrences, parameter[]]] begin[:] return[call[name[Location], parameter[name[occurrence]]]]
keyword[def] identifier[find_definition] ( identifier[project] , identifier[code] , identifier[offset] , identifier[resource] = keyword[None] , identifier[maxfixes] = literal[int] ): literal[string] identifier[fixer] = identifier[fixsyntax] . identifier[FixSyntax] ( identifier[project] , identifier[code] , identifier[resource] , identifier[maxfixes] ) identifier[pyname] = identifier[fixer] . identifier[pyname_at] ( identifier[offset] ) keyword[if] identifier[pyname] keyword[is] keyword[not] keyword[None] : identifier[module] , identifier[lineno] = identifier[pyname] . identifier[get_definition_location] () identifier[name] = identifier[rope] . identifier[base] . identifier[worder] . identifier[Worder] ( identifier[code] ). identifier[get_word_at] ( identifier[offset] ) keyword[if] identifier[lineno] keyword[is] keyword[not] keyword[None] : identifier[start] = identifier[module] . identifier[lines] . identifier[get_line_start] ( identifier[lineno] ) keyword[def] identifier[check_offset] ( identifier[occurrence] ): keyword[if] identifier[occurrence] . identifier[offset] < identifier[start] : keyword[return] keyword[False] identifier[pyname_filter] = identifier[occurrences] . identifier[PyNameFilter] ( identifier[pyname] ) identifier[finder] = identifier[occurrences] . identifier[Finder] ( identifier[project] , identifier[name] , [ identifier[check_offset] , identifier[pyname_filter] ]) keyword[for] identifier[occurrence] keyword[in] identifier[finder] . identifier[find_occurrences] ( identifier[pymodule] = identifier[module] ): keyword[return] identifier[Location] ( identifier[occurrence] )
def find_definition(project, code, offset, resource=None, maxfixes=1): """Return the definition location of the python name at `offset` A `Location` object is returned if the definition location can be determined, otherwise ``None`` is returned. """ fixer = fixsyntax.FixSyntax(project, code, resource, maxfixes) pyname = fixer.pyname_at(offset) if pyname is not None: (module, lineno) = pyname.get_definition_location() name = rope.base.worder.Worder(code).get_word_at(offset) if lineno is not None: start = module.lines.get_line_start(lineno) def check_offset(occurrence): if occurrence.offset < start: return False # depends on [control=['if'], data=[]] pyname_filter = occurrences.PyNameFilter(pyname) finder = occurrences.Finder(project, name, [check_offset, pyname_filter]) for occurrence in finder.find_occurrences(pymodule=module): return Location(occurrence) # depends on [control=['for'], data=['occurrence']] # depends on [control=['if'], data=['lineno']] # depends on [control=['if'], data=['pyname']]
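A hedged sketch of driving find_definition with a rope Project; the code string and project root are illustrative:

from rope.base.project import Project

project = Project('.')  # creates/uses a .ropeproject directory here
code = 'def spam():\n    pass\n\nspam()\n'
location = find_definition(project, code, code.index('spam()'))
if location is not None:
    print(location.offset)  # offset of the defining occurrence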
def handle_oauth1_response(self, args): """Handles an oauth1 authorization response.""" client = self.make_client() client.verifier = args.get('oauth_verifier') tup = session.get('%s_oauthtok' % self.name) if not tup: raise OAuthException( 'Token not found, maybe you disabled cookie', type='token_not_found' ) client.resource_owner_key = tup[0] client.resource_owner_secret = tup[1] uri, headers, data = client.sign( self.expand_url(self.access_token_url), _encode(self.access_token_method) ) headers.update(self._access_token_headers) resp, content = self.http_request( uri, headers, to_bytes(data, self.encoding), method=self.access_token_method ) data = parse_response(resp, content) if resp.code not in (200, 201): raise OAuthException( 'Invalid response from %s' % self.name, type='invalid_response', data=data ) return data
def function[handle_oauth1_response, parameter[self, args]]: constant[Handles an oauth1 authorization response.] variable[client] assign[=] call[name[self].make_client, parameter[]] name[client].verifier assign[=] call[name[args].get, parameter[constant[oauth_verifier]]] variable[tup] assign[=] call[name[session].get, parameter[binary_operation[constant[%s_oauthtok] <ast.Mod object at 0x7da2590d6920> name[self].name]]] if <ast.UnaryOp object at 0x7da1b0244940> begin[:] <ast.Raise object at 0x7da1b02466e0> name[client].resource_owner_key assign[=] call[name[tup]][constant[0]] name[client].resource_owner_secret assign[=] call[name[tup]][constant[1]] <ast.Tuple object at 0x7da1b0245000> assign[=] call[name[client].sign, parameter[call[name[self].expand_url, parameter[name[self].access_token_url]], call[name[_encode], parameter[name[self].access_token_method]]]] call[name[headers].update, parameter[name[self]._access_token_headers]] <ast.Tuple object at 0x7da1b0247c10> assign[=] call[name[self].http_request, parameter[name[uri], name[headers], call[name[to_bytes], parameter[name[data], name[self].encoding]]]] variable[data] assign[=] call[name[parse_response], parameter[name[resp], name[content]]] if compare[name[resp].code <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b059e350>, <ast.Constant object at 0x7da1b059e830>]]] begin[:] <ast.Raise object at 0x7da1b059d3f0> return[name[data]]
keyword[def] identifier[handle_oauth1_response] ( identifier[self] , identifier[args] ): literal[string] identifier[client] = identifier[self] . identifier[make_client] () identifier[client] . identifier[verifier] = identifier[args] . identifier[get] ( literal[string] ) identifier[tup] = identifier[session] . identifier[get] ( literal[string] % identifier[self] . identifier[name] ) keyword[if] keyword[not] identifier[tup] : keyword[raise] identifier[OAuthException] ( literal[string] , identifier[type] = literal[string] ) identifier[client] . identifier[resource_owner_key] = identifier[tup] [ literal[int] ] identifier[client] . identifier[resource_owner_secret] = identifier[tup] [ literal[int] ] identifier[uri] , identifier[headers] , identifier[data] = identifier[client] . identifier[sign] ( identifier[self] . identifier[expand_url] ( identifier[self] . identifier[access_token_url] ), identifier[_encode] ( identifier[self] . identifier[access_token_method] ) ) identifier[headers] . identifier[update] ( identifier[self] . identifier[_access_token_headers] ) identifier[resp] , identifier[content] = identifier[self] . identifier[http_request] ( identifier[uri] , identifier[headers] , identifier[to_bytes] ( identifier[data] , identifier[self] . identifier[encoding] ), identifier[method] = identifier[self] . identifier[access_token_method] ) identifier[data] = identifier[parse_response] ( identifier[resp] , identifier[content] ) keyword[if] identifier[resp] . identifier[code] keyword[not] keyword[in] ( literal[int] , literal[int] ): keyword[raise] identifier[OAuthException] ( literal[string] % identifier[self] . identifier[name] , identifier[type] = literal[string] , identifier[data] = identifier[data] ) keyword[return] identifier[data]
def handle_oauth1_response(self, args): """Handles an oauth1 authorization response.""" client = self.make_client() client.verifier = args.get('oauth_verifier') tup = session.get('%s_oauthtok' % self.name) if not tup: raise OAuthException('Token not found, maybe you disabled cookie', type='token_not_found') # depends on [control=['if'], data=[]] client.resource_owner_key = tup[0] client.resource_owner_secret = tup[1] (uri, headers, data) = client.sign(self.expand_url(self.access_token_url), _encode(self.access_token_method)) headers.update(self._access_token_headers) (resp, content) = self.http_request(uri, headers, to_bytes(data, self.encoding), method=self.access_token_method) data = parse_response(resp, content) if resp.code not in (200, 201): raise OAuthException('Invalid response from %s' % self.name, type='invalid_response', data=data) # depends on [control=['if'], data=[]] return data
def Query(self, queue, limit=1): """Retrieves tasks from a queue without leasing them. This is good for a read only snapshot of the tasks. Args: queue: The task queue that this task belongs to, usually client.Queue() where client is the ClientURN object you want to schedule msgs on. limit: Number of values to fetch. Returns: A list of Task() objects. """ # This function is usually used for manual testing so we also accept client # ids and get the queue from it. if isinstance(queue, rdf_client.ClientURN): queue = queue.Queue() return self.data_store.QueueQueryTasks(queue, limit=limit)
def function[Query, parameter[self, queue, limit]]: constant[Retrieves tasks from a queue without leasing them. This is good for a read only snapshot of the tasks. Args: queue: The task queue that this task belongs to, usually client.Queue() where client is the ClientURN object you want to schedule msgs on. limit: Number of values to fetch. Returns: A list of Task() objects. ] if call[name[isinstance], parameter[name[queue], name[rdf_client].ClientURN]] begin[:] variable[queue] assign[=] call[name[queue].Queue, parameter[]] return[call[name[self].data_store.QueueQueryTasks, parameter[name[queue]]]]
keyword[def] identifier[Query] ( identifier[self] , identifier[queue] , identifier[limit] = literal[int] ): literal[string] keyword[if] identifier[isinstance] ( identifier[queue] , identifier[rdf_client] . identifier[ClientURN] ): identifier[queue] = identifier[queue] . identifier[Queue] () keyword[return] identifier[self] . identifier[data_store] . identifier[QueueQueryTasks] ( identifier[queue] , identifier[limit] = identifier[limit] )
def Query(self, queue, limit=1): """Retrieves tasks from a queue without leasing them. This is good for a read only snapshot of the tasks. Args: queue: The task queue that this task belongs to, usually client.Queue() where client is the ClientURN object you want to schedule msgs on. limit: Number of values to fetch. Returns: A list of Task() objects. """ # This function is usually used for manual testing so we also accept client # ids and get the queue from it. if isinstance(queue, rdf_client.ClientURN): queue = queue.Queue() # depends on [control=['if'], data=[]] return self.data_store.QueueQueryTasks(queue, limit=limit)
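An illustrative call following the docstring's own example; scheduler stands in for the instance defining Query, and client_urn for an rdf_client.ClientURN:

tasks = scheduler.Query(client_urn.Queue(), limit=10)
for task in tasks:
    print(task)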
def _set_raise_on_bulk_item_failure(self, raise_on_bulk_item_failure): """ Set the raise_on_bulk_item_failure parameter. :param raise_on_bulk_item_failure: a bool, the new status of raise_on_bulk_item_failure """ self._raise_on_bulk_item_failure = raise_on_bulk_item_failure self.bulker.raise_on_bulk_item_failure = raise_on_bulk_item_failure
def function[_set_raise_on_bulk_item_failure, parameter[self, raise_on_bulk_item_failure]]: constant[ Set the raise_on_bulk_item_failure parameter :param raise_on_bulk_item_failure a bool the status of the raise_on_bulk_item_failure ] name[self]._raise_on_bulk_item_failure assign[=] name[raise_on_bulk_item_failure] name[self].bulker.raise_on_bulk_item_failure assign[=] name[raise_on_bulk_item_failure]
keyword[def] identifier[_set_raise_on_bulk_item_failure] ( identifier[self] , identifier[raise_on_bulk_item_failure] ): literal[string] identifier[self] . identifier[_raise_on_bulk_item_failure] = identifier[raise_on_bulk_item_failure] identifier[self] . identifier[bulker] . identifier[raise_on_bulk_item_failure] = identifier[raise_on_bulk_item_failure]
def _set_raise_on_bulk_item_failure(self, raise_on_bulk_item_failure): """ Set the raise_on_bulk_item_failure parameter :param raise_on_bulk_item_failure a bool the status of the raise_on_bulk_item_failure """ self._raise_on_bulk_item_failure = raise_on_bulk_item_failure self.bulker.raise_on_bulk_item_failure = raise_on_bulk_item_failure
def make_dist(toxinidir, toxdir, package): """zip up the package into the toxdir.""" dist = os.path.join(toxdir, "dist") # Suppress warnings. success = safe_shell_out(["python", "setup.py", "sdist", "--quiet", "--formats=zip", "--dist-dir", dist], cwd=toxinidir) if success: return os.path.join(dist, package + ".zip")
def function[make_dist, parameter[toxinidir, toxdir, package]]: constant[zip up the package into the toxdir.] variable[dist] assign[=] call[name[os].path.join, parameter[name[toxdir], constant[dist]]] variable[success] assign[=] call[name[safe_shell_out], parameter[list[[<ast.Constant object at 0x7da207f02d40>, <ast.Constant object at 0x7da207f03880>, <ast.Constant object at 0x7da207f03a30>, <ast.Constant object at 0x7da207f03d30>, <ast.Constant object at 0x7da207f022c0>, <ast.Constant object at 0x7da207f02da0>, <ast.Name object at 0x7da207f01ae0>]]]] if name[success] begin[:] return[call[name[os].path.join, parameter[name[dist], binary_operation[name[package] + constant[.zip]]]]]
keyword[def] identifier[make_dist] ( identifier[toxinidir] , identifier[toxdir] , identifier[package] ): literal[string] identifier[dist] = identifier[os] . identifier[path] . identifier[join] ( identifier[toxdir] , literal[string] ) identifier[success] = identifier[safe_shell_out] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[dist] ], identifier[cwd] = identifier[toxinidir] ) keyword[if] identifier[success] : keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[dist] , identifier[package] + literal[string] )
def make_dist(toxinidir, toxdir, package): """zip up the package into the toxdir.""" dist = os.path.join(toxdir, 'dist') # Suppress warnings. success = safe_shell_out(['python', 'setup.py', 'sdist', '--quiet', '--formats=zip', '--dist-dir', dist], cwd=toxinidir) if success: return os.path.join(dist, package + '.zip') # depends on [control=['if'], data=[]]
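A hedged example of make_dist; the paths are illustrative and assume a setup.py at toxinidir:

zip_path = make_dist('/src/mypkg', '/src/mypkg/.tox', 'mypkg-1.0')
if zip_path:
    print(zip_path)  # /src/mypkg/.tox/dist/mypkg-1.0.zip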
def compare(self, buf, offset=0, length=1, ignore=""): """Compare self.m_buf against buf over [offset, offset + length); return 1 on the first mismatch, 0 if the compared elements match.""" for i in range(offset, offset + length): if isinstance(self.m_types, (type(Union), type(Structure))): if compare(self.m_buf[i], buf[i], ignore=ignore): return 1 elif self.m_buf[i] != buf[i]: return 1 return 0
def function[compare, parameter[self, buf, offset, length, ignore]]: constant[Compare buffer] for taget[name[i]] in starred[call[name[range], parameter[name[offset], binary_operation[name[offset] + name[length]]]]] begin[:] if call[name[isinstance], parameter[name[self].m_types, tuple[[<ast.Call object at 0x7da1b0158580>, <ast.Call object at 0x7da1b0108a90>]]]] begin[:] if call[name[compare], parameter[call[name[self].m_buf][name[i]], call[name[buf]][name[i]]]] begin[:] return[constant[1]] return[constant[0]]
keyword[def] identifier[compare] ( identifier[self] , identifier[buf] , identifier[offset] = literal[int] , identifier[length] = literal[int] , identifier[ignore] = literal[string] ): literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[offset] , identifier[offset] + identifier[length] ): keyword[if] identifier[isinstance] ( identifier[self] . identifier[m_types] ,( identifier[type] ( identifier[Union] ), identifier[type] ( identifier[Structure] ))): keyword[if] identifier[compare] ( identifier[self] . identifier[m_buf] [ identifier[i] ], identifier[buf] [ identifier[i] ], identifier[ignore] = identifier[ignore] ): keyword[return] literal[int] keyword[elif] identifier[self] . identifier[m_buf] [ identifier[i] ]!= identifier[buf] [ identifier[i] ]: keyword[return] literal[int] keyword[return] literal[int]
def compare(self, buf, offset=0, length=1, ignore=''): """Compare buffer""" for i in range(offset, offset + length): if isinstance(self.m_types, (type(Union), type(Structure))): if compare(self.m_buf[i], buf[i], ignore=ignore): return 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif self.m_buf[i] != buf[i]: return 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return 0
def close(self): """ Close the link. """ # Stop the comm thread self._thread.stop() # Close the USB dongle try: if self.cfusb: self.cfusb.set_crtp_to_usb(False) self.cfusb.close() except Exception as e: # If we pull out the dongle we will not make this call logger.info('Could not close {}'.format(e)) pass self.cfusb = None
def function[close, parameter[self]]: constant[ Close the link. ] call[name[self]._thread.stop, parameter[]] <ast.Try object at 0x7da1b1687550> name[self].cfusb assign[=] constant[None]
keyword[def] identifier[close] ( identifier[self] ): literal[string] identifier[self] . identifier[_thread] . identifier[stop] () keyword[try] : keyword[if] identifier[self] . identifier[cfusb] : identifier[self] . identifier[cfusb] . identifier[set_crtp_to_usb] ( keyword[False] ) identifier[self] . identifier[cfusb] . identifier[close] () keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[e] )) keyword[pass] identifier[self] . identifier[cfusb] = keyword[None]
def close(self): """ Close the link. """ # Stop the comm thread self._thread.stop() # Close the USB dongle try: if self.cfusb: self.cfusb.set_crtp_to_usb(False) self.cfusb.close() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception as e: # If we pull out the dongle we will not make this call logger.info('Could not close {}'.format(e)) pass # depends on [control=['except'], data=['e']] self.cfusb = None
def set_ovs_view(self, asset_data, view_name): """ view_name should be frontView, sideView, or topView """ if not isinstance(asset_data, DataInputStream): raise InvalidArgument('view file must be an ' + 'osid.transport.DataInputStream object') if view_name not in ['frontView', 'sideView', 'topView']: raise InvalidArgument('View name must be frontView, sideView, or topView.') self.clear_file(view_name) self.add_file(asset_data, label=view_name, asset_type=OV_ASSET_TYPE, asset_content_type=OV_ASSET_CONTENT_TYPE)
def function[set_ovs_view, parameter[self, asset_data, view_name]]: constant[ view_name should be frontView, sideView, or topView ] if <ast.UnaryOp object at 0x7da20c6aa770> begin[:] <ast.Raise object at 0x7da20c6a9ab0> if compare[name[view_name] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da20c6a8dc0>, <ast.Constant object at 0x7da20c6aa530>, <ast.Constant object at 0x7da20c6aa800>]]] begin[:] <ast.Raise object at 0x7da20c6ab310> call[name[self].clear_file, parameter[name[view_name]]] call[name[self].add_file, parameter[name[asset_data]]]
keyword[def] identifier[set_ovs_view] ( identifier[self] , identifier[asset_data] , identifier[view_name] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[asset_data] , identifier[DataInputStream] ): keyword[raise] identifier[InvalidArgument] ( literal[string] + literal[string] ) keyword[if] identifier[view_name] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[raise] identifier[InvalidArgument] ( literal[string] ) identifier[self] . identifier[clear_file] ( identifier[view_name] ) identifier[self] . identifier[add_file] ( identifier[asset_data] , identifier[label] = identifier[view_name] , identifier[asset_type] = identifier[OV_ASSET_TYPE] , identifier[asset_content_type] = identifier[OV_ASSET_CONTENT_TYPE] )
def set_ovs_view(self, asset_data, view_name): """ view_name should be frontView, sideView, or topView """ if not isinstance(asset_data, DataInputStream): raise InvalidArgument('view file must be an ' + 'osid.transport.DataInputStream object') # depends on [control=['if'], data=[]] if view_name not in ['frontView', 'sideView', 'topView']: raise InvalidArgument('View name must be frontView, sideView, or topView.') # depends on [control=['if'], data=[]] self.clear_file(view_name) self.add_file(asset_data, label=view_name, asset_type=OV_ASSET_TYPE, asset_content_type=OV_ASSET_CONTENT_TYPE)
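A hedged usage of set_ovs_view; form stands in for the object defining the method, and DataInputStream is assumed to wrap an open binary file per osid.transport:

with open('front.png', 'rb') as f:  # illustrative asset file
    form.set_ovs_view(DataInputStream(f), 'frontView')
# any view_name outside frontView/sideView/topView raises InvalidArgument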
def get_analysis_and_structure(self, structure, calculate_valences=True, guesstimate_spin=False, op_threshold=0.1): """ Obtain an analysis of a given structure and if it may be Jahn-Teller active or not. This is a heuristic, and may give false positives and false negatives (false positives are preferred). :param structure: input structure :param calculate_valences (bool): whether to attempt to calculate valences or not, structure should have oxidation states to perform analysis :param guesstimate_spin (bool): whether to guesstimate spin state from magnetic moments or not, use with caution :param op_threshold (float): threshold for order parameter above which to consider site to match an octahedral or tetrahedral motif, since Jahn-Teller structures can often be quite distorted, this threshold is smaller than one might expect :return (dict): analysis of structure, with key 'strength' which may be 'none', 'strong', 'weak', or 'unknown' """ structure = structure.get_primitive_structure() if calculate_valences: bva = BVAnalyzer() structure = bva.get_oxi_state_decorated_structure(structure) # no point testing multiple equivalent sites, doesn't make any difference to analysis # but makes returned symmetrized_structure = SpacegroupAnalyzer(structure).get_symmetrized_structure() # to detect structural motifs of a given site op = LocalStructOrderParams(['oct', 'tet']) # dict of site index to the Jahn-Teller analysis of that site jt_sites = [] non_jt_sites = [] for indices in symmetrized_structure.equivalent_indices: idx = indices[0] site = symmetrized_structure[idx] # only interested in sites with oxidation states if isinstance(site.specie, Specie) and site.specie.element.is_transition_metal: # get motif around site order_params = op.get_order_parameters(symmetrized_structure, idx) if order_params[0] > order_params[1] and order_params[0] > op_threshold: motif = 'oct' motif_order_parameter = order_params[0] elif order_params[1] > op_threshold: motif = 'tet' motif_order_parameter = order_params[1] else: motif = 'unknown' motif_order_parameter = None if motif == "oct" or motif == "tet": # guess spin of metal ion if guesstimate_spin and 'magmom' in site.properties: # estimate if high spin or low spin magmom = site.properties['magmom'] spin_state = self._estimate_spin_state(site.specie, motif, magmom) else: spin_state = "unknown" magnitude = self.get_magnitude_of_effect_from_species(site.specie, spin_state, motif) if magnitude != "none": ligands = get_neighbors_of_site_with_index(structure, idx, approach="min_dist", delta=0.15) ligand_bond_lengths = [ligand.distance(structure[idx]) for ligand in ligands] ligands_species = list(set([str(ligand.specie) for ligand in ligands])) ligand_bond_length_spread = max(ligand_bond_lengths) - \ min(ligand_bond_lengths) def trim(f): # avoid storing to unreasonable precision, hurts readability return float("{:.4f}".format(f)) # to be Jahn-Teller active, all ligands have to be the same if len(ligands_species) == 1: jt_sites.append({'strength': magnitude, 'motif': motif, 'motif_order_parameter': trim(motif_order_parameter), 'spin_state': spin_state, 'species': str(site.specie), 'ligand': ligands_species[0], 'ligand_bond_lengths': [trim(length) for length in ligand_bond_lengths], 'ligand_bond_length_spread': trim(ligand_bond_length_spread), 'site_indices': indices}) # store reasons for not being J-T active else: non_jt_sites.append({'site_indices': indices, 'strength': "none", 'reason': "Not Jahn-Teller active for this " "electronic configuration."}) else: 
non_jt_sites.append({'site_indices': indices, 'strength': "none", 'reason': "motif is {}".format(motif)}) # perform aggregation of all sites if jt_sites: analysis = {'active': True} # if any site could exhibit 'strong' Jahn-Teller effect # then mark whole structure as strong strong_magnitudes = [site['strength'] == "strong" for site in jt_sites] if any(strong_magnitudes): analysis['strength'] = "strong" else: analysis['strength'] = "weak" analysis['sites'] = jt_sites return analysis, structure else: return {'active': False, 'sites': non_jt_sites}, structure
def function[get_analysis_and_structure, parameter[self, structure, calculate_valences, guesstimate_spin, op_threshold]]: constant[ Obtain an analysis of a given structure and if it may be Jahn-Teller active or not. This is a heuristic, and may give false positives and false negatives (false positives are preferred). :param structure: input structure :param calculate_valences (bool): whether to attempt to calculate valences or not, structure should have oxidation states to perform analysis :param guesstimate_spin (bool): whether to guesstimate spin state from magnetic moments or not, use with caution :param op_threshold (float): threshold for order parameter above which to consider site to match an octahedral or tetrahedral motif, since Jahn-Teller structures can often be quite distorted, this threshold is smaller than one might expect :return (dict): analysis of structure, with key 'strength' which may be 'none', 'strong', 'weak', or 'unknown' ] variable[structure] assign[=] call[name[structure].get_primitive_structure, parameter[]] if name[calculate_valences] begin[:] variable[bva] assign[=] call[name[BVAnalyzer], parameter[]] variable[structure] assign[=] call[name[bva].get_oxi_state_decorated_structure, parameter[name[structure]]] variable[symmetrized_structure] assign[=] call[call[name[SpacegroupAnalyzer], parameter[name[structure]]].get_symmetrized_structure, parameter[]] variable[op] assign[=] call[name[LocalStructOrderParams], parameter[list[[<ast.Constant object at 0x7da1b1beae60>, <ast.Constant object at 0x7da1b1beaef0>]]]] variable[jt_sites] assign[=] list[[]] variable[non_jt_sites] assign[=] list[[]] for taget[name[indices]] in starred[name[symmetrized_structure].equivalent_indices] begin[:] variable[idx] assign[=] call[name[indices]][constant[0]] variable[site] assign[=] call[name[symmetrized_structure]][name[idx]] if <ast.BoolOp object at 0x7da1b1bebd30> begin[:] variable[order_params] assign[=] call[name[op].get_order_parameters, parameter[name[symmetrized_structure], name[idx]]] if <ast.BoolOp object at 0x7da1b1bea710> begin[:] variable[motif] assign[=] constant[oct] variable[motif_order_parameter] assign[=] call[name[order_params]][constant[0]] if <ast.BoolOp object at 0x7da1b1bea110> begin[:] if <ast.BoolOp object at 0x7da1b1be9ed0> begin[:] variable[magmom] assign[=] call[name[site].properties][constant[magmom]] variable[spin_state] assign[=] call[name[self]._estimate_spin_state, parameter[name[site].specie, name[motif], name[magmom]]] variable[magnitude] assign[=] call[name[self].get_magnitude_of_effect_from_species, parameter[name[site].specie, name[spin_state], name[motif]]] if compare[name[magnitude] not_equal[!=] constant[none]] begin[:] variable[ligands] assign[=] call[name[get_neighbors_of_site_with_index], parameter[name[structure], name[idx]]] variable[ligand_bond_lengths] assign[=] <ast.ListComp object at 0x7da1b1be8460> variable[ligands_species] assign[=] call[name[list], parameter[call[name[set], parameter[<ast.ListComp object at 0x7da1b1be8220>]]]] variable[ligand_bond_length_spread] assign[=] binary_operation[call[name[max], parameter[name[ligand_bond_lengths]]] - call[name[min], parameter[name[ligand_bond_lengths]]]] def function[trim, parameter[f]]: return[call[name[float], parameter[call[constant[{:.4f}].format, parameter[name[f]]]]]] if compare[call[name[len], parameter[name[ligands_species]]] equal[==] constant[1]] begin[:] call[name[jt_sites].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1c5b550>, <ast.Constant object at 
0x7da1b1c5b580>, <ast.Constant object at 0x7da1b1c59f30>, <ast.Constant object at 0x7da1b1c59d50>, <ast.Constant object at 0x7da1b1c5b340>, <ast.Constant object at 0x7da1b1c58040>, <ast.Constant object at 0x7da1b1c58610>, <ast.Constant object at 0x7da1b1c594b0>, <ast.Constant object at 0x7da1b1c58220>], [<ast.Name object at 0x7da1b1c5aec0>, <ast.Name object at 0x7da1b1c5a500>, <ast.Call object at 0x7da1b1c5a110>, <ast.Name object at 0x7da1b1c5bdc0>, <ast.Call object at 0x7da1b1c5bb20>, <ast.Subscript object at 0x7da1b1c5b7c0>, <ast.ListComp object at 0x7da1b1c5a8f0>, <ast.Call object at 0x7da1b1c59570>, <ast.Name object at 0x7da1b1c5aef0>]]]] if name[jt_sites] begin[:] variable[analysis] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c58bb0>], [<ast.Constant object at 0x7da1b1c59ff0>]] variable[strong_magnitudes] assign[=] <ast.ListComp object at 0x7da1b1c58910> if call[name[any], parameter[name[strong_magnitudes]]] begin[:] call[name[analysis]][constant[strength]] assign[=] constant[strong] call[name[analysis]][constant[sites]] assign[=] name[jt_sites] return[tuple[[<ast.Name object at 0x7da1b1c5a740>, <ast.Name object at 0x7da1b1c59db0>]]]
keyword[def] identifier[get_analysis_and_structure] ( identifier[self] , identifier[structure] , identifier[calculate_valences] = keyword[True] , identifier[guesstimate_spin] = keyword[False] , identifier[op_threshold] = literal[int] ): literal[string] identifier[structure] = identifier[structure] . identifier[get_primitive_structure] () keyword[if] identifier[calculate_valences] : identifier[bva] = identifier[BVAnalyzer] () identifier[structure] = identifier[bva] . identifier[get_oxi_state_decorated_structure] ( identifier[structure] ) identifier[symmetrized_structure] = identifier[SpacegroupAnalyzer] ( identifier[structure] ). identifier[get_symmetrized_structure] () identifier[op] = identifier[LocalStructOrderParams] ([ literal[string] , literal[string] ]) identifier[jt_sites] =[] identifier[non_jt_sites] =[] keyword[for] identifier[indices] keyword[in] identifier[symmetrized_structure] . identifier[equivalent_indices] : identifier[idx] = identifier[indices] [ literal[int] ] identifier[site] = identifier[symmetrized_structure] [ identifier[idx] ] keyword[if] identifier[isinstance] ( identifier[site] . identifier[specie] , identifier[Specie] ) keyword[and] identifier[site] . identifier[specie] . identifier[element] . identifier[is_transition_metal] : identifier[order_params] = identifier[op] . identifier[get_order_parameters] ( identifier[symmetrized_structure] , identifier[idx] ) keyword[if] identifier[order_params] [ literal[int] ]> identifier[order_params] [ literal[int] ] keyword[and] identifier[order_params] [ literal[int] ]> identifier[op_threshold] : identifier[motif] = literal[string] identifier[motif_order_parameter] = identifier[order_params] [ literal[int] ] keyword[elif] identifier[order_params] [ literal[int] ]> identifier[op_threshold] : identifier[motif] = literal[string] identifier[motif_order_parameter] = identifier[order_params] [ literal[int] ] keyword[else] : identifier[motif] = literal[string] identifier[motif_order_parameter] = keyword[None] keyword[if] identifier[motif] == literal[string] keyword[or] identifier[motif] == literal[string] : keyword[if] identifier[guesstimate_spin] keyword[and] literal[string] keyword[in] identifier[site] . identifier[properties] : identifier[magmom] = identifier[site] . identifier[properties] [ literal[string] ] identifier[spin_state] = identifier[self] . identifier[_estimate_spin_state] ( identifier[site] . identifier[specie] , identifier[motif] , identifier[magmom] ) keyword[else] : identifier[spin_state] = literal[string] identifier[magnitude] = identifier[self] . identifier[get_magnitude_of_effect_from_species] ( identifier[site] . identifier[specie] , identifier[spin_state] , identifier[motif] ) keyword[if] identifier[magnitude] != literal[string] : identifier[ligands] = identifier[get_neighbors_of_site_with_index] ( identifier[structure] , identifier[idx] , identifier[approach] = literal[string] , identifier[delta] = literal[int] ) identifier[ligand_bond_lengths] =[ identifier[ligand] . identifier[distance] ( identifier[structure] [ identifier[idx] ]) keyword[for] identifier[ligand] keyword[in] identifier[ligands] ] identifier[ligands_species] = identifier[list] ( identifier[set] ([ identifier[str] ( identifier[ligand] . 
identifier[specie] ) keyword[for] identifier[ligand] keyword[in] identifier[ligands] ])) identifier[ligand_bond_length_spread] = identifier[max] ( identifier[ligand_bond_lengths] )- identifier[min] ( identifier[ligand_bond_lengths] ) keyword[def] identifier[trim] ( identifier[f] ): keyword[return] identifier[float] ( literal[string] . identifier[format] ( identifier[f] )) keyword[if] identifier[len] ( identifier[ligands_species] )== literal[int] : identifier[jt_sites] . identifier[append] ({ literal[string] : identifier[magnitude] , literal[string] : identifier[motif] , literal[string] : identifier[trim] ( identifier[motif_order_parameter] ), literal[string] : identifier[spin_state] , literal[string] : identifier[str] ( identifier[site] . identifier[specie] ), literal[string] : identifier[ligands_species] [ literal[int] ], literal[string] :[ identifier[trim] ( identifier[length] ) keyword[for] identifier[length] keyword[in] identifier[ligand_bond_lengths] ], literal[string] : identifier[trim] ( identifier[ligand_bond_length_spread] ), literal[string] : identifier[indices] }) keyword[else] : identifier[non_jt_sites] . identifier[append] ({ literal[string] : identifier[indices] , literal[string] : literal[string] , literal[string] : literal[string] literal[string] }) keyword[else] : identifier[non_jt_sites] . identifier[append] ({ literal[string] : identifier[indices] , literal[string] : literal[string] , literal[string] : literal[string] . identifier[format] ( identifier[motif] )}) keyword[if] identifier[jt_sites] : identifier[analysis] ={ literal[string] : keyword[True] } identifier[strong_magnitudes] =[ identifier[site] [ literal[string] ]== literal[string] keyword[for] identifier[site] keyword[in] identifier[jt_sites] ] keyword[if] identifier[any] ( identifier[strong_magnitudes] ): identifier[analysis] [ literal[string] ]= literal[string] keyword[else] : identifier[analysis] [ literal[string] ]= literal[string] identifier[analysis] [ literal[string] ]= identifier[jt_sites] keyword[return] identifier[analysis] , identifier[structure] keyword[else] : keyword[return] { literal[string] : keyword[False] , literal[string] : identifier[non_jt_sites] }, identifier[structure]
def get_analysis_and_structure(self, structure, calculate_valences=True, guesstimate_spin=False, op_threshold=0.1): """ Obtain an analysis of a given structure and if it may be Jahn-Teller active or not. This is a heuristic, and may give false positives and false negatives (false positives are preferred). :param structure: input structure :param calculate_valences (bool): whether to attempt to calculate valences or not, structure should have oxidation states to perform analysis :param guesstimate_spin (bool): whether to guesstimate spin state from magnetic moments or not, use with caution :param op_threshold (float): threshold for order parameter above which to consider site to match an octahedral or tetrahedral motif, since Jahn-Teller structures can often be quite distorted, this threshold is smaller than one might expect :return (dict): analysis of structure, with key 'strength' which may be 'none', 'strong', 'weak', or 'unknown' """ structure = structure.get_primitive_structure() if calculate_valences: bva = BVAnalyzer() structure = bva.get_oxi_state_decorated_structure(structure) # depends on [control=['if'], data=[]] # no point testing multiple equivalent sites, doesn't make any difference to analysis # but makes returned symmetrized_structure = SpacegroupAnalyzer(structure).get_symmetrized_structure() # to detect structural motifs of a given site op = LocalStructOrderParams(['oct', 'tet']) # dict of site index to the Jahn-Teller analysis of that site jt_sites = [] non_jt_sites = [] for indices in symmetrized_structure.equivalent_indices: idx = indices[0] site = symmetrized_structure[idx] # only interested in sites with oxidation states if isinstance(site.specie, Specie) and site.specie.element.is_transition_metal: # get motif around site order_params = op.get_order_parameters(symmetrized_structure, idx) if order_params[0] > order_params[1] and order_params[0] > op_threshold: motif = 'oct' motif_order_parameter = order_params[0] # depends on [control=['if'], data=[]] elif order_params[1] > op_threshold: motif = 'tet' motif_order_parameter = order_params[1] # depends on [control=['if'], data=[]] else: motif = 'unknown' motif_order_parameter = None if motif == 'oct' or motif == 'tet': # guess spin of metal ion if guesstimate_spin and 'magmom' in site.properties: # estimate if high spin or low spin magmom = site.properties['magmom'] spin_state = self._estimate_spin_state(site.specie, motif, magmom) # depends on [control=['if'], data=[]] else: spin_state = 'unknown' magnitude = self.get_magnitude_of_effect_from_species(site.specie, spin_state, motif) if magnitude != 'none': ligands = get_neighbors_of_site_with_index(structure, idx, approach='min_dist', delta=0.15) ligand_bond_lengths = [ligand.distance(structure[idx]) for ligand in ligands] ligands_species = list(set([str(ligand.specie) for ligand in ligands])) ligand_bond_length_spread = max(ligand_bond_lengths) - min(ligand_bond_lengths) def trim(f): # avoid storing to unreasonable precision, hurts readability return float('{:.4f}'.format(f)) # to be Jahn-Teller active, all ligands have to be the same if len(ligands_species) == 1: jt_sites.append({'strength': magnitude, 'motif': motif, 'motif_order_parameter': trim(motif_order_parameter), 'spin_state': spin_state, 'species': str(site.specie), 'ligand': ligands_species[0], 'ligand_bond_lengths': [trim(length) for length in ligand_bond_lengths], 'ligand_bond_length_spread': trim(ligand_bond_length_spread), 'site_indices': indices}) # depends on [control=['if'], data=[]] # depends on 
[control=['if'], data=['magnitude']] else: # store reasons for not being J-T active non_jt_sites.append({'site_indices': indices, 'strength': 'none', 'reason': 'Not Jahn-Teller active for this electronic configuration.'}) # depends on [control=['if'], data=[]] else: non_jt_sites.append({'site_indices': indices, 'strength': 'none', 'reason': 'motif is {}'.format(motif)}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['indices']] # perform aggregation of all sites if jt_sites: analysis = {'active': True} # if any site could exhibit 'strong' Jahn-Teller effect # then mark whole structure as strong strong_magnitudes = [site['strength'] == 'strong' for site in jt_sites] if any(strong_magnitudes): analysis['strength'] = 'strong' # depends on [control=['if'], data=[]] else: analysis['strength'] = 'weak' analysis['sites'] = jt_sites return (analysis, structure) # depends on [control=['if'], data=[]] else: return ({'active': False, 'sites': non_jt_sites}, structure)
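A hedged end-to-end call of get_analysis_and_structure; the CIF path is illustrative, and the JahnTellerAnalyzer class and import path are assumptions based on pymatgen's module layout:

from pymatgen.core import Structure
from pymatgen.analysis.magnetism.jahnteller import JahnTellerAnalyzer

structure = Structure.from_file('LaMnO3.cif')  # illustrative input file
analysis, structure = JahnTellerAnalyzer().get_analysis_and_structure(structure)
print(analysis['active'], analysis.get('strength'))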
def _handle_kernel_died(self, since_last_heartbeat): """ Handle the kernel's death by asking if the user wants to restart. """ self.log.debug("kernel died: %s", since_last_heartbeat) if self.custom_restart: self.custom_restart_kernel_died.emit(since_last_heartbeat) else: message = 'The kernel heartbeat has been inactive for %.2f ' \ 'seconds. Do you want to restart the kernel? You may ' \ 'first want to check the network connection.' % \ since_last_heartbeat self.restart_kernel(message, now=True)
def function[_handle_kernel_died, parameter[self, since_last_heartbeat]]: constant[ Handle the kernel's death by asking if the user wants to restart. ] call[name[self].log.debug, parameter[constant[kernel died: %s], name[since_last_heartbeat]]] if name[self].custom_restart begin[:] call[name[self].custom_restart_kernel_died.emit, parameter[name[since_last_heartbeat]]]
keyword[def] identifier[_handle_kernel_died] ( identifier[self] , identifier[since_last_heartbeat] ): literal[string] identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[since_last_heartbeat] ) keyword[if] identifier[self] . identifier[custom_restart] : identifier[self] . identifier[custom_restart_kernel_died] . identifier[emit] ( identifier[since_last_heartbeat] ) keyword[else] : identifier[message] = literal[string] literal[string] literal[string] % identifier[since_last_heartbeat] identifier[self] . identifier[restart_kernel] ( identifier[message] , identifier[now] = keyword[True] )
def _handle_kernel_died(self, since_last_heartbeat): """ Handle the kernel's death by asking if the user wants to restart. """ self.log.debug('kernel died: %s', since_last_heartbeat) if self.custom_restart: self.custom_restart_kernel_died.emit(since_last_heartbeat) # depends on [control=['if'], data=[]] else: message = 'The kernel heartbeat has been inactive for %.2f seconds. Do you want to restart the kernel? You may first want to check the network connection.' % since_last_heartbeat self.restart_kernel(message, now=True)
def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals))
def function[hmset, parameter[key]]: constant[ Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ] variable[host] assign[=] call[name[fieldsvals].pop, parameter[constant[host], constant[None]]] variable[port] assign[=] call[name[fieldsvals].pop, parameter[constant[port], constant[None]]] variable[database] assign[=] call[name[fieldsvals].pop, parameter[constant[db], constant[None]]] variable[password] assign[=] call[name[fieldsvals].pop, parameter[constant[password], constant[None]]] variable[server] assign[=] call[name[_connect], parameter[name[host], name[port], name[database], name[password]]] return[call[name[server].hmset, parameter[name[key], call[name[salt].utils.args.clean_kwargs, parameter[]]]]]
keyword[def] identifier[hmset] ( identifier[key] ,** identifier[fieldsvals] ): literal[string] identifier[host] = identifier[fieldsvals] . identifier[pop] ( literal[string] , keyword[None] ) identifier[port] = identifier[fieldsvals] . identifier[pop] ( literal[string] , keyword[None] ) identifier[database] = identifier[fieldsvals] . identifier[pop] ( literal[string] , keyword[None] ) identifier[password] = identifier[fieldsvals] . identifier[pop] ( literal[string] , keyword[None] ) identifier[server] = identifier[_connect] ( identifier[host] , identifier[port] , identifier[database] , identifier[password] ) keyword[return] identifier[server] . identifier[hmset] ( identifier[key] , identifier[salt] . identifier[utils] . identifier[args] . identifier[clean_kwargs] (** identifier[fieldsvals] ))
def hmset(key, **fieldsvals): """ Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 """ host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals))
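Besides the docstring's CLI example, the execution module function can be called directly; the connection kwargs are popped off before the remaining field/value pairs reach redis:

hmset('foo_hash', bar_field1='bar_value1', bar_field2='bar_value2',
      host='127.0.0.1', port=6379, db=0)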
def _set_repo_option(repo, option): ''' Set the given 'key=value' option on the repo dict ''' if not option: return opt = option.split('=') if len(opt) != 2: return if opt[0] == 'trusted': repo['trusted'] = opt[1] == 'yes' else: repo[opt[0]] = opt[1]
def function[_set_repo_option, parameter[repo, option]]: constant[ Set the option to repo ] if <ast.UnaryOp object at 0x7da18eb545e0> begin[:] return[None] variable[opt] assign[=] call[name[option].split, parameter[constant[=]]] if compare[call[name[len], parameter[name[opt]]] not_equal[!=] constant[2]] begin[:] return[None] if compare[call[name[opt]][constant[0]] equal[==] constant[trusted]] begin[:] call[name[repo]][constant[trusted]] assign[=] compare[call[name[opt]][constant[1]] equal[==] constant[yes]]
keyword[def] identifier[_set_repo_option] ( identifier[repo] , identifier[option] ): literal[string] keyword[if] keyword[not] identifier[option] : keyword[return] identifier[opt] = identifier[option] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[opt] )!= literal[int] : keyword[return] keyword[if] identifier[opt] [ literal[int] ]== literal[string] : identifier[repo] [ literal[string] ]= identifier[opt] [ literal[int] ]== literal[string] keyword[else] : identifier[repo] [ identifier[opt] [ literal[int] ]]= identifier[opt] [ literal[int] ]
def _set_repo_option(repo, option): """ Set the option on the repo """ if not option: return # depends on [control=['if'], data=[]] opt = option.split('=') if len(opt) != 2: return # depends on [control=['if'], data=[]] if opt[0] == 'trusted': repo['trusted'] = opt[1] == 'yes' # depends on [control=['if'], data=[]] else: repo[opt[0]] = opt[1]
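For reference, a minimal illustration of the option parsing above (assumes _set_repo_option from this record is in scope):

repo = {}
_set_repo_option(repo, 'trusted=yes')   # 'trusted' is converted to a boolean
_set_repo_option(repo, 'arch=x86_64')   # any other key is stored verbatim
_set_repo_option(repo, 'malformed')     # ignored: no '=' separator
print(repo)  # {'trusted': True, 'arch': 'x86_64'}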
def modified_lines(filename, extra_data, commit=None):
    """Returns the lines that have been modified for this file.

    Args:
      filename: the file to check.
      extra_data: is the extra_data returned by modified_files. Additionally,
        a value of None means that the file was not modified.
      commit: the complete sha1 (40 chars) of the commit. Note that specifying
        this value will only work (100%) when commit == last_commit (with
        respect to the currently checked out revision), otherwise, we could
        miss some lines.

    Returns: a list of lines that were modified, or None in case all lines
      are new.
    """
    if extra_data is None:
        return []
    if extra_data != 'M':
        return None

    command = ['hg', 'diff', '-U', '0']
    if commit:
        command.append('--change=%s' % commit)
    command.append(filename)
    # Split as bytes, as the output may have some non unicode characters.
    diff_lines = subprocess.check_output(command).split(
        os.linesep.encode('utf-8'))
    diff_line_numbers = utils.filter_lines(
        diff_lines,
        br'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@',
        groups=('start_line', 'lines'))

    modified_line_numbers = []
    for start_line, lines in diff_line_numbers:
        start_line = int(start_line)
        lines = int(lines)
        modified_line_numbers.extend(range(start_line, start_line + lines))

    return modified_line_numbers
def function[modified_lines, parameter[filename, extra_data, commit]]: constant[Returns the lines that have been modified for this file. Args: filename: the file to check. extra_data: is the extra_data returned by modified_files. Additionally, a value of None means that the file was not modified. commit: the complete sha1 (40 chars) of the commit. Note that specifying this value will only work (100%) when commit == last_commit (with respect to the currently checked out revision), otherwise, we could miss some lines. Returns: a list of lines that were modified, or None in case all lines are new. ] if compare[name[extra_data] is constant[None]] begin[:] return[list[[]]] if compare[name[extra_data] not_equal[!=] constant[M]] begin[:] return[constant[None]] variable[command] assign[=] list[[<ast.Constant object at 0x7da1b0bdab00>, <ast.Constant object at 0x7da1b0bd8a00>, <ast.Constant object at 0x7da1b0bdac80>, <ast.Constant object at 0x7da1b0bdaaa0>]] if name[commit] begin[:] call[name[command].append, parameter[binary_operation[constant[--change=%s] <ast.Mod object at 0x7da2590d6920> name[commit]]]] call[name[command].append, parameter[name[filename]]] variable[diff_lines] assign[=] call[call[name[subprocess].check_output, parameter[name[command]]].split, parameter[call[name[os].linesep.encode, parameter[constant[utf-8]]]]] variable[diff_line_numbers] assign[=] call[name[utils].filter_lines, parameter[name[diff_lines], constant[b'@@ -\\d+,\\d+ \\+(?P<start_line>\\d+),(?P<lines>\\d+) @@']]] variable[modified_line_numbers] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b0e3b1c0>, <ast.Name object at 0x7da1b0e39870>]]] in starred[name[diff_line_numbers]] begin[:] variable[start_line] assign[=] call[name[int], parameter[name[start_line]]] variable[lines] assign[=] call[name[int], parameter[name[lines]]] call[name[modified_line_numbers].extend, parameter[call[name[range], parameter[name[start_line], binary_operation[name[start_line] + name[lines]]]]]] return[name[modified_line_numbers]]
keyword[def] identifier[modified_lines] ( identifier[filename] , identifier[extra_data] , identifier[commit] = keyword[None] ): literal[string] keyword[if] identifier[extra_data] keyword[is] keyword[None] : keyword[return] [] keyword[if] identifier[extra_data] != literal[string] : keyword[return] keyword[None] identifier[command] =[ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[commit] : identifier[command] . identifier[append] ( literal[string] % identifier[commit] ) identifier[command] . identifier[append] ( identifier[filename] ) identifier[diff_lines] = identifier[subprocess] . identifier[check_output] ( identifier[command] ). identifier[split] ( identifier[os] . identifier[linesep] . identifier[encode] ( literal[string] )) identifier[diff_line_numbers] = identifier[utils] . identifier[filter_lines] ( identifier[diff_lines] , literal[string] , identifier[groups] =( literal[string] , literal[string] )) identifier[modified_line_numbers] =[] keyword[for] identifier[start_line] , identifier[lines] keyword[in] identifier[diff_line_numbers] : identifier[start_line] = identifier[int] ( identifier[start_line] ) identifier[lines] = identifier[int] ( identifier[lines] ) identifier[modified_line_numbers] . identifier[extend] ( identifier[range] ( identifier[start_line] , identifier[start_line] + identifier[lines] )) keyword[return] identifier[modified_line_numbers]
def modified_lines(filename, extra_data, commit=None):
    """Returns the lines that have been modified for this file.

    Args:
      filename: the file to check.
      extra_data: is the extra_data returned by modified_files. Additionally,
        a value of None means that the file was not modified.
      commit: the complete sha1 (40 chars) of the commit. Note that specifying
        this value will only work (100%) when commit == last_commit (with
        respect to the currently checked out revision), otherwise, we could
        miss some lines.

    Returns: a list of lines that were modified, or None in case all lines
      are new.
    """
    if extra_data is None:
        return [] # depends on [control=['if'], data=[]]
    if extra_data != 'M':
        return None # depends on [control=['if'], data=[]]
    command = ['hg', 'diff', '-U', '0']
    if commit:
        command.append('--change=%s' % commit) # depends on [control=['if'], data=[]]
    command.append(filename)
    # Split as bytes, as the output may have some non unicode characters.
    diff_lines = subprocess.check_output(command).split(os.linesep.encode('utf-8'))
    diff_line_numbers = utils.filter_lines(diff_lines, b'@@ -\\d+,\\d+ \\+(?P<start_line>\\d+),(?P<lines>\\d+) @@', groups=('start_line', 'lines'))
    modified_line_numbers = []
    for (start_line, lines) in diff_line_numbers:
        start_line = int(start_line)
        lines = int(lines)
        modified_line_numbers.extend(range(start_line, start_line + lines)) # depends on [control=['for'], data=[]]
    return modified_line_numbers
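The core of modified_lines is turning unified-diff hunk headers into new-file line ranges. A self-contained sketch of that step, using plain re in place of the module's utils.filter_lines helper (the sample hunk headers are made up):

import re

hunk_re = re.compile(r'@@ -\d+,\d+ \+(?P<start_line>\d+),(?P<lines>\d+) @@')
sample_headers = ['@@ -10,2 +10,3 @@', '@@ -40,0 +44,1 @@']
modified = []
for header in sample_headers:
    match = hunk_re.match(header)
    if match:
        start = int(match.group('start_line'))
        count = int(match.group('lines'))
        modified.extend(range(start, start + count))
print(modified)  # [10, 11, 12, 44]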
def delete(self, dataset):
    """The method deletes a dataset by its id"""
    url = self._get_url('/api/1.0/meta/dataset/{}/delete'.format(dataset))
    json_data = ''
    binary_data = json_data.encode()
    headers = self._get_request_headers()
    req = urllib.request.Request(url, binary_data, headers)
    resp = urllib.request.urlopen(req)
    str_response = resp.read().decode('utf-8')
    if str_response != '"successful"' or resp.status < 200 or resp.status >= 300:
        msg = 'Dataset has not been deleted, because of the following error(s): {}'.format(str_response)
        raise ValueError(msg)
def function[delete, parameter[self, dataset]]: constant[The method deletes a dataset by its id] variable[url] assign[=] call[name[self]._get_url, parameter[call[constant[/api/1.0/meta/dataset/{}/delete].format, parameter[name[dataset]]]]] variable[json_data] assign[=] constant[] variable[binary_data] assign[=] call[name[json_data].encode, parameter[]] variable[headers] assign[=] call[name[self]._get_request_headers, parameter[]] variable[req] assign[=] call[name[urllib].request.Request, parameter[name[url], name[binary_data], name[headers]]] variable[resp] assign[=] call[name[urllib].request.urlopen, parameter[name[req]]] variable[str_response] assign[=] call[call[name[resp].read, parameter[]].decode, parameter[constant[utf-8]]] if <ast.BoolOp object at 0x7da18bc716f0> begin[:] variable[msg] assign[=] call[constant[Dataset has not been deleted, because of the following error(s): {}].format, parameter[name[str_response]]] <ast.Raise object at 0x7da18bc73eb0>
keyword[def] identifier[delete] ( identifier[self] , identifier[dataset] ): literal[string] identifier[url] = identifier[self] . identifier[_get_url] ( literal[string] . identifier[format] ( identifier[dataset] )) identifier[json_data] = literal[string] identifier[binary_data] = identifier[json_data] . identifier[encode] () identifier[headers] = identifier[self] . identifier[_get_request_headers] () identifier[req] = identifier[urllib] . identifier[request] . identifier[Request] ( identifier[url] , identifier[binary_data] , identifier[headers] ) identifier[resp] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[req] ) identifier[str_response] = identifier[resp] . identifier[read] (). identifier[decode] ( literal[string] ) keyword[if] identifier[str_response] != literal[string] keyword[or] identifier[resp] . identifier[status] < literal[int] keyword[or] identifier[resp] . identifier[status] >= literal[int] : identifier[msg] = literal[string] . identifier[format] ( identifier[str_response] ) keyword[raise] identifier[ValueError] ( identifier[msg] )
def delete(self, dataset):
    """The method deletes a dataset by its id"""
    url = self._get_url('/api/1.0/meta/dataset/{}/delete'.format(dataset))
    json_data = ''
    binary_data = json_data.encode()
    headers = self._get_request_headers()
    req = urllib.request.Request(url, binary_data, headers)
    resp = urllib.request.urlopen(req)
    str_response = resp.read().decode('utf-8')
    if str_response != '"successful"' or resp.status < 200 or resp.status >= 300:
        msg = 'Dataset has not been deleted, because of the following error(s): {}'.format(str_response)
        raise ValueError(msg) # depends on [control=['if'], data=[]]
def list(connection): """ List watched EBS volumes :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :returns: None """ volumes = get_watched_volumes(connection) if not volumes: logger.info('No watched volumes found') return logger.info( '+-----------------------' '+----------------------' '+--------------' '+------------+') logger.info( '| {volume:<21} ' '| {volume_name:<20.20} ' '| {interval:<12} ' '| {retention:<10} |'.format( volume='Volume ID', volume_name='Volume name', interval='Interval', retention='Retention')) logger.info( '+-----------------------' '+----------------------' '+--------------' '+------------+') for volume in volumes: if 'AutomatedEBSSnapshots' not in volume.tags: interval = 'Interval tag not found' elif volume.tags['AutomatedEBSSnapshots'] not in VALID_INTERVALS: interval = 'Invalid interval' else: interval = volume.tags['AutomatedEBSSnapshots'] if 'AutomatedEBSSnapshotsRetention' not in volume.tags: retention = 0 else: retention = volume.tags['AutomatedEBSSnapshotsRetention'] # Get the volume name try: volume_name = volume.tags['Name'] except KeyError: volume_name = '' logger.info( '| {volume_id:<14} ' '| {volume_name:<20.20} ' '| {interval:<12} ' '| {retention:<10} |'.format( volume_id=volume.id, volume_name=volume_name, interval=interval, retention=retention)) logger.info( '+-----------------------' '+----------------------' '+--------------' '+------------+')
def function[list, parameter[connection]]: constant[ List watched EBS volumes :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :returns: None ] variable[volumes] assign[=] call[name[get_watched_volumes], parameter[name[connection]]] if <ast.UnaryOp object at 0x7da20c76ce50> begin[:] call[name[logger].info, parameter[constant[No watched volumes found]]] return[None] call[name[logger].info, parameter[constant[+-----------------------+----------------------+--------------+------------+]]] call[name[logger].info, parameter[call[constant[| {volume:<21} | {volume_name:<20.20} | {interval:<12} | {retention:<10} |].format, parameter[]]]] call[name[logger].info, parameter[constant[+-----------------------+----------------------+--------------+------------+]]] for taget[name[volume]] in starred[name[volumes]] begin[:] if compare[constant[AutomatedEBSSnapshots] <ast.NotIn object at 0x7da2590d7190> name[volume].tags] begin[:] variable[interval] assign[=] constant[Interval tag not found] if compare[constant[AutomatedEBSSnapshotsRetention] <ast.NotIn object at 0x7da2590d7190> name[volume].tags] begin[:] variable[retention] assign[=] constant[0] <ast.Try object at 0x7da1b0e318a0> call[name[logger].info, parameter[call[constant[| {volume_id:<14} | {volume_name:<20.20} | {interval:<12} | {retention:<10} |].format, parameter[]]]] call[name[logger].info, parameter[constant[+-----------------------+----------------------+--------------+------------+]]]
keyword[def] identifier[list] ( identifier[connection] ): literal[string] identifier[volumes] = identifier[get_watched_volumes] ( identifier[connection] ) keyword[if] keyword[not] identifier[volumes] : identifier[logger] . identifier[info] ( literal[string] ) keyword[return] identifier[logger] . identifier[info] ( literal[string] literal[string] literal[string] literal[string] ) identifier[logger] . identifier[info] ( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[volume] = literal[string] , identifier[volume_name] = literal[string] , identifier[interval] = literal[string] , identifier[retention] = literal[string] )) identifier[logger] . identifier[info] ( literal[string] literal[string] literal[string] literal[string] ) keyword[for] identifier[volume] keyword[in] identifier[volumes] : keyword[if] literal[string] keyword[not] keyword[in] identifier[volume] . identifier[tags] : identifier[interval] = literal[string] keyword[elif] identifier[volume] . identifier[tags] [ literal[string] ] keyword[not] keyword[in] identifier[VALID_INTERVALS] : identifier[interval] = literal[string] keyword[else] : identifier[interval] = identifier[volume] . identifier[tags] [ literal[string] ] keyword[if] literal[string] keyword[not] keyword[in] identifier[volume] . identifier[tags] : identifier[retention] = literal[int] keyword[else] : identifier[retention] = identifier[volume] . identifier[tags] [ literal[string] ] keyword[try] : identifier[volume_name] = identifier[volume] . identifier[tags] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[volume_name] = literal[string] identifier[logger] . identifier[info] ( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[volume_id] = identifier[volume] . identifier[id] , identifier[volume_name] = identifier[volume_name] , identifier[interval] = identifier[interval] , identifier[retention] = identifier[retention] )) identifier[logger] . identifier[info] ( literal[string] literal[string] literal[string] literal[string] )
def list(connection):
    """
    List watched EBS volumes

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :returns: None
    """
    volumes = get_watched_volumes(connection)
    if not volumes:
        logger.info('No watched volumes found')
        return # depends on [control=['if'], data=[]]
    logger.info('+-----------------------+----------------------+--------------+------------+')
    logger.info('| {volume:<21} | {volume_name:<20.20} | {interval:<12} | {retention:<10} |'.format(volume='Volume ID', volume_name='Volume name', interval='Interval', retention='Retention'))
    logger.info('+-----------------------+----------------------+--------------+------------+')
    for volume in volumes:
        if 'AutomatedEBSSnapshots' not in volume.tags:
            interval = 'Interval tag not found' # depends on [control=['if'], data=[]]
        elif volume.tags['AutomatedEBSSnapshots'] not in VALID_INTERVALS:
            interval = 'Invalid interval' # depends on [control=['if'], data=[]]
        else:
            interval = volume.tags['AutomatedEBSSnapshots']
        if 'AutomatedEBSSnapshotsRetention' not in volume.tags:
            retention = 0 # depends on [control=['if'], data=[]]
        else:
            retention = volume.tags['AutomatedEBSSnapshotsRetention']
        # Get the volume name
        try:
            volume_name = volume.tags['Name'] # depends on [control=['try'], data=[]]
        except KeyError:
            volume_name = '' # depends on [control=['except'], data=[]]
        logger.info('| {volume_id:<14} | {volume_name:<20.20} | {interval:<12} | {retention:<10} |'.format(volume_id=volume.id, volume_name=volume_name, interval=interval, retention=retention)) # depends on [control=['for'], data=['volume']]
    logger.info('+-----------------------+----------------------+--------------+------------+')
def dump_service(self, sc): """Read all data blocks of a given service. :meth:`dump_service` reads all data blocks from the service with service code *sc* and returns a list of strings suitable for printing. The number of strings returned does not necessarily reflect the number of data blocks because a range of data blocks with equal content is reduced to fewer lines of output. """ def lprint(fmt, data, index): ispchr = lambda x: x >= 32 and x <= 126 # noqa: E731 def print_bytes(octets): return ' '.join(['%02x' % x for x in octets]) def print_chars(octets): return ''.join([chr(x) if ispchr(x) else '.' for x in octets]) return fmt.format(index, print_bytes(data), print_chars(data)) data_line_fmt = "{0:04X}: {1} |{2}|" same_line_fmt = "{0:<4s} {1} |{2}|" lines = list() last_data = None same_data = 0 for i in itertools.count(): # pragma: no branch assert i < 0x10000 try: this_data = self.read_without_encryption([sc], [BlockCode(i)]) except Type3TagCommandError: i = i - 1 break if this_data == last_data: same_data += 1 else: if same_data > 1: lines.append(lprint(same_line_fmt, last_data, "*")) lines.append(lprint(data_line_fmt, this_data, i)) last_data = this_data same_data = 0 if same_data > 1: lines.append(lprint(same_line_fmt, last_data, "*")) if same_data > 0: lines.append(lprint(data_line_fmt, this_data, i)) return lines
def function[dump_service, parameter[self, sc]]: constant[Read all data blocks of a given service. :meth:`dump_service` reads all data blocks from the service with service code *sc* and returns a list of strings suitable for printing. The number of strings returned does not necessarily reflect the number of data blocks because a range of data blocks with equal content is reduced to fewer lines of output. ] def function[lprint, parameter[fmt, data, index]]: variable[ispchr] assign[=] <ast.Lambda object at 0x7da20e74be20> def function[print_bytes, parameter[octets]]: return[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da20e74b700>]]] def function[print_chars, parameter[octets]]: return[call[constant[].join, parameter[<ast.ListComp object at 0x7da20e74bf40>]]] return[call[name[fmt].format, parameter[name[index], call[name[print_bytes], parameter[name[data]]], call[name[print_chars], parameter[name[data]]]]]] variable[data_line_fmt] assign[=] constant[{0:04X}: {1} |{2}|] variable[same_line_fmt] assign[=] constant[{0:<4s} {1} |{2}|] variable[lines] assign[=] call[name[list], parameter[]] variable[last_data] assign[=] constant[None] variable[same_data] assign[=] constant[0] for taget[name[i]] in starred[call[name[itertools].count, parameter[]]] begin[:] assert[compare[name[i] less[<] constant[65536]]] <ast.Try object at 0x7da1b184d270> if compare[name[this_data] equal[==] name[last_data]] begin[:] <ast.AugAssign object at 0x7da1b184c4c0> if compare[name[same_data] greater[>] constant[1]] begin[:] call[name[lines].append, parameter[call[name[lprint], parameter[name[same_line_fmt], name[last_data], constant[*]]]]] if compare[name[same_data] greater[>] constant[0]] begin[:] call[name[lines].append, parameter[call[name[lprint], parameter[name[data_line_fmt], name[this_data], name[i]]]]] return[name[lines]]
keyword[def] identifier[dump_service] ( identifier[self] , identifier[sc] ): literal[string] keyword[def] identifier[lprint] ( identifier[fmt] , identifier[data] , identifier[index] ): identifier[ispchr] = keyword[lambda] identifier[x] : identifier[x] >= literal[int] keyword[and] identifier[x] <= literal[int] keyword[def] identifier[print_bytes] ( identifier[octets] ): keyword[return] literal[string] . identifier[join] ([ literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[octets] ]) keyword[def] identifier[print_chars] ( identifier[octets] ): keyword[return] literal[string] . identifier[join] ([ identifier[chr] ( identifier[x] ) keyword[if] identifier[ispchr] ( identifier[x] ) keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[octets] ]) keyword[return] identifier[fmt] . identifier[format] ( identifier[index] , identifier[print_bytes] ( identifier[data] ), identifier[print_chars] ( identifier[data] )) identifier[data_line_fmt] = literal[string] identifier[same_line_fmt] = literal[string] identifier[lines] = identifier[list] () identifier[last_data] = keyword[None] identifier[same_data] = literal[int] keyword[for] identifier[i] keyword[in] identifier[itertools] . identifier[count] (): keyword[assert] identifier[i] < literal[int] keyword[try] : identifier[this_data] = identifier[self] . identifier[read_without_encryption] ([ identifier[sc] ],[ identifier[BlockCode] ( identifier[i] )]) keyword[except] identifier[Type3TagCommandError] : identifier[i] = identifier[i] - literal[int] keyword[break] keyword[if] identifier[this_data] == identifier[last_data] : identifier[same_data] += literal[int] keyword[else] : keyword[if] identifier[same_data] > literal[int] : identifier[lines] . identifier[append] ( identifier[lprint] ( identifier[same_line_fmt] , identifier[last_data] , literal[string] )) identifier[lines] . identifier[append] ( identifier[lprint] ( identifier[data_line_fmt] , identifier[this_data] , identifier[i] )) identifier[last_data] = identifier[this_data] identifier[same_data] = literal[int] keyword[if] identifier[same_data] > literal[int] : identifier[lines] . identifier[append] ( identifier[lprint] ( identifier[same_line_fmt] , identifier[last_data] , literal[string] )) keyword[if] identifier[same_data] > literal[int] : identifier[lines] . identifier[append] ( identifier[lprint] ( identifier[data_line_fmt] , identifier[this_data] , identifier[i] )) keyword[return] identifier[lines]
def dump_service(self, sc): """Read all data blocks of a given service. :meth:`dump_service` reads all data blocks from the service with service code *sc* and returns a list of strings suitable for printing. The number of strings returned does not necessarily reflect the number of data blocks because a range of data blocks with equal content is reduced to fewer lines of output. """ def lprint(fmt, data, index): ispchr = lambda x: x >= 32 and x <= 126 # noqa: E731 def print_bytes(octets): return ' '.join(['%02x' % x for x in octets]) def print_chars(octets): return ''.join([chr(x) if ispchr(x) else '.' for x in octets]) return fmt.format(index, print_bytes(data), print_chars(data)) data_line_fmt = '{0:04X}: {1} |{2}|' same_line_fmt = '{0:<4s} {1} |{2}|' lines = list() last_data = None same_data = 0 for i in itertools.count(): # pragma: no branch assert i < 65536 try: this_data = self.read_without_encryption([sc], [BlockCode(i)]) # depends on [control=['try'], data=[]] except Type3TagCommandError: i = i - 1 break # depends on [control=['except'], data=[]] if this_data == last_data: same_data += 1 # depends on [control=['if'], data=[]] else: if same_data > 1: lines.append(lprint(same_line_fmt, last_data, '*')) # depends on [control=['if'], data=[]] lines.append(lprint(data_line_fmt, this_data, i)) last_data = this_data same_data = 0 # depends on [control=['for'], data=['i']] if same_data > 1: lines.append(lprint(same_line_fmt, last_data, '*')) # depends on [control=['if'], data=[]] if same_data > 0: lines.append(lprint(data_line_fmt, this_data, i)) # depends on [control=['if'], data=[]] return lines
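The nested lprint helper in dump_service is a conventional hexdump formatter. A standalone sketch of the same formatting, outside the tag-reading context:

def hexdump_line(fmt, data, index):
    ispchr = lambda x: 32 <= x <= 126
    hex_part = ' '.join('%02x' % x for x in data)
    chr_part = ''.join(chr(x) if ispchr(x) else '.' for x in data)
    return fmt.format(index, hex_part, chr_part)

block = bytes(range(0x41, 0x51))  # 16 bytes, 'A' through 'P'
print(hexdump_line("{0:04X}: {1} |{2}|", block, 3))
# 0003: 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 |ABCDEFGHIJKLMNOP|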
def update_row(self, key, value):
    """Update one or more rows of the table.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.
    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead, if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If the index specified is out of range.
    TypeError:
        If `value` is of incorrect type.
    ValueError:
        If the length of a row does not match the number of columns.
    """
    if isinstance(key, int):
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj
    elif isinstance(key, slice):
        row_obj_list = []
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_))
        self._table[key] = row_obj_list
    else:
        raise TypeError("key must be an integer or a slice object")
def function[update_row, parameter[self, key, value]]: constant[Update one or more rows of the table. Parameters ---------- key : int or slice index of the row, or a slice object. value : iterable If an index is specified, `value` should be an iterable of appropriate length. Instead, if a slice object is passed as key, value should be an iterable of rows. Raises ------ IndexError: If the index specified is out of range. TypeError: If `value` is of incorrect type. ValueError: If the length of a row does not match the number of columns. ] if call[name[isinstance], parameter[name[key], name[int]]] begin[:] variable[row] assign[=] call[name[self]._validate_row, parameter[name[value]]] variable[row_obj] assign[=] call[name[RowData], parameter[name[self], name[row]]] call[name[self]._table][name[key]] assign[=] name[row_obj]
keyword[def] identifier[update_row] ( identifier[self] , identifier[key] , identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[key] , identifier[int] ): identifier[row] = identifier[self] . identifier[_validate_row] ( identifier[value] , identifier[init_table_if_required] = keyword[False] ) identifier[row_obj] = identifier[RowData] ( identifier[self] , identifier[row] ) identifier[self] . identifier[_table] [ identifier[key] ]= identifier[row_obj] keyword[elif] identifier[isinstance] ( identifier[key] , identifier[slice] ): identifier[row_obj_list] =[] keyword[for] identifier[row] keyword[in] identifier[value] : identifier[row_] = identifier[self] . identifier[_validate_row] ( identifier[row] , identifier[init_table_if_required] = keyword[True] ) identifier[row_obj_list] . identifier[append] ( identifier[RowData] ( identifier[self] , identifier[row_] )) identifier[self] . identifier[_table] [ identifier[key] ]= identifier[row_obj_list] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] )
def update_row(self, key, value):
    """Update one or more rows of the table.

    Parameters
    ----------
    key : int or slice
        index of the row, or a slice object.
    value : iterable
        If an index is specified, `value` should be an iterable
        of appropriate length. Instead, if a slice object is
        passed as key, value should be an iterable of rows.

    Raises
    ------
    IndexError:
        If the index specified is out of range.
    TypeError:
        If `value` is of incorrect type.
    ValueError:
        If the length of a row does not match the number of columns.
    """
    if isinstance(key, int):
        row = self._validate_row(value, init_table_if_required=False)
        row_obj = RowData(self, row)
        self._table[key] = row_obj # depends on [control=['if'], data=[]]
    elif isinstance(key, slice):
        row_obj_list = []
        for row in value:
            row_ = self._validate_row(row, init_table_if_required=True)
            row_obj_list.append(RowData(self, row_)) # depends on [control=['for'], data=['row']]
        self._table[key] = row_obj_list # depends on [control=['if'], data=[]]
    else:
        raise TypeError('key must be an integer or a slice object')
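A minimal sketch of the int-versus-slice dispatch used by update_row, reduced to a plain list so it runs standalone:

def update(seq, key, value):
    if isinstance(key, int):
        seq[key] = value
    elif isinstance(key, slice):
        seq[key] = list(value)
    else:
        raise TypeError('key must be an integer or a slice object')

rows = [['a'], ['b'], ['c']]
update(rows, 0, ['A'])                     # single row by index
update(rows, slice(1, 3), [['B'], ['C']])  # two rows via a slice
print(rows)  # [['A'], ['B'], ['C']]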
def select_action(self, q_values):
    """Return the selected action

    # Arguments
        q_values (np.ndarray): List of the estimations of Q for each action

    # Returns
        Selected action
    """
    assert q_values.ndim == 1
    nb_actions = q_values.shape[0]

    if np.random.uniform() < self.eps:
        action = np.random.randint(0, nb_actions)
    else:
        action = np.argmax(q_values)
    return action
def function[select_action, parameter[self, q_values]]: constant[Return the selected action # Arguments q_values (np.ndarray): List of the estimations of Q for each action # Returns Selected action ] assert[compare[name[q_values].ndim equal[==] constant[1]]] variable[nb_actions] assign[=] call[name[q_values].shape][constant[0]] if compare[call[name[np].random.uniform, parameter[]] less[<] name[self].eps] begin[:] variable[action] assign[=] call[name[np].random.randint, parameter[constant[0], name[nb_actions]]] return[name[action]]
keyword[def] identifier[select_action] ( identifier[self] , identifier[q_values] ): literal[string] keyword[assert] identifier[q_values] . identifier[ndim] == literal[int] identifier[nb_actions] = identifier[q_values] . identifier[shape] [ literal[int] ] keyword[if] identifier[np] . identifier[random] . identifier[uniform] ()< identifier[self] . identifier[eps] : identifier[action] = identifier[np] . identifier[random] . identifier[randint] ( literal[int] , identifier[nb_actions] ) keyword[else] : identifier[action] = identifier[np] . identifier[argmax] ( identifier[q_values] ) keyword[return] identifier[action]
def select_action(self, q_values):
    """Return the selected action

    # Arguments
        q_values (np.ndarray): List of the estimations of Q for each action

    # Returns
        Selected action
    """
    assert q_values.ndim == 1
    nb_actions = q_values.shape[0]
    if np.random.uniform() < self.eps:
        action = np.random.randint(0, nb_actions) # depends on [control=['if'], data=[]]
    else:
        action = np.argmax(q_values)
    return action
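select_action is a standard epsilon-greedy policy. A standalone demo of the same rule (eps and the Q-values below are arbitrary):

import numpy as np

def epsilon_greedy(q_values, eps=0.1):
    # explore with probability eps, otherwise act greedily
    if np.random.uniform() < eps:
        return int(np.random.randint(0, q_values.shape[0]))
    return int(np.argmax(q_values))

q = np.array([0.1, 0.9, 0.3])
picks = [epsilon_greedy(q) for _ in range(10000)]
print(picks.count(1) / 10000)  # roughly 0.93: greedy picks plus a share of the random ones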
def send_command(self, device_id, action): """Send device command to rflink gateway.""" command = deserialize_packet_id(device_id) command['command'] = action log.debug('sending command: %s', command) self.send_packet(command)
def function[send_command, parameter[self, device_id, action]]: constant[Send device command to rflink gateway.] variable[command] assign[=] call[name[deserialize_packet_id], parameter[name[device_id]]] call[name[command]][constant[command]] assign[=] name[action] call[name[log].debug, parameter[constant[sending command: %s], name[command]]] call[name[self].send_packet, parameter[name[command]]]
keyword[def] identifier[send_command] ( identifier[self] , identifier[device_id] , identifier[action] ): literal[string] identifier[command] = identifier[deserialize_packet_id] ( identifier[device_id] ) identifier[command] [ literal[string] ]= identifier[action] identifier[log] . identifier[debug] ( literal[string] , identifier[command] ) identifier[self] . identifier[send_packet] ( identifier[command] )
def send_command(self, device_id, action): """Send device command to rflink gateway.""" command = deserialize_packet_id(device_id) command['command'] = action log.debug('sending command: %s', command) self.send_packet(command)
def scale(input_value, input_min, input_max, out_min, out_max): """ scale a value from one range to another """ # Figure out how 'wide' each range is input_span = input_max - input_min output_span = out_max - out_min # Convert the left range into a 0-1 range (float) valuescaled = float(input_value - input_min) / float(input_span) # Convert the 0-1 range into a value in the right range. return out_min + (valuescaled * output_span)
def function[scale, parameter[input_value, input_min, input_max, out_min, out_max]]: constant[ scale a value from one range to another ] variable[input_span] assign[=] binary_operation[name[input_max] - name[input_min]] variable[output_span] assign[=] binary_operation[name[out_max] - name[out_min]] variable[valuescaled] assign[=] binary_operation[call[name[float], parameter[binary_operation[name[input_value] - name[input_min]]]] / call[name[float], parameter[name[input_span]]]] return[binary_operation[name[out_min] + binary_operation[name[valuescaled] * name[output_span]]]]
keyword[def] identifier[scale] ( identifier[input_value] , identifier[input_min] , identifier[input_max] , identifier[out_min] , identifier[out_max] ): literal[string] identifier[input_span] = identifier[input_max] - identifier[input_min] identifier[output_span] = identifier[out_max] - identifier[out_min] identifier[valuescaled] = identifier[float] ( identifier[input_value] - identifier[input_min] )/ identifier[float] ( identifier[input_span] ) keyword[return] identifier[out_min] +( identifier[valuescaled] * identifier[output_span] )
def scale(input_value, input_min, input_max, out_min, out_max): """ scale a value from one range to another """ # Figure out how 'wide' each range is input_span = input_max - input_min output_span = out_max - out_min # Convert the left range into a 0-1 range (float) valuescaled = float(input_value - input_min) / float(input_span) # Convert the 0-1 range into a value in the right range. return out_min + valuescaled * output_span
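A few worked values for scale, assuming the function above is in scope:

print(scale(5, 0, 10, 0, 100))   # 50.0  -- midpoint maps to midpoint
print(scale(0, -1, 1, 0, 255))   # 127.5 -- centre of a sensor range onto 8 bits
print(scale(7, 0, 10, 100, 0))   # 30.0  -- an inverted output range also works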
def wait(self): """ wait for all actions to complete on a droplet """ interval_seconds = 5 while True: actions = self.actions() slept = False for a in actions: if a['status'] == 'in-progress': # n.b. gevent will monkey patch time.sleep(interval_seconds) slept = True break if not slept: break
def function[wait, parameter[self]]: constant[ wait for all actions to complete on a droplet ] variable[interval_seconds] assign[=] constant[5] while constant[True] begin[:] variable[actions] assign[=] call[name[self].actions, parameter[]] variable[slept] assign[=] constant[False] for taget[name[a]] in starred[name[actions]] begin[:] if compare[call[name[a]][constant[status]] equal[==] constant[in-progress]] begin[:] call[name[time].sleep, parameter[name[interval_seconds]]] variable[slept] assign[=] constant[True] break if <ast.UnaryOp object at 0x7da1b0089000> begin[:] break
keyword[def] identifier[wait] ( identifier[self] ): literal[string] identifier[interval_seconds] = literal[int] keyword[while] keyword[True] : identifier[actions] = identifier[self] . identifier[actions] () identifier[slept] = keyword[False] keyword[for] identifier[a] keyword[in] identifier[actions] : keyword[if] identifier[a] [ literal[string] ]== literal[string] : identifier[time] . identifier[sleep] ( identifier[interval_seconds] ) identifier[slept] = keyword[True] keyword[break] keyword[if] keyword[not] identifier[slept] : keyword[break]
def wait(self): """ wait for all actions to complete on a droplet """ interval_seconds = 5 while True: actions = self.actions() slept = False for a in actions: if a['status'] == 'in-progress': # n.b. gevent will monkey patch time.sleep(interval_seconds) slept = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']] if not slept: break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
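The polling loop in wait is easy to exercise with a stub; FakeDroplet and its canned statuses below are invented for the demo:

import time

class FakeDroplet:
    """Stub: reports 'in-progress' twice, then 'completed'."""
    def __init__(self):
        self.polls = 0

    def actions(self):
        self.polls += 1
        return [{'status': 'in-progress' if self.polls < 3 else 'completed'}]

    def wait(self, interval_seconds=0.01):
        while True:
            slept = False
            for a in self.actions():
                if a['status'] == 'in-progress':
                    time.sleep(interval_seconds)
                    slept = True
                    break
            if not slept:
                break

droplet = FakeDroplet()
droplet.wait()
print(droplet.polls)  # 3: two in-progress polls, then one completed poll ends the loop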
def _get_observed_mmax(catalogue, config):
    '''Check to see if an observed mmax value is input; if not, take it
    from the catalogue'''
    if config['input_mmax']:
        obsmax = config['input_mmax']
        if config['input_mmax_uncertainty']:
            return config['input_mmax'], config['input_mmax_uncertainty']
        else:
            raise ValueError('Input mmax uncertainty must be specified!')

    max_location = np.argmax(catalogue['magnitude'])
    obsmax = catalogue['magnitude'][max_location]
    cond = isinstance(catalogue['sigmaMagnitude'], np.ndarray) and \
        len(catalogue['sigmaMagnitude']) > 0 and not \
        np.all(np.isnan(catalogue['sigmaMagnitude']))

    if cond:
        if not np.isnan(catalogue['sigmaMagnitude'][max_location]):
            return obsmax, catalogue['sigmaMagnitude'][max_location]
        else:
            print('Uncertainty not given on observed Mmax\n'
                  'Taking largest magnitude uncertainty found in catalogue')
            return obsmax, np.nanmax(catalogue['sigmaMagnitude'])
    elif config['input_mmax_uncertainty']:
        return obsmax, config['input_mmax_uncertainty']
    else:
        raise ValueError('Input mmax uncertainty must be specified!')
def function[_get_observed_mmax, parameter[catalogue, config]]: constant[Check to see if an observed mmax value is input; if not, take it from the catalogue] if call[name[config]][constant[input_mmax]] begin[:] variable[obsmax] assign[=] call[name[config]][constant[input_mmax]] if call[name[config]][constant[input_mmax_uncertainty]] begin[:] return[tuple[[<ast.Subscript object at 0x7da20c794130>, <ast.Subscript object at 0x7da20c7953c0>]]] variable[max_location] assign[=] call[name[np].argmax, parameter[call[name[catalogue]][constant[magnitude]]]] variable[obsmax] assign[=] call[call[name[catalogue]][constant[magnitude]]][name[max_location]] variable[cond] assign[=] <ast.BoolOp object at 0x7da20c794f10> if name[cond] begin[:] if <ast.UnaryOp object at 0x7da20c794520> begin[:] return[tuple[[<ast.Name object at 0x7da20c7950c0>, <ast.Subscript object at 0x7da20c7942b0>]]]
keyword[def] identifier[_get_observed_mmax] ( identifier[catalogue] , identifier[config] ): literal[string] keyword[if] identifier[config] [ literal[string] ]: identifier[obsmax] = identifier[config] [ literal[string] ] keyword[if] identifier[config] [ literal[string] ]: keyword[return] identifier[config] [ literal[string] ], identifier[config] [ literal[string] ] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[max_location] = identifier[np] . identifier[argmax] ( identifier[catalogue] [ literal[string] ]) identifier[obsmax] = identifier[catalogue] [ literal[string] ][ identifier[max_location] ] identifier[cond] = identifier[isinstance] ( identifier[catalogue] [ literal[string] ], identifier[np] . identifier[ndarray] ) keyword[and] identifier[len] ( identifier[catalogue] [ literal[string] ])> literal[int] keyword[and] keyword[not] identifier[np] . identifier[all] ( identifier[np] . identifier[isnan] ( identifier[catalogue] [ literal[string] ])) keyword[if] identifier[cond] : keyword[if] keyword[not] identifier[np] . identifier[isnan] ( identifier[catalogue] [ literal[string] ][ identifier[max_location] ]): keyword[return] identifier[obsmax] , identifier[catalogue] [ literal[string] ][ identifier[max_location] ] keyword[else] : identifier[print] ( literal[string] literal[string] ) keyword[return] identifier[obsmax] , identifier[np] . identifier[nanmax] ( identifier[catalogue] [ literal[string] ]) keyword[elif] identifier[config] [ literal[string] ]: keyword[return] identifier[obsmax] , identifier[config] [ literal[string] ] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
def _get_observed_mmax(catalogue, config):
    """Check to see if an observed mmax value is input; if not, take it from the catalogue"""
    if config['input_mmax']:
        obsmax = config['input_mmax']
        if config['input_mmax_uncertainty']:
            return (config['input_mmax'], config['input_mmax_uncertainty']) # depends on [control=['if'], data=[]]
        else:
            raise ValueError('Input mmax uncertainty must be specified!') # depends on [control=['if'], data=[]]
    max_location = np.argmax(catalogue['magnitude'])
    obsmax = catalogue['magnitude'][max_location]
    cond = isinstance(catalogue['sigmaMagnitude'], np.ndarray) and len(catalogue['sigmaMagnitude']) > 0 and (not np.all(np.isnan(catalogue['sigmaMagnitude'])))
    if cond:
        if not np.isnan(catalogue['sigmaMagnitude'][max_location]):
            return (obsmax, catalogue['sigmaMagnitude'][max_location]) # depends on [control=['if'], data=[]]
        else:
            print('Uncertainty not given on observed Mmax\nTaking largest magnitude uncertainty found in catalogue')
            return (obsmax, np.nanmax(catalogue['sigmaMagnitude'])) # depends on [control=['if'], data=[]]
    elif config['input_mmax_uncertainty']:
        return (obsmax, config['input_mmax_uncertainty']) # depends on [control=['if'], data=[]]
    else:
        raise ValueError('Input mmax uncertainty must be specified!')
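A toy call showing the catalogue path of _get_observed_mmax (the inputs are invented; assumes the function and numpy are importable):

import numpy as np

catalogue = {'magnitude': np.array([4.5, 6.1, 5.2]),
             'sigmaMagnitude': np.array([0.1, 0.2, 0.1])}
config = {'input_mmax': None, 'input_mmax_uncertainty': None}
print(_get_observed_mmax(catalogue, config))  # (6.1, 0.2): largest event and its sigma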
def map(self, map_fn, name="Map"):
    """Applies a map operator to the stream.

    Args:
        map_fn (function): The user-defined logic of the map.
    """
    op = Operator(
        _generate_uuid(),
        OpType.Map,
        name,
        map_fn,
        num_instances=self.env.config.parallelism)
    return self.__register(op)
def function[map, parameter[self, map_fn, name]]: constant[Applies a map operator to the stream. Args: map_fn (function): The user-defined logic of the map. ] variable[op] assign[=] call[name[Operator], parameter[call[name[_generate_uuid], parameter[]], name[OpType].Map, name[name], name[map_fn]]] return[call[name[self].__register, parameter[name[op]]]]
keyword[def] identifier[map] ( identifier[self] , identifier[map_fn] , identifier[name] = literal[string] ): literal[string] identifier[op] = identifier[Operator] ( identifier[_generate_uuid] (), identifier[OpType] . identifier[Map] , identifier[name] , identifier[map_fn] , identifier[num_instances] = identifier[self] . identifier[env] . identifier[config] . identifier[parallelism] ) keyword[return] identifier[self] . identifier[__register] ( identifier[op] )
def map(self, map_fn, name='Map'):
    """Applies a map operator to the stream.

    Args:
        map_fn (function): The user-defined logic of the map.
    """
    op = Operator(_generate_uuid(), OpType.Map, name, map_fn, num_instances=self.env.config.parallelism)
    return self.__register(op)
def print_traceback(self, always_print=False): """ Prints the traceback to console - if there is any traceback, otherwise does nothing. :param always_print: print the traceback, even if there is nothing in the buffer (default: false) """ if self._exception or always_print: self.__echo.critical("--{ TRACEBACK }" + "-" * 100) self.__format_lines_error(self.traceback) self.__echo.critical("---------------" + "-" * 100)
def function[print_traceback, parameter[self, always_print]]: constant[ Prints the traceback to console - if there is any traceback, otherwise does nothing. :param always_print: print the traceback, even if there is nothing in the buffer (default: false) ] if <ast.BoolOp object at 0x7da1b0017220> begin[:] call[name[self].__echo.critical, parameter[binary_operation[constant[--{ TRACEBACK }] + binary_operation[constant[-] * constant[100]]]]] call[name[self].__format_lines_error, parameter[name[self].traceback]] call[name[self].__echo.critical, parameter[binary_operation[constant[---------------] + binary_operation[constant[-] * constant[100]]]]]
keyword[def] identifier[print_traceback] ( identifier[self] , identifier[always_print] = keyword[False] ): literal[string] keyword[if] identifier[self] . identifier[_exception] keyword[or] identifier[always_print] : identifier[self] . identifier[__echo] . identifier[critical] ( literal[string] + literal[string] * literal[int] ) identifier[self] . identifier[__format_lines_error] ( identifier[self] . identifier[traceback] ) identifier[self] . identifier[__echo] . identifier[critical] ( literal[string] + literal[string] * literal[int] )
def print_traceback(self, always_print=False): """ Prints the traceback to console - if there is any traceback, otherwise does nothing. :param always_print: print the traceback, even if there is nothing in the buffer (default: false) """ if self._exception or always_print: self.__echo.critical('--{ TRACEBACK }' + '-' * 100) self.__format_lines_error(self.traceback) self.__echo.critical('---------------' + '-' * 100) # depends on [control=['if'], data=[]]
def get_conn(self): """Returns a Google Cloud Dataproc service object.""" http_authorized = self._authorize() return build( 'dataproc', self.api_version, http=http_authorized, cache_discovery=False)
def function[get_conn, parameter[self]]: constant[Returns a Google Cloud Dataproc service object.] variable[http_authorized] assign[=] call[name[self]._authorize, parameter[]] return[call[name[build], parameter[constant[dataproc], name[self].api_version]]]
keyword[def] identifier[get_conn] ( identifier[self] ): literal[string] identifier[http_authorized] = identifier[self] . identifier[_authorize] () keyword[return] identifier[build] ( literal[string] , identifier[self] . identifier[api_version] , identifier[http] = identifier[http_authorized] , identifier[cache_discovery] = keyword[False] )
def get_conn(self): """Returns a Google Cloud Dataproc service object.""" http_authorized = self._authorize() return build('dataproc', self.api_version, http=http_authorized, cache_discovery=False)
def requires_ROOT(version, exception=False):
    """
    A decorator for functions or methods that require a minimum ROOT version.
    If `exception` is False (the default) a warning is issued and None is
    returned, otherwise a `NotImplementedError` exception is raised.
    `exception` may also be an `Exception` in which case it will be raised
    instead of `NotImplementedError`.
    """
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            if ROOT_VERSION < version:
                msg = ("{0} requires at least ROOT {1} "
                       "but you are using {2}".format(
                           f.__name__, version, ROOT_VERSION))
                if inspect.isclass(exception) and issubclass(exception, Exception):
                    raise exception
                elif exception:
                    raise NotImplementedError(msg)
                warnings.warn(msg)
                return None
            return f(*args, **kwargs)
        return wrapper
    return decorator
def function[requires_ROOT, parameter[version, exception]]: constant[ A decorator for functions or methods that require a minimum ROOT version. If `exception` is False (the default) a warning is issued and None is returned, otherwise a `NotImplementedError` exception is raised. `exception` may also be an `Exception` in which case it will be raised instead of `NotImplementedError`. ] def function[decorator, parameter[f]]: def function[wrapper, parameter[]]: if compare[name[ROOT_VERSION] less[<] name[version]] begin[:] variable[msg] assign[=] call[constant[{0} requires at least ROOT {1} but you are using {2}].format, parameter[name[f].__name__, name[version], name[ROOT_VERSION]]] if <ast.BoolOp object at 0x7da1b1192bc0> begin[:] <ast.Raise object at 0x7da1b1192d40> call[name[warnings].warn, parameter[name[msg]]] return[constant[None]] return[call[name[f], parameter[<ast.Starred object at 0x7da1b11204f0>]]] return[name[wrapper]] return[name[decorator]]
keyword[def] identifier[requires_ROOT] ( identifier[version] , identifier[exception] = keyword[False] ): literal[string] keyword[def] identifier[decorator] ( identifier[f] ): @ identifier[wraps] ( identifier[f] ) keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ): keyword[if] identifier[ROOT_VERSION] < identifier[version] : identifier[msg] =( literal[string] literal[string] . identifier[format] ( identifier[f] . identifier[__name__] , identifier[version] , identifier[ROOT_VERSION] )) keyword[if] identifier[inspect] . identifier[isclass] ( identifier[exception] ) keyword[and] identifier[issubclass] ( identifier[exception] , identifier[Exception] ): keyword[raise] identifier[exception] keyword[elif] identifier[exception] : keyword[raise] identifier[NotImplementedError] ( identifier[msg] ) identifier[warnings] . identifier[warn] ( identifier[msg] ) keyword[return] keyword[None] keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[wrapper] keyword[return] identifier[decorator]
def requires_ROOT(version, exception=False):
    """
    A decorator for functions or methods that require a minimum ROOT version.
    If `exception` is False (the default) a warning is issued and None is
    returned, otherwise a `NotImplementedError` exception is raised.
    `exception` may also be an `Exception` in which case it will be raised
    instead of `NotImplementedError`.
    """

    def decorator(f):

        @wraps(f)
        def wrapper(*args, **kwargs):
            if ROOT_VERSION < version:
                msg = '{0} requires at least ROOT {1} but you are using {2}'.format(f.__name__, version, ROOT_VERSION)
                if inspect.isclass(exception) and issubclass(exception, Exception):
                    raise exception # depends on [control=['if'], data=[]]
                elif exception:
                    raise NotImplementedError(msg) # depends on [control=['if'], data=[]]
                warnings.warn(msg)
                return None # depends on [control=['if'], data=['ROOT_VERSION', 'version']]
            return f(*args, **kwargs)
        return wrapper
    return decorator
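How the decorator is meant to be applied (note the `return wrapper` fix above; the version strings and functions here are illustrative, and ROOT_VERSION comes from the surrounding module):

@requires_ROOT('6.20')
def modern_feature(x):
    return x * 2

modern_feature(21)  # 42 on ROOT >= 6.20; a warning and None on older ROOT

@requires_ROOT('6.20', exception=True)
def strict_feature(x):
    return x * 2    # on older ROOT this raises NotImplementedError instead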
def importSafeElementTree(module_names=None):
    """Find a working ElementTree implementation that is not vulnerable
    to XXE, using `defusedxml`.

    >>> XXESafeElementTree = importSafeElementTree()

    @param module_names: The names of modules to try to use as
        a safe ElementTree. Defaults to C{L{xxe_safe_elementtree_modules}}

    @returns: An ElementTree module that is not vulnerable to XXE.
    """
    if module_names is None:
        module_names = xxe_safe_elementtree_modules
    try:
        return importElementTree(module_names)
    except ImportError:
        raise ImportError('Unable to find an ElementTree module '
                          'that is not vulnerable to XXE. '
                          'Tried importing %r' % (module_names, ))
def function[importSafeElementTree, parameter[module_names]]: constant[Find a working ElementTree implementation that is not vulnerable to XXE, using `defusedxml`. >>> XXESafeElementTree = importSafeElementTree() @param module_names: The names of modules to try to use as a safe ElementTree. Defaults to C{L{xxe_safe_elementtree_modules}} @returns: An ElementTree module that is not vulnerable to XXE. ] if compare[name[module_names] is constant[None]] begin[:] variable[module_names] assign[=] name[xxe_safe_elementtree_modules] <ast.Try object at 0x7da1b06c98a0>
keyword[def] identifier[importSafeElementTree] ( identifier[module_names] = keyword[None] ): literal[string] keyword[if] identifier[module_names] keyword[is] keyword[None] : identifier[module_names] = identifier[xxe_safe_elementtree_modules] keyword[try] : keyword[return] identifier[importElementTree] ( identifier[module_names] ) keyword[except] identifier[ImportError] : keyword[raise] identifier[ImportError] ( literal[string] literal[string] literal[string] %( identifier[module_names] ,))
def importSafeElementTree(module_names=None):
    """Find a working ElementTree implementation that is not vulnerable
    to XXE, using `defusedxml`.

    >>> XXESafeElementTree = importSafeElementTree()

    @param module_names: The names of modules to try to use as
        a safe ElementTree. Defaults to C{L{xxe_safe_elementtree_modules}}

    @returns: An ElementTree module that is not vulnerable to XXE.
    """
    if module_names is None:
        module_names = xxe_safe_elementtree_modules # depends on [control=['if'], data=['module_names']]
    try:
        return importElementTree(module_names) # depends on [control=['try'], data=[]]
    except ImportError:
        raise ImportError('Unable to find an ElementTree module that is not vulnerable to XXE. Tried importing %r' % (module_names,)) # depends on [control=['except'], data=[]]
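Typical use of importSafeElementTree, assuming defusedxml (or another module from xxe_safe_elementtree_modules) is installed:

ElementTree = importSafeElementTree()
root = ElementTree.fromstring('<creds><user>alice</user></creds>')
print(root.find('user').text)  # alice -- parsed without XXE exposure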
def _read_amino_acids(): """Read the amino acid information from a resource file.""" this_dir = os.path.dirname(os.path.abspath(__file__)) aa_file = os.path.join(this_dir, os.pardir, 'resources', 'amino_acids.tsv') amino_acids = {} amino_acids_reverse = {} with open(aa_file, 'rt') as fh: lines = fh.readlines() for lin in lines[1:]: terms = lin.strip().split('\t') key = terms[2] val = {'full_name': terms[0], 'short_name': terms[1], 'indra_name': terms[3]} amino_acids[key] = val for v in val.values(): amino_acids_reverse[v] = key return amino_acids, amino_acids_reverse
def function[_read_amino_acids, parameter[]]: constant[Read the amino acid information from a resource file.] variable[this_dir] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[name[__file__]]]]] variable[aa_file] assign[=] call[name[os].path.join, parameter[name[this_dir], name[os].pardir, constant[resources], constant[amino_acids.tsv]]] variable[amino_acids] assign[=] dictionary[[], []] variable[amino_acids_reverse] assign[=] dictionary[[], []] with call[name[open], parameter[name[aa_file], constant[rt]]] begin[:] variable[lines] assign[=] call[name[fh].readlines, parameter[]] for taget[name[lin]] in starred[call[name[lines]][<ast.Slice object at 0x7da20c991300>]] begin[:] variable[terms] assign[=] call[call[name[lin].strip, parameter[]].split, parameter[constant[ ]]] variable[key] assign[=] call[name[terms]][constant[2]] variable[val] assign[=] dictionary[[<ast.Constant object at 0x7da20c992ec0>, <ast.Constant object at 0x7da20c993f70>, <ast.Constant object at 0x7da20c993940>], [<ast.Subscript object at 0x7da20c990fa0>, <ast.Subscript object at 0x7da20c9935e0>, <ast.Subscript object at 0x7da20c992fe0>]] call[name[amino_acids]][name[key]] assign[=] name[val] for taget[name[v]] in starred[call[name[val].values, parameter[]]] begin[:] call[name[amino_acids_reverse]][name[v]] assign[=] name[key] return[tuple[[<ast.Name object at 0x7da20c992770>, <ast.Name object at 0x7da20c992dd0>]]]
keyword[def] identifier[_read_amino_acids] (): literal[string] identifier[this_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[__file__] )) identifier[aa_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[this_dir] , identifier[os] . identifier[pardir] , literal[string] , literal[string] ) identifier[amino_acids] ={} identifier[amino_acids_reverse] ={} keyword[with] identifier[open] ( identifier[aa_file] , literal[string] ) keyword[as] identifier[fh] : identifier[lines] = identifier[fh] . identifier[readlines] () keyword[for] identifier[lin] keyword[in] identifier[lines] [ literal[int] :]: identifier[terms] = identifier[lin] . identifier[strip] (). identifier[split] ( literal[string] ) identifier[key] = identifier[terms] [ literal[int] ] identifier[val] ={ literal[string] : identifier[terms] [ literal[int] ], literal[string] : identifier[terms] [ literal[int] ], literal[string] : identifier[terms] [ literal[int] ]} identifier[amino_acids] [ identifier[key] ]= identifier[val] keyword[for] identifier[v] keyword[in] identifier[val] . identifier[values] (): identifier[amino_acids_reverse] [ identifier[v] ]= identifier[key] keyword[return] identifier[amino_acids] , identifier[amino_acids_reverse]
def _read_amino_acids(): """Read the amino acid information from a resource file.""" this_dir = os.path.dirname(os.path.abspath(__file__)) aa_file = os.path.join(this_dir, os.pardir, 'resources', 'amino_acids.tsv') amino_acids = {} amino_acids_reverse = {} with open(aa_file, 'rt') as fh: lines = fh.readlines() # depends on [control=['with'], data=['fh']] for lin in lines[1:]: terms = lin.strip().split('\t') key = terms[2] val = {'full_name': terms[0], 'short_name': terms[1], 'indra_name': terms[3]} amino_acids[key] = val for v in val.values(): amino_acids_reverse[v] = key # depends on [control=['for'], data=['v']] # depends on [control=['for'], data=['lin']] return (amino_acids, amino_acids_reverse)
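A sketch of how the two dictionaries returned by _read_amino_acids relate (the exact keys depend on the columns of the packaged amino_acids.tsv):

amino_acids, amino_acids_reverse = _read_amino_acids()
entry_key = next(iter(amino_acids))   # e.g. a residue code from column 3 of the TSV
entry = amino_acids[entry_key]
print(entry['full_name'], entry['short_name'], entry['indra_name'])
# each of the three names maps back to the same key:
assert amino_acids_reverse[entry['full_name']] == entry_key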
def custom_showwarning( message, category, filename="", lineno=-1, file=None, line=None ): """Hook to override default showwarning. https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings """ if file is None: file = sys.stderr if file is None: # sys.stderr is None when run with pythonw.exe: # warnings get lost return text = "%s: %s\n" % (category.__name__, message) try: file.write(text) except OSError: # the file (probably stderr) is invalid - this warning gets lost. pass
def function[custom_showwarning, parameter[message, category, filename, lineno, file, line]]: constant[Hook to override default showwarning. https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings ] if compare[name[file] is constant[None]] begin[:] variable[file] assign[=] name[sys].stderr if compare[name[file] is constant[None]] begin[:] return[None] variable[text] assign[=] binary_operation[constant[%s: %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1affc27d0>, <ast.Name object at 0x7da1affc0340>]]] <ast.Try object at 0x7da1affc06a0>
keyword[def] identifier[custom_showwarning] ( identifier[message] , identifier[category] , identifier[filename] = literal[string] , identifier[lineno] =- literal[int] , identifier[file] = keyword[None] , identifier[line] = keyword[None] ): literal[string] keyword[if] identifier[file] keyword[is] keyword[None] : identifier[file] = identifier[sys] . identifier[stderr] keyword[if] identifier[file] keyword[is] keyword[None] : keyword[return] identifier[text] = literal[string] %( identifier[category] . identifier[__name__] , identifier[message] ) keyword[try] : identifier[file] . identifier[write] ( identifier[text] ) keyword[except] identifier[OSError] : keyword[pass]
def custom_showwarning(message, category, filename='', lineno=-1, file=None, line=None): """Hook to override default showwarning. https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings """ if file is None: file = sys.stderr if file is None: # sys.stderr is None when run with pythonw.exe: # warnings get lost return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['file']] text = '%s: %s\n' % (category.__name__, message) try: file.write(text) # depends on [control=['try'], data=[]] except OSError: # the file (probably stderr) is invalid - this warning gets lost. pass # depends on [control=['except'], data=[]]
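Installing the hook is a one-liner; afterwards warnings print as 'Category: message' with no file or line noise:

import warnings

warnings.showwarning = custom_showwarning
warnings.warn('disk nearly full')
# stderr now shows: UserWarning: disk nearly full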
def remove(self, pkgs): """Remove packages from queue """ print("\nRemove packages from the queue:\n") with open(self.queue_list, "w") as queue: for line in self.queued.splitlines(): if line not in pkgs: queue.write(line + "\n") else: print("{0}{1}{2}".format(self.meta.color["RED"], line, self.meta.color["ENDC"])) self.quit = True queue.close() if self.quit: print("")
def function[remove, parameter[self, pkgs]]: constant[Remove packages from queue ] call[name[print], parameter[constant[ Remove packages from the queue: ]]] with call[name[open], parameter[name[self].queue_list, constant[w]]] begin[:] for taget[name[line]] in starred[call[name[self].queued.splitlines, parameter[]]] begin[:] if compare[name[line] <ast.NotIn object at 0x7da2590d7190> name[pkgs]] begin[:] call[name[queue].write, parameter[binary_operation[name[line] + constant[ ]]]] call[name[queue].close, parameter[]] if name[self].quit begin[:] call[name[print], parameter[constant[]]]
keyword[def] identifier[remove] ( identifier[self] , identifier[pkgs] ): literal[string] identifier[print] ( literal[string] ) keyword[with] identifier[open] ( identifier[self] . identifier[queue_list] , literal[string] ) keyword[as] identifier[queue] : keyword[for] identifier[line] keyword[in] identifier[self] . identifier[queued] . identifier[splitlines] (): keyword[if] identifier[line] keyword[not] keyword[in] identifier[pkgs] : identifier[queue] . identifier[write] ( identifier[line] + literal[string] ) keyword[else] : identifier[print] ( literal[string] . identifier[format] ( identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], identifier[line] , identifier[self] . identifier[meta] . identifier[color] [ literal[string] ])) identifier[self] . identifier[quit] = keyword[True] identifier[queue] . identifier[close] () keyword[if] identifier[self] . identifier[quit] : identifier[print] ( literal[string] )
def remove(self, pkgs): """Remove packages from queue """ print('\nRemove packages from the queue:\n') with open(self.queue_list, 'w') as queue: for line in self.queued.splitlines(): if line not in pkgs: queue.write(line + '\n') # depends on [control=['if'], data=['line']] else: print('{0}{1}{2}'.format(self.meta.color['RED'], line, self.meta.color['ENDC'])) self.quit = True # depends on [control=['for'], data=['line']] queue.close() # depends on [control=['with'], data=['queue']] if self.quit: print('') # depends on [control=['if'], data=[]]
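The method above is tied to instance state; the underlying read-filter-rewrite pattern, as a standalone sketch (the path and set below are hypothetical):

def drop_lines(path, unwanted):
    # Read once, keep only lines not flagged for removal, then rewrite --
    # the same pattern as `remove` above, minus the colored terminal output.
    with open(path) as fh:
        kept = [line for line in fh if line.rstrip("\n") not in unwanted]
    with open(path, "w") as fh:
        fh.writelines(kept)

# drop_lines("/tmp/queue_list", {"pkg-a", "pkg-b"})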
def get_share(self, group_id, resource_id, depth=1): """ Retrieves a specific resource share available to a group. :param group_id: The unique ID of the group. :type group_id: ``str`` :param resource_id: The unique ID of the resource. :type resource_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` """ response = self._perform_request( '/um/groups/%s/shares/%s?depth=%s' % (group_id, resource_id, str(depth))) return response
def function[get_share, parameter[self, group_id, resource_id, depth]]: constant[ Retrieves a specific resource share available to a group. :param group_id: The unique ID of the group. :type group_id: ``str`` :param resource_id: The unique ID of the resource. :type resource_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` ] variable[response] assign[=] call[name[self]._perform_request, parameter[binary_operation[constant[/um/groups/%s/shares/%s?depth=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b00fa9b0>, <ast.Name object at 0x7da1b00fadd0>, <ast.Call object at 0x7da1b00f95d0>]]]]] return[name[response]]
keyword[def] identifier[get_share] ( identifier[self] , identifier[group_id] , identifier[resource_id] , identifier[depth] = literal[int] ): literal[string] identifier[response] = identifier[self] . identifier[_perform_request] ( literal[string] %( identifier[group_id] , identifier[resource_id] , identifier[str] ( identifier[depth] ))) keyword[return] identifier[response]
def get_share(self, group_id, resource_id, depth=1): """ Retrieves a specific resource share available to a group. :param group_id: The unique ID of the group. :type group_id: ``str`` :param resource_id: The unique ID of the resource. :type resource_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` """ response = self._perform_request('/um/groups/%s/shares/%s?depth=%s' % (group_id, resource_id, str(depth))) return response
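A hypothetical call against the method above; `client` stands in for whatever object carries `_perform_request`, and both UUIDs are invented:

share = client.get_share(
    group_id="45ba215b-91e2-4e19-b40c-d9f7f0d1fc2f",    # made-up IDs
    resource_id="e1e2b4b5-8c21-4d47-bfc9-7b3a4c6f0e12",
    depth=2,                                            # one extra level of nested detail
)
print(share)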
def acquisition_function_withGradients(self, x):
    """
    Returns the acquisition function and its gradient at x.

    """
    aqu_x = self.acquisition_function(x)
    aqu_x_grad = self.d_acquisition_function(x)
    return aqu_x, aqu_x_grad
def function[acquisition_function_withGradients, parameter[self, x]]:
    constant[
        Returns the acquisition function and its gradient at x.

        ]
    variable[aqu_x] assign[=] call[name[self].acquisition_function, parameter[name[x]]]
    variable[aqu_x_grad] assign[=] call[name[self].d_acquisition_function, parameter[name[x]]]
    return[tuple[[<ast.Name object at 0x7da20c7cb250>, <ast.Name object at 0x7da20c7c8df0>]]]
keyword[def] identifier[acquisition_function_withGradients] ( identifier[self] , identifier[x] ): literal[string] identifier[aqu_x] = identifier[self] . identifier[acquisition_function] ( identifier[x] ) identifier[aqu_x_grad] = identifier[self] . identifier[d_acquisition_function] ( identifier[x] ) keyword[return] identifier[aqu_x] , identifier[aqu_x_grad]
def acquisition_function_withGradients(self, x):
    """
    Returns the acquisition function and its gradient at x.

    """
    aqu_x = self.acquisition_function(x)
    aqu_x_grad = self.d_acquisition_function(x)
    return (aqu_x, aqu_x_grad)
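Since the method above pairs a value with an analytic gradient, a central finite-difference check is a natural companion; a sketch assuming `acquisition_function` returns a scalar-like value for a single numpy point x:

import numpy as np

def check_gradient(acq, x, eps=1e-6):
    # Compare the analytic gradient against central differences, coordinate
    # by coordinate, and return the worst absolute mismatch.
    _, grad = acq.acquisition_function_withGradients(x)
    fd = np.zeros(x.size)
    for i in range(x.size):
        step = np.zeros_like(x, dtype=float)
        step.flat[i] = eps
        hi = float(acq.acquisition_function(x + step))
        lo = float(acq.acquisition_function(x - step))
        fd[i] = (hi - lo) / (2 * eps)
    return float(np.max(np.abs(np.asarray(grad, dtype=float).ravel() - fd)))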
def getDbNames(self): """This function returns the list of open databases""" request = [] request.append(uu({'-dbnames': '' })) result = self._doRequest(request) result = FMResultset.FMResultset(result) dbNames = [] for dbName in result.resultset: dbNames.append(string.lower(dbName['DATABASE_NAME'])) return dbNames
def function[getDbNames, parameter[self]]: constant[This function returns the list of open databases] variable[request] assign[=] list[[]] call[name[request].append, parameter[call[name[uu], parameter[dictionary[[<ast.Constant object at 0x7da1b28194b0>], [<ast.Constant object at 0x7da1b281a8c0>]]]]]] variable[result] assign[=] call[name[self]._doRequest, parameter[name[request]]] variable[result] assign[=] call[name[FMResultset].FMResultset, parameter[name[result]]] variable[dbNames] assign[=] list[[]] for taget[name[dbName]] in starred[name[result].resultset] begin[:] call[name[dbNames].append, parameter[call[name[string].lower, parameter[call[name[dbName]][constant[DATABASE_NAME]]]]]] return[name[dbNames]]
keyword[def] identifier[getDbNames] ( identifier[self] ): literal[string] identifier[request] =[] identifier[request] . identifier[append] ( identifier[uu] ({ literal[string] : literal[string] })) identifier[result] = identifier[self] . identifier[_doRequest] ( identifier[request] ) identifier[result] = identifier[FMResultset] . identifier[FMResultset] ( identifier[result] ) identifier[dbNames] =[] keyword[for] identifier[dbName] keyword[in] identifier[result] . identifier[resultset] : identifier[dbNames] . identifier[append] ( identifier[string] . identifier[lower] ( identifier[dbName] [ literal[string] ])) keyword[return] identifier[dbNames]
def getDbNames(self): """This function returns the list of open databases""" request = [] request.append(uu({'-dbnames': ''})) result = self._doRequest(request) result = FMResultset.FMResultset(result) dbNames = [] for dbName in result.resultset: dbNames.append(string.lower(dbName['DATABASE_NAME'])) # depends on [control=['for'], data=['dbName']] return dbNames
def insert_entry(self, entry): """! @brief Insert new clustering feature to the leaf node. @param[in] entry (cfentry): Clustering feature. """ self.feature += entry; self.entries.append(entry);
def function[insert_entry, parameter[self, entry]]: constant[! @brief Insert new clustering feature to the leaf node. @param[in] entry (cfentry): Clustering feature. ] <ast.AugAssign object at 0x7da20e9553f0> call[name[self].entries.append, parameter[name[entry]]]
keyword[def] identifier[insert_entry] ( identifier[self] , identifier[entry] ): literal[string] identifier[self] . identifier[feature] += identifier[entry] ; identifier[self] . identifier[entries] . identifier[append] ( identifier[entry] );
def insert_entry(self, entry): """! @brief Insert new clustering feature to the leaf node. @param[in] entry (cfentry): Clustering feature. """ self.feature += entry self.entries.append(entry)
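The `+=` above relies on the clustering feature type defining addition; a toy stand-in (not pyclustering's real cfentry) showing the (count, linear sum, square sum) arithmetic a CF-tree leaf accumulates:

class MiniCF:
    # Toy clustering feature: point count, per-dimension linear sum and square sum.
    def __init__(self, n, ls, ss):
        self.n, self.ls, self.ss = n, ls, ss

    def __add__(self, other):
        return MiniCF(self.n + other.n,
                      [a + b for a, b in zip(self.ls, other.ls)],
                      [a + b for a, b in zip(self.ss, other.ss)])

leaf_feature = MiniCF(0, [0.0, 0.0], [0.0, 0.0])
for point in ([1.0, 2.0], [3.0, 4.0]):
    entry = MiniCF(1, point, [p * p for p in point])
    leaf_feature = leaf_feature + entry      # what `self.feature += entry` does
print(leaf_feature.n, leaf_feature.ls)       # 2 [4.0, 6.0]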
async def set_message( self, text=None, reply_to=0, parse_mode=(), link_preview=None): """ Changes the draft message on the Telegram servers. The changes are reflected in this object. :param str text: New text of the draft. Preserved if left as None. :param int reply_to: Message ID to reply to. Preserved if left as 0, erased if set to None. :param bool link_preview: Whether to attach a web page preview. Preserved if left as None. :param str parse_mode: The parse mode to be used for the text. :return bool: ``True`` on success. """ if text is None: text = self._text if reply_to == 0: reply_to = self.reply_to_msg_id if link_preview is None: link_preview = self.link_preview raw_text, entities =\ await self._client._parse_message_text(text, parse_mode) result = await self._client(SaveDraftRequest( peer=self._peer, message=raw_text, no_webpage=not link_preview, reply_to_msg_id=reply_to, entities=entities )) if result: self._text = text self._raw_text = raw_text self.link_preview = link_preview self.reply_to_msg_id = reply_to self.date = datetime.datetime.now(tz=datetime.timezone.utc) return result
<ast.AsyncFunctionDef object at 0x7da18eb56890>
keyword[async] keyword[def] identifier[set_message] ( identifier[self] , identifier[text] = keyword[None] , identifier[reply_to] = literal[int] , identifier[parse_mode] =(), identifier[link_preview] = keyword[None] ): literal[string] keyword[if] identifier[text] keyword[is] keyword[None] : identifier[text] = identifier[self] . identifier[_text] keyword[if] identifier[reply_to] == literal[int] : identifier[reply_to] = identifier[self] . identifier[reply_to_msg_id] keyword[if] identifier[link_preview] keyword[is] keyword[None] : identifier[link_preview] = identifier[self] . identifier[link_preview] identifier[raw_text] , identifier[entities] = keyword[await] identifier[self] . identifier[_client] . identifier[_parse_message_text] ( identifier[text] , identifier[parse_mode] ) identifier[result] = keyword[await] identifier[self] . identifier[_client] ( identifier[SaveDraftRequest] ( identifier[peer] = identifier[self] . identifier[_peer] , identifier[message] = identifier[raw_text] , identifier[no_webpage] = keyword[not] identifier[link_preview] , identifier[reply_to_msg_id] = identifier[reply_to] , identifier[entities] = identifier[entities] )) keyword[if] identifier[result] : identifier[self] . identifier[_text] = identifier[text] identifier[self] . identifier[_raw_text] = identifier[raw_text] identifier[self] . identifier[link_preview] = identifier[link_preview] identifier[self] . identifier[reply_to_msg_id] = identifier[reply_to] identifier[self] . identifier[date] = identifier[datetime] . identifier[datetime] . identifier[now] ( identifier[tz] = identifier[datetime] . identifier[timezone] . identifier[utc] ) keyword[return] identifier[result]
async def set_message(self, text=None, reply_to=0, parse_mode=(), link_preview=None): """ Changes the draft message on the Telegram servers. The changes are reflected in this object. :param str text: New text of the draft. Preserved if left as None. :param int reply_to: Message ID to reply to. Preserved if left as 0, erased if set to None. :param bool link_preview: Whether to attach a web page preview. Preserved if left as None. :param str parse_mode: The parse mode to be used for the text. :return bool: ``True`` on success. """ if text is None: text = self._text # depends on [control=['if'], data=['text']] if reply_to == 0: reply_to = self.reply_to_msg_id # depends on [control=['if'], data=['reply_to']] if link_preview is None: link_preview = self.link_preview # depends on [control=['if'], data=['link_preview']] (raw_text, entities) = await self._client._parse_message_text(text, parse_mode) result = await self._client(SaveDraftRequest(peer=self._peer, message=raw_text, no_webpage=not link_preview, reply_to_msg_id=reply_to, entities=entities)) if result: self._text = text self._raw_text = raw_text self.link_preview = link_preview self.reply_to_msg_id = reply_to self.date = datetime.datetime.now(tz=datetime.timezone.utc) # depends on [control=['if'], data=[]] return result
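A hedged usage sketch for the coroutine above; `draft` is assumed to be an instance of this class already bound to a connected Telethon-style client, so the driving call is left commented out:

import asyncio

async def update_draft(draft):
    # Only the text and the preview flag change here; reply_to keeps its
    # previous value because the default of 0 means "preserve".
    ok = await draft.set_message("see you at 9", link_preview=False)
    if ok:
        print("draft saved at", draft.date)

# asyncio.run(update_draft(draft))   # with a real draft object in scope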
def hash(self): ''' :rtype: int :return: hash of the field ''' hashed = super(String, self).hash() return khash(hashed, self._max_size)
def function[hash, parameter[self]]: constant[ :rtype: int :return: hash of the field ] variable[hashed] assign[=] call[call[name[super], parameter[name[String], name[self]]].hash, parameter[]] return[call[name[khash], parameter[name[hashed], name[self]._max_size]]]
keyword[def] identifier[hash] ( identifier[self] ): literal[string] identifier[hashed] = identifier[super] ( identifier[String] , identifier[self] ). identifier[hash] () keyword[return] identifier[khash] ( identifier[hashed] , identifier[self] . identifier[_max_size] )
def hash(self): """ :rtype: int :return: hash of the field """ hashed = super(String, self).hash() return khash(hashed, self._max_size)
def connect_to_region(cls, region, session=None, access_key=None, secret_key=None, **kwargs): """ Connect to an AWS region. This method has been deprecated in favor of :meth:`~.connect` Parameters ---------- region : str Name of an AWS region session : :class:`~botocore.session.Session`, optional The Session object to use for the connection access_key : str, optional If session is None, set this access key when creating the session secret_key : str, optional If session is None, set this secret key when creating the session **kwargs : dict Keyword arguments to pass to the constructor """ warnings.warn("connect_to_region is deprecated and will be removed. " "Use connect instead.") if session is None: session = botocore.session.get_session() if access_key is not None: session.set_credentials(access_key, secret_key) client = session.create_client('dynamodb', region) return cls(client, **kwargs)
def function[connect_to_region, parameter[cls, region, session, access_key, secret_key]]: constant[ Connect to an AWS region. This method has been deprecated in favor of :meth:`~.connect` Parameters ---------- region : str Name of an AWS region session : :class:`~botocore.session.Session`, optional The Session object to use for the connection access_key : str, optional If session is None, set this access key when creating the session secret_key : str, optional If session is None, set this secret key when creating the session **kwargs : dict Keyword arguments to pass to the constructor ] call[name[warnings].warn, parameter[constant[connect_to_region is deprecated and will be removed. Use connect instead.]]] if compare[name[session] is constant[None]] begin[:] variable[session] assign[=] call[name[botocore].session.get_session, parameter[]] if compare[name[access_key] is_not constant[None]] begin[:] call[name[session].set_credentials, parameter[name[access_key], name[secret_key]]] variable[client] assign[=] call[name[session].create_client, parameter[constant[dynamodb], name[region]]] return[call[name[cls], parameter[name[client]]]]
keyword[def] identifier[connect_to_region] ( identifier[cls] , identifier[region] , identifier[session] = keyword[None] , identifier[access_key] = keyword[None] , identifier[secret_key] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] literal[string] ) keyword[if] identifier[session] keyword[is] keyword[None] : identifier[session] = identifier[botocore] . identifier[session] . identifier[get_session] () keyword[if] identifier[access_key] keyword[is] keyword[not] keyword[None] : identifier[session] . identifier[set_credentials] ( identifier[access_key] , identifier[secret_key] ) identifier[client] = identifier[session] . identifier[create_client] ( literal[string] , identifier[region] ) keyword[return] identifier[cls] ( identifier[client] ,** identifier[kwargs] )
def connect_to_region(cls, region, session=None, access_key=None, secret_key=None, **kwargs): """ Connect to an AWS region. This method has been deprecated in favor of :meth:`~.connect` Parameters ---------- region : str Name of an AWS region session : :class:`~botocore.session.Session`, optional The Session object to use for the connection access_key : str, optional If session is None, set this access key when creating the session secret_key : str, optional If session is None, set this secret key when creating the session **kwargs : dict Keyword arguments to pass to the constructor """ warnings.warn('connect_to_region is deprecated and will be removed. Use connect instead.') if session is None: session = botocore.session.get_session() if access_key is not None: session.set_credentials(access_key, secret_key) # depends on [control=['if'], data=['access_key']] # depends on [control=['if'], data=['session']] client = session.create_client('dynamodb', region) return cls(client, **kwargs)
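The deprecation warning points at `connect`; the session wiring the method performs can also be done by hand with botocore (region and credentials below are placeholders):

import botocore.session

session = botocore.session.get_session()
session.set_credentials("AKIA...", "...")                 # placeholder credentials
client = session.create_client("dynamodb", "us-west-2")   # same call the method makes
# engine = cls.connect(...)  # the replacement entry point named in the warning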
def exec_args(f): """decorator for adding block/targets args for execution applied to %pxconfig and %%px """ args = [ magic_arguments.argument('-b', '--block', action="store_const", const=True, dest='block', help="use blocking (sync) execution", ), magic_arguments.argument('-a', '--noblock', action="store_const", const=False, dest='block', help="use non-blocking (async) execution", ), magic_arguments.argument('-t', '--targets', type=str, help="specify the targets on which to execute", ), magic_arguments.argument('--verbose', action="store_const", const=True, dest="set_verbose", help="print a message at each execution", ), magic_arguments.argument('--no-verbose', action="store_const", const=False, dest="set_verbose", help="don't print any messages", ), ] for a in args: f = a(f) return f
def function[exec_args, parameter[f]]: constant[decorator for adding block/targets args for execution applied to %pxconfig and %%px ] variable[args] assign[=] list[[<ast.Call object at 0x7da1b26ac820>, <ast.Call object at 0x7da1b26ae380>, <ast.Call object at 0x7da1b26aeef0>, <ast.Call object at 0x7da1b26ade70>, <ast.Call object at 0x7da18dc06a70>]] for taget[name[a]] in starred[name[args]] begin[:] variable[f] assign[=] call[name[a], parameter[name[f]]] return[name[f]]
keyword[def] identifier[exec_args] ( identifier[f] ): literal[string] identifier[args] =[ identifier[magic_arguments] . identifier[argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[const] = keyword[True] , identifier[dest] = literal[string] , identifier[help] = literal[string] , ), identifier[magic_arguments] . identifier[argument] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[const] = keyword[False] , identifier[dest] = literal[string] , identifier[help] = literal[string] , ), identifier[magic_arguments] . identifier[argument] ( literal[string] , literal[string] , identifier[type] = identifier[str] , identifier[help] = literal[string] , ), identifier[magic_arguments] . identifier[argument] ( literal[string] , identifier[action] = literal[string] , identifier[const] = keyword[True] , identifier[dest] = literal[string] , identifier[help] = literal[string] , ), identifier[magic_arguments] . identifier[argument] ( literal[string] , identifier[action] = literal[string] , identifier[const] = keyword[False] , identifier[dest] = literal[string] , identifier[help] = literal[string] , ), ] keyword[for] identifier[a] keyword[in] identifier[args] : identifier[f] = identifier[a] ( identifier[f] ) keyword[return] identifier[f]
def exec_args(f): """decorator for adding block/targets args for execution applied to %pxconfig and %%px """ args = [magic_arguments.argument('-b', '--block', action='store_const', const=True, dest='block', help='use blocking (sync) execution'), magic_arguments.argument('-a', '--noblock', action='store_const', const=False, dest='block', help='use non-blocking (async) execution'), magic_arguments.argument('-t', '--targets', type=str, help='specify the targets on which to execute'), magic_arguments.argument('--verbose', action='store_const', const=True, dest='set_verbose', help='print a message at each execution'), magic_arguments.argument('--no-verbose', action='store_const', const=False, dest='set_verbose', help="don't print any messages")] for a in args: f = a(f) # depends on [control=['for'], data=['a']] return f
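The closing loop above is a plain fold of decorators over `f`; the same pattern with no IPython dependency, using a made-up `tag` decorator factory in place of magic_arguments.argument:

def tag(label):
    # Each factory call returns a decorator, mirroring magic_arguments.argument(...).
    def deco(f):
        f.labels = getattr(f, "labels", []) + [label]
        return f
    return deco

def with_tags(f):
    args = [tag("block"), tag("targets"), tag("verbose")]
    for a in args:
        f = a(f)              # same fold as in exec_args above
    return f

@with_tags
def run():
    pass

print(run.labels)             # ['block', 'targets', 'verbose']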
def get_contents_debug_adapter_protocol(self, lst, fmt=None): ''' This method is to be used in the case where the variables are all saved by its id (and as such don't need to have the `resolve` method called later on, so, keys don't need to embed the reference in the key). Note that the return should be ordered. :return list(tuple(name:str, value:object, evaluateName:str)) ''' l = len(lst) ret = [] format_str = '%0' + str(int(len(str(l - 1)))) + 'd' if fmt is not None and fmt.get('hex', False): format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x' for i, item in enumerate(lst): ret.append((format_str % i, item, '[%s]' % i)) if i > MAX_ITEMS_TO_HANDLE: ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None)) break ret.append(('__len__', len(lst), partial(_apply_evaluate_name, evaluate_name='len(%s)'))) # Needed in case the class extends the built-in type and has some additional fields. from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt) if from_default_resolver: ret = from_default_resolver + ret return ret
def function[get_contents_debug_adapter_protocol, parameter[self, lst, fmt]]: constant[ This method is to be used in the case where the variables are all saved by its id (and as such don't need to have the `resolve` method called later on, so, keys don't need to embed the reference in the key). Note that the return should be ordered. :return list(tuple(name:str, value:object, evaluateName:str)) ] variable[l] assign[=] call[name[len], parameter[name[lst]]] variable[ret] assign[=] list[[]] variable[format_str] assign[=] binary_operation[binary_operation[constant[%0] + call[name[str], parameter[call[name[int], parameter[call[name[len], parameter[call[name[str], parameter[binary_operation[name[l] - constant[1]]]]]]]]]]] + constant[d]] if <ast.BoolOp object at 0x7da1b08db4c0> begin[:] variable[format_str] assign[=] binary_operation[binary_operation[constant[0x%0] + call[name[str], parameter[call[name[int], parameter[call[name[len], parameter[call[call[name[hex], parameter[name[l]]].lstrip, parameter[constant[0x]]]]]]]]]] + constant[x]] for taget[tuple[[<ast.Name object at 0x7da1b08d9390>, <ast.Name object at 0x7da1b08da350>]]] in starred[call[name[enumerate], parameter[name[lst]]]] begin[:] call[name[ret].append, parameter[tuple[[<ast.BinOp object at 0x7da1b08da0e0>, <ast.Name object at 0x7da1b08dab00>, <ast.BinOp object at 0x7da1b08dabc0>]]]] if compare[name[i] greater[>] name[MAX_ITEMS_TO_HANDLE]] begin[:] call[name[ret].append, parameter[tuple[[<ast.Name object at 0x7da1b08db430>, <ast.Name object at 0x7da1b08d9180>, <ast.Constant object at 0x7da1b08d92a0>]]]] break call[name[ret].append, parameter[tuple[[<ast.Constant object at 0x7da1b08d97e0>, <ast.Call object at 0x7da1b08db070>, <ast.Call object at 0x7da1b08d88b0>]]]] variable[from_default_resolver] assign[=] call[name[defaultResolver].get_contents_debug_adapter_protocol, parameter[name[lst]]] if name[from_default_resolver] begin[:] variable[ret] assign[=] binary_operation[name[from_default_resolver] + name[ret]] return[name[ret]]
keyword[def] identifier[get_contents_debug_adapter_protocol] ( identifier[self] , identifier[lst] , identifier[fmt] = keyword[None] ): literal[string] identifier[l] = identifier[len] ( identifier[lst] ) identifier[ret] =[] identifier[format_str] = literal[string] + identifier[str] ( identifier[int] ( identifier[len] ( identifier[str] ( identifier[l] - literal[int] ))))+ literal[string] keyword[if] identifier[fmt] keyword[is] keyword[not] keyword[None] keyword[and] identifier[fmt] . identifier[get] ( literal[string] , keyword[False] ): identifier[format_str] = literal[string] + identifier[str] ( identifier[int] ( identifier[len] ( identifier[hex] ( identifier[l] ). identifier[lstrip] ( literal[string] ))))+ literal[string] keyword[for] identifier[i] , identifier[item] keyword[in] identifier[enumerate] ( identifier[lst] ): identifier[ret] . identifier[append] (( identifier[format_str] % identifier[i] , identifier[item] , literal[string] % identifier[i] )) keyword[if] identifier[i] > identifier[MAX_ITEMS_TO_HANDLE] : identifier[ret] . identifier[append] (( identifier[TOO_LARGE_ATTR] , identifier[TOO_LARGE_MSG] , keyword[None] )) keyword[break] identifier[ret] . identifier[append] (( literal[string] , identifier[len] ( identifier[lst] ), identifier[partial] ( identifier[_apply_evaluate_name] , identifier[evaluate_name] = literal[string] ))) identifier[from_default_resolver] = identifier[defaultResolver] . identifier[get_contents_debug_adapter_protocol] ( identifier[lst] , identifier[fmt] = identifier[fmt] ) keyword[if] identifier[from_default_resolver] : identifier[ret] = identifier[from_default_resolver] + identifier[ret] keyword[return] identifier[ret]
def get_contents_debug_adapter_protocol(self, lst, fmt=None): """ This method is to be used in the case where the variables are all saved by its id (and as such don't need to have the `resolve` method called later on, so, keys don't need to embed the reference in the key). Note that the return should be ordered. :return list(tuple(name:str, value:object, evaluateName:str)) """ l = len(lst) ret = [] format_str = '%0' + str(int(len(str(l - 1)))) + 'd' if fmt is not None and fmt.get('hex', False): format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x' # depends on [control=['if'], data=[]] for (i, item) in enumerate(lst): ret.append((format_str % i, item, '[%s]' % i)) if i > MAX_ITEMS_TO_HANDLE: ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None)) break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] ret.append(('__len__', len(lst), partial(_apply_evaluate_name, evaluate_name='len(%s)'))) # Needed in case the class extends the built-in type and has some additional fields. from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt) if from_default_resolver: ret = from_default_resolver + ret # depends on [control=['if'], data=[]] return ret
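The width arithmetic above pads indices so entries sort lexically in a debugger tree; a quick standalone trace for a 120-element list:

l = 120
format_str = "%0" + str(int(len(str(l - 1)))) + "d"    # '%03d', wide enough for '119'
print(format_str % 7)                                  # 007

# The hex branch sizes the width from hex(l) minus its '0x' prefix:
hex_str = "0x%0" + str(int(len(hex(l).lstrip("0x")))) + "x"   # '0x%02x'
print(hex_str % 7)                                     # 0x07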
def adjust_memory(val, magnitude, direction="increase", out_modifier="", maximum=None):
    """Adjust memory based on number of cores utilized.
    """
    modifier = val[-1:]
    amount = float(val[:-1])
    if direction == "decrease":
        new_amount = amount / float(magnitude)
        # dealing with a specifier like 1G, need to scale to Mb
        if new_amount < 1 or (out_modifier.upper().startswith("M") and modifier.upper().startswith("G")):
            if modifier.upper().startswith("G"):
                new_amount = (amount * 1024) / magnitude
                modifier = "M" + modifier[1:]
            else:
                raise ValueError("Unexpected decrease in memory: %s by %s" % (val, magnitude))
        amount = int(new_amount)
    elif direction == "increase" and magnitude > 1:
        # for increases with multiple cores, leave small percentage of
        # memory for system to maintain process running resource and
        # avoid OOM killers
        adjuster = 0.91
        amount = int(math.ceil(amount * (adjuster * magnitude)))
    if out_modifier.upper().startswith("G") and modifier.upper().startswith("M"):
        modifier = out_modifier
        amount = int(math.floor(amount / 1024.0))
    if out_modifier.upper().startswith("M") and modifier.upper().startswith("G"):
        modifier = out_modifier
        amount = int(amount * 1024)
    if maximum:
        max_modifier = maximum[-1]
        max_amount = float(maximum[:-1])
        if modifier.upper() == "G" and max_modifier.upper() == "M":
            max_amount = max_amount / 1024.0
        elif modifier.upper() == "M" and max_modifier.upper() == "G":
            max_amount = max_amount * 1024.0
        amount = min([amount, max_amount])
    return "{amount}{modifier}".format(amount=int(math.floor(amount)), modifier=modifier)
def function[adjust_memory, parameter[val, magnitude, direction, out_modifier, maximum]]:
    constant[Adjust memory based on number of cores utilized.
    ]
    variable[modifier] assign[=] call[name[val]][<ast.Slice object at 0x7da1b2344340>]
    variable[amount] assign[=] call[name[float], parameter[call[name[val]][<ast.Slice object at 0x7da1b2345030>]]]
    if compare[name[direction] equal[==] constant[decrease]] begin[:]
    variable[new_amount] assign[=] binary_operation[name[amount] / call[name[float], parameter[name[magnitude]]]]
    if <ast.BoolOp object at 0x7da20c6e5990> begin[:]
    if call[call[name[modifier].upper, parameter[]].startswith, parameter[constant[G]]] begin[:]
    variable[new_amount] assign[=] binary_operation[binary_operation[name[amount] * constant[1024]] / name[magnitude]]
    variable[modifier] assign[=] binary_operation[constant[M] + call[name[modifier]][<ast.Slice object at 0x7da20c6e7460>]]
    variable[amount] assign[=] call[name[int], parameter[name[new_amount]]]
    if <ast.BoolOp object at 0x7da20c6e5ab0> begin[:]
    variable[modifier] assign[=] name[out_modifier]
    variable[amount] assign[=] call[name[int], parameter[call[name[math].floor, parameter[binary_operation[name[amount] / constant[1024.0]]]]]]
    if <ast.BoolOp object at 0x7da20c6e6c50> begin[:]
    variable[modifier] assign[=] name[out_modifier]
    variable[amount] assign[=] call[name[int], parameter[binary_operation[name[amount] * constant[1024]]]]
    if name[maximum] begin[:]
    variable[max_modifier] assign[=] call[name[maximum]][<ast.UnaryOp object at 0x7da20c6e40a0>]
    variable[max_amount] assign[=] call[name[float], parameter[call[name[maximum]][<ast.Slice object at 0x7da20c6e7af0>]]]
    if <ast.BoolOp object at 0x7da20c6e4370> begin[:]
    variable[max_amount] assign[=] binary_operation[name[max_amount] / constant[1024.0]]
    variable[amount] assign[=] call[name[min], parameter[list[[<ast.Name object at 0x7da20c6e5fc0>, <ast.Name object at 0x7da20c6e4bb0>]]]]
    return[call[constant[{amount}{modifier}].format, parameter[]]]
keyword[def] identifier[adjust_memory] ( identifier[val] , identifier[magnitude] , identifier[direction] = literal[string] , identifier[out_modifier] = literal[string] , identifier[maximum] = keyword[None] ):
    literal[string]
    identifier[modifier] = identifier[val] [- literal[int] :]
    identifier[amount] = identifier[float] ( identifier[val] [:- literal[int] ])
    keyword[if] identifier[direction] == literal[string] :
        identifier[new_amount] = identifier[amount] / identifier[float] ( identifier[magnitude] )
        keyword[if] identifier[new_amount] < literal[int] keyword[or] ( identifier[out_modifier] . identifier[upper] (). identifier[startswith] ( literal[string] ) keyword[and] identifier[modifier] . identifier[upper] (). identifier[startswith] ( literal[string] )):
            keyword[if] identifier[modifier] . identifier[upper] (). identifier[startswith] ( literal[string] ):
                identifier[new_amount] =( identifier[amount] * literal[int] )/ identifier[magnitude]
                identifier[modifier] = literal[string] + identifier[modifier] [ literal[int] :]
            keyword[else] :
                keyword[raise] identifier[ValueError] ( literal[string] %( identifier[val] , identifier[magnitude] ))
        identifier[amount] = identifier[int] ( identifier[new_amount] )
    keyword[elif] identifier[direction] == literal[string] keyword[and] identifier[magnitude] > literal[int] :
        identifier[adjuster] = literal[int]
        identifier[amount] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[amount] *( identifier[adjuster] * identifier[magnitude] )))
    keyword[if] identifier[out_modifier] . identifier[upper] (). identifier[startswith] ( literal[string] ) keyword[and] identifier[modifier] . identifier[upper] (). identifier[startswith] ( literal[string] ):
        identifier[modifier] = identifier[out_modifier]
        identifier[amount] = identifier[int] ( identifier[math] . identifier[floor] ( identifier[amount] / literal[int] ))
    keyword[if] identifier[out_modifier] . identifier[upper] (). identifier[startswith] ( literal[string] ) keyword[and] identifier[modifier] . identifier[upper] (). identifier[startswith] ( literal[string] ):
        identifier[modifier] = identifier[out_modifier]
        identifier[amount] = identifier[int] ( identifier[amount] * literal[int] )
    keyword[if] identifier[maximum] :
        identifier[max_modifier] = identifier[maximum] [- literal[int] ]
        identifier[max_amount] = identifier[float] ( identifier[maximum] [:- literal[int] ])
        keyword[if] identifier[modifier] . identifier[upper] ()== literal[string] keyword[and] identifier[max_modifier] . identifier[upper] ()== literal[string] :
            identifier[max_amount] = identifier[max_amount] / literal[int]
        keyword[elif] identifier[modifier] . identifier[upper] ()== literal[string] keyword[and] identifier[max_modifier] . identifier[upper] ()== literal[string] :
            identifier[max_amount] = identifier[max_amount] * literal[int]
        identifier[amount] = identifier[min] ([ identifier[amount] , identifier[max_amount] ])
    keyword[return] literal[string] . identifier[format] ( identifier[amount] = identifier[int] ( identifier[math] . identifier[floor] ( identifier[amount] )), identifier[modifier] = identifier[modifier] )
def adjust_memory(val, magnitude, direction='increase', out_modifier='', maximum=None):
    """Adjust memory based on number of cores utilized.
    """
    modifier = val[-1:]
    amount = float(val[:-1])
    if direction == 'decrease':
        new_amount = amount / float(magnitude)
        # dealing with a specifier like 1G, need to scale to Mb
        if new_amount < 1 or (out_modifier.upper().startswith('M') and modifier.upper().startswith('G')):
            if modifier.upper().startswith('G'):
                new_amount = amount * 1024 / magnitude
                modifier = 'M' + modifier[1:] # depends on [control=['if'], data=[]]
            else:
                raise ValueError('Unexpected decrease in memory: %s by %s' % (val, magnitude)) # depends on [control=['if'], data=[]]
        amount = int(new_amount) # depends on [control=['if'], data=[]]
    elif direction == 'increase' and magnitude > 1:
        # for increases with multiple cores, leave small percentage of
        # memory for system to maintain process running resource and
        # avoid OOM killers
        adjuster = 0.91
        amount = int(math.ceil(amount * (adjuster * magnitude))) # depends on [control=['if'], data=[]]
    if out_modifier.upper().startswith('G') and modifier.upper().startswith('M'):
        modifier = out_modifier
        amount = int(math.floor(amount / 1024.0)) # depends on [control=['if'], data=[]]
    if out_modifier.upper().startswith('M') and modifier.upper().startswith('G'):
        modifier = out_modifier
        amount = int(amount * 1024) # depends on [control=['if'], data=[]]
    if maximum:
        max_modifier = maximum[-1]
        max_amount = float(maximum[:-1])
        if modifier.upper() == 'G' and max_modifier.upper() == 'M':
            max_amount = max_amount / 1024.0 # depends on [control=['if'], data=[]]
        elif modifier.upper() == 'M' and max_modifier.upper() == 'G':
            max_amount = max_amount * 1024.0 # depends on [control=['if'], data=[]]
        amount = min([amount, max_amount]) # depends on [control=['if'], data=[]]
    return '{amount}{modifier}'.format(amount=int(math.floor(amount)), modifier=modifier)
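A few worked calls through the function above (it needs `math` imported in its module), with the arithmetic traced in comments:

import math   # the function body relies on math.ceil / math.floor

print(adjust_memory("3G", 4, direction="decrease"))
# '768M': 3/4 < 1, so the G amount is rescaled -- 3 * 1024 / 4 = 768

print(adjust_memory("2G", 8, direction="increase"))
# '15G': ceil(2 * 0.91 * 8) = ceil(14.56) = 15

print(adjust_memory("2G", 8, direction="increase", maximum="10G"))
# '10G': the increase to 15G is capped by the maximum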
def EXCHANGE(classical_reg1, classical_reg2): """ Produce an EXCHANGE instruction. :param classical_reg1: The first classical register, which gets modified. :param classical_reg2: The second classical register, which gets modified. :return: A ClassicalExchange instance. """ left = unpack_classical_reg(classical_reg1) right = unpack_classical_reg(classical_reg2) return ClassicalExchange(left, right)
def function[EXCHANGE, parameter[classical_reg1, classical_reg2]]: constant[ Produce an EXCHANGE instruction. :param classical_reg1: The first classical register, which gets modified. :param classical_reg2: The second classical register, which gets modified. :return: A ClassicalExchange instance. ] variable[left] assign[=] call[name[unpack_classical_reg], parameter[name[classical_reg1]]] variable[right] assign[=] call[name[unpack_classical_reg], parameter[name[classical_reg2]]] return[call[name[ClassicalExchange], parameter[name[left], name[right]]]]
keyword[def] identifier[EXCHANGE] ( identifier[classical_reg1] , identifier[classical_reg2] ): literal[string] identifier[left] = identifier[unpack_classical_reg] ( identifier[classical_reg1] ) identifier[right] = identifier[unpack_classical_reg] ( identifier[classical_reg2] ) keyword[return] identifier[ClassicalExchange] ( identifier[left] , identifier[right] )
def EXCHANGE(classical_reg1, classical_reg2): """ Produce an EXCHANGE instruction. :param classical_reg1: The first classical register, which gets modified. :param classical_reg2: The second classical register, which gets modified. :return: A ClassicalExchange instance. """ left = unpack_classical_reg(classical_reg1) right = unpack_classical_reg(classical_reg2) return ClassicalExchange(left, right)
def do_unmute(self, sender, body, args): """Unmutes the chatroom for a user""" if sender.get('MUTED'): sender['MUTED'] = False self.broadcast('%s has unmuted this chatroom' % (sender['NICK'],)) for msg in sender.get('QUEUED_MESSAGES', []): self.send_message(msg, sender) sender['QUEUED_MESSAGES'] = [] else: self.send_message('you were not muted', sender)
def function[do_unmute, parameter[self, sender, body, args]]: constant[Unmutes the chatroom for a user] if call[name[sender].get, parameter[constant[MUTED]]] begin[:] call[name[sender]][constant[MUTED]] assign[=] constant[False] call[name[self].broadcast, parameter[binary_operation[constant[%s has unmuted this chatroom] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c9915d0>]]]]] for taget[name[msg]] in starred[call[name[sender].get, parameter[constant[QUEUED_MESSAGES], list[[]]]]] begin[:] call[name[self].send_message, parameter[name[msg], name[sender]]] call[name[sender]][constant[QUEUED_MESSAGES]] assign[=] list[[]]
keyword[def] identifier[do_unmute] ( identifier[self] , identifier[sender] , identifier[body] , identifier[args] ): literal[string] keyword[if] identifier[sender] . identifier[get] ( literal[string] ): identifier[sender] [ literal[string] ]= keyword[False] identifier[self] . identifier[broadcast] ( literal[string] %( identifier[sender] [ literal[string] ],)) keyword[for] identifier[msg] keyword[in] identifier[sender] . identifier[get] ( literal[string] ,[]): identifier[self] . identifier[send_message] ( identifier[msg] , identifier[sender] ) identifier[sender] [ literal[string] ]=[] keyword[else] : identifier[self] . identifier[send_message] ( literal[string] , identifier[sender] )
def do_unmute(self, sender, body, args): """Unmutes the chatroom for a user""" if sender.get('MUTED'): sender['MUTED'] = False self.broadcast('%s has unmuted this chatroom' % (sender['NICK'],)) for msg in sender.get('QUEUED_MESSAGES', []): self.send_message(msg, sender) # depends on [control=['for'], data=['msg']] sender['QUEUED_MESSAGES'] = [] # depends on [control=['if'], data=[]] else: self.send_message('you were not muted', sender)
def extract_arguments(args, prefix=DATA_PREFIX): """Return a dict of arguments created by `add_parser_arguments`. If the key in `args` contains two underscores, a nested dictionary will be created. Only keys starting with given prefix are examined. The prefix is stripped away and does not appear in the result. """ data = {} for key, value in iteritems(args.__dict__): if key.startswith(prefix) and value is not None: parts = key[len(prefix):].split('__') # Think of `d` as a pointer into the resulting nested dictionary. # The `for` loop iterates over all parts of the key except the last # to find the proper dict into which the value should be inserted. # If the subdicts do not exist, they are created. d = data for p in parts[:-1]: assert p not in d or isinstance(d[p], dict) d = d.setdefault(p, {}) # At this point `d` points to the correct dict and value can be # inserted. d[parts[-1]] = value if value != '' else None return data
def function[extract_arguments, parameter[args, prefix]]: constant[Return a dict of arguments created by `add_parser_arguments`. If the key in `args` contains two underscores, a nested dictionary will be created. Only keys starting with given prefix are examined. The prefix is stripped away and does not appear in the result. ] variable[data] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b24e91e0>, <ast.Name object at 0x7da1b24e8580>]]] in starred[call[name[iteritems], parameter[name[args].__dict__]]] begin[:] if <ast.BoolOp object at 0x7da1b24eb430> begin[:] variable[parts] assign[=] call[call[name[key]][<ast.Slice object at 0x7da1b24ea650>].split, parameter[constant[__]]] variable[d] assign[=] name[data] for taget[name[p]] in starred[call[name[parts]][<ast.Slice object at 0x7da1b24ea1a0>]] begin[:] assert[<ast.BoolOp object at 0x7da1b24e8f70>] variable[d] assign[=] call[name[d].setdefault, parameter[name[p], dictionary[[], []]]] call[name[d]][call[name[parts]][<ast.UnaryOp object at 0x7da1b253b9a0>]] assign[=] <ast.IfExp object at 0x7da1b25387f0> return[name[data]]
keyword[def] identifier[extract_arguments] ( identifier[args] , identifier[prefix] = identifier[DATA_PREFIX] ): literal[string] identifier[data] ={} keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[args] . identifier[__dict__] ): keyword[if] identifier[key] . identifier[startswith] ( identifier[prefix] ) keyword[and] identifier[value] keyword[is] keyword[not] keyword[None] : identifier[parts] = identifier[key] [ identifier[len] ( identifier[prefix] ):]. identifier[split] ( literal[string] ) identifier[d] = identifier[data] keyword[for] identifier[p] keyword[in] identifier[parts] [:- literal[int] ]: keyword[assert] identifier[p] keyword[not] keyword[in] identifier[d] keyword[or] identifier[isinstance] ( identifier[d] [ identifier[p] ], identifier[dict] ) identifier[d] = identifier[d] . identifier[setdefault] ( identifier[p] ,{}) identifier[d] [ identifier[parts] [- literal[int] ]]= identifier[value] keyword[if] identifier[value] != literal[string] keyword[else] keyword[None] keyword[return] identifier[data]
def extract_arguments(args, prefix=DATA_PREFIX): """Return a dict of arguments created by `add_parser_arguments`. If the key in `args` contains two underscores, a nested dictionary will be created. Only keys starting with given prefix are examined. The prefix is stripped away and does not appear in the result. """ data = {} for (key, value) in iteritems(args.__dict__): if key.startswith(prefix) and value is not None: parts = key[len(prefix):].split('__') # Think of `d` as a pointer into the resulting nested dictionary. # The `for` loop iterates over all parts of the key except the last # to find the proper dict into which the value should be inserted. # If the subdicts do not exist, they are created. d = data for p in parts[:-1]: assert p not in d or isinstance(d[p], dict) d = d.setdefault(p, {}) # depends on [control=['for'], data=['p']] # At this point `d` points to the correct dict and value can be # inserted. d[parts[-1]] = value if value != '' else None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return data
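A standalone trace of the double-underscore nesting above; argparse is bypassed with a namespace object, the `data_` prefix is a stand-in for DATA_PREFIX, and the function (plus its `iteritems` helper) is assumed in scope:

from types import SimpleNamespace

args = SimpleNamespace(data_name="box",
                       data_size__width=3,
                       data_size__height=4,
                       data_note="",        # empty strings become None
                       unrelated=True)      # no prefix, so it is ignored
print(extract_arguments(args, prefix="data_"))
# {'name': 'box', 'size': {'width': 3, 'height': 4}, 'note': None}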
def _update_cred(self): """ Updates the reduced costs with the values from the dual solution """ ui = self.c[self._inds, self._x] - self._v[self._x] self.cred = self.c - ui[:, None] - self._v[None, :]
def function[_update_cred, parameter[self]]: constant[ Updates the reduced costs with the values from the dual solution ] variable[ui] assign[=] binary_operation[call[name[self].c][tuple[[<ast.Attribute object at 0x7da18eb57910>, <ast.Attribute object at 0x7da18eb573d0>]]] - call[name[self]._v][name[self]._x]] name[self].cred assign[=] binary_operation[binary_operation[name[self].c - call[name[ui]][tuple[[<ast.Slice object at 0x7da204567580>, <ast.Constant object at 0x7da204564e80>]]]] - call[name[self]._v][tuple[[<ast.Constant object at 0x7da204564070>, <ast.Slice object at 0x7da204565060>]]]]
keyword[def] identifier[_update_cred] ( identifier[self] ): literal[string] identifier[ui] = identifier[self] . identifier[c] [ identifier[self] . identifier[_inds] , identifier[self] . identifier[_x] ]- identifier[self] . identifier[_v] [ identifier[self] . identifier[_x] ] identifier[self] . identifier[cred] = identifier[self] . identifier[c] - identifier[ui] [:, keyword[None] ]- identifier[self] . identifier[_v] [ keyword[None] ,:]
def _update_cred(self): """ Updates the reduced costs with the values from the dual solution """ ui = self.c[self._inds, self._x] - self._v[self._x] self.cred = self.c - ui[:, None] - self._v[None, :]
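The two statements above are the dual/reduced-cost update of a linear assignment solver; the same broadcasting spelled out on a tiny cost matrix with numpy:

import numpy as np

c = np.array([[4.0, 1.0, 3.0],
              [2.0, 0.0, 5.0],
              [3.0, 2.0, 2.0]])
x = np.array([1, 0, 2])           # column assigned to each row
inds = np.arange(3)
v = np.array([1.0, 0.5, 1.5])     # column duals

u = c[inds, x] - v[x]             # row duals implied by the current assignment
cred = c - u[:, None] - v[None, :]
print(cred[inds, x])              # [0. 0. 0.] -- assigned cells zero out by construction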
def infer_domain(terms): """ Infer the domain from a collection of terms. The algorithm for inferring domains is as follows: - If all input terms have a domain of GENERIC, the result is GENERIC. - If there is exactly one non-generic domain in the input terms, the result is that domain. - Otherwise, an AmbiguousDomain error is raised. Parameters ---------- terms : iterable[zipline.pipeline.term.Term] Returns ------- inferred : Domain or NotSpecified Raises ------ AmbiguousDomain Raised if more than one concrete domain is present in the input terms. """ domains = {t.domain for t in terms} num_domains = len(domains) if num_domains == 0: return GENERIC elif num_domains == 1: return domains.pop() elif num_domains == 2 and GENERIC in domains: domains.remove(GENERIC) return domains.pop() else: # Remove GENERIC if it's present before raising. Showing it to the user # is confusing because it doesn't contribute to the error. domains.discard(GENERIC) raise AmbiguousDomain(sorted(domains, key=repr))
def function[infer_domain, parameter[terms]]: constant[ Infer the domain from a collection of terms. The algorithm for inferring domains is as follows: - If all input terms have a domain of GENERIC, the result is GENERIC. - If there is exactly one non-generic domain in the input terms, the result is that domain. - Otherwise, an AmbiguousDomain error is raised. Parameters ---------- terms : iterable[zipline.pipeline.term.Term] Returns ------- inferred : Domain or NotSpecified Raises ------ AmbiguousDomain Raised if more than one concrete domain is present in the input terms. ] variable[domains] assign[=] <ast.SetComp object at 0x7da1b2046680> variable[num_domains] assign[=] call[name[len], parameter[name[domains]]] if compare[name[num_domains] equal[==] constant[0]] begin[:] return[name[GENERIC]]
keyword[def] identifier[infer_domain] ( identifier[terms] ): literal[string] identifier[domains] ={ identifier[t] . identifier[domain] keyword[for] identifier[t] keyword[in] identifier[terms] } identifier[num_domains] = identifier[len] ( identifier[domains] ) keyword[if] identifier[num_domains] == literal[int] : keyword[return] identifier[GENERIC] keyword[elif] identifier[num_domains] == literal[int] : keyword[return] identifier[domains] . identifier[pop] () keyword[elif] identifier[num_domains] == literal[int] keyword[and] identifier[GENERIC] keyword[in] identifier[domains] : identifier[domains] . identifier[remove] ( identifier[GENERIC] ) keyword[return] identifier[domains] . identifier[pop] () keyword[else] : identifier[domains] . identifier[discard] ( identifier[GENERIC] ) keyword[raise] identifier[AmbiguousDomain] ( identifier[sorted] ( identifier[domains] , identifier[key] = identifier[repr] ))
def infer_domain(terms): """ Infer the domain from a collection of terms. The algorithm for inferring domains is as follows: - If all input terms have a domain of GENERIC, the result is GENERIC. - If there is exactly one non-generic domain in the input terms, the result is that domain. - Otherwise, an AmbiguousDomain error is raised. Parameters ---------- terms : iterable[zipline.pipeline.term.Term] Returns ------- inferred : Domain or NotSpecified Raises ------ AmbiguousDomain Raised if more than one concrete domain is present in the input terms. """ domains = {t.domain for t in terms} num_domains = len(domains) if num_domains == 0: return GENERIC # depends on [control=['if'], data=[]] elif num_domains == 1: return domains.pop() # depends on [control=['if'], data=[]] elif num_domains == 2 and GENERIC in domains: domains.remove(GENERIC) return domains.pop() # depends on [control=['if'], data=[]] else: # Remove GENERIC if it's present before raising. Showing it to the user # is confusing because it doesn't contribute to the error. domains.discard(GENERIC) raise AmbiguousDomain(sorted(domains, key=repr))
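The set logic above can be exercised without zipline by pasting it next to cheap stand-ins; here GENERIC is a plain string sentinel and AmbiguousDomain a bare exception, both assumptions:

from collections import namedtuple

Term = namedtuple("Term", "domain")
GENERIC = "GENERIC"                      # stand-in for zipline's sentinel

class AmbiguousDomain(Exception):
    pass

print(infer_domain([Term(GENERIC), Term(GENERIC)]))   # GENERIC
print(infer_domain([Term("US"), Term(GENERIC)]))      # US
# infer_domain([Term("US"), Term("JP")])              # raises AmbiguousDomain(['JP', 'US'])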
def _prepare_uri(self, service_name, **parameters):
    """Prepare the URI for a request

    :param service_name: The target service
    :type service_name: str
    :param parameters: query parameters
    :return: The uri of the request
    """
    query_parameters = []
    for key, value in parameters.items():
        if isinstance(value, (list, tuple)):
            value = ",".join([str(member) for member in value])
        if isinstance(value, bool):
            value = "true" if value else "false"
        query_parameters.append("{}={}".format(key, value))
    if query_parameters:
        uri = "{}{}?{}".format(self.base_url, service_name,
                               "&".join(query_parameters))
    else:
        uri = "{}{}".format(self.base_url, service_name)
    return uri
def function[_prepare_uri, parameter[self, service_name]]:
    constant[Prepare the URI for a request

    :param service_name: The target service
    :type service_name: str
    :param parameters: query parameters
    :return: The uri of the request
    ]
    variable[query_parameters] assign[=] list[[]]
    for taget[tuple[[<ast.Name object at 0x7da1b04f7e20>, <ast.Name object at 0x7da1b04f4be0>]]] in starred[call[name[parameters].items, parameter[]]] begin[:]
        if call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da1b04f6f20>, <ast.Name object at 0x7da1b039b940>]]]] begin[:]
            variable[value] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b039a830>]]
        if call[name[isinstance], parameter[name[value], name[bool]]] begin[:]
            variable[value] assign[=] <ast.IfExp object at 0x7da1b03991b0>
        call[name[query_parameters].append, parameter[call[constant[{}={}].format, parameter[name[key], name[value]]]]]
    if name[query_parameters] begin[:]
        variable[uri] assign[=] call[constant[{}{}?{}].format, parameter[name[self].base_url, name[service_name], call[constant[&].join, parameter[name[query_parameters]]]]]
    return[name[uri]]
keyword[def] identifier[_prepare_uri] ( identifier[self] , identifier[service_name] ,** identifier[parameters] ): literal[string] identifier[query_parameters] =[] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[parameters] . identifier[items] (): keyword[if] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[tuple] )): identifier[value] = literal[string] . identifier[join] ([ identifier[str] ( identifier[member] ) keyword[for] identifier[member] keyword[in] identifier[value] ]) keyword[if] identifier[isinstance] ( identifier[value] , identifier[bool] ): identifier[value] = literal[string] keyword[if] identifier[value] keyword[else] literal[string] identifier[query_parameters] . identifier[append] ( literal[string] . identifier[format] ( identifier[key] , identifier[value] )) keyword[if] identifier[query_parameters] : identifier[uri] = literal[string] . identifier[format] ( identifier[self] . identifier[base_url] , identifier[service_name] , literal[string] . identifier[join] ( identifier[query_parameters] )) keyword[else] : identifier[uri] = literal[string] . identifier[format] ( identifier[self] . identifier[base_url] , identifier[service_name] ) keyword[return] identifier[uri]
def _prepare_uri(self, service_name, **parameters):
    """Prepare the URI for a request

    :param service_name: The target service
    :type service_name: str
    :param parameters: query parameters
    :return: The uri of the request
    """
    query_parameters = []
    for (key, value) in parameters.items():
        if isinstance(value, (list, tuple)):
            value = ','.join([str(member) for member in value]) # depends on [control=['if'], data=[]]
        if isinstance(value, bool):
            value = 'true' if value else 'false' # depends on [control=['if'], data=[]]
        query_parameters.append('{}={}'.format(key, value)) # depends on [control=['for'], data=[]]
    if query_parameters:
        uri = '{}{}?{}'.format(self.base_url, service_name, '&'.join(query_parameters)) # depends on [control=['if'], data=[]]
    else:
        uri = '{}{}'.format(self.base_url, service_name)
    return uri
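A quick trace of the query-string assembly above; `self` is faked with a namespace that carries only base_url, and the host is made up (parameter order in the output follows dict order):

from types import SimpleNamespace

fake = SimpleNamespace(base_url="https://api.example.com/")
print(_prepare_uri(fake, "shipments", ids=[7, 8, 9], active=True, page=2))
# https://api.example.com/shipments?ids=7,8,9&active=true&page=2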
def validate(
    message,
    get_certificate=lambda url: urlopen(url).read(),
    certificate_url_regex=DEFAULT_CERTIFICATE_URL_REGEX,
    max_age=DEFAULT_MAX_AGE
):
    """
    Validate a decoded SNS message.

    Parameters:

    message:
        Decoded SNS message.

    get_certificate:
        Function that receives a URL, and returns the certificate from that
        URL as a string. The default doesn't implement caching.

    certificate_url_regex:
        Regex that validates the signing certificate URL. Default value
        checks it's hosted on an AWS-controlled domain, in the format
        "https://sns.<data-center>.amazonaws.com/"

    max_age:
        Maximum age of an SNS message before it fails validation, expressed
        as a `datetime.timedelta`. Defaults to one hour, the max. lifetime
        of an SNS message.
    """
    # Check the signing certificate URL.
    SigningCertURLValidator(certificate_url_regex).validate(message)

    # Check the message age.
    if not isinstance(max_age, datetime.timedelta):
        raise ValueError("max_age must be None or a timedelta object")
    MessageAgeValidator(max_age).validate(message)

    # Passed the basic checks, let's download the cert.
    # We've validated the URL, so aren't worried about a malicious server.
    certificate = get_certificate(message["SigningCertURL"])

    # Check the cryptographic signature.
    SignatureValidator(certificate).validate(message)
def function[validate, parameter[message, get_certificate, certificate_url_regex, max_age]]: constant[ Validate a decoded SNS message. Parameters: message: Decoded SNS message. get_certificate: Function that receives a URL, and returns the certificate from that URL as a string. The default doesn't implement caching. certificate_url_regex: Regex that validates the signing certificate URL. Default value checks it's hosted on an AWS-controlled domain, in the format "https://sns.<data-center>.amazonaws.com/" max_age: Maximum age of an SNS message before it fails validation, expressed as a `datetime.timedelta`. Defaults to one hour, the max. lifetime of an SNS message. ] call[call[name[SigningCertURLValidator], parameter[name[certificate_url_regex]]].validate, parameter[name[message]]] if <ast.UnaryOp object at 0x7da1b03badd0> begin[:] <ast.Raise object at 0x7da1b03b9cc0> call[call[name[MessageAgeValidator], parameter[name[max_age]]].validate, parameter[name[message]]] variable[certificate] assign[=] call[name[get_certificate], parameter[call[name[message]][constant[SigningCertURL]]]] call[call[name[SignatureValidator], parameter[name[certificate]]].validate, parameter[name[message]]]
keyword[def] identifier[validate] ( identifier[message] , identifier[get_certificate] = keyword[lambda] identifier[url] : identifier[urlopen] ( identifier[url] ). identifier[read] (), identifier[certificate_url_regex] = identifier[DEFAULT_CERTIFICATE_URL_REGEX] , identifier[max_age] = identifier[DEFAULT_MAX_AGE] ): literal[string] identifier[SigningCertURLValidator] ( identifier[certificate_url_regex] ). identifier[validate] ( identifier[message] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[max_age] , identifier[datetime] . identifier[timedelta] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[MessageAgeValidator] ( identifier[max_age] ). identifier[validate] ( identifier[message] ) identifier[certificate] = identifier[get_certificate] ( identifier[message] [ literal[string] ]) identifier[SignatureValidator] ( identifier[certificate] ). identifier[validate] ( identifier[message] )
def validate(message, get_certificate=lambda url: urlopen(url).read(), certificate_url_regex=DEFAULT_CERTIFICATE_URL_REGEX, max_age=DEFAULT_MAX_AGE):
    """
    Validate a decoded SNS message.

    Parameters:
        message: Decoded SNS message.
        get_certificate: Function that receives a URL, and returns the certificate from that URL as a string. The default doesn't implement caching.
        certificate_url_regex: Regex that validates the signing certificate URL. Default value checks it's hosted on an AWS-controlled domain, in the format "https://sns.<data-center>.amazonaws.com/"
        max_age: Maximum age of an SNS message before it fails validation, expressed as a `datetime.timedelta`. Defaults to one hour, the max. lifetime of an SNS message.
    """
    # Check the signing certificate URL.
    SigningCertURLValidator(certificate_url_regex).validate(message)
    # Check the message age.
    if not isinstance(max_age, datetime.timedelta):
        raise ValueError('max_age must be None or a timedelta object') # depends on [control=['if'], data=[]]
    MessageAgeValidator(max_age).validate(message)
    # Passed the basic checks, let's download the cert.
    # We've validated the URL, so aren't worried about a malicious server.
    certificate = get_certificate(message['SigningCertURL'])
    # Check the cryptographic signature.
    SignatureValidator(certificate).validate(message)
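A hedged usage sketch for `validate` above: the docstring notes the default fetcher doesn't cache, so an lru_cache wrapper is the obvious companion. The message payload is assumed to be the JSON-decoded SNS body, and the call stays commented out because a fabricated message cannot pass the signature check:

import functools
from urllib.request import urlopen

@functools.lru_cache(maxsize=8)
def cached_get_certificate(url):
    # Memoized certificate fetch: one download per signing URL.
    return urlopen(url).read()

# validate(sns_message, get_certificate=cached_get_certificate)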
def calc_scores_for_node(G, node, depth_limit=22, number_of_recommendations=None, impact_mode=10):
    """Calculate the score of multiple records."""
    n, w, dep, _ = dfs_edges(G, node, depth_limit, "Record")
    count_total_ways = len(n)
    # print "Number of paths {}".format(len(n))
    if impact_mode == 0:
        impact_div = 12
    elif impact_mode == 1:
        impact_div = 1000
    elif impact_mode == 2:
        impact_div = 100
    elif impact_mode == 10:
        impact_div = count_total_ways
    elif impact_mode == 11:
        impact_div = count_total_ways/2

    d_ = {'Nodes': n, 'Scores': w, 'Depth': dep}
    d = pd.DataFrame(data=d_)
    del n, w, dep, d_
    n, w, dep = None, None, None
    gc.collect()

    nodes = array('I')
    weight_high = array('f')
    weight_new = array('f')
    ways = array('I')

    nodes_with_weight = d.groupby('Nodes')
    del d
    gc.collect()
    # print "Number nodes {}".format(len(nodes_with_weight))
    for node, end_nodes in nodes_with_weight:
        nodes.append(node)
        new_score, highest_score, number_of_paths = \
            calc_weight_of_multiple_paths(end_nodes, impact_div)
        weight_high.append(highest_score)
        weight_new.append(new_score)
        ways.append(number_of_paths)

    new_weights_d = {'Node': nodes, 'Score_Highest': weight_high,
                     'Score': weight_new, 'Paths': ways}
    new_weights = pd.DataFrame(data=new_weights_d)
    del new_weights_d, nodes, weight_high, weight_new, ways
    gc.collect()

    # Pandas sort by score
    new_weights = new_weights.sort_values(by='Score', ascending=False)
    new_weights = new_weights[:number_of_recommendations]
    return new_weights
def function[calc_scores_for_node, parameter[G, node, depth_limit, number_of_recommendations, impact_mode]]: constant[Calculate the score of multiple records.] <ast.Tuple object at 0x7da1b1629ed0> assign[=] call[name[dfs_edges], parameter[name[G], name[node], name[depth_limit], constant[Record]]] variable[count_total_ways] assign[=] call[name[len], parameter[name[n]]] if compare[name[impact_mode] equal[==] constant[0]] begin[:] variable[impact_div] assign[=] constant[12] variable[d_] assign[=] dictionary[[<ast.Constant object at 0x7da1b162b250>, <ast.Constant object at 0x7da1b16283a0>, <ast.Constant object at 0x7da1b162b970>], [<ast.Name object at 0x7da1b1629cf0>, <ast.Name object at 0x7da1b162bbb0>, <ast.Name object at 0x7da1b162a770>]] variable[d] assign[=] call[name[pd].DataFrame, parameter[]] <ast.Delete object at 0x7da1b168e200> <ast.Tuple object at 0x7da1b168fbb0> assign[=] tuple[[<ast.Constant object at 0x7da1b168f010>, <ast.Constant object at 0x7da1b168fd00>, <ast.Constant object at 0x7da1b168e860>]] call[name[gc].collect, parameter[]] variable[nodes] assign[=] call[name[array], parameter[constant[I]]] variable[weight_high] assign[=] call[name[array], parameter[constant[f]]] variable[weight_new] assign[=] call[name[array], parameter[constant[f]]] variable[ways] assign[=] call[name[array], parameter[constant[I]]] variable[nodes_with_weight] assign[=] call[name[d].groupby, parameter[constant[Nodes]]] <ast.Delete object at 0x7da1b168cc40> call[name[gc].collect, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b14a9990>, <ast.Name object at 0x7da1b14abd00>]]] in starred[name[nodes_with_weight]] begin[:] call[name[nodes].append, parameter[name[node]]] <ast.Tuple object at 0x7da1b14a8b80> assign[=] call[name[calc_weight_of_multiple_paths], parameter[name[end_nodes], name[impact_div]]] call[name[weight_high].append, parameter[name[highest_score]]] call[name[weight_new].append, parameter[name[new_score]]] call[name[ways].append, parameter[name[number_of_paths]]] variable[new_weights_d] assign[=] dictionary[[<ast.Constant object at 0x7da1b14a8e50>, <ast.Constant object at 0x7da1b14a9a80>, <ast.Constant object at 0x7da1b14a9e10>, <ast.Constant object at 0x7da1b14a8f40>], [<ast.Name object at 0x7da1b14abaf0>, <ast.Name object at 0x7da1b14a93c0>, <ast.Name object at 0x7da1b14aa530>, <ast.Name object at 0x7da1b14ab670>]] variable[new_weights] assign[=] call[name[pd].DataFrame, parameter[]] <ast.Delete object at 0x7da1b14ab130> call[name[gc].collect, parameter[]] variable[new_weights] assign[=] call[name[new_weights].sort_values, parameter[]] variable[new_weights] assign[=] call[name[new_weights]][<ast.Slice object at 0x7da1b14aa680>] return[name[new_weights]]
keyword[def] identifier[calc_scores_for_node] ( identifier[G] , identifier[node] , identifier[depth_limit] = literal[int] , identifier[number_of_recommendations] = keyword[None] , identifier[impact_mode] = literal[int] ): literal[string] identifier[n] , identifier[w] , identifier[dep] , identifier[_] = identifier[dfs_edges] ( identifier[G] , identifier[node] , identifier[depth_limit] , literal[string] ) identifier[count_total_ways] = identifier[len] ( identifier[n] ) keyword[if] identifier[impact_mode] == literal[int] : identifier[impact_div] = literal[int] keyword[elif] identifier[impact_mode] == literal[int] : identifier[impact_div] = literal[int] keyword[elif] identifier[impact_mode] == literal[int] : identifier[impact_div] = literal[int] keyword[elif] identifier[impact_mode] == literal[int] : identifier[impact_div] = identifier[count_total_ways] keyword[elif] identifier[impact_mode] == literal[int] : identifier[impact_div] = identifier[count_total_ways] / literal[int] identifier[d_] ={ literal[string] : identifier[n] , literal[string] : identifier[w] , literal[string] : identifier[dep] } identifier[d] = identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[d_] ) keyword[del] identifier[n] , identifier[w] , identifier[dep] , identifier[d_] identifier[n] , identifier[w] , identifier[dep] = keyword[None] , keyword[None] , keyword[None] identifier[gc] . identifier[collect] () identifier[nodes] = identifier[array] ( literal[string] ) identifier[weight_high] = identifier[array] ( literal[string] ) identifier[weight_new] = identifier[array] ( literal[string] ) identifier[ways] = identifier[array] ( literal[string] ) identifier[nodes_with_weight] = identifier[d] . identifier[groupby] ( literal[string] ) keyword[del] identifier[d] identifier[gc] . identifier[collect] () keyword[for] identifier[node] , identifier[end_nodes] keyword[in] identifier[nodes_with_weight] : identifier[nodes] . identifier[append] ( identifier[node] ) identifier[new_score] , identifier[highest_score] , identifier[number_of_paths] = identifier[calc_weight_of_multiple_paths] ( identifier[end_nodes] , identifier[impact_div] ) identifier[weight_high] . identifier[append] ( identifier[highest_score] ) identifier[weight_new] . identifier[append] ( identifier[new_score] ) identifier[ways] . identifier[append] ( identifier[number_of_paths] ) identifier[new_weights_d] ={ literal[string] : identifier[nodes] , literal[string] : identifier[weight_high] , literal[string] : identifier[weight_new] , literal[string] : identifier[ways] } identifier[new_weights] = identifier[pd] . identifier[DataFrame] ( identifier[data] = identifier[new_weights_d] ) keyword[del] identifier[new_weights_d] , identifier[nodes] , identifier[weight_high] , identifier[weight_new] , identifier[ways] identifier[gc] . identifier[collect] () identifier[new_weights] = identifier[new_weights] . identifier[sort_values] ( identifier[by] = literal[string] , identifier[ascending] = keyword[False] ) identifier[new_weights] = identifier[new_weights] [: identifier[number_of_recommendations] ] keyword[return] identifier[new_weights]
def calc_scores_for_node(G, node, depth_limit=22, number_of_recommendations=None, impact_mode=10): """Calculate the score of multiple records.""" (n, w, dep, _) = dfs_edges(G, node, depth_limit, 'Record') count_total_ways = len(n) # print "Number of paths {}".format(len(n)) if impact_mode == 0: impact_div = 12 # depends on [control=['if'], data=[]] elif impact_mode == 1: impact_div = 1000 # depends on [control=['if'], data=[]] elif impact_mode == 2: impact_div = 100 # depends on [control=['if'], data=[]] elif impact_mode == 10: impact_div = count_total_ways # depends on [control=['if'], data=[]] elif impact_mode == 11: impact_div = count_total_ways / 2 # depends on [control=['if'], data=[]] d_ = {'Nodes': n, 'Scores': w, 'Depth': dep} d = pd.DataFrame(data=d_) del n, w, dep, d_ (n, w, dep) = (None, None, None) gc.collect() nodes = array('I') weight_high = array('f') weight_new = array('f') ways = array('I') nodes_with_weight = d.groupby('Nodes') del d gc.collect() # print "Number nodes {}".format(len(nodes_with_weight)) for (node, end_nodes) in nodes_with_weight: nodes.append(node) (new_score, highest_score, number_of_paths) = calc_weight_of_multiple_paths(end_nodes, impact_div) weight_high.append(highest_score) weight_new.append(new_score) ways.append(number_of_paths) # depends on [control=['for'], data=[]] new_weights_d = {'Node': nodes, 'Score_Highest': weight_high, 'Score': weight_new, 'Paths': ways} new_weights = pd.DataFrame(data=new_weights_d) del new_weights_d, nodes, weight_high, weight_new, ways gc.collect() # Numpy sort by score new_weights = new_weights.sort_values(by='Score', ascending=False) new_weights = new_weights[:number_of_recommendations] return new_weights
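The heart of the function above is the groupby pass that folds many path scores into one score per end node. A self-contained sketch of that pattern follows, with made-up path data standing in for the output of dfs_edges and a trivial stand-in for calc_weight_of_multiple_paths (which is not shown in this corpus); note also that an impact_mode outside {0, 1, 2, 10, 11} would leave impact_div unset in the original.

import pandas as pd

# Toy stand-in data: three paths reach node 7, one reaches node 9.
d = pd.DataFrame({"Nodes": [7, 7, 7, 9],
                  "Scores": [0.9, 0.4, 0.2, 0.5],
                  "Depth": [2, 3, 5, 1]})

def combine_paths(end_nodes, impact_div):
    # Hypothetical stand-in: dampen the sum of the non-best paths by
    # impact_div and add it to the best single path.
    scores = end_nodes["Scores"].sort_values(ascending=False)
    highest = scores.iloc[0]
    new = highest + scores.iloc[1:].sum() / impact_div
    return new, highest, len(scores)

rows = []
for node, end_nodes in d.groupby("Nodes"):
    new, highest, paths = combine_paths(end_nodes, impact_div=len(d))
    rows.append({"Node": node, "Score": new,
                 "Score_Highest": highest, "Paths": paths})

print(pd.DataFrame(rows).sort_values(by="Score", ascending=False))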
def is_progressive(image): """ Check to see if an image is progressive. """ if not isinstance(image, Image.Image): # Can only check PIL images for progressive encoding. return False return ('progressive' in image.info) or ('progression' in image.info)
def function[is_progressive, parameter[image]]: constant[ Check to see if an image is progressive. ] if <ast.UnaryOp object at 0x7da18c4cd4b0> begin[:] return[constant[False]] return[<ast.BoolOp object at 0x7da20c6ab4f0>]
keyword[def] identifier[is_progressive] ( identifier[image] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[image] , identifier[Image] . identifier[Image] ): keyword[return] keyword[False] keyword[return] ( literal[string] keyword[in] identifier[image] . identifier[info] ) keyword[or] ( literal[string] keyword[in] identifier[image] . identifier[info] )
def is_progressive(image): """ Check to see if an image is progressive. """ if not isinstance(image, Image.Image): # Can only check PIL images for progressive encoding. return False # depends on [control=['if'], data=[]] return 'progressive' in image.info or 'progression' in image.info
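A usage sketch, assuming a local JPEG path: PIL stores encoder hints such as the progressive flag in Image.info, which is exactly what the check above reads.

from PIL import Image

with Image.open("photo.jpg") as img:   # hypothetical file
    if is_progressive(img):
        print("progressive JPEG")
    else:
        print("baseline encoding (or not a PIL image)")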
def _binary_temporary_file(self, delete): """:return: a binary temporary file where the content is dumped to.""" file = NamedTemporaryFile("wb+", delete=delete) self._binary_file(file) return file
def function[_binary_temporary_file, parameter[self, delete]]: constant[:return: a binary temporary file where the content is dumped to.] variable[file] assign[=] call[name[NamedTemporaryFile], parameter[constant[wb+]]] call[name[self]._binary_file, parameter[name[file]]] return[name[file]]
keyword[def] identifier[_binary_temporary_file] ( identifier[self] , identifier[delete] ): literal[string] identifier[file] = identifier[NamedTemporaryFile] ( literal[string] , identifier[delete] = identifier[delete] ) identifier[self] . identifier[_binary_file] ( identifier[file] ) keyword[return] identifier[file]
def _binary_temporary_file(self, delete): """:return: a binary temporary file where the content is dumped to.""" file = NamedTemporaryFile('wb+', delete=delete) self._binary_file(file) return file
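The same pattern outside the class, as a sketch: NamedTemporaryFile("wb+", delete=...) gives a named handle that can be read back after writing, and with delete=False the file outlives the handle, which is what callers needing the path rely on.

from tempfile import NamedTemporaryFile

file = NamedTemporaryFile("wb+", delete=False)
file.write(b"dumped content")   # stand-in for self._binary_file(file)
file.flush()
file.seek(0)                    # the "+" mode lets us read it back
print(file.name, file.read())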
def split_storage(path, default='osfstorage'): """Extract storage name from file path. If a path begins with a known storage provider the name is removed from the path. Otherwise the `default` storage provider is returned and the path is not modified. """ path = norm_remote_path(path) for provider in KNOWN_PROVIDERS: if path.startswith(provider + '/'): if six.PY3: return path.split('/', maxsplit=1) else: return path.split('/', 1) return (default, path)
def function[split_storage, parameter[path, default]]: constant[Extract storage name from file path. If a path begins with a known storage provider the name is removed from the path. Otherwise the `default` storage provider is returned and the path is not modified. ] variable[path] assign[=] call[name[norm_remote_path], parameter[name[path]]] for taget[name[provider]] in starred[name[KNOWN_PROVIDERS]] begin[:] if call[name[path].startswith, parameter[binary_operation[name[provider] + constant[/]]]] begin[:] if name[six].PY3 begin[:] return[call[name[path].split, parameter[constant[/]]]] return[tuple[[<ast.Name object at 0x7da20c993760>, <ast.Name object at 0x7da20c992c20>]]]
keyword[def] identifier[split_storage] ( identifier[path] , identifier[default] = literal[string] ): literal[string] identifier[path] = identifier[norm_remote_path] ( identifier[path] ) keyword[for] identifier[provider] keyword[in] identifier[KNOWN_PROVIDERS] : keyword[if] identifier[path] . identifier[startswith] ( identifier[provider] + literal[string] ): keyword[if] identifier[six] . identifier[PY3] : keyword[return] identifier[path] . identifier[split] ( literal[string] , identifier[maxsplit] = literal[int] ) keyword[else] : keyword[return] identifier[path] . identifier[split] ( literal[string] , literal[int] ) keyword[return] ( identifier[default] , identifier[path] )
def split_storage(path, default='osfstorage'): """Extract storage name from file path. If a path begins with a known storage provider the name is removed from the path. Otherwise the `default` storage provider is returned and the path is not modified. """ path = norm_remote_path(path) for provider in KNOWN_PROVIDERS: if path.startswith(provider + '/'): if six.PY3: return path.split('/', maxsplit=1) # depends on [control=['if'], data=[]] else: return path.split('/', 1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['provider']] return (default, path)
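How the split behaves, assuming the function and its imports above are in scope, with assumed values for the module globals (KNOWN_PROVIDERS and an identity-like norm_remote_path) so the sketch is self-contained. Note the asymmetry: a matched provider returns a two-element list from str.split, while the fallback returns a tuple; callers that unpack two values see no difference.

KNOWN_PROVIDERS = ["osfstorage", "github", "dropbox"]   # assumed value

def norm_remote_path(path):
    # Stand-in: strip a leading slash.
    return path.lstrip("/")

print(split_storage("osfstorage/data/results.csv"))
# -> ['osfstorage', 'data/results.csv']  (provider stripped from the path)
print(split_storage("data/results.csv"))
# -> ('osfstorage', 'data/results.csv')  (falls through to the default)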
def to_download():
    """
    Build the interval of URLs to download. We always get the first file of
    the next day. Ex: 2013-01-01 => 2013-01-02.0000
    """
    first_day = parse(interval_first)
    last_day = parse(interval_last)
    format_change = parse('2010-06-14')
    one_day = datetime.timedelta(1)
    cur_day = first_day
    url_list = []
    while cur_day < last_day:
        fname = filename.format(day=cur_day.strftime("%Y%m%d"))
        if cur_day > format_change:
            # New format: the URL points at the first file of the next day.
            # Use a local date so cur_day is only advanced once per loop
            # iteration; advancing it here as well skipped every other day.
            url_day = cur_day + one_day
            url = base_url.format(year_month=url_day.strftime("%Y.%m"),
                                  file_day=url_day.strftime("%Y%m%d"))
        else:
            url = base_url_old.format(year_month=cur_day.strftime("%Y.%m"),
                                      file_day=cur_day.strftime("%Y%m%d"))
        cur_day += one_day
        url_list.append((fname, url))
    return sorted(url_list, key=lambda tup: tup[0], reverse=True)
def function[to_download, parameter[]]: constant[ Build interval of urls to download. We always get the first file of the next day. Ex: 2013-01-01 => 2013-01-02.0000 ] variable[first_day] assign[=] call[name[parse], parameter[name[interval_first]]] variable[last_day] assign[=] call[name[parse], parameter[name[interval_last]]] variable[format_change] assign[=] call[name[parse], parameter[constant[2010-06-14]]] variable[one_day] assign[=] call[name[datetime].timedelta, parameter[constant[1]]] variable[cur_day] assign[=] name[first_day] variable[url_list] assign[=] list[[]] while compare[name[cur_day] less[<] name[last_day]] begin[:] variable[fname] assign[=] call[name[filename].format, parameter[]] if compare[name[cur_day] greater[>] name[format_change]] begin[:] <ast.AugAssign object at 0x7da1b008a290> variable[url] assign[=] call[name[base_url].format, parameter[]] call[name[url_list].append, parameter[tuple[[<ast.Name object at 0x7da20c990ac0>, <ast.Name object at 0x7da20c9936d0>]]]] return[call[name[sorted], parameter[name[url_list]]]]
keyword[def] identifier[to_download] (): literal[string] identifier[first_day] = identifier[parse] ( identifier[interval_first] ) identifier[last_day] = identifier[parse] ( identifier[interval_last] ) identifier[format_change] = identifier[parse] ( literal[string] ) identifier[one_day] = identifier[datetime] . identifier[timedelta] ( literal[int] ) identifier[cur_day] = identifier[first_day] identifier[url_list] =[] keyword[while] identifier[cur_day] < identifier[last_day] : identifier[fname] = identifier[filename] . identifier[format] ( identifier[day] = identifier[cur_day] . identifier[strftime] ( literal[string] )) keyword[if] identifier[cur_day] > identifier[format_change] : identifier[cur_day] += identifier[one_day] identifier[url] = identifier[base_url] . identifier[format] ( identifier[year_month] = identifier[cur_day] . identifier[strftime] ( literal[string] ), identifier[file_day] = identifier[cur_day] . identifier[strftime] ( literal[string] )) keyword[else] : identifier[url] = identifier[base_url_old] . identifier[format] ( identifier[year_month] = identifier[cur_day] . identifier[strftime] ( literal[string] ), identifier[file_day] = identifier[cur_day] . identifier[strftime] ( literal[string] )) identifier[cur_day] += identifier[one_day] identifier[url_list] . identifier[append] (( identifier[fname] , identifier[url] )) keyword[return] identifier[sorted] ( identifier[url_list] , identifier[key] = keyword[lambda] identifier[tup] : identifier[tup] [ literal[int] ], identifier[reverse] = keyword[True] )
def to_download(): """ Build interval of urls to download. We always get the first file of the next day. Ex: 2013-01-01 => 2013-01-02.0000 """ first_day = parse(interval_first) last_day = parse(interval_last) format_change = parse('2010-06-14') one_day = datetime.timedelta(1) cur_day = first_day url_list = [] while cur_day < last_day: fname = filename.format(day=cur_day.strftime('%Y%m%d')) if cur_day > format_change: cur_day += one_day url = base_url.format(year_month=cur_day.strftime('%Y.%m'), file_day=cur_day.strftime('%Y%m%d')) # depends on [control=['if'], data=['cur_day']] else: url = base_url_old.format(year_month=cur_day.strftime('%Y.%m'), file_day=cur_day.strftime('%Y%m%d')) cur_day += one_day url_list.append((fname, url)) # depends on [control=['while'], data=['cur_day']] return sorted(url_list, key=lambda tup: tup[0], reverse=True)
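The date-walking core in isolation: step one day at a time from first to last, formatting a filename per day; the format_change cutoff only switches which URL template a given day uses. A runnable miniature with concrete dates:

import datetime

first_day = datetime.date(2010, 6, 12)
last_day = datetime.date(2010, 6, 16)
format_change = datetime.date(2010, 6, 14)
one_day = datetime.timedelta(1)

cur_day = first_day
while cur_day < last_day:
    template = "new" if cur_day > format_change else "old"
    print(cur_day.strftime("%Y%m%d"), template)
    cur_day += one_day
# 20100612 old / 20100613 old / 20100614 old / 20100615 new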
def _condition_as_sql(self, qn, connection): ''' Return sql for condition. ''' def escape(value): if isinstance(value, bool): value = str(int(value)) if isinstance(value, six.string_types): # Escape params used with LIKE if '%' in value: value = value.replace('%', '%%') # Escape single quotes if "'" in value: value = value.replace("'", "''") # Add single quote to text values value = "'" + value + "'" return value sql, param = self.condition.query.where.as_sql(qn, connection) param = map(escape, param) return sql % tuple(param)
def function[_condition_as_sql, parameter[self, qn, connection]]: constant[ Return sql for condition. ] def function[escape, parameter[value]]: if call[name[isinstance], parameter[name[value], name[bool]]] begin[:] variable[value] assign[=] call[name[str], parameter[call[name[int], parameter[name[value]]]]] if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:] if compare[constant[%] in name[value]] begin[:] variable[value] assign[=] call[name[value].replace, parameter[constant[%], constant[%%]]] if compare[constant['] in name[value]] begin[:] variable[value] assign[=] call[name[value].replace, parameter[constant['], constant['']]] variable[value] assign[=] binary_operation[binary_operation[constant['] + name[value]] + constant[']] return[name[value]] <ast.Tuple object at 0x7da1b033e740> assign[=] call[name[self].condition.query.where.as_sql, parameter[name[qn], name[connection]]] variable[param] assign[=] call[name[map], parameter[name[escape], name[param]]] return[binary_operation[name[sql] <ast.Mod object at 0x7da2590d6920> call[name[tuple], parameter[name[param]]]]]
keyword[def] identifier[_condition_as_sql] ( identifier[self] , identifier[qn] , identifier[connection] ): literal[string] keyword[def] identifier[escape] ( identifier[value] ): keyword[if] identifier[isinstance] ( identifier[value] , identifier[bool] ): identifier[value] = identifier[str] ( identifier[int] ( identifier[value] )) keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ): keyword[if] literal[string] keyword[in] identifier[value] : identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] literal[string] keyword[in] identifier[value] : identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] ) identifier[value] = literal[string] + identifier[value] + literal[string] keyword[return] identifier[value] identifier[sql] , identifier[param] = identifier[self] . identifier[condition] . identifier[query] . identifier[where] . identifier[as_sql] ( identifier[qn] , identifier[connection] ) identifier[param] = identifier[map] ( identifier[escape] , identifier[param] ) keyword[return] identifier[sql] % identifier[tuple] ( identifier[param] )
def _condition_as_sql(self, qn, connection): """ Return sql for condition. """ def escape(value): if isinstance(value, bool): value = str(int(value)) # depends on [control=['if'], data=[]] if isinstance(value, six.string_types): # Escape params used with LIKE if '%' in value: value = value.replace('%', '%%') # depends on [control=['if'], data=['value']] # Escape single quotes if "'" in value: value = value.replace("'", "''") # depends on [control=['if'], data=['value']] # Add single quote to text values value = "'" + value + "'" # depends on [control=['if'], data=[]] return value (sql, param) = self.condition.query.where.as_sql(qn, connection) param = map(escape, param) return sql % tuple(param)
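What the escape helper produces, shown standalone (using str in place of six.string_types for a Python-3-only sketch): doubling single quotes is the standard SQL string-literal escape, and doubling % protects LIKE patterns from a later %-formatting pass.

def escape(value):
    if isinstance(value, bool):
        value = str(int(value))
    if isinstance(value, str):
        if '%' in value:
            value = value.replace('%', '%%')
        if "'" in value:
            value = value.replace("'", "''")
        value = "'" + value + "'"
    return value

print(escape("O'Brien"))   # 'O''Brien'
print(escape("50%"))       # '50%%'
print(escape(True))        # '1'  (bools become quoted ints)
print(escape(42))          # 42   (non-strings pass through unquoted)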
def loadstack(self): print("Loading stack from: %s" % self.stack_fn) data = np.load(self.stack_fn, encoding='latin1') #self.fn_list = list([i.decode("utf-8") for i in data['fn_list']]) self.fn_list = data['fn_list'] #Load flags originally used for stack creation #self.flags = data['flags'] #{'datestack':self.datestack, 'stats':self.stats, 'med':self.med, 'trend':self.trend, 'sort':self.sort, 'save':self.save} if 'source' in data: self.source = list(data['source']) else: self.source = ['None' for i in self.fn_list] if 'error' in data: self.error = np.ma.fix_invalid(data['error'], fill_value=-9999) else: self.error = np.ma.zeros(len(self.fn_list)) #if 'error_dict_list' in data: # self.error_dict_list = data['error_dict_list'][()] #else: self.error_dict_list = [None for i in self.fn_list] #This is a shortcut, should load from the data['date_list'] arrays if 'date_list_o' in data: from pygeotools.lib import timelib from datetime import datetime self.date_list_o = np.ma.fix_invalid(data['date_list_o'], fill_value=1.0) #This is a hack - need universal timelib time zone support or stripping self.date_list = np.ma.masked_equal([i.replace(tzinfo=None) for i in timelib.o2dt(self.date_list_o)], datetime(1,1,1)) else: self.get_date_list() print("Loading ma stack") self.ma_stack = np.ma.fix_invalid(data['ma_stack_full']).astype(self.dtype) #Note: the str is an intermediate fix - all new stacks should have str written self.proj = str(data['proj']) #If we don't have gt, we're in trouble - can't recompute res/extent if 'gt' in data: self.gt = data['gt'] else: print("No geotransform found in stack") #Check if res and extent are defined - can reconstruct #Should throw error #Note: Once we have gt, could just run get_res() and get_extent() to avoid the following #Or could check to make sure consistent #Some stacks in Oct 2015 and Nov 2015 did not have res/extent saved properly """ if 'res' in data: if data['res'] != 'None': #self.res = float(data['res']) self.res = float(np.atleast_1d(data['res'])[0]) else: self.get_res() else: self.get_res() if 'extent' in data: if data['extent'] != 'None': #self.extent = list(data['extent']) #self.extent = list(np.atleast_1d(data['extent'])[0]) extent = np.atleast_1d(data['extent'])[0] if isinstance(extent, str): self.extent = [float(x) for x in extent.split()] else: self.extent = list(extent) else: self.get_extent() else: self.get_extent() """ #Just do this to be safe, if gt is bad, no point in proceeding self.get_res() self.get_extent() saveflag=False if self.datestack: #statlist = ['dt_stack', 'dt_mean', 'dt_ptp', 'dt_min', 'dt_max', 'dt_center'] statlist = ['dt_ptp', 'dt_min', 'dt_max', 'dt_center'] if all([s in data for s in statlist]): print("Loading datestack") #self.dt_stack = np.ma.fix_invalid(data['dt_stack']).astype(self.dtype) #self.dt_stack_mean = np.ma.fix_invalid(data['dt_mean'], fill_value=-9999).astype(self.dtype) self.dt_stack_ptp = np.ma.fix_invalid(data['dt_ptp'], fill_value=-9999).astype(self.dtype) self.dt_stack_min = np.ma.fix_invalid(data['dt_min'], fill_value=-9999).astype(self.dtype) self.dt_stack_max = np.ma.fix_invalid(data['dt_max'], fill_value=-9999).astype(self.dtype) self.dt_stack_center = np.ma.fix_invalid(data['dt_center'], fill_value=-9999).astype(self.dtype) else: if self.date_list_o.count() > 1: #self.make_datestack() self.compute_dt_stats() self.write_datestack() saveflag=True if self.stats: #Could do this individually to save time statlist = ['count', 'mean', 'std', 'min', 'max'] if self.med: statlist.append('med') 
statlist.append('nmad') if all([s in data for s in statlist]): print("Loading stats") self.stack_count = np.ma.masked_equal(data['count'], 0).astype(np.uint16) self.stack_mean = np.ma.fix_invalid(data['mean'], fill_value=-9999).astype(self.dtype) self.stack_std = np.ma.fix_invalid(data['std'], fill_value=-9999).astype(self.dtype) self.stack_min = np.ma.fix_invalid(data['min'], fill_value=-9999).astype(self.dtype) self.stack_max = np.ma.fix_invalid(data['max'], fill_value=-9999).astype(self.dtype) if self.med: self.stack_med = np.ma.fix_invalid(data['med'], fill_value=-9999).astype(self.dtype) self.stack_nmad = np.ma.fix_invalid(data['nmad'], fill_value=-9999).astype(self.dtype) else: if self.ma_stack.shape[0] > 1: self.compute_stats() self.write_stats() saveflag=True if self.trend: if 'n_thresh' in data: self.n_thresh = data['n_thresh'] if 'min_dt_ptp' in data: self.min_dt_ptp = data['min_dt_ptp'] if 'robust' in data: self.robust = data['robust'] #statlist = ['trend', 'intercept', 'detrended_std', 'rsquared'] statlist = ['trend', 'intercept', 'detrended_std'] if all([s in data for s in statlist]): print("Loading trend") self.stack_trend = np.ma.fix_invalid(data['trend'], fill_value=-9999).astype(self.dtype) self.stack_intercept = np.ma.fix_invalid(data['intercept'], fill_value=-9999).astype(self.dtype) self.stack_detrended_std = np.ma.fix_invalid(data['detrended_std'], fill_value=-9999).astype(self.dtype) #self.stack_rsquared = np.ma.fix_invalid(data['rsquared'], fill_value=-9999).astype(self.dtype) else: if self.ma_stack.shape[0] >= self.n_thresh: self.compute_trend() self.write_trend() saveflag=True if saveflag: self.savestack() data.close()
def function[loadstack, parameter[self]]: call[name[print], parameter[binary_operation[constant[Loading stack from: %s] <ast.Mod object at 0x7da2590d6920> name[self].stack_fn]]] variable[data] assign[=] call[name[np].load, parameter[name[self].stack_fn]] name[self].fn_list assign[=] call[name[data]][constant[fn_list]] if compare[constant[source] in name[data]] begin[:] name[self].source assign[=] call[name[list], parameter[call[name[data]][constant[source]]]] if compare[constant[error] in name[data]] begin[:] name[self].error assign[=] call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[error]]]] name[self].error_dict_list assign[=] <ast.ListComp object at 0x7da1b066b550> if compare[constant[date_list_o] in name[data]] begin[:] from relative_module[pygeotools.lib] import module[timelib] from relative_module[datetime] import module[datetime] name[self].date_list_o assign[=] call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[date_list_o]]]] name[self].date_list assign[=] call[name[np].ma.masked_equal, parameter[<ast.ListComp object at 0x7da1b0668190>, call[name[datetime], parameter[constant[1], constant[1], constant[1]]]]] call[name[print], parameter[constant[Loading ma stack]]] name[self].ma_stack assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[ma_stack_full]]]].astype, parameter[name[self].dtype]] name[self].proj assign[=] call[name[str], parameter[call[name[data]][constant[proj]]]] if compare[constant[gt] in name[data]] begin[:] name[self].gt assign[=] call[name[data]][constant[gt]] constant[ if 'res' in data: if data['res'] != 'None': #self.res = float(data['res']) self.res = float(np.atleast_1d(data['res'])[0]) else: self.get_res() else: self.get_res() if 'extent' in data: if data['extent'] != 'None': #self.extent = list(data['extent']) #self.extent = list(np.atleast_1d(data['extent'])[0]) extent = np.atleast_1d(data['extent'])[0] if isinstance(extent, str): self.extent = [float(x) for x in extent.split()] else: self.extent = list(extent) else: self.get_extent() else: self.get_extent() ] call[name[self].get_res, parameter[]] call[name[self].get_extent, parameter[]] variable[saveflag] assign[=] constant[False] if name[self].datestack begin[:] variable[statlist] assign[=] list[[<ast.Constant object at 0x7da1b066aaa0>, <ast.Constant object at 0x7da1b066a740>, <ast.Constant object at 0x7da1b066bfa0>, <ast.Constant object at 0x7da1b06693c0>]] if call[name[all], parameter[<ast.ListComp object at 0x7da1b0668a30>]] begin[:] call[name[print], parameter[constant[Loading datestack]]] name[self].dt_stack_ptp assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[dt_ptp]]]].astype, parameter[name[self].dtype]] name[self].dt_stack_min assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[dt_min]]]].astype, parameter[name[self].dtype]] name[self].dt_stack_max assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[dt_max]]]].astype, parameter[name[self].dtype]] name[self].dt_stack_center assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[dt_center]]]].astype, parameter[name[self].dtype]] if name[self].stats begin[:] variable[statlist] assign[=] list[[<ast.Constant object at 0x7da1b06c68c0>, <ast.Constant object at 0x7da1b06c5a20>, <ast.Constant object at 0x7da1b06c63e0>, <ast.Constant object at 0x7da1b06c4f40>, <ast.Constant object at 0x7da1b06c48b0>]] if name[self].med begin[:] call[name[statlist].append, parameter[constant[med]]] 
call[name[statlist].append, parameter[constant[nmad]]] if call[name[all], parameter[<ast.ListComp object at 0x7da1b06c6230>]] begin[:] call[name[print], parameter[constant[Loading stats]]] name[self].stack_count assign[=] call[call[name[np].ma.masked_equal, parameter[call[name[data]][constant[count]], constant[0]]].astype, parameter[name[np].uint16]] name[self].stack_mean assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[mean]]]].astype, parameter[name[self].dtype]] name[self].stack_std assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[std]]]].astype, parameter[name[self].dtype]] name[self].stack_min assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[min]]]].astype, parameter[name[self].dtype]] name[self].stack_max assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[max]]]].astype, parameter[name[self].dtype]] if name[self].med begin[:] name[self].stack_med assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[med]]]].astype, parameter[name[self].dtype]] name[self].stack_nmad assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[nmad]]]].astype, parameter[name[self].dtype]] if name[self].trend begin[:] if compare[constant[n_thresh] in name[data]] begin[:] name[self].n_thresh assign[=] call[name[data]][constant[n_thresh]] if compare[constant[min_dt_ptp] in name[data]] begin[:] name[self].min_dt_ptp assign[=] call[name[data]][constant[min_dt_ptp]] if compare[constant[robust] in name[data]] begin[:] name[self].robust assign[=] call[name[data]][constant[robust]] variable[statlist] assign[=] list[[<ast.Constant object at 0x7da1b0762dd0>, <ast.Constant object at 0x7da1b0761930>, <ast.Constant object at 0x7da1b07626e0>]] if call[name[all], parameter[<ast.ListComp object at 0x7da1b07615a0>]] begin[:] call[name[print], parameter[constant[Loading trend]]] name[self].stack_trend assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[trend]]]].astype, parameter[name[self].dtype]] name[self].stack_intercept assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[intercept]]]].astype, parameter[name[self].dtype]] name[self].stack_detrended_std assign[=] call[call[name[np].ma.fix_invalid, parameter[call[name[data]][constant[detrended_std]]]].astype, parameter[name[self].dtype]] if name[saveflag] begin[:] call[name[self].savestack, parameter[]] call[name[data].close, parameter[]]
keyword[def] identifier[loadstack] ( identifier[self] ): identifier[print] ( literal[string] % identifier[self] . identifier[stack_fn] ) identifier[data] = identifier[np] . identifier[load] ( identifier[self] . identifier[stack_fn] , identifier[encoding] = literal[string] ) identifier[self] . identifier[fn_list] = identifier[data] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[data] : identifier[self] . identifier[source] = identifier[list] ( identifier[data] [ literal[string] ]) keyword[else] : identifier[self] . identifier[source] =[ literal[string] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[fn_list] ] keyword[if] literal[string] keyword[in] identifier[data] : identifier[self] . identifier[error] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ) keyword[else] : identifier[self] . identifier[error] = identifier[np] . identifier[ma] . identifier[zeros] ( identifier[len] ( identifier[self] . identifier[fn_list] )) identifier[self] . identifier[error_dict_list] =[ keyword[None] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[fn_list] ] keyword[if] literal[string] keyword[in] identifier[data] : keyword[from] identifier[pygeotools] . identifier[lib] keyword[import] identifier[timelib] keyword[from] identifier[datetime] keyword[import] identifier[datetime] identifier[self] . identifier[date_list_o] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] = literal[int] ) identifier[self] . identifier[date_list] = identifier[np] . identifier[ma] . identifier[masked_equal] ([ identifier[i] . identifier[replace] ( identifier[tzinfo] = keyword[None] ) keyword[for] identifier[i] keyword[in] identifier[timelib] . identifier[o2dt] ( identifier[self] . identifier[date_list_o] )], identifier[datetime] ( literal[int] , literal[int] , literal[int] )) keyword[else] : identifier[self] . identifier[get_date_list] () identifier[print] ( literal[string] ) identifier[self] . identifier[ma_stack] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ]). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[proj] = identifier[str] ( identifier[data] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[data] : identifier[self] . identifier[gt] = identifier[data] [ literal[string] ] keyword[else] : identifier[print] ( literal[string] ) literal[string] identifier[self] . identifier[get_res] () identifier[self] . identifier[get_extent] () identifier[saveflag] = keyword[False] keyword[if] identifier[self] . identifier[datestack] : identifier[statlist] =[ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[all] ([ identifier[s] keyword[in] identifier[data] keyword[for] identifier[s] keyword[in] identifier[statlist] ]): identifier[print] ( literal[string] ) identifier[self] . identifier[dt_stack_ptp] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[dt_stack_min] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . 
identifier[dt_stack_max] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[dt_stack_center] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) keyword[else] : keyword[if] identifier[self] . identifier[date_list_o] . identifier[count] ()> literal[int] : identifier[self] . identifier[compute_dt_stats] () identifier[self] . identifier[write_datestack] () identifier[saveflag] = keyword[True] keyword[if] identifier[self] . identifier[stats] : identifier[statlist] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[self] . identifier[med] : identifier[statlist] . identifier[append] ( literal[string] ) identifier[statlist] . identifier[append] ( literal[string] ) keyword[if] identifier[all] ([ identifier[s] keyword[in] identifier[data] keyword[for] identifier[s] keyword[in] identifier[statlist] ]): identifier[print] ( literal[string] ) identifier[self] . identifier[stack_count] = identifier[np] . identifier[ma] . identifier[masked_equal] ( identifier[data] [ literal[string] ], literal[int] ). identifier[astype] ( identifier[np] . identifier[uint16] ) identifier[self] . identifier[stack_mean] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[stack_std] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[stack_min] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[stack_max] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) keyword[if] identifier[self] . identifier[med] : identifier[self] . identifier[stack_med] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[stack_nmad] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) keyword[else] : keyword[if] identifier[self] . identifier[ma_stack] . identifier[shape] [ literal[int] ]> literal[int] : identifier[self] . identifier[compute_stats] () identifier[self] . identifier[write_stats] () identifier[saveflag] = keyword[True] keyword[if] identifier[self] . identifier[trend] : keyword[if] literal[string] keyword[in] identifier[data] : identifier[self] . identifier[n_thresh] = identifier[data] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[data] : identifier[self] . 
identifier[min_dt_ptp] = identifier[data] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[data] : identifier[self] . identifier[robust] = identifier[data] [ literal[string] ] identifier[statlist] =[ literal[string] , literal[string] , literal[string] ] keyword[if] identifier[all] ([ identifier[s] keyword[in] identifier[data] keyword[for] identifier[s] keyword[in] identifier[statlist] ]): identifier[print] ( literal[string] ) identifier[self] . identifier[stack_trend] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[stack_intercept] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) identifier[self] . identifier[stack_detrended_std] = identifier[np] . identifier[ma] . identifier[fix_invalid] ( identifier[data] [ literal[string] ], identifier[fill_value] =- literal[int] ). identifier[astype] ( identifier[self] . identifier[dtype] ) keyword[else] : keyword[if] identifier[self] . identifier[ma_stack] . identifier[shape] [ literal[int] ]>= identifier[self] . identifier[n_thresh] : identifier[self] . identifier[compute_trend] () identifier[self] . identifier[write_trend] () identifier[saveflag] = keyword[True] keyword[if] identifier[saveflag] : identifier[self] . identifier[savestack] () identifier[data] . identifier[close] ()
def loadstack(self): print('Loading stack from: %s' % self.stack_fn) data = np.load(self.stack_fn, encoding='latin1') #self.fn_list = list([i.decode("utf-8") for i in data['fn_list']]) self.fn_list = data['fn_list'] #Load flags originally used for stack creation #self.flags = data['flags'] #{'datestack':self.datestack, 'stats':self.stats, 'med':self.med, 'trend':self.trend, 'sort':self.sort, 'save':self.save} if 'source' in data: self.source = list(data['source']) # depends on [control=['if'], data=['data']] else: self.source = ['None' for i in self.fn_list] if 'error' in data: self.error = np.ma.fix_invalid(data['error'], fill_value=-9999) # depends on [control=['if'], data=['data']] else: self.error = np.ma.zeros(len(self.fn_list)) #if 'error_dict_list' in data: # self.error_dict_list = data['error_dict_list'][()] #else: self.error_dict_list = [None for i in self.fn_list] #This is a shortcut, should load from the data['date_list'] arrays if 'date_list_o' in data: from pygeotools.lib import timelib from datetime import datetime self.date_list_o = np.ma.fix_invalid(data['date_list_o'], fill_value=1.0) #This is a hack - need universal timelib time zone support or stripping self.date_list = np.ma.masked_equal([i.replace(tzinfo=None) for i in timelib.o2dt(self.date_list_o)], datetime(1, 1, 1)) # depends on [control=['if'], data=['data']] else: self.get_date_list() print('Loading ma stack') self.ma_stack = np.ma.fix_invalid(data['ma_stack_full']).astype(self.dtype) #Note: the str is an intermediate fix - all new stacks should have str written self.proj = str(data['proj']) #If we don't have gt, we're in trouble - can't recompute res/extent if 'gt' in data: self.gt = data['gt'] # depends on [control=['if'], data=['data']] else: print('No geotransform found in stack') #Check if res and extent are defined - can reconstruct #Should throw error #Note: Once we have gt, could just run get_res() and get_extent() to avoid the following #Or could check to make sure consistent #Some stacks in Oct 2015 and Nov 2015 did not have res/extent saved properly "\n if 'res' in data:\n if data['res'] != 'None':\n #self.res = float(data['res'])\n self.res = float(np.atleast_1d(data['res'])[0])\n else:\n self.get_res()\n else:\n self.get_res()\n if 'extent' in data:\n if data['extent'] != 'None':\n #self.extent = list(data['extent'])\n #self.extent = list(np.atleast_1d(data['extent'])[0])\n extent = np.atleast_1d(data['extent'])[0]\n if isinstance(extent, str):\n self.extent = [float(x) for x in extent.split()]\n else:\n self.extent = list(extent)\n else:\n self.get_extent()\n else:\n self.get_extent() \n " #Just do this to be safe, if gt is bad, no point in proceeding self.get_res() self.get_extent() saveflag = False if self.datestack: #statlist = ['dt_stack', 'dt_mean', 'dt_ptp', 'dt_min', 'dt_max', 'dt_center'] statlist = ['dt_ptp', 'dt_min', 'dt_max', 'dt_center'] if all([s in data for s in statlist]): print('Loading datestack') #self.dt_stack = np.ma.fix_invalid(data['dt_stack']).astype(self.dtype) #self.dt_stack_mean = np.ma.fix_invalid(data['dt_mean'], fill_value=-9999).astype(self.dtype) self.dt_stack_ptp = np.ma.fix_invalid(data['dt_ptp'], fill_value=-9999).astype(self.dtype) self.dt_stack_min = np.ma.fix_invalid(data['dt_min'], fill_value=-9999).astype(self.dtype) self.dt_stack_max = np.ma.fix_invalid(data['dt_max'], fill_value=-9999).astype(self.dtype) self.dt_stack_center = np.ma.fix_invalid(data['dt_center'], fill_value=-9999).astype(self.dtype) # depends on [control=['if'], data=[]] elif 
self.date_list_o.count() > 1: #self.make_datestack() self.compute_dt_stats() self.write_datestack() saveflag = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if self.stats: #Could do this individually to save time statlist = ['count', 'mean', 'std', 'min', 'max'] if self.med: statlist.append('med') statlist.append('nmad') # depends on [control=['if'], data=[]] if all([s in data for s in statlist]): print('Loading stats') self.stack_count = np.ma.masked_equal(data['count'], 0).astype(np.uint16) self.stack_mean = np.ma.fix_invalid(data['mean'], fill_value=-9999).astype(self.dtype) self.stack_std = np.ma.fix_invalid(data['std'], fill_value=-9999).astype(self.dtype) self.stack_min = np.ma.fix_invalid(data['min'], fill_value=-9999).astype(self.dtype) self.stack_max = np.ma.fix_invalid(data['max'], fill_value=-9999).astype(self.dtype) if self.med: self.stack_med = np.ma.fix_invalid(data['med'], fill_value=-9999).astype(self.dtype) self.stack_nmad = np.ma.fix_invalid(data['nmad'], fill_value=-9999).astype(self.dtype) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif self.ma_stack.shape[0] > 1: self.compute_stats() self.write_stats() saveflag = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if self.trend: if 'n_thresh' in data: self.n_thresh = data['n_thresh'] # depends on [control=['if'], data=['data']] if 'min_dt_ptp' in data: self.min_dt_ptp = data['min_dt_ptp'] # depends on [control=['if'], data=['data']] if 'robust' in data: self.robust = data['robust'] # depends on [control=['if'], data=['data']] #statlist = ['trend', 'intercept', 'detrended_std', 'rsquared'] statlist = ['trend', 'intercept', 'detrended_std'] if all([s in data for s in statlist]): print('Loading trend') self.stack_trend = np.ma.fix_invalid(data['trend'], fill_value=-9999).astype(self.dtype) self.stack_intercept = np.ma.fix_invalid(data['intercept'], fill_value=-9999).astype(self.dtype) self.stack_detrended_std = np.ma.fix_invalid(data['detrended_std'], fill_value=-9999).astype(self.dtype) # depends on [control=['if'], data=[]] #self.stack_rsquared = np.ma.fix_invalid(data['rsquared'], fill_value=-9999).astype(self.dtype) elif self.ma_stack.shape[0] >= self.n_thresh: self.compute_trend() self.write_trend() saveflag = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if saveflag: self.savestack() # depends on [control=['if'], data=[]] data.close()
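loadstack above leans heavily on np.ma.fix_invalid and np.ma.masked_equal to turn the raw arrays in the .npz back into masked stacks. In miniature:

import numpy as np

raw = np.array([[1.0, np.nan], [np.inf, 4.0]])
fixed = np.ma.fix_invalid(raw, fill_value=-9999)
print(fixed)        # [[1.0 --] [-- 4.0]]; NaN/inf masked, fill_value -9999

counts = np.ma.masked_equal(np.array([3, 0, 7]), 0)
print(counts)       # [3 -- 7]; zero-count pixels masked out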
def get_ad_leads(self, start_date=None, end_date=None, filtering=(), page=1, page_size=100, version='v1.0'):
    """
    Fetch sales-lead data for Moments (WeChat friend-circle) ads.

    :param start_date: start date, defaults to today
    :param end_date: end date, defaults to today
    :param filtering: filter conditions, as [{field: filter field, operator: operator, values: field values}]
    :param page: page number; fetches the data for the given page
    :param page_size: number of records returned per page (1-100)
    :param version: API version, v1.0
    """
    today = datetime.date.today()
    if start_date is None:
        start_date = today
    if end_date is None:
        end_date = today
    if isinstance(start_date, datetime.date):
        start_date = start_date.strftime("%Y-%m-%d")
    if isinstance(end_date, datetime.date):
        end_date = end_date.strftime("%Y-%m-%d")
    return self._get(
        'wechat_ad_leads/get',
        params=optionaldict(
            date_range=json.dumps({'start_date': start_date, 'end_date': end_date}),
            filtering=json.dumps(filtering) if filtering else None,
            page=page,
            page_size=page_size,
            version=version
        ),
        result_processor=lambda x: x['data']
    )
def function[get_ad_leads, parameter[self, start_date, end_date, filtering, page, page_size, version]]:
    constant[
        Fetch sales-lead data for Moments (WeChat friend-circle) ads.
        :param start_date: start date, defaults to today
        :param end_date: end date, defaults to today
        :param filtering: filter conditions, as [{field: filter field, operator: operator, values: field values}]
        :param page: page number; fetches the data for the given page
        :param page_size: number of records returned per page (1-100)
        :param version: API version, v1.0
    ]
    variable[today] assign[=] call[name[datetime].date.today, parameter[]]
    if compare[name[start_date] is constant[None]] begin[:]
    variable[start_date] assign[=] name[today]
    if compare[name[end_date] is constant[None]] begin[:]
    variable[end_date] assign[=] name[today]
    if call[name[isinstance], parameter[name[start_date], name[datetime].date]] begin[:]
    variable[start_date] assign[=] call[name[start_date].strftime, parameter[constant[%Y-%m-%d]]]
    if call[name[isinstance], parameter[name[end_date], name[datetime].date]] begin[:]
    variable[end_date] assign[=] call[name[end_date].strftime, parameter[constant[%Y-%m-%d]]]
    return[call[name[self]._get, parameter[constant[wechat_ad_leads/get]]]]
keyword[def] identifier[get_ad_leads] ( identifier[self] , identifier[start_date] = keyword[None] , identifier[end_date] = keyword[None] , identifier[filtering] =(), identifier[page] = literal[int] , identifier[page_size] = literal[int] , identifier[version] = literal[string] ): literal[string] identifier[today] = identifier[datetime] . identifier[date] . identifier[today] () keyword[if] identifier[start_date] keyword[is] keyword[None] : identifier[start_date] = identifier[today] keyword[if] identifier[end_date] keyword[is] keyword[None] : identifier[end_date] = identifier[today] keyword[if] identifier[isinstance] ( identifier[start_date] , identifier[datetime] . identifier[date] ): identifier[start_date] = identifier[start_date] . identifier[strftime] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[end_date] , identifier[datetime] . identifier[date] ): identifier[end_date] = identifier[end_date] . identifier[strftime] ( literal[string] ) keyword[return] identifier[self] . identifier[_get] ( literal[string] , identifier[params] = identifier[optionaldict] ( identifier[date_range] = identifier[json] . identifier[dumps] ({ literal[string] : identifier[start_date] , literal[string] : identifier[end_date] }), identifier[filtering] = identifier[json] . identifier[dumps] ( identifier[filtering] ) keyword[if] identifier[filtering] keyword[else] keyword[None] , identifier[page] = identifier[page] , identifier[page_size] = identifier[page_size] , identifier[version] = identifier[version] ), identifier[result_processor] = keyword[lambda] identifier[x] : identifier[x] [ literal[string] ] )
def get_ad_leads(self, start_date=None, end_date=None, filtering=(), page=1, page_size=100, version='v1.0'):
    """
    Fetch sales-lead data for Moments (WeChat friend-circle) ads.

    :param start_date: start date, defaults to today
    :param end_date: end date, defaults to today
    :param filtering: filter conditions, as [{field: filter field, operator: operator, values: field values}]
    :param page: page number; fetches the data for the given page
    :param page_size: number of records returned per page (1-100)
    :param version: API version, v1.0
    """
    today = datetime.date.today()
    if start_date is None:
        start_date = today # depends on [control=['if'], data=['start_date']]
    if end_date is None:
        end_date = today # depends on [control=['if'], data=['end_date']]
    if isinstance(start_date, datetime.date):
        start_date = start_date.strftime('%Y-%m-%d') # depends on [control=['if'], data=[]]
    if isinstance(end_date, datetime.date):
        end_date = end_date.strftime('%Y-%m-%d') # depends on [control=['if'], data=[]]
    return self._get('wechat_ad_leads/get', params=optionaldict(date_range=json.dumps({'start_date': start_date, 'end_date': end_date}), filtering=json.dumps(filtering) if filtering else None, page=page, page_size=page_size, version=version), result_processor=lambda x: x['data'])
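The request-building trick above is optionaldict, which omits None-valued keys, so an empty filtering tuple never reaches the wire. A sketch, assuming the behavior of the optionaldict PyPI package that this kind of SDK typically uses:

import json
from optionaldict import optionaldict

filtering = ()   # no filters requested
params = optionaldict(
    date_range=json.dumps({"start_date": "2021-01-01", "end_date": "2021-01-01"}),
    filtering=json.dumps(filtering) if filtering else None,  # None -> dropped
    page=1,
    page_size=100,
    version="v1.0",
)
print(params)    # no "filtering" key at all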
def show_subnetpool(self, subnetpool, **_params): """Fetches information of a certain subnetpool.""" return self.get(self.subnetpool_path % (subnetpool), params=_params)
def function[show_subnetpool, parameter[self, subnetpool]]: constant[Fetches information of a certain subnetpool.] return[call[name[self].get, parameter[binary_operation[name[self].subnetpool_path <ast.Mod object at 0x7da2590d6920> name[subnetpool]]]]]
keyword[def] identifier[show_subnetpool] ( identifier[self] , identifier[subnetpool] ,** identifier[_params] ): literal[string] keyword[return] identifier[self] . identifier[get] ( identifier[self] . identifier[subnetpool_path] %( identifier[subnetpool] ), identifier[params] = identifier[_params] )
def show_subnetpool(self, subnetpool, **_params): """Fetches information of a certain subnetpool.""" return self.get(self.subnetpool_path % subnetpool, params=_params)
def make_encoder(activation, num_topics, layer_sizes): """Create the encoder function. Args: activation: Activation function to use. num_topics: The number of topics. layer_sizes: The number of hidden units per layer in the encoder. Returns: encoder: A `callable` mapping a bag-of-words `Tensor` to a `tfd.Distribution` instance over topics. """ encoder_net = tf.keras.Sequential() for num_hidden_units in layer_sizes: encoder_net.add( tf.keras.layers.Dense( num_hidden_units, activation=activation, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) encoder_net.add( tf.keras.layers.Dense( num_topics, activation=tf.nn.softplus, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) def encoder(bag_of_words): net = _clip_dirichlet_parameters(encoder_net(bag_of_words)) return tfd.Dirichlet(concentration=net, name="topics_posterior") return encoder
def function[make_encoder, parameter[activation, num_topics, layer_sizes]]: constant[Create the encoder function. Args: activation: Activation function to use. num_topics: The number of topics. layer_sizes: The number of hidden units per layer in the encoder. Returns: encoder: A `callable` mapping a bag-of-words `Tensor` to a `tfd.Distribution` instance over topics. ] variable[encoder_net] assign[=] call[name[tf].keras.Sequential, parameter[]] for taget[name[num_hidden_units]] in starred[name[layer_sizes]] begin[:] call[name[encoder_net].add, parameter[call[name[tf].keras.layers.Dense, parameter[name[num_hidden_units]]]]] call[name[encoder_net].add, parameter[call[name[tf].keras.layers.Dense, parameter[name[num_topics]]]]] def function[encoder, parameter[bag_of_words]]: variable[net] assign[=] call[name[_clip_dirichlet_parameters], parameter[call[name[encoder_net], parameter[name[bag_of_words]]]]] return[call[name[tfd].Dirichlet, parameter[]]] return[name[encoder]]
keyword[def] identifier[make_encoder] ( identifier[activation] , identifier[num_topics] , identifier[layer_sizes] ): literal[string] identifier[encoder_net] = identifier[tf] . identifier[keras] . identifier[Sequential] () keyword[for] identifier[num_hidden_units] keyword[in] identifier[layer_sizes] : identifier[encoder_net] . identifier[add] ( identifier[tf] . identifier[keras] . identifier[layers] . identifier[Dense] ( identifier[num_hidden_units] , identifier[activation] = identifier[activation] , identifier[kernel_initializer] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[glorot_normal_initializer] ())) identifier[encoder_net] . identifier[add] ( identifier[tf] . identifier[keras] . identifier[layers] . identifier[Dense] ( identifier[num_topics] , identifier[activation] = identifier[tf] . identifier[nn] . identifier[softplus] , identifier[kernel_initializer] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[glorot_normal_initializer] ())) keyword[def] identifier[encoder] ( identifier[bag_of_words] ): identifier[net] = identifier[_clip_dirichlet_parameters] ( identifier[encoder_net] ( identifier[bag_of_words] )) keyword[return] identifier[tfd] . identifier[Dirichlet] ( identifier[concentration] = identifier[net] , identifier[name] = literal[string] ) keyword[return] identifier[encoder]
def make_encoder(activation, num_topics, layer_sizes): """Create the encoder function. Args: activation: Activation function to use. num_topics: The number of topics. layer_sizes: The number of hidden units per layer in the encoder. Returns: encoder: A `callable` mapping a bag-of-words `Tensor` to a `tfd.Distribution` instance over topics. """ encoder_net = tf.keras.Sequential() for num_hidden_units in layer_sizes: encoder_net.add(tf.keras.layers.Dense(num_hidden_units, activation=activation, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) # depends on [control=['for'], data=['num_hidden_units']] encoder_net.add(tf.keras.layers.Dense(num_topics, activation=tf.nn.softplus, kernel_initializer=tf.compat.v1.glorot_normal_initializer())) def encoder(bag_of_words): net = _clip_dirichlet_parameters(encoder_net(bag_of_words)) return tfd.Dirichlet(concentration=net, name='topics_posterior') return encoder
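A usage sketch under assumptions: TF2 with tensorflow_probability installed, everything living in one script, and a plain clip standing in for the module's `_clip_dirichlet_parameters` (which is not shown here).

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

def _clip_dirichlet_parameters(x):
    # Hypothetical stand-in: keep concentrations in a sane range.
    return tf.clip_by_value(x, 1e-3, 1e3)

encoder = make_encoder(activation=tf.nn.relu, num_topics=5, layer_sizes=[32, 32])
bag_of_words = tf.random.uniform([8, 100])   # batch of 8 documents, vocab of 100
posterior = encoder(bag_of_words)            # tfd.Dirichlet over 5 topics
print(posterior.sample().shape)              # (8, 5); each row sums to 1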
def pelix_bundles(self): """ List of installed bundles """ framework = self.__context.get_framework() return { bundle.get_bundle_id(): { "name": bundle.get_symbolic_name(), "version": bundle.get_version(), "state": bundle.get_state(), "location": bundle.get_location(), } for bundle in framework.get_bundles() }
def function[pelix_bundles, parameter[self]]: constant[ List of installed bundles ] variable[framework] assign[=] call[name[self].__context.get_framework, parameter[]] return[<ast.DictComp object at 0x7da1b0349540>]
keyword[def] identifier[pelix_bundles] ( identifier[self] ): literal[string] identifier[framework] = identifier[self] . identifier[__context] . identifier[get_framework] () keyword[return] { identifier[bundle] . identifier[get_bundle_id] ():{ literal[string] : identifier[bundle] . identifier[get_symbolic_name] (), literal[string] : identifier[bundle] . identifier[get_version] (), literal[string] : identifier[bundle] . identifier[get_state] (), literal[string] : identifier[bundle] . identifier[get_location] (), } keyword[for] identifier[bundle] keyword[in] identifier[framework] . identifier[get_bundles] () }
def pelix_bundles(self): """ List of installed bundles """ framework = self.__context.get_framework() return {bundle.get_bundle_id(): {'name': bundle.get_symbolic_name(), 'version': bundle.get_version(), 'state': bundle.get_state(), 'location': bundle.get_location()} for bundle in framework.get_bundles()}
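Illustrative call pattern for pelix_bundles; `shell_util` is a hypothetical instance of the enclosing class with a live Pelix BundleContext injected.

bundles = shell_util.pelix_bundles()
for bundle_id, info in sorted(bundles.items()):
    # info["state"] is a Pelix Bundle state constant (e.g. Bundle.ACTIVE)
    print(bundle_id, info["name"], info["version"], info["state"])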
def serialize(self, node: SchemaNode, appstruct: Union[PotentialDatetimeType, ColanderNullType]) \ -> Union[str, ColanderNullType]: """ Serializes Python object to string representation. """ if not appstruct: return colander.null try: appstruct = coerce_to_pendulum(appstruct, assume_local=self.use_local_tz) except (ValueError, ParserError) as e: raise Invalid( node, "{!r} is not a pendulum.DateTime object; error was " "{!r}".format(appstruct, e)) return appstruct.isoformat()
def function[serialize, parameter[self, node, appstruct]]: constant[ Serializes Python object to string representation. ] if <ast.UnaryOp object at 0x7da1b172b220> begin[:] return[name[colander].null] <ast.Try object at 0x7da1b172be20> return[call[name[appstruct].isoformat, parameter[]]]
keyword[def] identifier[serialize] ( identifier[self] , identifier[node] : identifier[SchemaNode] , identifier[appstruct] : identifier[Union] [ identifier[PotentialDatetimeType] , identifier[ColanderNullType] ])-> identifier[Union] [ identifier[str] , identifier[ColanderNullType] ]: literal[string] keyword[if] keyword[not] identifier[appstruct] : keyword[return] identifier[colander] . identifier[null] keyword[try] : identifier[appstruct] = identifier[coerce_to_pendulum] ( identifier[appstruct] , identifier[assume_local] = identifier[self] . identifier[use_local_tz] ) keyword[except] ( identifier[ValueError] , identifier[ParserError] ) keyword[as] identifier[e] : keyword[raise] identifier[Invalid] ( identifier[node] , literal[string] literal[string] . identifier[format] ( identifier[appstruct] , identifier[e] )) keyword[return] identifier[appstruct] . identifier[isoformat] ()
def serialize(self, node: SchemaNode, appstruct: Union[PotentialDatetimeType, ColanderNullType]) -> Union[str, ColanderNullType]: """ Serializes Python object to string representation. """ if not appstruct: return colander.null # depends on [control=['if'], data=[]] try: appstruct = coerce_to_pendulum(appstruct, assume_local=self.use_local_tz) # depends on [control=['try'], data=[]] except (ValueError, ParserError) as e: raise Invalid(node, '{!r} is not a pendulum.DateTime object; error was {!r}'.format(appstruct, e)) # depends on [control=['except'], data=['e']] return appstruct.isoformat()
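A round-trip sketch for serialize. The host class below is a hypothetical stand-in, and coerce_to_pendulum / Invalid / ParserError are assumed to be imported as in the source module (not reproduced here).

import colander
import pendulum

class PendulumDateTimeType:        # hypothetical minimal host for serialize()
    def __init__(self, use_local_tz=False):
        self.use_local_tz = use_local_tz
    serialize = serialize          # bind the function defined above

node = colander.SchemaNode(colander.String())  # node used only for error reporting
typ = PendulumDateTimeType()
print(typ.serialize(node, pendulum.datetime(2024, 1, 2, 3, 4, 5)))
# -> '2024-01-02T03:04:05+00:00' (UTC input is kept as-is)
print(typ.serialize(node, None) is colander.null)  # empty input -> True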
def pop(self, till=None, priority=None): """ WAIT FOR NEXT ITEM ON THE QUEUE RETURN THREAD_STOP IF QUEUE IS CLOSED RETURN None IF till IS REACHED AND QUEUE IS STILL EMPTY :param till: A `Signal` to stop waiting and return None :return: A value, or a THREAD_STOP or None """ if till is not None and not isinstance(till, Signal): Log.error("expecting a signal") with self.lock: while True: if not priority: priority = self.highest_entry() if priority: value = self.queue[priority].queue.popleft() return value if self.closed: break if not self.lock.wait(till=till | self.closed): if self.closed: break return None (DEBUG or not self.silent) and Log.note(self.name + " queue stopped") return THREAD_STOP
def function[pop, parameter[self, till, priority]]: constant[ WAIT FOR NEXT ITEM ON THE QUEUE RETURN THREAD_STOP IF QUEUE IS CLOSED RETURN None IF till IS REACHED AND QUEUE IS STILL EMPTY :param till: A `Signal` to stop waiting and return None :return: A value, or a THREAD_STOP or None ] if <ast.BoolOp object at 0x7da1b0b6cc10> begin[:] call[name[Log].error, parameter[constant[expecting a signal]]] with name[self].lock begin[:] while constant[True] begin[:] if <ast.UnaryOp object at 0x7da1b0b6fd90> begin[:] variable[priority] assign[=] call[name[self].highest_entry, parameter[]] if name[priority] begin[:] variable[value] assign[=] call[call[name[self].queue][name[priority]].queue.popleft, parameter[]] return[name[value]] if name[self].closed begin[:] break if <ast.UnaryOp object at 0x7da1b0b6c4c0> begin[:] if name[self].closed begin[:] break return[constant[None]] <ast.BoolOp object at 0x7da1b0b6caf0> return[name[THREAD_STOP]]
keyword[def] identifier[pop] ( identifier[self] , identifier[till] = keyword[None] , identifier[priority] = keyword[None] ): literal[string] keyword[if] identifier[till] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[till] , identifier[Signal] ): identifier[Log] . identifier[error] ( literal[string] ) keyword[with] identifier[self] . identifier[lock] : keyword[while] keyword[True] : keyword[if] keyword[not] identifier[priority] : identifier[priority] = identifier[self] . identifier[highest_entry] () keyword[if] identifier[priority] : identifier[value] = identifier[self] . identifier[queue] [ identifier[priority] ]. identifier[queue] . identifier[popleft] () keyword[return] identifier[value] keyword[if] identifier[self] . identifier[closed] : keyword[break] keyword[if] keyword[not] identifier[self] . identifier[lock] . identifier[wait] ( identifier[till] = identifier[till] | identifier[self] . identifier[closed] ): keyword[if] identifier[self] . identifier[closed] : keyword[break] keyword[return] keyword[None] ( identifier[DEBUG] keyword[or] keyword[not] identifier[self] . identifier[silent] ) keyword[and] identifier[Log] . identifier[note] ( identifier[self] . identifier[name] + literal[string] ) keyword[return] identifier[THREAD_STOP]
def pop(self, till=None, priority=None): """ WAIT FOR NEXT ITEM ON THE QUEUE RETURN THREAD_STOP IF QUEUE IS CLOSED RETURN None IF till IS REACHED AND QUEUE IS STILL EMPTY :param till: A `Signal` to stop waiting and return None :return: A value, or a THREAD_STOP or None """ if till is not None and (not isinstance(till, Signal)): Log.error('expecting a signal') # depends on [control=['if'], data=[]] with self.lock: while True: if not priority: priority = self.highest_entry() # depends on [control=['if'], data=[]] if priority: value = self.queue[priority].queue.popleft() return value # depends on [control=['if'], data=[]] if self.closed: break # depends on [control=['if'], data=[]] if not self.lock.wait(till=till | self.closed): if self.closed: break # depends on [control=['if'], data=[]] return None # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['with'], data=[]] (DEBUG or not self.silent) and Log.note(self.name + ' queue stopped') return THREAD_STOP
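A call-pattern sketch for pop; `q` is assumed to be an instance of the enclosing priority-queue class, and Till (a timeout Signal) is assumed to come from the same threading library. Both names are assumptions.

item = q.pop(till=Till(seconds=5))
if item is THREAD_STOP:
    pass            # the queue was closed: stop the consumer loop
elif item is None:
    pass            # the 5 s timeout fired while the queue stayed empty
else:
    handle(item)    # hypothetical consumer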
def get_isd_filenames(self, year=None, with_host=False): """ Get filenames of raw ISD station data. """ return get_isd_filenames(self.usaf_id, year, with_host=with_host)
def function[get_isd_filenames, parameter[self, year, with_host]]: constant[ Get filenames of raw ISD station data. ] return[call[name[get_isd_filenames], parameter[name[self].usaf_id, name[year]]]]
keyword[def] identifier[get_isd_filenames] ( identifier[self] , identifier[year] = keyword[None] , identifier[with_host] = keyword[False] ): literal[string] keyword[return] identifier[get_isd_filenames] ( identifier[self] . identifier[usaf_id] , identifier[year] , identifier[with_host] = identifier[with_host] )
def get_isd_filenames(self, year=None, with_host=False): """ Get filenames of raw ISD station data. """ return get_isd_filenames(self.usaf_id, year, with_host=with_host)
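Illustrative call, assuming `station` is an instance of the enclosing ISD station class built around a USAF identifier; the exact filename pattern comes from the module-level get_isd_filenames helper, which is not shown here.

names = station.get_isd_filenames(year=2019, with_host=True)
for name in names:
    print(name)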
def start_slave_nodes(slaves, cl_args):
  '''
  Start slave nodes
  '''
  pids = []
  for slave in slaves:
    Log.info("Starting slave on %s" % slave)
    cmd = "%s agent -config %s >> /tmp/nomad_client.log 2>&1 &" \
          % (get_nomad_path(cl_args), get_nomad_slave_config_file(cl_args))
    if not is_self(slave):
      cmd = ssh_remote_execute(cmd, slave, cl_args)
    Log.debug(cmd)
    pid = subprocess.Popen(cmd,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    pids.append({"pid": pid, "dest": slave})

  errors = []
  for entry in pids:
    pid = entry["pid"]
    return_code = pid.wait()
    output = pid.communicate()
    Log.debug("return code: %s output: %s" % (return_code, output))
    if return_code != 0:
      errors.append("Failed to start slave on %s with error:\n%s"
                    % (entry["dest"], output[1]))

  if errors:
    for error in errors:
      Log.error(error)
    sys.exit(-1)

  Log.info("Done starting slaves")
def function[start_slave_nodes, parameter[slaves, cl_args]]:
    constant[
  Start slave nodes
  ]
    variable[pids] assign[=] list[[]]
    for taget[name[slave]] in starred[name[slaves]] begin[:]
        call[name[Log].info, parameter[binary_operation[constant[Starting slave on %s] <ast.Mod object at 0x7da2590d6920> name[slave]]]]
        variable[cmd] assign[=] binary_operation[constant[%s agent -config %s >> /tmp/nomad_client.log 2>&1 &] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c76c730>, <ast.Call object at 0x7da20c76ed40>]]]
        if <ast.UnaryOp object at 0x7da20c76df00> begin[:]
            variable[cmd] assign[=] call[name[ssh_remote_execute], parameter[name[cmd], name[slave], name[cl_args]]]
        call[name[Log].debug, parameter[name[cmd]]]
        variable[pid] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]]
        call[name[pids].append, parameter[dictionary[[<ast.Constant object at 0x7da20c76dcf0>, <ast.Constant object at 0x7da20c76e830>], [<ast.Name object at 0x7da20c76c5b0>, <ast.Name object at 0x7da20c76d2a0>]]]]
    variable[errors] assign[=] list[[]]
    for taget[name[entry]] in starred[name[pids]] begin[:]
        variable[pid] assign[=] call[name[entry]][constant[pid]]
        variable[return_code] assign[=] call[name[pid].wait, parameter[]]
        variable[output] assign[=] call[name[pid].communicate, parameter[]]
        call[name[Log].debug, parameter[binary_operation[constant[return code: %s output: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c76caf0>, <ast.Name object at 0x7da20c76ec20>]]]]]
        if compare[name[return_code] not_equal[!=] constant[0]] begin[:]
            call[name[errors].append, parameter[binary_operation[constant[Failed to start slave on %s with error:
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c76e4a0>, <ast.Subscript object at 0x7da20c76c4f0>]]]]]
    if name[errors] begin[:]
        for taget[name[error]] in starred[name[errors]] begin[:]
            call[name[Log].error, parameter[name[error]]]
        call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20c76c250>]]
    call[name[Log].info, parameter[constant[Done starting slaves]]]
keyword[def] identifier[start_slave_nodes] ( identifier[slaves] , identifier[cl_args] ): literal[string] identifier[pids] =[] keyword[for] identifier[slave] keyword[in] identifier[slaves] : identifier[Log] . identifier[info] ( literal[string] % identifier[slave] ) identifier[cmd] = literal[string] %( identifier[get_nomad_path] ( identifier[cl_args] ), identifier[get_nomad_slave_config_file] ( identifier[cl_args] )) keyword[if] keyword[not] identifier[is_self] ( identifier[slave] ): identifier[cmd] = identifier[ssh_remote_execute] ( identifier[cmd] , identifier[slave] , identifier[cl_args] ) identifier[Log] . identifier[debug] ( identifier[cmd] ) identifier[pid] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] ) identifier[pids] . identifier[append] ({ literal[string] : identifier[pid] , literal[string] : identifier[slave] }) identifier[errors] =[] keyword[for] identifier[entry] keyword[in] identifier[pids] : identifier[pid] = identifier[entry] [ literal[string] ] identifier[return_code] = identifier[pid] . identifier[wait] () identifier[output] = identifier[pid] . identifier[communicate] () identifier[Log] . identifier[debug] ( literal[string] %( identifier[return_code] , identifier[output] )) keyword[if] identifier[return_code] != literal[int] : identifier[errors] . identifier[append] ( literal[string] %( identifier[entry] [ literal[string] ], identifier[output] [ literal[int] ])) keyword[if] identifier[errors] : keyword[for] identifier[error] keyword[in] identifier[errors] : identifier[Log] . identifier[error] ( identifier[error] ) identifier[sys] . identifier[exit] (- literal[int] ) identifier[Log] . identifier[info] ( literal[string] )
def start_slave_nodes(slaves, cl_args):
    """ Start slave nodes """
    pids = []
    for slave in slaves:
        Log.info('Starting slave on %s' % slave)
        cmd = '%s agent -config %s >> /tmp/nomad_client.log 2>&1 &' % (get_nomad_path(cl_args), get_nomad_slave_config_file(cl_args))
        if not is_self(slave):
            cmd = ssh_remote_execute(cmd, slave, cl_args) # depends on [control=['if'], data=[]]
        Log.debug(cmd)
        pid = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        pids.append({'pid': pid, 'dest': slave}) # depends on [control=['for'], data=['slave']]
    errors = []
    for entry in pids:
        pid = entry['pid']
        return_code = pid.wait()
        output = pid.communicate()
        Log.debug('return code: %s output: %s' % (return_code, output))
        if return_code != 0:
            errors.append('Failed to start slave on %s with error:\n%s' % (entry['dest'], output[1])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']]
    if errors:
        for error in errors:
            Log.error(error) # depends on [control=['for'], data=['error']]
        sys.exit(-1) # depends on [control=['if'], data=[]]
    Log.info('Done starting slaves')
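A hypothetical invocation of start_slave_nodes; `cl_args` is the parsed CLI-argument mapping used throughout this tool. Each host gets a detached `nomad agent` launched over ssh (or locally, when is_self() says the host is this machine), and any non-zero exit aborts the whole run.

start_slave_nodes(["10.0.0.2", "10.0.0.3"], cl_args)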
def calc_spectrogram(self, fmin=None, method='scipy-fourier', deg=False,
                     window='hann', detrend='linear',
                     nperseg=None, noverlap=None,
                     boundary='constant', padded=True, wave='morlet',
                     warn=True):
    """ Return the power spectral density for each channel

    The power spectral density is computed with the chosen method

    Parameters
    ----------
    fmin :      None / float
        The minimum frequency of interest
        If None, set to 5/T, where T is the whole time interval
        Used to constrain the number of points per window
    deg :       bool
        Flag indicating whether to return the phase in deg (vs rad)
    method :    str
        Flag indicating which method to use for computation:
            - 'scipy-fourier':  uses scipy.signal.spectrogram()
                (windowed fast fourier transform)
            - 'scipy-stft':     uses scipy.signal.stft()
                (short time fourier transform)
            - 'scipy-wavelet':  uses scipy.signal.cwt()
                (continuous wavelet transform)
        The following keyword args are fed to one of these scipy functions
        See the corresponding online scipy documentation for details on
        each function and its arguments
    window :    None / str / tuple
        If method='scipy-fourier'
        Flag indicating which type of window to use
    detrend :   None / str
        If method='scipy-fourier'
        Flag indicating whether and how to remove the trend of the signal
    nperseg :   None / int
        If method='scipy-fourier'
        Number of points to be used for each window
        If None, deduced from fmin
    noverlap :  None / int
        If method='scipy-fourier'
        Number of points on which successive windows should overlap
        If None, nperseg-1
    boundary :  str
        If method='scipy-stft'
    padded :    bool
        If method='scipy-stft'
    wave :      None / str
        If method='scipy-wavelet'

    Returns
    -------
    tf :    np.ndarray
        Time vector of the spectrogram (1D)
    f :     np.ndarray
        Frequency vector of the spectrogram (1D)
    lpsd :  list of np.ndarrays
        Power spectral density of each channel
    lang :  list of np.ndarrays
        Phase of each channel (in deg if deg=True, else in rad)
    """
    if self._isSpectral():
        msg = "spectrogram not implemented yet for spectral data class"
        raise Exception(msg)
    tf, f, lpsd, lang = _comp.spectrogram(self.data, self.t,
                                          fmin=fmin, deg=deg, method=method,
                                          window=window, detrend=detrend,
                                          nperseg=nperseg, noverlap=noverlap,
                                          boundary=boundary, padded=padded,
                                          wave=wave, warn=warn)
    return tf, f, lpsd, lang
def function[calc_spectrogram, parameter[self, fmin, method, deg, window, detrend, nperseg, noverlap, boundary, padded, wave, warn]]:
    constant[ Return the power spectral density for each channel

    The power spectral density is computed with the chosen method

    Parameters
    ----------
    fmin :      None / float
        The minimum frequency of interest
        If None, set to 5/T, where T is the whole time interval
        Used to constrain the number of points per window
    deg :       bool
        Flag indicating whether to return the phase in deg (vs rad)
    method :    str
        Flag indicating which method to use for computation:
            - 'scipy-fourier':  uses scipy.signal.spectrogram()
                (windowed fast fourier transform)
            - 'scipy-stft':     uses scipy.signal.stft()
                (short time fourier transform)
            - 'scipy-wavelet':  uses scipy.signal.cwt()
                (continuous wavelet transform)
        The following keyword args are fed to one of these scipy functions
        See the corresponding online scipy documentation for details on
        each function and its arguments
    window :    None / str / tuple
        If method='scipy-fourier'
        Flag indicating which type of window to use
    detrend :   None / str
        If method='scipy-fourier'
        Flag indicating whether and how to remove the trend of the signal
    nperseg :   None / int
        If method='scipy-fourier'
        Number of points to be used for each window
        If None, deduced from fmin
    noverlap :  None / int
        If method='scipy-fourier'
        Number of points on which successive windows should overlap
        If None, nperseg-1
    boundary :  str
        If method='scipy-stft'
    padded :    bool
        If method='scipy-stft'
    wave :      None / str
        If method='scipy-wavelet'

    Returns
    -------
    tf :    np.ndarray
        Time vector of the spectrogram (1D)
    f :     np.ndarray
        Frequency vector of the spectrogram (1D)
    lpsd :  list of np.ndarrays
        Power spectral density of each channel
    lang :  list of np.ndarrays
        Phase of each channel (in deg if deg=True, else in rad)
    ]
    if call[name[self]._isSpectral, parameter[]] begin[:]
        variable[msg] assign[=] constant[spectrogram not implemented yet for spectral data class]
        <ast.Raise object at 0x7da207f03070>
    <ast.Tuple object at 0x7da207f01930> assign[=] call[name[_comp].spectrogram, parameter[name[self].data, name[self].t]]
    return[tuple[[<ast.Name object at 0x7da207f01a80>, <ast.Name object at 0x7da207f027d0>, <ast.Name object at 0x7da207f00d30>, <ast.Name object at 0x7da207f02290>]]]
keyword[def] identifier[calc_spectrogram] ( identifier[self] , identifier[fmin] = keyword[None] , identifier[method] = literal[string] , identifier[deg] = keyword[False] , identifier[window] = literal[string] , identifier[detrend] = literal[string] , identifier[nperseg] = keyword[None] , identifier[noverlap] = keyword[None] , identifier[boundary] = literal[string] , identifier[padded] = keyword[True] , identifier[wave] = literal[string] , identifier[warn] = keyword[True] ): literal[string] keyword[if] identifier[self] . identifier[_isSpectral] (): identifier[msg] = literal[string] keyword[raise] identifier[Exception] ( identifier[msg] ) identifier[tf] , identifier[f] , identifier[lpsd] , identifier[lang] = identifier[_comp] . identifier[spectrogram] ( identifier[self] . identifier[data] , identifier[self] . identifier[t] , identifier[fmin] = identifier[fmin] , identifier[deg] = identifier[deg] , identifier[method] = identifier[method] , identifier[window] = identifier[window] , identifier[detrend] = identifier[detrend] , identifier[nperseg] = identifier[nperseg] , identifier[noverlap] = identifier[noverlap] , identifier[boundary] = identifier[boundary] , identifier[padded] = identifier[padded] , identifier[wave] = identifier[wave] , identifier[warn] = identifier[warn] ) keyword[return] identifier[tf] , identifier[f] , identifier[lpsd] , identifier[lang]
def calc_spectrogram(self, fmin=None, method='scipy-fourier', deg=False, window='hann', detrend='linear', nperseg=None, noverlap=None, boundary='constant', padded=True, wave='morlet', warn=True):
    """ Return the power spectral density for each channel

    The power spectral density is computed with the chosen method

    Parameters
    ----------
    fmin :      None / float
        The minimum frequency of interest
        If None, set to 5/T, where T is the whole time interval
        Used to constrain the number of points per window
    deg :       bool
        Flag indicating whether to return the phase in deg (vs rad)
    method :    str
        Flag indicating which method to use for computation:
            - 'scipy-fourier':  uses scipy.signal.spectrogram()
                (windowed fast fourier transform)
            - 'scipy-stft':     uses scipy.signal.stft()
                (short time fourier transform)
            - 'scipy-wavelet':  uses scipy.signal.cwt()
                (continuous wavelet transform)
        The following keyword args are fed to one of these scipy functions
        See the corresponding online scipy documentation for details on
        each function and its arguments
    window :    None / str / tuple
        If method='scipy-fourier'
        Flag indicating which type of window to use
    detrend :   None / str
        If method='scipy-fourier'
        Flag indicating whether and how to remove the trend of the signal
    nperseg :   None / int
        If method='scipy-fourier'
        Number of points to be used for each window
        If None, deduced from fmin
    noverlap :  None / int
        If method='scipy-fourier'
        Number of points on which successive windows should overlap
        If None, nperseg-1
    boundary :  str
        If method='scipy-stft'
    padded :    bool
        If method='scipy-stft'
    wave :      None / str
        If method='scipy-wavelet'

    Returns
    -------
    tf :    np.ndarray
        Time vector of the spectrogram (1D)
    f :     np.ndarray
        Frequency vector of the spectrogram (1D)
    lpsd :  list of np.ndarrays
        Power spectral density of each channel
    lang :  list of np.ndarrays
        Phase of each channel (in deg if deg=True, else in rad)
    """
    if self._isSpectral():
        msg = 'spectrogram not implemented yet for spectral data class'
        raise Exception(msg) # depends on [control=['if'], data=[]]
    (tf, f, lpsd, lang) = _comp.spectrogram(self.data, self.t, fmin=fmin, deg=deg, method=method, window=window, detrend=detrend, nperseg=nperseg, noverlap=noverlap, boundary=boundary, padded=padded, wave=wave, warn=warn)
    return (tf, f, lpsd, lang)
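Usage sketch for calc_spectrogram, assuming `sig` is an instance of the enclosing data class with 2D `data` (time x channels) and a 1D time base `t`.

tf_, f, lpsd, lang = sig.calc_spectrogram(fmin=1.e3,
                                          method='scipy-fourier',
                                          window='hann',
                                          deg=True)
# tf_: spectrogram time base, f: frequencies,
# lpsd / lang: per-channel power and phase arrays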
def source_file(self): """Return an open file for reading the source of the code unit.""" if os.path.exists(self.filename): # A regular text file: open it. return open_source(self.filename) # Maybe it's in a zip file? source = self.file_locator.get_zip_data(self.filename) if source is not None: return StringIO(source) # Couldn't find source. raise CoverageException( "No source for code '%s'." % self.filename )
def function[source_file, parameter[self]]: constant[Return an open file for reading the source of the code unit.] if call[name[os].path.exists, parameter[name[self].filename]] begin[:] return[call[name[open_source], parameter[name[self].filename]]] variable[source] assign[=] call[name[self].file_locator.get_zip_data, parameter[name[self].filename]] if compare[name[source] is_not constant[None]] begin[:] return[call[name[StringIO], parameter[name[source]]]] <ast.Raise object at 0x7da18fe91450>
keyword[def] identifier[source_file] ( identifier[self] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[filename] ): keyword[return] identifier[open_source] ( identifier[self] . identifier[filename] ) identifier[source] = identifier[self] . identifier[file_locator] . identifier[get_zip_data] ( identifier[self] . identifier[filename] ) keyword[if] identifier[source] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[StringIO] ( identifier[source] ) keyword[raise] identifier[CoverageException] ( literal[string] % identifier[self] . identifier[filename] )
def source_file(self): """Return an open file for reading the source of the code unit.""" if os.path.exists(self.filename): # A regular text file: open it. return open_source(self.filename) # depends on [control=['if'], data=[]] # Maybe it's in a zip file? source = self.file_locator.get_zip_data(self.filename) if source is not None: return StringIO(source) # depends on [control=['if'], data=['source']] # Couldn't find source. raise CoverageException("No source for code '%s'." % self.filename)
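Illustrative use of source_file, assuming `unit` is an instance of the enclosing code-unit class with a file_locator attached.

try:
    f = unit.source_file()
    try:
        source = f.read()
    finally:
        f.close()
except CoverageException:
    source = None  # the source exists neither on disk nor inside a zip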
def tryAccessModifiers(self, block):
    """Check for private, protected, public, signals etc... and assume
    we are in a class definition.
    Try to find a previous private/protected/public... or class
    and return its indentation, or None if not found.
    """

    if CFG_ACCESS_MODIFIERS < 0:
        return None

    if not re.match(r'^\s*((public|protected|private)\s*(slots|Q_SLOTS)?|(signals|Q_SIGNALS)\s*):\s*$', block.text()):
        return None

    try:
        block, notUsedColumn = self.findBracketBackward(block, 0, '{')
    except ValueError:
        return None

    indentation = self._blockIndent(block)
    for i in range(CFG_ACCESS_MODIFIERS):
        indentation = self._increaseIndent(indentation)

    dbg("tryAccessModifiers: success in line %d" % block.blockNumber())
    return indentation
def function[tryAccessModifiers, parameter[self, block]]:
    constant[Check for private, protected, public, signals etc... and assume
    we are in a class definition.
    Try to find a previous private/protected/public... or class
    and return its indentation, or None if not found.
    ]
    if compare[name[CFG_ACCESS_MODIFIERS] less[<] constant[0]] begin[:]
        return[constant[None]]
    if <ast.UnaryOp object at 0x7da18f720e80> begin[:]
        return[constant[None]]
    <ast.Try object at 0x7da20c6a95d0>
    variable[indentation] assign[=] call[name[self]._blockIndent, parameter[name[block]]]
    for taget[name[i]] in starred[call[name[range], parameter[name[CFG_ACCESS_MODIFIERS]]]] begin[:]
        variable[indentation] assign[=] call[name[self]._increaseIndent, parameter[name[indentation]]]
    call[name[dbg], parameter[binary_operation[constant[tryAccessModifiers: success in line %d] <ast.Mod object at 0x7da2590d6920> call[name[block].blockNumber, parameter[]]]]]
    return[name[indentation]]
keyword[def] identifier[tryAccessModifiers] ( identifier[self] , identifier[block] ): literal[string] keyword[if] identifier[CFG_ACCESS_MODIFIERS] < literal[int] : keyword[return] keyword[None] keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[block] . identifier[text] ()): keyword[return] keyword[None] keyword[try] : identifier[block] , identifier[notUsedColumn] = identifier[self] . identifier[findBracketBackward] ( identifier[block] , literal[int] , literal[string] ) keyword[except] identifier[ValueError] : keyword[return] keyword[None] identifier[indentation] = identifier[self] . identifier[_blockIndent] ( identifier[block] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[CFG_ACCESS_MODIFIERS] ): identifier[indentation] = identifier[self] . identifier[_increaseIndent] ( identifier[indentation] ) identifier[dbg] ( literal[string] % identifier[block] . identifier[blockNumber] ()) keyword[return] identifier[indentation]
def tryAccessModifiers(self, block):
    """Check for private, protected, public, signals etc... and assume we are in a class definition. Try to find a previous private/protected/public... or class and return its indentation, or None if not found. """
    if CFG_ACCESS_MODIFIERS < 0:
        return None # depends on [control=['if'], data=[]]
    if not re.match('^\\s*((public|protected|private)\\s*(slots|Q_SLOTS)?|(signals|Q_SIGNALS)\\s*):\\s*$', block.text()):
        return None # depends on [control=['if'], data=[]]
    try:
        (block, notUsedColumn) = self.findBracketBackward(block, 0, '{') # depends on [control=['try'], data=[]]
    except ValueError:
        return None # depends on [control=['except'], data=[]]
    indentation = self._blockIndent(block)
    for i in range(CFG_ACCESS_MODIFIERS):
        indentation = self._increaseIndent(indentation) # depends on [control=['for'], data=[]]
    dbg('tryAccessModifiers: success in line %d' % block.blockNumber())
    return indentation
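The gating regex from tryAccessModifiers, exercised in isolation; CFG_ACCESS_MODIFIERS and the Qt text-block API belong to the surrounding indenter and are not reproduced here.

import re

pattern = r'^\s*((public|protected|private)\s*(slots|Q_SLOTS)?|(signals|Q_SIGNALS)\s*):\s*$'
assert re.match(pattern, '  public slots:')
assert re.match(pattern, 'signals:')
assert re.match(pattern, 'protected:')
assert not re.match(pattern, 'int x = 0;')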