Dataset columns (four string fields per record, shown with their min and max value lengths):

    code              75 - 104k    the original Python function
    code_sememe       47 - 309k    an AST-style "sememe" rendering of the function
    token_type        215 - 214k   the source re-emitted with keyword[...]/identifier[...]/literal[...] token tags
    code_dependency   75 - 155k    the source re-emitted with # depends on [control=..., data=...] annotations

Each record below lists the four fields in that order.
def _loop_use_cache(self, helper_function, num, fragment):
    """
    Synthesize all fragments using the cache
    """
    self.log([u"Examining fragment %d (cache)...", num])
    fragment_info = (fragment.language, fragment.filtered_text)
    if self.cache.is_cached(fragment_info):
        self.log(u"Fragment cached: retrieving audio data from cache")
        # read data from file, whose path is in the cache
        file_handler, file_path = self.cache.get(fragment_info)
        self.log([u"Reading cached fragment at '%s'...", file_path])
        succeeded, data = self._read_audio_data(file_path)
        if not succeeded:
            self.log_crit(u"An unexpected error occurred while reading cached audio file")
            return (False, None)
        self.log([u"Reading cached fragment at '%s'... done", file_path])
    else:
        self.log(u"Fragment not cached: synthesizing and caching")
        # creating destination file
        file_info = gf.tmp_file(suffix=u".cache.wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
        file_handler, file_path = file_info
        self.log([u"Synthesizing fragment to '%s'...", file_path])
        # synthesize and get the duration of the output file
        voice_code = self._language_to_voice_code(fragment.language)
        self.log(u"Calling helper function")
        succeeded, data = helper_function(
            text=fragment.filtered_text,
            voice_code=voice_code,
            output_file_path=file_path,
            return_audio_data=True
        )
        # check output
        if not succeeded:
            self.log_crit(u"An unexpected error occurred in helper_function")
            return (False, None)
        self.log([u"Synthesizing fragment to '%s'... done", file_path])
        duration, sr_nu, enc_nu, samples = data
        if duration > 0:
            self.log(u"Fragment has > 0 duration, adding it to cache")
            self.cache.add(fragment_info, file_info)
            self.log(u"Added fragment to cache")
        else:
            self.log(u"Fragment has zero duration, not adding it to cache")
        self.log([u"Closing file handler for cached output file path '%s'", file_path])
        gf.close_file_handler(file_handler)
    self.log([u"Examining fragment %d (cache)... done", num])
    return (True, data)
def function[_loop_use_cache, parameter[self, helper_function, num, fragment]]: constant[ Synthesize all fragments using the cache ] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b1882680>, <ast.Name object at 0x7da1b1881120>]]]] variable[fragment_info] assign[=] tuple[[<ast.Attribute object at 0x7da1b1880e80>, <ast.Attribute object at 0x7da1b1880d00>]] if call[name[self].cache.is_cached, parameter[name[fragment_info]]] begin[:] call[name[self].log, parameter[constant[Fragment cached: retrieving audio data from cache]]] <ast.Tuple object at 0x7da1b1882ec0> assign[=] call[name[self].cache.get, parameter[name[fragment_info]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b1883580>, <ast.Name object at 0x7da1b1880b80>]]]] <ast.Tuple object at 0x7da1b1883700> assign[=] call[name[self]._read_audio_data, parameter[name[file_path]]] if <ast.UnaryOp object at 0x7da1b1883910> begin[:] call[name[self].log_crit, parameter[constant[An unexpected error occurred while reading cached audio file]]] return[tuple[[<ast.Constant object at 0x7da1b1881a50>, <ast.Constant object at 0x7da1b18821a0>]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b1881cf0>, <ast.Name object at 0x7da1b18838b0>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b18a0130>, <ast.Name object at 0x7da1b18a33a0>]]]] return[tuple[[<ast.Constant object at 0x7da1b18a3070>, <ast.Name object at 0x7da1b18a3ee0>]]]
keyword[def] identifier[_loop_use_cache] ( identifier[self] , identifier[helper_function] , identifier[num] , identifier[fragment] ): literal[string] identifier[self] . identifier[log] ([ literal[string] , identifier[num] ]) identifier[fragment_info] =( identifier[fragment] . identifier[language] , identifier[fragment] . identifier[filtered_text] ) keyword[if] identifier[self] . identifier[cache] . identifier[is_cached] ( identifier[fragment_info] ): identifier[self] . identifier[log] ( literal[string] ) identifier[file_handler] , identifier[file_path] = identifier[self] . identifier[cache] . identifier[get] ( identifier[fragment_info] ) identifier[self] . identifier[log] ([ literal[string] , identifier[file_path] ]) identifier[succeeded] , identifier[data] = identifier[self] . identifier[_read_audio_data] ( identifier[file_path] ) keyword[if] keyword[not] identifier[succeeded] : identifier[self] . identifier[log_crit] ( literal[string] ) keyword[return] ( keyword[False] , keyword[None] ) identifier[self] . identifier[log] ([ literal[string] , identifier[file_path] ]) keyword[else] : identifier[self] . identifier[log] ( literal[string] ) identifier[file_info] = identifier[gf] . identifier[tmp_file] ( identifier[suffix] = literal[string] , identifier[root] = identifier[self] . identifier[rconf] [ identifier[RuntimeConfiguration] . identifier[TMP_PATH] ]) identifier[file_handler] , identifier[file_path] = identifier[file_info] identifier[self] . identifier[log] ([ literal[string] , identifier[file_path] ]) identifier[voice_code] = identifier[self] . identifier[_language_to_voice_code] ( identifier[fragment] . identifier[language] ) identifier[self] . identifier[log] ( literal[string] ) identifier[succeeded] , identifier[data] = identifier[helper_function] ( identifier[text] = identifier[fragment] . identifier[filtered_text] , identifier[voice_code] = identifier[voice_code] , identifier[output_file_path] = identifier[file_path] , identifier[return_audio_data] = keyword[True] ) keyword[if] keyword[not] identifier[succeeded] : identifier[self] . identifier[log_crit] ( literal[string] ) keyword[return] ( keyword[False] , keyword[None] ) identifier[self] . identifier[log] ([ literal[string] , identifier[file_path] ]) identifier[duration] , identifier[sr_nu] , identifier[enc_nu] , identifier[samples] = identifier[data] keyword[if] identifier[duration] > literal[int] : identifier[self] . identifier[log] ( literal[string] ) identifier[self] . identifier[cache] . identifier[add] ( identifier[fragment_info] , identifier[file_info] ) identifier[self] . identifier[log] ( literal[string] ) keyword[else] : identifier[self] . identifier[log] ( literal[string] ) identifier[self] . identifier[log] ([ literal[string] , identifier[file_path] ]) identifier[gf] . identifier[close_file_handler] ( identifier[file_handler] ) identifier[self] . identifier[log] ([ literal[string] , identifier[num] ]) keyword[return] ( keyword[True] , identifier[data] )
def _loop_use_cache(self, helper_function, num, fragment): """ Synthesize all fragments using the cache """ self.log([u'Examining fragment %d (cache)...', num]) fragment_info = (fragment.language, fragment.filtered_text) if self.cache.is_cached(fragment_info): self.log(u'Fragment cached: retrieving audio data from cache') # read data from file, whose path is in the cache (file_handler, file_path) = self.cache.get(fragment_info) self.log([u"Reading cached fragment at '%s'...", file_path]) (succeeded, data) = self._read_audio_data(file_path) if not succeeded: self.log_crit(u'An unexpected error occurred while reading cached audio file') return (False, None) # depends on [control=['if'], data=[]] self.log([u"Reading cached fragment at '%s'... done", file_path]) # depends on [control=['if'], data=[]] else: self.log(u'Fragment not cached: synthesizing and caching') # creating destination file file_info = gf.tmp_file(suffix=u'.cache.wav', root=self.rconf[RuntimeConfiguration.TMP_PATH]) (file_handler, file_path) = file_info self.log([u"Synthesizing fragment to '%s'...", file_path]) # synthesize and get the duration of the output file voice_code = self._language_to_voice_code(fragment.language) self.log(u'Calling helper function') (succeeded, data) = helper_function(text=fragment.filtered_text, voice_code=voice_code, output_file_path=file_path, return_audio_data=True) # check output if not succeeded: self.log_crit(u'An unexpected error occurred in helper_function') return (False, None) # depends on [control=['if'], data=[]] self.log([u"Synthesizing fragment to '%s'... done", file_path]) (duration, sr_nu, enc_nu, samples) = data if duration > 0: self.log(u'Fragment has > 0 duration, adding it to cache') self.cache.add(fragment_info, file_info) self.log(u'Added fragment to cache') # depends on [control=['if'], data=[]] else: self.log(u'Fragment has zero duration, not adding it to cache') self.log([u"Closing file handler for cached output file path '%s'", file_path]) gf.close_file_handler(file_handler) self.log([u'Examining fragment %d (cache)... done', num]) return (True, data)
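The function above follows a synthesize-or-reuse pattern: probe a cache keyed on (language, filtered_text), only run the expensive TTS call on a miss, and skip caching zero-duration output. A minimal runnable sketch of that pattern, with a plain dict standing in for aeneas' Cache class and a hypothetical synthesize() standing in for the helper function:

# Minimal sketch of the cache-or-synthesize pattern; the dict cache and
# synthesize() below are stand-ins, not the project's real components.
_cache = {}

def synthesize(text, language):
    # stand-in for the expensive TTS call; returns fake "audio data"
    return ("audio-bytes-for:%s" % text, len(text) * 0.1)

def get_fragment_audio(language, filtered_text):
    fragment_info = (language, filtered_text)
    if fragment_info in _cache:                    # cache hit
        return _cache[fragment_info]
    data = synthesize(filtered_text, language)     # cache miss
    duration = data[1]
    if duration > 0:                               # only cache non-empty audio
        _cache[fragment_info] = data
    return data

print(get_fragment_audio("eng", "hello world"))    # synthesized
print(get_fragment_audio("eng", "hello world"))    # served from cache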
def get_settings(self, index=None, name=None, params=None):
    """
    Retrieve settings for one or more (or all) indices.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg name: The name of the settings that should be included
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default ['open', 'closed'],
        valid choices are: 'open', 'closed', 'none', 'all'
    :arg flat_settings: Return settings in flat format (default: false)
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg include_defaults: Whether to return all default setting for each of
        the indices., default False
    :arg local: Return local information, do not retrieve the state from
        master node (default: false)
    """
    return self.transport.perform_request(
        "GET", _make_path(index, "_settings", name), params=params
    )
def function[get_settings, parameter[self, index, name, params]]: constant[ Retrieve settings for one or more (or all) indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg name: The name of the settings that should be included :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default ['open', 'closed'], valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg include_defaults: Whether to return all default setting for each of the indices., default False :arg local: Return local information, do not retrieve the state from master node (default: false) ] return[call[name[self].transport.perform_request, parameter[constant[GET], call[name[_make_path], parameter[name[index], constant[_settings], name[name]]]]]]
keyword[def] identifier[get_settings] ( identifier[self] , identifier[index] = keyword[None] , identifier[name] = keyword[None] , identifier[params] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] ( literal[string] , identifier[_make_path] ( identifier[index] , literal[string] , identifier[name] ), identifier[params] = identifier[params] )
def get_settings(self, index=None, name=None, params=None): """ Retrieve settings for one or more (or all) indices. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html>`_ :arg index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :arg name: The name of the settings that should be included :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default ['open', 'closed'], valid choices are: 'open', 'closed', 'none', 'all' :arg flat_settings: Return settings in flat format (default: false) :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg include_defaults: Whether to return all default setting for each of the indices., default False :arg local: Return local information, do not retrieve the state from master node (default: false) """ return self.transport.perform_request('GET', _make_path(index, '_settings', name), params=params)
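_make_path is an internal elasticsearch-py helper; assuming its usual behavior of dropping empty segments and joining the rest with slashes, the request above hits URLs like the ones below. A small illustrative re-implementation, not the library's actual code:

def make_path(*parts):
    # illustrative re-implementation: drop None/empty segments, join with "/"
    return "/" + "/".join(str(p) for p in parts if p not in (None, ""))

print(make_path(None, "_settings", None))         # /_settings  (all indices)
print(make_path("logs-2019", "_settings", None))  # /logs-2019/_settings
print(make_path("logs-2019", "_settings", "index.number_of_shards"))
# /logs-2019/_settings/index.number_of_shards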
def crypto_sign(message, sk):
    """
    Signs the message ``message`` using the secret key ``sk`` and returns
    the signed message.

    :param message: bytes
    :param sk: bytes
    :rtype: bytes
    """
    signed = ffi.new("unsigned char[]", len(message) + crypto_sign_BYTES)
    signed_len = ffi.new("unsigned long long *")

    rc = lib.crypto_sign(signed, signed_len, message, len(message), sk)
    ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError)

    return ffi.buffer(signed, signed_len[0])[:]
def function[crypto_sign, parameter[message, sk]]: constant[ Signs the message ``message`` using the secret key ``sk`` and returns the signed message. :param message: bytes :param sk: bytes :rtype: bytes ] variable[signed] assign[=] call[name[ffi].new, parameter[constant[unsigned char[]], binary_operation[call[name[len], parameter[name[message]]] + name[crypto_sign_BYTES]]]] variable[signed_len] assign[=] call[name[ffi].new, parameter[constant[unsigned long long *]]] variable[rc] assign[=] call[name[lib].crypto_sign, parameter[name[signed], name[signed_len], name[message], call[name[len], parameter[name[message]]], name[sk]]] call[name[ensure], parameter[compare[name[rc] equal[==] constant[0]], constant[Unexpected library error]]] return[call[call[name[ffi].buffer, parameter[name[signed], call[name[signed_len]][constant[0]]]]][<ast.Slice object at 0x7da18dc9a560>]]
keyword[def] identifier[crypto_sign] ( identifier[message] , identifier[sk] ): literal[string] identifier[signed] = identifier[ffi] . identifier[new] ( literal[string] , identifier[len] ( identifier[message] )+ identifier[crypto_sign_BYTES] ) identifier[signed_len] = identifier[ffi] . identifier[new] ( literal[string] ) identifier[rc] = identifier[lib] . identifier[crypto_sign] ( identifier[signed] , identifier[signed_len] , identifier[message] , identifier[len] ( identifier[message] ), identifier[sk] ) identifier[ensure] ( identifier[rc] == literal[int] , literal[string] , identifier[raising] = identifier[exc] . identifier[RuntimeError] ) keyword[return] identifier[ffi] . identifier[buffer] ( identifier[signed] , identifier[signed_len] [ literal[int] ])[:]
def crypto_sign(message, sk): """ Signs the message ``message`` using the secret key ``sk`` and returns the signed message. :param message: bytes :param sk: bytes :rtype: bytes """ signed = ffi.new('unsigned char[]', len(message) + crypto_sign_BYTES) signed_len = ffi.new('unsigned long long *') rc = lib.crypto_sign(signed, signed_len, message, len(message), sk) ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError) return ffi.buffer(signed, signed_len[0])[:]
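The function above wraps libsodium's combined signing mode, in which the returned buffer is a fixed-size signature followed by the original message (crypto_sign_BYTES extra bytes; 64 for Ed25519). A pure-Python sketch of that buffer layout, with hmac as a stand-in signer so it runs without a native library:

import hmac, hashlib

SIGN_BYTES = 32  # stand-in "signature" length (real Ed25519 uses 64)

def toy_sign(message, sk):
    # combined-mode layout: fixed-size signature prefix + original message
    sig = hmac.new(sk, message, hashlib.sha256).digest()
    return sig + message

def toy_open(signed, sk):
    sig, message = signed[:SIGN_BYTES], signed[SIGN_BYTES:]
    if not hmac.compare_digest(sig, hmac.new(sk, message, hashlib.sha256).digest()):
        raise ValueError("bad signature")
    return message

signed = toy_sign(b"hello", b"secret-key")
assert len(signed) == len(b"hello") + SIGN_BYTES
assert toy_open(signed, b"secret-key") == b"hello"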
def __range(a, bins):
    '''Compute the histogram range of the values in the array a according
    to scipy.stats.histogram.'''
    a = numpy.asarray(a)
    a_max = a.max()
    a_min = a.min()
    s = 0.5 * (a_max - a_min) / float(bins - 1)
    return (a_min - s, a_max + s)
def function[__range, parameter[a, bins]]: constant[Compute the histogram range of the values in the array a according to scipy.stats.histogram.] variable[a] assign[=] call[name[numpy].asarray, parameter[name[a]]] variable[a_max] assign[=] call[name[a].max, parameter[]] variable[a_min] assign[=] call[name[a].min, parameter[]] variable[s] assign[=] binary_operation[binary_operation[constant[0.5] * binary_operation[name[a_max] - name[a_min]]] / call[name[float], parameter[binary_operation[name[bins] - constant[1]]]]] return[tuple[[<ast.BinOp object at 0x7da1b12daa40>, <ast.BinOp object at 0x7da1b12da380>]]]
keyword[def] identifier[__range] ( identifier[a] , identifier[bins] ): literal[string] identifier[a] = identifier[numpy] . identifier[asarray] ( identifier[a] ) identifier[a_max] = identifier[a] . identifier[max] () identifier[a_min] = identifier[a] . identifier[min] () identifier[s] = literal[int] *( identifier[a_max] - identifier[a_min] )/ identifier[float] ( identifier[bins] - literal[int] ) keyword[return] ( identifier[a_min] - identifier[s] , identifier[a_max] + identifier[s] )
def __range(a, bins): """Compute the histogram range of the values in the array a according to scipy.stats.histogram.""" a = numpy.asarray(a) a_max = a.max() a_min = a.min() s = 0.5 * (a_max - a_min) / float(bins - 1) return (a_min - s, a_max + s)
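A quick worked example of the half-bin padding above: for values spanning [1, 10] with bins=10, s = 0.5 * (10 - 1) / 9 = 0.5, so the returned range is (0.5, 10.5), placing the min and max at bin centers rather than on the edges:

import numpy

a = numpy.array([1.0, 4.0, 7.0, 10.0])
bins = 10
s = 0.5 * (a.max() - a.min()) / float(bins - 1)  # 0.5 * 9 / 9 = 0.5
print((a.min() - s, a.max() + s))                # (0.5, 10.5)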
def make(self):
    """ reads through the definitions and generates a Python class for each
    definition """
    log.setLevel(self.log_level)
    created = []
    prop_list = [item for item in self.defs if item.type == 'uri']
    log.debug(" creating properties ... ")
    for prop in prop_list:
        make_property(self.defs[prop], prop, [])
    log.info(" property count: %s", len(prop_list))
def function[make, parameter[self]]: constant[ reads through the definitions and generates an python class for each definition ] call[name[log].setLevel, parameter[name[self].log_level]] variable[created] assign[=] list[[]] variable[prop_list] assign[=] <ast.ListComp object at 0x7da2054a7520> call[name[log].debug, parameter[constant[ creating properties ... ]]] for taget[name[prop]] in starred[name[prop_list]] begin[:] call[name[make_property], parameter[call[name[self].defs][name[prop]], name[prop], list[[]]]] call[name[log].info, parameter[constant[ property count: %s], call[name[len], parameter[name[prop_list]]]]]
keyword[def] identifier[make] ( identifier[self] ): literal[string] identifier[log] . identifier[setLevel] ( identifier[self] . identifier[log_level] ) identifier[created] =[] identifier[prop_list] =[ identifier[item] keyword[for] identifier[item] keyword[in] identifier[self] . identifier[defs] keyword[if] identifier[item] . identifier[type] == literal[string] ] identifier[log] . identifier[debug] ( literal[string] ) keyword[for] identifier[prop] keyword[in] identifier[prop_list] : identifier[make_property] ( identifier[self] . identifier[defs] [ identifier[prop] ], identifier[prop] ,[]) identifier[log] . identifier[info] ( literal[string] , identifier[len] ( identifier[prop_list] ))
def make(self): """ reads through the definitions and generates an python class for each definition """ log.setLevel(self.log_level) created = [] prop_list = [item for item in self.defs if item.type == 'uri'] log.debug(' creating properties ... ') for prop in prop_list: make_property(self.defs[prop], prop, []) # depends on [control=['for'], data=['prop']] log.info(' property count: %s', len(prop_list))
def wcparams(mol, type_):
    """ Calculate Wildman-Crippen logP

    Wildman S., Crippen G.,
    Prediction of Physicochemical Parameters by Atomic Contribution,
    J. Chem. Inf. Model. 39 (1999) 868-873
    """
    try:
        assign_wctype(mol)
    except:
        return "N/A"
    scores = []
    for i, atom in mol.atoms_iter():
        scores.append(float(DATA[type_][atom.wctype]))
        if atom.H_count:
            if atom.symbol == "C":
                scores.append(DATA[type_]["H1"] * atom.H_count)
            elif atom.symbol == "N":
                scores.append(DATA[type_]["H3"] * atom.H_count)
            elif atom.wctype == "O2a":
                scores.append(DATA[type_]["H4"] * atom.H_count)
            else:
                scores.append(DATA[type_]["H2"] * atom.H_count)
    return round(sum(scores), 2)
def function[wcparams, parameter[mol, type_]]: constant[ Calculate Wildman-Crippen logP Wildman S., Crippen G., Prediction of Physicochemical Parameters by Atomic Contribution, J. Chem. Inf. Model. 39 (1999) 868-873 ] <ast.Try object at 0x7da1b2446230> variable[scores] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b24471f0>, <ast.Name object at 0x7da1b2445a20>]]] in starred[call[name[mol].atoms_iter, parameter[]]] begin[:] call[name[scores].append, parameter[call[name[float], parameter[call[call[name[DATA]][name[type_]]][name[atom].wctype]]]]] if name[atom].H_count begin[:] if compare[name[atom].symbol equal[==] constant[C]] begin[:] call[name[scores].append, parameter[binary_operation[call[call[name[DATA]][name[type_]]][constant[H1]] * name[atom].H_count]]] return[call[name[round], parameter[call[name[sum], parameter[name[scores]]], constant[2]]]]
keyword[def] identifier[wcparams] ( identifier[mol] , identifier[type_] ): literal[string] keyword[try] : identifier[assign_wctype] ( identifier[mol] ) keyword[except] : keyword[return] literal[string] identifier[scores] =[] keyword[for] identifier[i] , identifier[atom] keyword[in] identifier[mol] . identifier[atoms_iter] (): identifier[scores] . identifier[append] ( identifier[float] ( identifier[DATA] [ identifier[type_] ][ identifier[atom] . identifier[wctype] ])) keyword[if] identifier[atom] . identifier[H_count] : keyword[if] identifier[atom] . identifier[symbol] == literal[string] : identifier[scores] . identifier[append] ( identifier[DATA] [ identifier[type_] ][ literal[string] ]* identifier[atom] . identifier[H_count] ) keyword[elif] identifier[atom] . identifier[symbol] == literal[string] : identifier[scores] . identifier[append] ( identifier[DATA] [ identifier[type_] ][ literal[string] ]* identifier[atom] . identifier[H_count] ) keyword[elif] identifier[atom] . identifier[wctype] == literal[string] : identifier[scores] . identifier[append] ( identifier[DATA] [ identifier[type_] ][ literal[string] ]* identifier[atom] . identifier[H_count] ) keyword[else] : identifier[scores] . identifier[append] ( identifier[DATA] [ identifier[type_] ][ literal[string] ]* identifier[atom] . identifier[H_count] ) keyword[return] identifier[round] ( identifier[sum] ( identifier[scores] ), literal[int] )
def wcparams(mol, type_): """ Calculate Wildman-Crippen logP Wildman S., Crippen G., Prediction of Physicochemical Parameters by Atomic Contribution, J. Chem. Inf. Model. 39 (1999) 868-873 """ try: assign_wctype(mol) # depends on [control=['try'], data=[]] except: return 'N/A' # depends on [control=['except'], data=[]] scores = [] for (i, atom) in mol.atoms_iter(): scores.append(float(DATA[type_][atom.wctype])) if atom.H_count: if atom.symbol == 'C': scores.append(DATA[type_]['H1'] * atom.H_count) # depends on [control=['if'], data=[]] elif atom.symbol == 'N': scores.append(DATA[type_]['H3'] * atom.H_count) # depends on [control=['if'], data=[]] elif atom.wctype == 'O2a': scores.append(DATA[type_]['H4'] * atom.H_count) # depends on [control=['if'], data=[]] else: scores.append(DATA[type_]['H2'] * atom.H_count) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return round(sum(scores), 2)
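The DATA table consulted above maps a Wildman-Crippen atom type to its per-atom contribution; the real parameters ship with the package. A hypothetical excerpt, with placeholder values rather than the published parameters, just to show the lookup shape and the hydrogen-count correction:

# Hypothetical excerpt showing the shape of the lookup table used above;
# the contribution values are illustrative placeholders, not the published
# Wildman-Crippen parameters.
DATA = {
    "logP": {"C1": 0.14, "H1": 0.12, "H2": -0.24, "H3": 0.23, "H4": 0.30, "O2a": -0.10},
}

def contribution(type_, wctype, h_count, h_key):
    return float(DATA[type_][wctype]) + DATA[type_][h_key] * h_count

# a carbon of type C1 carrying three hydrogens (the H1 rule applies to carbon)
print(round(contribution("logP", "C1", 3, "H1"), 2))  # 0.5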
def add_item(self, path, name, icon=None, url=None, order=None,
             permission=None, active_regex=None):
    """
    Add new menu item to menu

    :param path: Path of menu
    :param name: Display name
    :param icon: CSS icon
    :param url: link to page
    :param order: Sort order
    :param permission:
    :return:
    """
    if self.root_item is None:
        self.root_item = MenuItem('ROOT', 'ROOT')

    root_item = self.root_item
    current_path = ''
    for node in path.split('/')[:-1]:
        if not node:
            continue
        current_path = '/' + '{}/{}'.format(current_path, node).strip('/')
        new_root = root_item.child_by_code(node)
        if not new_root:
            # Create menu item if not exists
            new_root = MenuItem(current_path, name=str(node).capitalize())
            root_item.add_child(new_root)
        root_item = new_root

    new_item = MenuItem(path, name, icon, url, order, permission, active_regex)
    current_item = root_item.child_by_code(path.split('/')[-1])
    if current_item:
        current_item.merge(new_item)
    else:
        root_item.add_child(new_item)
def function[add_item, parameter[self, path, name, icon, url, order, permission, active_regex]]: constant[ Add new menu item to menu :param path: Path of menu :param name: Display name :param icon: CSS icon :param url: link to page :param order: Sort order :param permission: :return: ] if compare[name[self].root_item is constant[None]] begin[:] name[self].root_item assign[=] call[name[MenuItem], parameter[constant[ROOT], constant[ROOT]]] variable[root_item] assign[=] name[self].root_item variable[current_path] assign[=] constant[] for taget[name[node]] in starred[call[call[name[path].split, parameter[constant[/]]]][<ast.Slice object at 0x7da20c7c8400>]] begin[:] if <ast.UnaryOp object at 0x7da20c7c9510> begin[:] continue variable[current_path] assign[=] binary_operation[constant[/] + call[call[constant[{}/{}].format, parameter[name[current_path], name[node]]].strip, parameter[constant[/]]]] variable[new_root] assign[=] call[name[root_item].child_by_code, parameter[name[node]]] if <ast.UnaryOp object at 0x7da20c7c8100> begin[:] variable[new_root] assign[=] call[name[MenuItem], parameter[name[current_path]]] call[name[root_item].add_child, parameter[name[new_root]]] variable[root_item] assign[=] name[new_root] variable[new_item] assign[=] call[name[MenuItem], parameter[name[path], name[name], name[icon], name[url], name[order], name[permission], name[active_regex]]] variable[current_item] assign[=] call[name[root_item].child_by_code, parameter[call[call[name[path].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da20c7cba00>]]] if name[current_item] begin[:] call[name[current_item].merge, parameter[name[new_item]]]
keyword[def] identifier[add_item] ( identifier[self] , identifier[path] , identifier[name] , identifier[icon] = keyword[None] , identifier[url] = keyword[None] , identifier[order] = keyword[None] , identifier[permission] = keyword[None] , identifier[active_regex] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[root_item] keyword[is] keyword[None] : identifier[self] . identifier[root_item] = identifier[MenuItem] ( literal[string] , literal[string] ) identifier[root_item] = identifier[self] . identifier[root_item] identifier[current_path] = literal[string] keyword[for] identifier[node] keyword[in] identifier[path] . identifier[split] ( literal[string] )[:- literal[int] ]: keyword[if] keyword[not] identifier[node] : keyword[continue] identifier[current_path] = literal[string] + literal[string] . identifier[format] ( identifier[current_path] , identifier[node] ). identifier[strip] ( literal[string] ) identifier[new_root] = identifier[root_item] . identifier[child_by_code] ( identifier[node] ) keyword[if] keyword[not] identifier[new_root] : identifier[new_root] = identifier[MenuItem] ( identifier[current_path] , identifier[name] = identifier[str] ( identifier[node] ). identifier[capitalize] ()) identifier[root_item] . identifier[add_child] ( identifier[new_root] ) identifier[root_item] = identifier[new_root] identifier[new_item] = identifier[MenuItem] ( identifier[path] , identifier[name] , identifier[icon] , identifier[url] , identifier[order] , identifier[permission] , identifier[active_regex] ) identifier[current_item] = identifier[root_item] . identifier[child_by_code] ( identifier[path] . identifier[split] ( literal[string] )[- literal[int] ]) keyword[if] identifier[current_item] : identifier[current_item] . identifier[merge] ( identifier[new_item] ) keyword[else] : identifier[root_item] . identifier[add_child] ( identifier[new_item] )
def add_item(self, path, name, icon=None, url=None, order=None, permission=None, active_regex=None): """ Add new menu item to menu :param path: Path of menu :param name: Display name :param icon: CSS icon :param url: link to page :param order: Sort order :param permission: :return: """ if self.root_item is None: self.root_item = MenuItem('ROOT', 'ROOT') # depends on [control=['if'], data=[]] root_item = self.root_item current_path = '' for node in path.split('/')[:-1]: if not node: continue # depends on [control=['if'], data=[]] current_path = '/' + '{}/{}'.format(current_path, node).strip('/') new_root = root_item.child_by_code(node) if not new_root: # Create menu item if not exists new_root = MenuItem(current_path, name=str(node).capitalize()) root_item.add_child(new_root) # depends on [control=['if'], data=[]] root_item = new_root # depends on [control=['for'], data=['node']] new_item = MenuItem(path, name, icon, url, order, permission, active_regex) current_item = root_item.child_by_code(path.split('/')[-1]) if current_item: current_item.merge(new_item) # depends on [control=['if'], data=[]] else: root_item.add_child(new_item)
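add_item above walks the slash-separated path and auto-creates any missing intermediate nodes before attaching the leaf. A self-contained sketch of that walk, using a minimal stand-in for MenuItem that implements only the two methods the loop needs:

class Node:
    # minimal stand-in for MenuItem: code, display name, children keyed by
    # the last segment of the child's path
    def __init__(self, code, name):
        self.code, self.name, self.children = code, name, {}
    def child_by_code(self, code):
        return self.children.get(code)
    def add_child(self, child):
        self.children[child.code.split("/")[-1]] = child

root = Node("ROOT", "ROOT")
path = "admin/users/list"
current, current_path = root, ""
for segment in path.split("/")[:-1]:
    current_path = "/" + "{}/{}".format(current_path, segment).strip("/")
    nxt = current.child_by_code(segment)
    if not nxt:
        nxt = Node(current_path, segment.capitalize())  # auto-created node
        current.add_child(nxt)
    current = nxt
current.add_child(Node(path, "List"))
print(root.children["admin"].children["users"].children["list"].name)  # List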
def get_user_data(self):
    """
    Extracts user data, either from user session or from database.
    For each User, an InfoObject is used to store data of a certain kind
    (e.g., user customization, saved searches, etc.). If for a given user,
    no InfoObject exists for a given type of user-specific data, the
    default data is read from the settings and an InfoObject with default
    settings is created.

    The function returns a dictionary of form
    {'customization': <dict with user customization>,
     'saved_searches': <dict with saved searches>}
    """
    # Below, we retrieve user-specific data (user preferences, saved searches, etc.)
    # We take this data from the session -- if it has already been
    # loaded in the session. If not, then we load the data into the session first.
    #
    # Things are a bit tricky, because users can first be unauthenticated,
    # then log in, then log off again. This must be reflected in the user data
    # that is loaded into the session.
    #
    # There are four cases if settings exist within session scope:
    # 1.) unauthenticated user && non-anonymous settings exist in session --> load
    # 2.) unauthenticated user && anonymous settings --> pass
    # 3.) authenticated user && non-anonymous settings --> pass
    # 4.) authenticated user && anonymous settings --> load

    settings = self.request.session.get('customization')
    saved_searches = self.request.session.get('saved_searches')

    load_new_settings = False

    if settings:
        # case 1.)
        if (not self.request.user.is_authenticated()) \
                and self.request.session.get('customization_for_authenticated'):
            load_new_settings = True
        # case 4.)
        elif self.request.user.is_authenticated() \
                and not self.request.session.get('customization_for_authenticated'):
            load_new_settings = True
    else:
        load_new_settings = True

    if load_new_settings:
        # Load user settings. If for the current user, no user settings have been
        # stored, retrieve the default settings and store them (for authenticated users)
        if self.request.user.is_authenticated():
            user_name = self.request.user.username
        else:
            user_name = "unauthenticated user"

        self.request.session['customization_for_authenticated'] = self.request.user.is_authenticated()

        settings = UserData.get_user_data(user=self.request.user, data_kind=DINGOS_USER_PREFS_TYPE_NAME)
        if not settings:
            UserData.store_user_data(user=self.request.user,
                                     data_kind=DINGOS_USER_PREFS_TYPE_NAME,
                                     user_data=DINGOS_DEFAULT_USER_PREFS,
                                     iobject_name="User preferences of user '%s'" % user_name)
            settings = UserData.get_user_data(user=self.request.user, data_kind=DINGOS_USER_PREFS_TYPE_NAME)

        # Do the same for saved searches
        saved_searches = UserData.get_user_data(user=self.request.user,
                                                data_kind=DINGOS_SAVED_SEARCHES_TYPE_NAME)
        if not saved_searches:
            UserData.store_user_data(user=self.request.user,
                                     data_kind=DINGOS_SAVED_SEARCHES_TYPE_NAME,
                                     user_data=DINGOS_DEFAULT_SAVED_SEARCHES,
                                     iobject_name="Saved searches of user '%s'" % user_name)
            saved_searches = UserData.get_user_data(user=self.request.user,
                                                    data_kind=DINGOS_SAVED_SEARCHES_TYPE_NAME)

        self.request.session['customization'] = settings
        self.request.session['saved_searches'] = saved_searches

    return {'customization': settings, 'saved_searches': saved_searches}
def function[get_user_data, parameter[self]]: constant[ Extracts user data, either from user session or from database (for each User, an InfoObject is used to store data of a certain kind (e.g., user customization, saved searches, etc.). If for a given user, no InfoObject exists for a given type of user-specific data, the default data is read from the settings and an InfoObject with default settings is created. The function returns a dictionary of form {'customization': <dict with user customization>, 'saved_searches': <dict with saved searches> } ] variable[settings] assign[=] call[name[self].request.session.get, parameter[constant[customization]]] variable[saved_searches] assign[=] call[name[self].request.session.get, parameter[constant[saved_searches]]] variable[load_new_settings] assign[=] constant[False] if name[settings] begin[:] if <ast.BoolOp object at 0x7da20e9b27a0> begin[:] variable[load_new_settings] assign[=] constant[True] if name[load_new_settings] begin[:] if call[name[self].request.user.is_authenticated, parameter[]] begin[:] variable[user_name] assign[=] name[self].request.user.username call[name[self].request.session][constant[customization_for_authenticated]] assign[=] call[name[self].request.user.is_authenticated, parameter[]] variable[settings] assign[=] call[name[UserData].get_user_data, parameter[]] if <ast.UnaryOp object at 0x7da20e9b31c0> begin[:] call[name[UserData].store_user_data, parameter[]] variable[settings] assign[=] call[name[UserData].get_user_data, parameter[]] variable[saved_searches] assign[=] call[name[UserData].get_user_data, parameter[]] if <ast.UnaryOp object at 0x7da20e9b3c40> begin[:] call[name[UserData].store_user_data, parameter[]] variable[saved_searches] assign[=] call[name[UserData].get_user_data, parameter[]] call[name[self].request.session][constant[customization]] assign[=] name[settings] call[name[self].request.session][constant[saved_searches]] assign[=] name[saved_searches] return[dictionary[[<ast.Constant object at 0x7da20e9b1360>, <ast.Constant object at 0x7da20e9b3940>], [<ast.Name object at 0x7da20e9b02e0>, <ast.Name object at 0x7da20e9b01c0>]]]
keyword[def] identifier[get_user_data] ( identifier[self] ): literal[string] identifier[settings] = identifier[self] . identifier[request] . identifier[session] . identifier[get] ( literal[string] ) identifier[saved_searches] = identifier[self] . identifier[request] . identifier[session] . identifier[get] ( literal[string] ) identifier[load_new_settings] = keyword[False] keyword[if] identifier[settings] : keyword[if] ( keyword[not] identifier[self] . identifier[request] . identifier[user] . identifier[is_authenticated] ()) keyword[and] identifier[self] . identifier[request] . identifier[session] . identifier[get] ( literal[string] ): identifier[load_new_settings] = keyword[True] keyword[elif] identifier[self] . identifier[request] . identifier[user] . identifier[is_authenticated] () keyword[and] keyword[not] identifier[self] . identifier[request] . identifier[session] . identifier[get] ( literal[string] ): identifier[load_new_settings] = keyword[True] keyword[else] : identifier[load_new_settings] = keyword[True] keyword[if] identifier[load_new_settings] : keyword[if] identifier[self] . identifier[request] . identifier[user] . identifier[is_authenticated] (): identifier[user_name] = identifier[self] . identifier[request] . identifier[user] . identifier[username] keyword[else] : identifier[user_name] = literal[string] identifier[self] . identifier[request] . identifier[session] [ literal[string] ]= identifier[self] . identifier[request] . identifier[user] . identifier[is_authenticated] () identifier[settings] = identifier[UserData] . identifier[get_user_data] ( identifier[user] = identifier[self] . identifier[request] . identifier[user] , identifier[data_kind] = identifier[DINGOS_USER_PREFS_TYPE_NAME] ) keyword[if] keyword[not] identifier[settings] : identifier[UserData] . identifier[store_user_data] ( identifier[user] = identifier[self] . identifier[request] . identifier[user] , identifier[data_kind] = identifier[DINGOS_USER_PREFS_TYPE_NAME] , identifier[user_data] = identifier[DINGOS_DEFAULT_USER_PREFS] , identifier[iobject_name] = literal[string] % identifier[user_name] ) identifier[settings] = identifier[UserData] . identifier[get_user_data] ( identifier[user] = identifier[self] . identifier[request] . identifier[user] , identifier[data_kind] = identifier[DINGOS_USER_PREFS_TYPE_NAME] ) identifier[saved_searches] = identifier[UserData] . identifier[get_user_data] ( identifier[user] = identifier[self] . identifier[request] . identifier[user] , identifier[data_kind] = identifier[DINGOS_SAVED_SEARCHES_TYPE_NAME] ) keyword[if] keyword[not] identifier[saved_searches] : identifier[UserData] . identifier[store_user_data] ( identifier[user] = identifier[self] . identifier[request] . identifier[user] , identifier[data_kind] = identifier[DINGOS_SAVED_SEARCHES_TYPE_NAME] , identifier[user_data] = identifier[DINGOS_DEFAULT_SAVED_SEARCHES] , identifier[iobject_name] = literal[string] % identifier[user_name] ) identifier[saved_searches] = identifier[UserData] . identifier[get_user_data] ( identifier[user] = identifier[self] . identifier[request] . identifier[user] , identifier[data_kind] = identifier[DINGOS_SAVED_SEARCHES_TYPE_NAME] ) identifier[self] . identifier[request] . identifier[session] [ literal[string] ]= identifier[settings] identifier[self] . identifier[request] . identifier[session] [ literal[string] ]= identifier[saved_searches] keyword[return] { literal[string] : identifier[settings] , literal[string] : identifier[saved_searches] }
def get_user_data(self): """ Extracts user data, either from user session or from database (for each User, an InfoObject is used to store data of a certain kind (e.g., user customization, saved searches, etc.). If for a given user, no InfoObject exists for a given type of user-specific data, the default data is read from the settings and an InfoObject with default settings is created. The function returns a dictionary of form {'customization': <dict with user customization>, 'saved_searches': <dict with saved searches> } """ # Below, we retrieve user-specific data (user preferences, saved searches, etc.) # We take this data from the session -- if it has already been # loaded in the session. If not, then we load the data into the session first. # # Things are a bit tricky, because users can first be unauthenticated, # then log in, then log off again. This must be reflected in the user data # that is loaded into the session. # # There are four cases if settings exist within session scope: # 1.) unauthenticated user && non-anonymous settings exist in session --> load # 2.) unauthenticated user && anonymous settings --> pass # 3.) authenticated user && non-anonymous settings --> pass # 4.) authenticated user && anonymous settings --> load settings = self.request.session.get('customization') saved_searches = self.request.session.get('saved_searches') load_new_settings = False if settings: # case 1.) if not self.request.user.is_authenticated() and self.request.session.get('customization_for_authenticated'): load_new_settings = True # depends on [control=['if'], data=[]] # case 4.) elif self.request.user.is_authenticated() and (not self.request.session.get('customization_for_authenticated')): load_new_settings = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: load_new_settings = True if load_new_settings: # Load user settings. If for the current user, no user settings have been # stored, retrieve the default settings and store them (for authenticated users) if self.request.user.is_authenticated(): user_name = self.request.user.username # depends on [control=['if'], data=[]] else: user_name = 'unauthenticated user' self.request.session['customization_for_authenticated'] = self.request.user.is_authenticated() settings = UserData.get_user_data(user=self.request.user, data_kind=DINGOS_USER_PREFS_TYPE_NAME) if not settings: UserData.store_user_data(user=self.request.user, data_kind=DINGOS_USER_PREFS_TYPE_NAME, user_data=DINGOS_DEFAULT_USER_PREFS, iobject_name="User preferences of user '%s'" % user_name) settings = UserData.get_user_data(user=self.request.user, data_kind=DINGOS_USER_PREFS_TYPE_NAME) # depends on [control=['if'], data=[]] # Do the same for saved searches saved_searches = UserData.get_user_data(user=self.request.user, data_kind=DINGOS_SAVED_SEARCHES_TYPE_NAME) if not saved_searches: UserData.store_user_data(user=self.request.user, data_kind=DINGOS_SAVED_SEARCHES_TYPE_NAME, user_data=DINGOS_DEFAULT_SAVED_SEARCHES, iobject_name="Saved searches of user '%s'" % user_name) saved_searches = UserData.get_user_data(user=self.request.user, data_kind=DINGOS_SAVED_SEARCHES_TYPE_NAME) # depends on [control=['if'], data=[]] self.request.session['customization'] = settings self.request.session['saved_searches'] = saved_searches # depends on [control=['if'], data=[]] return {'customization': settings, 'saved_searches': saved_searches}
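The four commented cases above collapse to a single rule: reload whenever the session holds no settings, or the customization_for_authenticated flag stored with them disagrees with the user's current authentication state. As a standalone predicate (a sketch, not code from the project):

def needs_reload(settings, user_is_authenticated, stored_for_authenticated):
    # reload when nothing is cached yet, or when the flags disagree (cases 1 and 4)
    if not settings:
        return True
    return bool(user_is_authenticated) != bool(stored_for_authenticated)

assert needs_reload(None, False, False)                   # nothing cached yet
assert needs_reload({"theme": "dark"}, False, True)       # case 1: load
assert not needs_reload({"theme": "dark"}, False, False)  # case 2: pass
assert not needs_reload({"theme": "dark"}, True, True)    # case 3: pass
assert needs_reload({"theme": "dark"}, True, False)       # case 4: load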
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN config from all SP Templates that have it."""
    sp_template_info_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
    vlan_name = self.make_vlan_name(vlan_id)
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)

    try:
        # sp_template_info_list is a list of tuples.
        # Each tuple is of the form :
        # (ucsm_ip, sp_template_path, sp_template)
        for sp_template_info in sp_template_info_list:
            sp_template_path = sp_template_info.path
            sp_template = sp_template_info.name
            sp_template_full_path = (sp_template_path +
                                     const.SP_TEMPLATE_PREFIX + sp_template)
            obj = handle.query_dn(sp_template_full_path)
            if not obj:
                LOG.error('UCS Manager network driver could not '
                          'find Service Profile template %s',
                          sp_template_full_path)
                continue

            eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
                              for ep in virtio_port_list]
            for eth_port_path in eth_port_paths:
                eth = handle.query_dn(eth_port_path)
                if eth:
                    vlan_path = (eth_port_path +
                                 const.VLAN_PATH_PREFIX + vlan_name)
                    vlan = handle.query_dn(vlan_path)
                    if vlan:
                        # Found vlan config. Now remove it.
                        handle.remove_mo(vlan)
                    else:
                        LOG.debug('UCS Manager network driver did not '
                                  'find VLAN %s at %s',
                                  vlan_name, eth_port_path)
                else:
                    LOG.debug('UCS Manager network driver did not '
                              'find ethernet port at %s', eth_port_path)
            handle.commit()
            return True
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
def function[_remove_vlan_from_all_sp_templates, parameter[self, handle, vlan_id, ucsm_ip]]: constant[Deletes VLAN config from all SP Templates that have it.] variable[sp_template_info_list] assign[=] call[call[name[CONF].ml2_cisco_ucsm.ucsms][name[ucsm_ip]].sp_template_list.values, parameter[]] variable[vlan_name] assign[=] call[name[self].make_vlan_name, parameter[name[vlan_id]]] variable[virtio_port_list] assign[=] call[name[CONF].ml2_cisco_ucsm.ucsms][name[ucsm_ip]].ucsm_virtio_eth_ports <ast.Try object at 0x7da1b1b7de10>
keyword[def] identifier[_remove_vlan_from_all_sp_templates] ( identifier[self] , identifier[handle] , identifier[vlan_id] , identifier[ucsm_ip] ): literal[string] identifier[sp_template_info_list] =( identifier[CONF] . identifier[ml2_cisco_ucsm] . identifier[ucsms] [ identifier[ucsm_ip] ]. identifier[sp_template_list] . identifier[values] ()) identifier[vlan_name] = identifier[self] . identifier[make_vlan_name] ( identifier[vlan_id] ) identifier[virtio_port_list] =( identifier[CONF] . identifier[ml2_cisco_ucsm] . identifier[ucsms] [ identifier[ucsm_ip] ]. identifier[ucsm_virtio_eth_ports] ) keyword[try] : keyword[for] identifier[sp_template_info] keyword[in] identifier[sp_template_info_list] : identifier[sp_template_path] = identifier[sp_template_info] . identifier[path] identifier[sp_template] = identifier[sp_template_info] . identifier[name] identifier[sp_template_full_path] =( identifier[sp_template_path] + identifier[const] . identifier[SP_TEMPLATE_PREFIX] + identifier[sp_template] ) identifier[obj] = identifier[handle] . identifier[query_dn] ( identifier[sp_template_full_path] ) keyword[if] keyword[not] identifier[obj] : identifier[LOG] . identifier[error] ( literal[string] literal[string] , identifier[sp_template_full_path] ) keyword[continue] identifier[eth_port_paths] =[ literal[string] %( identifier[sp_template_full_path] , identifier[ep] ) keyword[for] identifier[ep] keyword[in] identifier[virtio_port_list] ] keyword[for] identifier[eth_port_path] keyword[in] identifier[eth_port_paths] : identifier[eth] = identifier[handle] . identifier[query_dn] ( identifier[eth_port_path] ) keyword[if] identifier[eth] : identifier[vlan_path] =( identifier[eth_port_path] + identifier[const] . identifier[VLAN_PATH_PREFIX] + identifier[vlan_name] ) identifier[vlan] = identifier[handle] . identifier[query_dn] ( identifier[vlan_path] ) keyword[if] identifier[vlan] : identifier[handle] . identifier[remove_mo] ( identifier[vlan] ) keyword[else] : identifier[LOG] . identifier[debug] ( literal[string] literal[string] , identifier[vlan_name] , identifier[eth_port_path] ) keyword[else] : identifier[LOG] . identifier[debug] ( literal[string] literal[string] , identifier[eth_port_path] ) identifier[handle] . identifier[commit] () keyword[return] keyword[True] keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[raise] identifier[cexc] . identifier[UcsmConfigDeleteFailed] ( identifier[config] = identifier[vlan_id] , identifier[ucsm_ip] = identifier[ucsm_ip] , identifier[exc] = identifier[e] )
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip): """Deletes VLAN config from all SP Templates that have it.""" sp_template_info_list = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values() vlan_name = self.make_vlan_name(vlan_id) virtio_port_list = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports try: # sp_template_info_list is a list of tuples. # Each tuple is of the form : # (ucsm_ip, sp_template_path, sp_template) for sp_template_info in sp_template_info_list: sp_template_path = sp_template_info.path sp_template = sp_template_info.name sp_template_full_path = sp_template_path + const.SP_TEMPLATE_PREFIX + sp_template obj = handle.query_dn(sp_template_full_path) if not obj: LOG.error('UCS Manager network driver could not find Service Profile template %s', sp_template_full_path) continue # depends on [control=['if'], data=[]] eth_port_paths = ['%s%s' % (sp_template_full_path, ep) for ep in virtio_port_list] for eth_port_path in eth_port_paths: eth = handle.query_dn(eth_port_path) if eth: vlan_path = eth_port_path + const.VLAN_PATH_PREFIX + vlan_name vlan = handle.query_dn(vlan_path) if vlan: # Found vlan config. Now remove it. handle.remove_mo(vlan) # depends on [control=['if'], data=[]] else: LOG.debug('UCS Manager network driver did not find VLAN %s at %s', vlan_name, eth_port_path) # depends on [control=['if'], data=[]] else: LOG.debug('UCS Manager network driver did not find ethernet port at %s', eth_port_path) # depends on [control=['for'], data=['eth_port_path']] handle.commit() return True # depends on [control=['for'], data=['sp_template_info']] # depends on [control=['try'], data=[]] except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigDeleteFailed(config=vlan_id, ucsm_ip=ucsm_ip, exc=e) # depends on [control=['except'], data=['e']]
def _strip_unsafe_kubernetes_special_chars(string):
    """
    Kubernetes only supports lowercase alphanumeric characters and "-" and "."
    in the pod name.
    However, there are special rules about how "-" and "." can be used, so
    let's only keep alphanumeric chars; see here for detail:
    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/

    :param string: The requested Pod name
    :return: ``str`` Pod name stripped of any unsafe characters
    """
    return ''.join(ch.lower() for ind, ch in enumerate(string) if ch.isalnum())
def function[_strip_unsafe_kubernetes_special_chars, parameter[string]]: constant[ Kubernetes only supports lowercase alphanumeric characters and "-" and "." in the pod name However, there are special rules about how "-" and "." can be used so let's only keep alphanumeric chars see here for detail: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ :param string: The requested Pod name :return: ``str`` Pod name stripped of any unsafe characters ] return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da1b05bd870>]]]
keyword[def] identifier[_strip_unsafe_kubernetes_special_chars] ( identifier[string] ): literal[string] keyword[return] literal[string] . identifier[join] ( identifier[ch] . identifier[lower] () keyword[for] identifier[ind] , identifier[ch] keyword[in] identifier[enumerate] ( identifier[string] ) keyword[if] identifier[ch] . identifier[isalnum] ())
def _strip_unsafe_kubernetes_special_chars(string): """ Kubernetes only supports lowercase alphanumeric characters and "-" and "." in the pod name However, there are special rules about how "-" and "." can be used so let's only keep alphanumeric chars see here for detail: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ :param string: The requested Pod name :return: ``str`` Pod name stripped of any unsafe characters """ return ''.join((ch.lower() for (ind, ch) in enumerate(string) if ch.isalnum()))
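Applied to a requested pod name, the filter above lowercases everything and drops every non-alphanumeric character (dashes, dots, and underscores included, even though Kubernetes itself accepts some of them):

def strip_unsafe(string):
    return ''.join(ch.lower() for ch in string if ch.isalnum())

print(strip_unsafe("My-Airflow_Pod.v2"))  # myairflowpodv2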
def add_net(self, net):
    """ Add a net to the logic of the block.

    The passed net, which must be of type LogicNet, is checked and then
    added to the block. No wires are added by this member, they must be
    added separately with add_wirevector."""
    self.sanity_check_net(net)
    self.logic.add(net)
def function[add_net, parameter[self, net]]: constant[ Add a net to the logic of the block. The passed net, which must be of type LogicNet, is checked and then added to the block. No wires are added by this member, they must be added seperately with add_wirevector.] call[name[self].sanity_check_net, parameter[name[net]]] call[name[self].logic.add, parameter[name[net]]]
keyword[def] identifier[add_net] ( identifier[self] , identifier[net] ): literal[string] identifier[self] . identifier[sanity_check_net] ( identifier[net] ) identifier[self] . identifier[logic] . identifier[add] ( identifier[net] )
def add_net(self, net): """ Add a net to the logic of the block. The passed net, which must be of type LogicNet, is checked and then added to the block. No wires are added by this member, they must be added seperately with add_wirevector.""" self.sanity_check_net(net) self.logic.add(net)
def obfn_gvar(self):
    """Variable to be evaluated in computing regularisation term,
    depending on 'gEvalY' option value.
    """
    if self.opt['gEvalY']:
        return self.Y
    else:
        return self.cnst_A(None, self.Xf) - self.cnst_c()
def function[obfn_gvar, parameter[self]]: constant[Variable to be evaluated in computing regularisation term, depending on 'gEvalY' option value. ] if call[name[self].opt][constant[gEvalY]] begin[:] return[name[self].Y]
keyword[def] identifier[obfn_gvar] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[opt] [ literal[string] ]: keyword[return] identifier[self] . identifier[Y] keyword[else] : keyword[return] identifier[self] . identifier[cnst_A] ( keyword[None] , identifier[self] . identifier[Xf] )- identifier[self] . identifier[cnst_c] ()
def obfn_gvar(self): """Variable to be evaluated in computing regularisation term, depending on 'gEvalY' option value. """ if self.opt['gEvalY']: return self.Y # depends on [control=['if'], data=[]] else: return self.cnst_A(None, self.Xf) - self.cnst_c()
def _get_all_tags(conn, load_balancer_names=None):
    '''
    Retrieve all the metadata tags associated with your ELB(s).

    :type load_balancer_names: list
    :param load_balancer_names: An optional list of load balancer names.

    :rtype: list
    :return: A list of :class:`boto.ec2.elb.tag.Tag` objects
    '''
    params = {}
    if load_balancer_names:
        conn.build_list_params(params, load_balancer_names,
                               'LoadBalancerNames.member.%d')

    tags = conn.get_object(
        'DescribeTags',
        params,
        __utils__['boto_elb_tag.get_tag_descriptions'](),
        verb='POST'
    )

    if tags[load_balancer_names]:
        return tags[load_balancer_names]
    else:
        return None
def function[_get_all_tags, parameter[conn, load_balancer_names]]: constant[ Retrieve all the metadata tags associated with your ELB(s). :type load_balancer_names: list :param load_balancer_names: An optional list of load balancer names. :rtype: list :return: A list of :class:`boto.ec2.elb.tag.Tag` objects ] variable[params] assign[=] dictionary[[], []] if name[load_balancer_names] begin[:] call[name[conn].build_list_params, parameter[name[params], name[load_balancer_names], constant[LoadBalancerNames.member.%d]]] variable[tags] assign[=] call[name[conn].get_object, parameter[constant[DescribeTags], name[params], call[call[name[__utils__]][constant[boto_elb_tag.get_tag_descriptions]], parameter[]]]] if call[name[tags]][name[load_balancer_names]] begin[:] return[call[name[tags]][name[load_balancer_names]]]
keyword[def] identifier[_get_all_tags] ( identifier[conn] , identifier[load_balancer_names] = keyword[None] ): literal[string] identifier[params] ={} keyword[if] identifier[load_balancer_names] : identifier[conn] . identifier[build_list_params] ( identifier[params] , identifier[load_balancer_names] , literal[string] ) identifier[tags] = identifier[conn] . identifier[get_object] ( literal[string] , identifier[params] , identifier[__utils__] [ literal[string] ](), identifier[verb] = literal[string] ) keyword[if] identifier[tags] [ identifier[load_balancer_names] ]: keyword[return] identifier[tags] [ identifier[load_balancer_names] ] keyword[else] : keyword[return] keyword[None]
def _get_all_tags(conn, load_balancer_names=None): """ Retrieve all the metadata tags associated with your ELB(s). :type load_balancer_names: list :param load_balancer_names: An optional list of load balancer names. :rtype: list :return: A list of :class:`boto.ec2.elb.tag.Tag` objects """ params = {} if load_balancer_names: conn.build_list_params(params, load_balancer_names, 'LoadBalancerNames.member.%d') # depends on [control=['if'], data=[]] tags = conn.get_object('DescribeTags', params, __utils__['boto_elb_tag.get_tag_descriptions'](), verb='POST') if tags[load_balancer_names]: return tags[load_balancer_names] # depends on [control=['if'], data=[]] else: return None
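build_list_params is boto's helper for expanding a Python list into numbered query parameters; assuming its usual 1-based numbering, the call above produces keys like LoadBalancerNames.member.1. A small illustrative re-implementation:

def build_list_params(params, items, label):
    # illustrative re-implementation of boto's helper: 1-based numbering
    for i, item in enumerate(items, start=1):
        params[label % i] = item

params = {}
build_list_params(params, ['elb-a', 'elb-b'], 'LoadBalancerNames.member.%d')
print(params)
# {'LoadBalancerNames.member.1': 'elb-a', 'LoadBalancerNames.member.2': 'elb-b'}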
def setup_client(self):
    """The command registers the client for communication protection.

    This will be used to obtain an access token via the Get Client Token
    command. The access token will be passed as a protection_access_token
    parameter to other commands.

    Note:
        If you are using the oxd-https-extension, you must setup the client

    Returns:
        **dict:** the client setup information

    Example response::

        {
            "oxd_id":"6F9619FF-8B86-D011-B42D-00CF4FC964FF",
            "op_host": "<op host>",
            "client_id":"<client id>",
            "client_secret":"<client secret>",
            "client_registration_access_token":"<Client registration access token>",
            "client_registration_client_uri":"<URI of client registration>",
            "client_id_issued_at":"<client_id issued at>",
            "client_secret_expires_at":"<client_secret expires at>"
        }
    """
    # add required params for the command
    params = {
        "authorization_redirect_uri": self.authorization_redirect_uri,
        "oxd_rp_programming_language": "python",
    }

    # add other optional params if they exist in config
    for op in self.opt_params:
        if self.config.get("client", op):
            params[op] = self.config.get("client", op)

    for olp in self.opt_list_params:
        if self.config.get("client", olp):
            params[olp] = self.config.get("client", olp).split(",")

    logger.debug("Sending command `setup_client` with params %s", params)
    response = self.msgr.request("setup_client", **params)
    logger.debug("Received response: %s", response)

    if response['status'] == 'error':
        raise OxdServerError(response['data'])

    data = response["data"]
    self.oxd_id = data["oxd_id"]
    self.config.set("oxd", "id", data["oxd_id"])
    self.config.set("client", "client_id", data["client_id"])
    self.config.set("client", "client_secret", data["client_secret"])
    if data["client_registration_access_token"]:
        self.config.set("client", "client_registration_access_token",
                        data["client_registration_access_token"])
    if data["client_registration_client_uri"]:
        self.config.set("client", "client_registration_client_uri",
                        data["client_registration_client_uri"])
    self.config.set("client", "client_id_issued_at",
                    str(data["client_id_issued_at"]))

    return data
def function[setup_client, parameter[self]]: constant[The command registers the client for communication protection. This will be used to obtain an access token via the Get Client Token command. The access token will be passed as a protection_access_token parameter to other commands. Note: If you are using the oxd-https-extension, you must setup the client Returns: **dict:** the client setup information Example response:: { "oxd_id":"6F9619FF-8B86-D011-B42D-00CF4FC964FF", "op_host": "<op host>", "client_id":"<client id>", "client_secret":"<client secret>", "client_registration_access_token":"<Client registration access token>", "client_registration_client_uri":"<URI of client registration>", "client_id_issued_at":"<client_id issued at>", "client_secret_expires_at":"<client_secret expires at>" } ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b04fdf90>, <ast.Constant object at 0x7da1b04fe4a0>], [<ast.Attribute object at 0x7da1b04fd1b0>, <ast.Constant object at 0x7da1b04fc700>]] for taget[name[op]] in starred[name[self].opt_params] begin[:] if call[name[self].config.get, parameter[constant[client], name[op]]] begin[:] call[name[params]][name[op]] assign[=] call[name[self].config.get, parameter[constant[client], name[op]]] for taget[name[olp]] in starred[name[self].opt_list_params] begin[:] if call[name[self].config.get, parameter[constant[client], name[olp]]] begin[:] call[name[params]][name[olp]] assign[=] call[call[name[self].config.get, parameter[constant[client], name[olp]]].split, parameter[constant[,]]] call[name[logger].debug, parameter[constant[Sending command `setup_client` with params %s], name[params]]] variable[response] assign[=] call[name[self].msgr.request, parameter[constant[setup_client]]] call[name[logger].debug, parameter[constant[Received response: %s], name[response]]] if compare[call[name[response]][constant[status]] equal[==] constant[error]] begin[:] <ast.Raise object at 0x7da1b04fc310> variable[data] assign[=] call[name[response]][constant[data]] name[self].oxd_id assign[=] call[name[data]][constant[oxd_id]] call[name[self].config.set, parameter[constant[oxd], constant[id], call[name[data]][constant[oxd_id]]]] call[name[self].config.set, parameter[constant[client], constant[client_id], call[name[data]][constant[client_id]]]] call[name[self].config.set, parameter[constant[client], constant[client_secret], call[name[data]][constant[client_secret]]]] if call[name[data]][constant[client_registration_access_token]] begin[:] call[name[self].config.set, parameter[constant[client], constant[client_registration_access_token], call[name[data]][constant[client_registration_access_token]]]] if call[name[data]][constant[client_registration_client_uri]] begin[:] call[name[self].config.set, parameter[constant[client], constant[client_registration_client_uri], call[name[data]][constant[client_registration_client_uri]]]] call[name[self].config.set, parameter[constant[client], constant[client_id_issued_at], call[name[str], parameter[call[name[data]][constant[client_id_issued_at]]]]]] return[name[data]]
keyword[def] identifier[setup_client] ( identifier[self] ): literal[string] identifier[params] ={ literal[string] : identifier[self] . identifier[authorization_redirect_uri] , literal[string] : literal[string] , } keyword[for] identifier[op] keyword[in] identifier[self] . identifier[opt_params] : keyword[if] identifier[self] . identifier[config] . identifier[get] ( literal[string] , identifier[op] ): identifier[params] [ identifier[op] ]= identifier[self] . identifier[config] . identifier[get] ( literal[string] , identifier[op] ) keyword[for] identifier[olp] keyword[in] identifier[self] . identifier[opt_list_params] : keyword[if] identifier[self] . identifier[config] . identifier[get] ( literal[string] , identifier[olp] ): identifier[params] [ identifier[olp] ]= identifier[self] . identifier[config] . identifier[get] ( literal[string] , identifier[olp] ). identifier[split] ( literal[string] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[params] ) identifier[response] = identifier[self] . identifier[msgr] . identifier[request] ( literal[string] ,** identifier[params] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[response] ) keyword[if] identifier[response] [ literal[string] ]== literal[string] : keyword[raise] identifier[OxdServerError] ( identifier[response] [ literal[string] ]) identifier[data] = identifier[response] [ literal[string] ] identifier[self] . identifier[oxd_id] = identifier[data] [ literal[string] ] identifier[self] . identifier[config] . identifier[set] ( literal[string] , literal[string] , identifier[data] [ literal[string] ]) identifier[self] . identifier[config] . identifier[set] ( literal[string] , literal[string] , identifier[data] [ literal[string] ]) identifier[self] . identifier[config] . identifier[set] ( literal[string] , literal[string] , identifier[data] [ literal[string] ]) keyword[if] identifier[data] [ literal[string] ]: identifier[self] . identifier[config] . identifier[set] ( literal[string] , literal[string] , identifier[data] [ literal[string] ]) keyword[if] identifier[data] [ literal[string] ]: identifier[self] . identifier[config] . identifier[set] ( literal[string] , literal[string] , identifier[data] [ literal[string] ]) identifier[self] . identifier[config] . identifier[set] ( literal[string] , literal[string] , identifier[str] ( identifier[data] [ literal[string] ])) keyword[return] identifier[data]
def setup_client(self): """The command registers the client for communication protection. This will be used to obtain an access token via the Get Client Token command. The access token will be passed as a protection_access_token parameter to other commands. Note: If you are using the oxd-https-extension, you must setup the client Returns: **dict:** the client setup information Example response:: { "oxd_id":"6F9619FF-8B86-D011-B42D-00CF4FC964FF", "op_host": "<op host>", "client_id":"<client id>", "client_secret":"<client secret>", "client_registration_access_token":"<Client registration access token>", "client_registration_client_uri":"<URI of client registration>", "client_id_issued_at":"<client_id issued at>", "client_secret_expires_at":"<client_secret expires at>" } """ # add required params for the command params = {'authorization_redirect_uri': self.authorization_redirect_uri, 'oxd_rp_programming_language': 'python'} # add other optional params if they exist in config for op in self.opt_params: if self.config.get('client', op): params[op] = self.config.get('client', op) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['op']] for olp in self.opt_list_params: if self.config.get('client', olp): params[olp] = self.config.get('client', olp).split(',') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['olp']] logger.debug('Sending command `setup_client` with params %s', params) response = self.msgr.request('setup_client', **params) logger.debug('Received response: %s', response) if response['status'] == 'error': raise OxdServerError(response['data']) # depends on [control=['if'], data=[]] data = response['data'] self.oxd_id = data['oxd_id'] self.config.set('oxd', 'id', data['oxd_id']) self.config.set('client', 'client_id', data['client_id']) self.config.set('client', 'client_secret', data['client_secret']) if data['client_registration_access_token']: self.config.set('client', 'client_registration_access_token', data['client_registration_access_token']) # depends on [control=['if'], data=[]] if data['client_registration_client_uri']: self.config.set('client', 'client_registration_client_uri', data['client_registration_client_uri']) # depends on [control=['if'], data=[]] self.config.set('client', 'client_id_issued_at', str(data['client_id_issued_at'])) return data
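# Usage sketch (added example, not from the original source): the optional-
# parameter collection pattern in setup_client above, reproduced standalone
# with configparser. The section name and the two option lists are
# illustrative assumptions, not the oxd client's real option names.
from configparser import ConfigParser

OPT_PARAMS = ["op_host", "post_logout_redirect_uri", "application_type"]  # assumed names
OPT_LIST_PARAMS = ["grant_types", "acr_values", "contacts"]               # assumed names

def build_setup_params(config, redirect_uri):
    """Collect required and optional client registration parameters."""
    params = {
        "authorization_redirect_uri": redirect_uri,
        "oxd_rp_programming_language": "python",
    }
    for op in OPT_PARAMS:
        if config.get("client", op, fallback=None):
            params[op] = config.get("client", op)
    for olp in OPT_LIST_PARAMS:
        if config.get("client", olp, fallback=None):
            # list-valued options are stored comma-separated in the INI file
            params[olp] = config.get("client", olp).split(",")
    return params

config = ConfigParser()
config.read_string("[client]\ngrant_types = authorization_code,client_credentials\n")
print(build_setup_params(config, "https://rp.example.org/callback"))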
def multi_ping(dest_addrs, timeout, retry=0, ignore_lookup_errors=False):
    """
    Combine send and receive measurement into single function.

    This offers a retry mechanism: the overall timeout is divided across the
    initial attempt plus the number of retries. Additional ICMP echo packets
    are sent to those addresses from which we have not yet received answers.

    The retry mechanism is useful because individual ICMP packets may get
    lost.

    If 'retry' is set to 0 then only a single packet is sent to each address.

    If 'ignore_lookup_errors' is set then any issues with resolving target
    names or looking up their address information will silently be ignored.
    Those targets simply appear in the 'no_results' return list.

    """
    retry = int(retry)
    if retry < 0:
        retry = 0

    timeout = float(timeout)
    if timeout < 0.1:
        raise MultiPingError("Timeout < 0.1 seconds not allowed")

    retry_timeout = float(timeout) / (retry + 1)
    if retry_timeout < 0.1:
        raise MultiPingError("Time between ping retries < 0.1 seconds")

    mp = MultiPing(dest_addrs, ignore_lookup_errors=ignore_lookup_errors)

    results = {}
    retry_count = 0
    while retry_count <= retry:
        # Send a batch of pings
        mp.send()

        single_results, no_results = mp.receive(retry_timeout)

        # Add the results from the last sending of pings to the overall results
        results.update(single_results)

        if not no_results:
            # No addresses left? We are done.
            break

        retry_count += 1

    return results, no_results
def function[multi_ping, parameter[dest_addrs, timeout, retry, ignore_lookup_errors]]: constant[ Combine send and receive measurement into single function. This offers a retry mechanism: Overall timeout time is divided by number of retries. Additional ICMPecho packets are sent to those addresses from which we have not received answers, yet. The retry mechanism is useful, because individual ICMP packets may get lost. If 'retry' is set to 0 then only a single packet is sent to each address. If 'ignore_lookup_errors' is set then any issues with resolving target names or looking up their address information will silently be ignored. Those targets simply appear in the 'no_results' return list. ] variable[retry] assign[=] call[name[int], parameter[name[retry]]] if compare[name[retry] less[<] constant[0]] begin[:] variable[retry] assign[=] constant[0] variable[timeout] assign[=] call[name[float], parameter[name[timeout]]] if compare[name[timeout] less[<] constant[0.1]] begin[:] <ast.Raise object at 0x7da2054a5630> variable[retry_timeout] assign[=] binary_operation[call[name[float], parameter[name[timeout]]] / binary_operation[name[retry] + constant[1]]] if compare[name[retry_timeout] less[<] constant[0.1]] begin[:] <ast.Raise object at 0x7da2054a6e00> variable[mp] assign[=] call[name[MultiPing], parameter[name[dest_addrs]]] variable[results] assign[=] dictionary[[], []] variable[retry_count] assign[=] constant[0] while compare[name[retry_count] less_or_equal[<=] name[retry]] begin[:] call[name[mp].send, parameter[]] <ast.Tuple object at 0x7da2054a7640> assign[=] call[name[mp].receive, parameter[name[retry_timeout]]] call[name[results].update, parameter[name[single_results]]] if <ast.UnaryOp object at 0x7da2054a53c0> begin[:] break <ast.AugAssign object at 0x7da2054a4100> return[tuple[[<ast.Name object at 0x7da2054a5810>, <ast.Name object at 0x7da2054a6d40>]]]
keyword[def] identifier[multi_ping] ( identifier[dest_addrs] , identifier[timeout] , identifier[retry] = literal[int] , identifier[ignore_lookup_errors] = keyword[False] ): literal[string] identifier[retry] = identifier[int] ( identifier[retry] ) keyword[if] identifier[retry] < literal[int] : identifier[retry] = literal[int] identifier[timeout] = identifier[float] ( identifier[timeout] ) keyword[if] identifier[timeout] < literal[int] : keyword[raise] identifier[MultiPingError] ( literal[string] ) identifier[retry_timeout] = identifier[float] ( identifier[timeout] )/( identifier[retry] + literal[int] ) keyword[if] identifier[retry_timeout] < literal[int] : keyword[raise] identifier[MultiPingError] ( literal[string] ) identifier[mp] = identifier[MultiPing] ( identifier[dest_addrs] , identifier[ignore_lookup_errors] = identifier[ignore_lookup_errors] ) identifier[results] ={} identifier[retry_count] = literal[int] keyword[while] identifier[retry_count] <= identifier[retry] : identifier[mp] . identifier[send] () identifier[single_results] , identifier[no_results] = identifier[mp] . identifier[receive] ( identifier[retry_timeout] ) identifier[results] . identifier[update] ( identifier[single_results] ) keyword[if] keyword[not] identifier[no_results] : keyword[break] identifier[retry_count] += literal[int] keyword[return] identifier[results] , identifier[no_results]
def multi_ping(dest_addrs, timeout, retry=0, ignore_lookup_errors=False): """ Combine send and receive measurement into single function. This offers a retry mechanism: Overall timeout time is divided by number of retries. Additional ICMPecho packets are sent to those addresses from which we have not received answers, yet. The retry mechanism is useful, because individual ICMP packets may get lost. If 'retry' is set to 0 then only a single packet is sent to each address. If 'ignore_lookup_errors' is set then any issues with resolving target names or looking up their address information will silently be ignored. Those targets simply appear in the 'no_results' return list. """ retry = int(retry) if retry < 0: retry = 0 # depends on [control=['if'], data=['retry']] timeout = float(timeout) if timeout < 0.1: raise MultiPingError('Timeout < 0.1 seconds not allowed') # depends on [control=['if'], data=[]] retry_timeout = float(timeout) / (retry + 1) if retry_timeout < 0.1: raise MultiPingError('Time between ping retries < 0.1 seconds') # depends on [control=['if'], data=[]] mp = MultiPing(dest_addrs, ignore_lookup_errors=ignore_lookup_errors) results = {} retry_count = 0 while retry_count <= retry: # Send a batch of pings mp.send() (single_results, no_results) = mp.receive(retry_timeout) # Add the results from the last sending of pings to the overall results results.update(single_results) if not no_results: # No addresses left? We are done. break # depends on [control=['if'], data=[]] retry_count += 1 # depends on [control=['while'], data=['retry_count']] return (results, no_results)
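# Usage sketch for multi_ping as defined above. The import path assumes the
# `multiping` package layout; sending ICMP echo requests normally requires
# root privileges or CAP_NET_RAW.
from multiping import multi_ping

addrs = ["8.8.8.8", "1.1.1.1", "nonexistent.invalid"]
# a 2 second overall budget split across the initial attempt plus 3 retries,
# so each round waits at most 0.5 seconds
responses, no_responses = multi_ping(addrs, timeout=2, retry=3,
                                     ignore_lookup_errors=True)
for addr, rtt in responses.items():
    print(f"{addr} responded in {rtt:.3f}s")
print("no response or lookup failed:", no_responses)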
def delete(self, group_id, session):
        '''taobao.crm.group.delete Delete a group. Removes all members from the group and then
        deletes the group itself. Note: group deletion is an asynchronous task;
        taobao.crm.grouptask.check must be called first to make sure no task is running on the
        attributes involved.'''
        request = TOPRequest('taobao.crm.group.delete')
        request['group_id'] = group_id
        self.create(self.execute(request, session), fields=['is_success',])
        return self.is_success
def function[delete, parameter[self, group_id, session]]:
    constant[taobao.crm.group.delete Delete a group. Removes all members from the group and then deletes the group itself. Note: group deletion is an asynchronous task; taobao.crm.grouptask.check must be called first to make sure no task is running on the attributes involved.]
    variable[request] assign[=] call[name[TOPRequest], parameter[constant[taobao.crm.group.delete]]]
    call[name[request]][constant[group_id]] assign[=] name[group_id]
    call[name[self].create, parameter[call[name[self].execute, parameter[name[request], name[session]]]]]
    return[name[self].is_success]
keyword[def] identifier[delete] ( identifier[self] , identifier[group_id] , identifier[session] ): literal[string] identifier[request] = identifier[TOPRequest] ( literal[string] ) identifier[request] [ literal[string] ]= identifier[group_id] identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] , identifier[session] ), identifier[fields] =[ literal[string] ,]) keyword[return] identifier[self] . identifier[is_success]
def delete(self, group_id, session):
    """taobao.crm.group.delete Delete a group. Removes all members from the group and then deletes the group itself. Note: group deletion is an asynchronous task; taobao.crm.grouptask.check must be called first to make sure no task is running on the attributes involved."""
    request = TOPRequest('taobao.crm.group.delete')
    request['group_id'] = group_id
    self.create(self.execute(request, session), fields=['is_success'])
    return self.is_success
def _add_reference(self, obj, ident=0): """ Adds a read reference to the marshaler storage :param obj: Reference to add :param ident: Log indentation level """ log_debug( "## New reference handle 0x{0:X}: {1} -> {2}".format( len(self.references) + self.BASE_REFERENCE_IDX, type(obj).__name__, repr(obj), ), ident, ) self.references.append(obj)
def function[_add_reference, parameter[self, obj, ident]]: constant[ Adds a read reference to the marshaler storage :param obj: Reference to add :param ident: Log indentation level ] call[name[log_debug], parameter[call[constant[## New reference handle 0x{0:X}: {1} -> {2}].format, parameter[binary_operation[call[name[len], parameter[name[self].references]] + name[self].BASE_REFERENCE_IDX], call[name[type], parameter[name[obj]]].__name__, call[name[repr], parameter[name[obj]]]]], name[ident]]] call[name[self].references.append, parameter[name[obj]]]
keyword[def] identifier[_add_reference] ( identifier[self] , identifier[obj] , identifier[ident] = literal[int] ): literal[string] identifier[log_debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[self] . identifier[references] )+ identifier[self] . identifier[BASE_REFERENCE_IDX] , identifier[type] ( identifier[obj] ). identifier[__name__] , identifier[repr] ( identifier[obj] ), ), identifier[ident] , ) identifier[self] . identifier[references] . identifier[append] ( identifier[obj] )
def _add_reference(self, obj, ident=0): """ Adds a read reference to the marshaler storage :param obj: Reference to add :param ident: Log indentation level """ log_debug('## New reference handle 0x{0:X}: {1} -> {2}'.format(len(self.references) + self.BASE_REFERENCE_IDX, type(obj).__name__, repr(obj)), ident) self.references.append(obj)
def xAxisIsMinor(self): ''' Returns True if the minor axis is parallel to the X axis, boolean. ''' return min(self.radius.x, self.radius.y) == self.radius.x
def function[xAxisIsMinor, parameter[self]]: constant[ Returns True if the minor axis is parallel to the X axis, boolean. ] return[compare[call[name[min], parameter[name[self].radius.x, name[self].radius.y]] equal[==] name[self].radius.x]]
keyword[def] identifier[xAxisIsMinor] ( identifier[self] ): literal[string] keyword[return] identifier[min] ( identifier[self] . identifier[radius] . identifier[x] , identifier[self] . identifier[radius] . identifier[y] )== identifier[self] . identifier[radius] . identifier[x]
def xAxisIsMinor(self): """ Returns True if the minor axis is parallel to the X axis, boolean. """ return min(self.radius.x, self.radius.y) == self.radius.x
def space_angle(phi1, theta1, phi2, theta2):
    """Also called Great-circle-distance --

    use long-ass formula from wikipedia (last in section):
    https://en.wikipedia.org/wiki/Great-circle_distance#Computational_formulas

    Space angle only makes sense in lon-lat, so convert zenith -> latitude.

    """
    from numpy import pi, sin, cos, arctan2, sqrt, square

    lamb1 = pi / 2 - theta1
    lamb2 = pi / 2 - theta2
    lambdelt = lamb2 - lamb1
    under = sin(phi1) * sin(phi2) + cos(phi1) * cos(phi2) * cos(lambdelt)
    over = sqrt(
        square(cos(phi2) * sin(lambdelt)) +
        square(cos(phi1) * sin(phi2) - sin(phi1) * cos(phi2) * cos(lambdelt))
    )
    angle = arctan2(over, under)
    return angle
def function[space_angle, parameter[phi1, theta1, phi2, theta2]]: constant[Also called Great-circle-distance -- use long-ass formula from wikipedia (last in section): https://en.wikipedia.org/wiki/Great-circle_distance#Computational_formulas Space angle only makes sense in lon-lat, so convert zenith -> latitude. ] from relative_module[numpy] import module[pi], module[sin], module[cos], module[arctan2], module[sqrt], module[square] variable[lamb1] assign[=] binary_operation[binary_operation[name[pi] / constant[2]] - name[theta1]] variable[lamb2] assign[=] binary_operation[binary_operation[name[pi] / constant[2]] - name[theta2]] variable[lambdelt] assign[=] binary_operation[name[lamb2] - name[lamb1]] variable[under] assign[=] binary_operation[binary_operation[call[name[sin], parameter[name[phi1]]] * call[name[sin], parameter[name[phi2]]]] + binary_operation[binary_operation[call[name[cos], parameter[name[phi1]]] * call[name[cos], parameter[name[phi2]]]] * call[name[cos], parameter[name[lambdelt]]]]] variable[over] assign[=] call[name[sqrt], parameter[binary_operation[call[name[np].square, parameter[binary_operation[call[name[cos], parameter[name[phi2]]] * call[name[sin], parameter[name[lambdelt]]]]]] + call[name[square], parameter[binary_operation[binary_operation[call[name[cos], parameter[name[phi1]]] * call[name[sin], parameter[name[phi2]]]] - binary_operation[binary_operation[call[name[sin], parameter[name[phi1]]] * call[name[cos], parameter[name[phi2]]]] * call[name[cos], parameter[name[lambdelt]]]]]]]]]] variable[angle] assign[=] call[name[arctan2], parameter[name[over], name[under]]] return[name[angle]]
keyword[def] identifier[space_angle] ( identifier[phi1] , identifier[theta1] , identifier[phi2] , identifier[theta2] ): literal[string] keyword[from] identifier[numpy] keyword[import] identifier[pi] , identifier[sin] , identifier[cos] , identifier[arctan2] , identifier[sqrt] , identifier[square] identifier[lamb1] = identifier[pi] / literal[int] - identifier[theta1] identifier[lamb2] = identifier[pi] / literal[int] - identifier[theta2] identifier[lambdelt] = identifier[lamb2] - identifier[lamb1] identifier[under] = identifier[sin] ( identifier[phi1] )* identifier[sin] ( identifier[phi2] )+ identifier[cos] ( identifier[phi1] )* identifier[cos] ( identifier[phi2] )* identifier[cos] ( identifier[lambdelt] ) identifier[over] = identifier[sqrt] ( identifier[np] . identifier[square] (( identifier[cos] ( identifier[phi2] )* identifier[sin] ( identifier[lambdelt] )))+ identifier[square] ( identifier[cos] ( identifier[phi1] )* identifier[sin] ( identifier[phi2] )- identifier[sin] ( identifier[phi1] )* identifier[cos] ( identifier[phi2] )* identifier[cos] ( identifier[lambdelt] )) ) identifier[angle] = identifier[arctan2] ( identifier[over] , identifier[under] ) keyword[return] identifier[angle]
def space_angle(phi1, theta1, phi2, theta2): """Also called Great-circle-distance -- use long-ass formula from wikipedia (last in section): https://en.wikipedia.org/wiki/Great-circle_distance#Computational_formulas Space angle only makes sense in lon-lat, so convert zenith -> latitude. """ from numpy import pi, sin, cos, arctan2, sqrt, square lamb1 = pi / 2 - theta1 lamb2 = pi / 2 - theta2 lambdelt = lamb2 - lamb1 under = sin(phi1) * sin(phi2) + cos(phi1) * cos(phi2) * cos(lambdelt) over = sqrt(np.square(cos(phi2) * sin(lambdelt)) + square(cos(phi1) * sin(phi2) - sin(phi1) * cos(phi2) * cos(lambdelt))) angle = arctan2(over, under) return angle
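# Independent sanity check (added example, not from the original source): the
# same Vincenty-style great-circle formula written with explicit latitude and
# longitude arguments, verified against the angle between the corresponding
# unit vectors on the sphere.
import numpy as np

def great_circle(lat1, lon1, lat2, lon2):
    dlon = lon2 - lon1
    under = np.sin(lat1) * np.sin(lat2) + np.cos(lat1) * np.cos(lat2) * np.cos(dlon)
    over = np.sqrt((np.cos(lat2) * np.sin(dlon)) ** 2 +
                   (np.cos(lat1) * np.sin(lat2) -
                    np.sin(lat1) * np.cos(lat2) * np.cos(dlon)) ** 2)
    return np.arctan2(over, under)

def unit_vec(lat, lon):
    return np.array([np.cos(lat) * np.cos(lon),
                     np.cos(lat) * np.sin(lon),
                     np.sin(lat)])

lat1, lon1, lat2, lon2 = 0.3, -1.2, -0.7, 2.1
a = great_circle(lat1, lon1, lat2, lon2)
b = np.arccos(np.clip(unit_vec(lat1, lon1) @ unit_vec(lat2, lon2), -1.0, 1.0))
assert np.isclose(a, b)   # both are the central angle between the two points
print(a)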
def dispatch():
    """ This method runs the wheel. It is used to connect signals with their handlers, based on
    the aliases.
    :return:
    """
    aliases = SignalDispatcher.signals.keys()
    for alias in aliases:
        handlers = SignalDispatcher.handlers.get(alias)
        signal = SignalDispatcher.signals.get(alias)
        if signal is None or not handlers:
            continue
        for handler in handlers:
            signal.connect(handler)
def function[dispatch, parameter[]]: constant[ This methods runs the wheel. It is used to connect signal with their handlers, based on the aliases. :return: ] variable[aliases] assign[=] call[name[SignalDispatcher].signals.keys, parameter[]] for taget[name[alias]] in starred[name[aliases]] begin[:] variable[handlers] assign[=] call[name[SignalDispatcher].handlers.get, parameter[name[alias]]] variable[signal] assign[=] call[name[SignalDispatcher].signals.get, parameter[name[alias]]] if <ast.BoolOp object at 0x7da1b2717340> begin[:] continue for taget[name[handler]] in starred[name[handlers]] begin[:] call[name[signal].connect, parameter[name[handler]]]
keyword[def] identifier[dispatch] (): literal[string] identifier[aliases] = identifier[SignalDispatcher] . identifier[signals] . identifier[keys] () keyword[for] identifier[alias] keyword[in] identifier[aliases] : identifier[handlers] = identifier[SignalDispatcher] . identifier[handlers] . identifier[get] ( identifier[alias] ) identifier[signal] = identifier[SignalDispatcher] . identifier[signals] . identifier[get] ( identifier[alias] ) keyword[if] identifier[signal] keyword[is] keyword[None] keyword[or] identifier[handlers] . identifier[__len__] ()== literal[int] : keyword[continue] keyword[for] identifier[handler] keyword[in] identifier[handlers] : identifier[signal] . identifier[connect] ( identifier[handler] )
def dispatch(): """ This methods runs the wheel. It is used to connect signal with their handlers, based on the aliases. :return: """ aliases = SignalDispatcher.signals.keys() for alias in aliases: handlers = SignalDispatcher.handlers.get(alias) signal = SignalDispatcher.signals.get(alias) if signal is None or handlers.__len__() == 0: continue # depends on [control=['if'], data=[]] for handler in handlers: signal.connect(handler) # depends on [control=['for'], data=['handler']] # depends on [control=['for'], data=['alias']]
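# Self-contained toy version of the alias-keyed wiring that dispatch()
# performs above; this Signal class is a minimal stand-in for whatever signal
# implementation the real dispatcher registers.
from collections import defaultdict

class Signal:
    def __init__(self):
        self._handlers = []

    def connect(self, handler):
        self._handlers.append(handler)

    def emit(self, *args):
        for handler in self._handlers:
            handler(*args)

class SignalDispatcher:
    signals = {}                  # alias -> Signal
    handlers = defaultdict(list)  # alias -> list of callables

    @staticmethod
    def dispatch():
        for alias, signal in SignalDispatcher.signals.items():
            for handler in SignalDispatcher.handlers.get(alias, []):
                signal.connect(handler)

SignalDispatcher.signals["saved"] = Signal()
SignalDispatcher.handlers["saved"].append(lambda doc: print("saved:", doc))
SignalDispatcher.dispatch()
SignalDispatcher.signals["saved"].emit("report.txt")   # prints "saved: report.txt"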
def subject(sid, subjects_path=None, meta_data=None, default_alignment='MSMAll'):
    '''
    subject(sid) yields a HCP Subject object for the subject with the given subject id; sid may be
    a path to a subject or a subject id, in which case the subject paths are searched for it.
    subject(None, path) yields a non-standard HCP subject at the given path.
    subject(sid, path) yields the specific HCP Subject at the given path.

    Subjects are cached and not reloaded. Note that subjects returned by subject() are always
    persistent Immutable objects; this means that you must create a transient version of the
    subject to modify it via the member function sub.transient(). Better, you can make copies of
    the objects with desired modifications using the copy method.

    This function works with the neuropythy.hcp.auto_download() function; if you have enabled auto-
    downloading, then subjects returned from this function may be downloading themselves lazily.
    '''
    if subjects_path is None:
        if os.path.isdir(str(sid)):
            (fdir, fnm) = os.path.split(str(sid))
            try:
                sid = to_subject_id(fnm)
            except Exception:
                sid = None
            pth = fdir
        else:
            sid = to_subject_id(sid)
            fnm = str(sid)
            fdir = find_subject_path(sid)
            if fdir is None:
                raise ValueError('Could not locate subject with id \'%s\'' % sid)
            pth = os.path.split(fdir)[0]
    else:
        if sid is None:
            (pth, fnm) = os.path.split(subjects_path)
        else:
            sid = to_subject_id(sid)
            fnm = str(sid)
        fdir = subjects_path
    fdir = os.path.abspath(os.path.join(pth, fnm))
    if fdir in subject._cache:
        return subject._cache[fdir]
    sub = Subject(sid, fdir, meta_data=meta_data, default_alignment=default_alignment).persist()
    if isinstance(sub, Subject):
        subject._cache[fdir] = sub
    return sub
def function[subject, parameter[sid, subjects_path, meta_data, default_alignment]]: constant[ subject(sid) yields a HCP Subject object for the subject with the given subject id; sid may be a path to a subject or a subject id, in which case the subject paths are searched for it. subject(None, path) yields a non-standard HCP subject at the given path. subject(sid, path) yields the specific HCP Subject at the given path. Subjects are cached and not reloaded. Note that subects returned by subject() are always persistent Immutable objects; this means that you must create a transient version of the subject to modify it via the member function sub.transient(). Better, you can make copies of the objects with desired modifications using the copy method. This function works with the neuropythy.hcp.auto_download() function; if you have enabled auto- downloading, then subjects returned from this subject may be downloading themselves lazily. ] if compare[name[subjects_path] is constant[None]] begin[:] if call[name[os].path.isdir, parameter[call[name[str], parameter[name[sid]]]]] begin[:] <ast.Tuple object at 0x7da1b0e39ae0> assign[=] call[name[os].path.split, parameter[call[name[str], parameter[name[sid]]]]] <ast.Try object at 0x7da1b0e3bc40> variable[pth] assign[=] name[fdir] variable[fdir] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[pth], name[fnm]]]]] if compare[name[fdir] in name[subject]._cache] begin[:] return[call[name[subject]._cache][name[fdir]]] variable[sub] assign[=] call[call[name[Subject], parameter[name[sid], name[fdir]]].persist, parameter[]] if call[name[isinstance], parameter[name[sub], name[Subject]]] begin[:] call[name[subject]._cache][name[fdir]] assign[=] name[sub] return[name[sub]]
keyword[def] identifier[subject] ( identifier[sid] , identifier[subjects_path] = keyword[None] , identifier[meta_data] = keyword[None] , identifier[default_alignment] = literal[string] ): literal[string] keyword[if] identifier[subjects_path] keyword[is] keyword[None] : keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[str] ( identifier[sid] )): ( identifier[fdir] , identifier[fnm] )= identifier[os] . identifier[path] . identifier[split] ( identifier[str] ( identifier[sid] )) keyword[try] : identifier[sid] = identifier[to_subject_id] ( identifier[fnm] ) keyword[except] identifier[Exception] : identifier[sid] = keyword[None] identifier[pth] = identifier[fdir] keyword[else] : identifier[sid] = identifier[to_subject_id] ( identifier[sid] ) identifier[fnm] = identifier[str] ( identifier[sid] ) identifier[fdir] = identifier[find_subject_path] ( identifier[sid] ) keyword[if] identifier[fdir] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[sid] ) identifier[pth] = identifier[os] . identifier[path] . identifier[split] ( identifier[fdir] )[ literal[int] ] keyword[else] : keyword[if] identifier[sid] keyword[is] keyword[None] : ( identifier[pth] , identifier[fnm] )= identifier[os] . identifier[path] . identifier[split] ( identifier[subjects_path] ) keyword[else] : identifier[sid] = identifier[to_subject_id] ( identifier[sid] ) identifier[fnm] = identifier[str] ( identifier[sid] ) identifier[fdir] = identifier[subjects_path] identifier[fdir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[pth] , identifier[fnm] )) keyword[if] identifier[fdir] keyword[in] identifier[subject] . identifier[_cache] : keyword[return] identifier[subject] . identifier[_cache] [ identifier[fdir] ] identifier[sub] = identifier[Subject] ( identifier[sid] , identifier[fdir] , identifier[meta_data] = identifier[meta_data] , identifier[default_alignment] = identifier[default_alignment] ). identifier[persist] () keyword[if] identifier[isinstance] ( identifier[sub] , identifier[Subject] ): identifier[subject] . identifier[_cache] [ identifier[fdir] ]= identifier[sub] keyword[return] identifier[sub]
def subject(sid, subjects_path=None, meta_data=None, default_alignment='MSMAll'): """ subject(sid) yields a HCP Subject object for the subject with the given subject id; sid may be a path to a subject or a subject id, in which case the subject paths are searched for it. subject(None, path) yields a non-standard HCP subject at the given path. subject(sid, path) yields the specific HCP Subject at the given path. Subjects are cached and not reloaded. Note that subects returned by subject() are always persistent Immutable objects; this means that you must create a transient version of the subject to modify it via the member function sub.transient(). Better, you can make copies of the objects with desired modifications using the copy method. This function works with the neuropythy.hcp.auto_download() function; if you have enabled auto- downloading, then subjects returned from this subject may be downloading themselves lazily. """ if subjects_path is None: if os.path.isdir(str(sid)): (fdir, fnm) = os.path.split(str(sid)) try: sid = to_subject_id(fnm) # depends on [control=['try'], data=[]] except Exception: sid = None # depends on [control=['except'], data=[]] pth = fdir # depends on [control=['if'], data=[]] else: sid = to_subject_id(sid) fnm = str(sid) fdir = find_subject_path(sid) if fdir is None: raise ValueError("Could not locate subject with id '%s'" % sid) # depends on [control=['if'], data=[]] pth = os.path.split(fdir)[0] # depends on [control=['if'], data=[]] elif sid is None: (pth, fnm) = os.path.split(subjects_path) # depends on [control=['if'], data=[]] else: sid = to_subject_id(sid) fnm = str(sid) fdir = subjects_path fdir = os.path.abspath(os.path.join(pth, fnm)) if fdir in subject._cache: return subject._cache[fdir] # depends on [control=['if'], data=['fdir']] sub = Subject(sid, fdir, meta_data=meta_data, default_alignment=default_alignment).persist() if isinstance(sub, Subject): subject._cache[fdir] = sub # depends on [control=['if'], data=[]] return sub
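# Minimal sketch of the path-keyed caching that subject() relies on above:
# the resolved absolute directory is the cache key, so different spellings of
# the same subject path construct the object only once. The factory here is a
# placeholder, not neuropythy's real Subject constructor.
import os

def cached_loader(factory):
    cache = {}
    def load(path):
        key = os.path.abspath(path)
        if key not in cache:
            cache[key] = factory(key)
        return cache[key]
    return load

load_subject = cached_loader(lambda p: {"dir": p})
a = load_subject("subjects/100307")
b = load_subject("./subjects/../subjects/100307")
assert a is b   # same object, constructed once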
def _get_adc_value(self, channel, average=None): '''Read ADC ''' conf = self.SCAN_OFF | self.SINGLE_ENDED | ((0x1e) & (channel << 1)) self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', conf))) def read_data(): ret = self._intf.read(self._base_addr + self.MAX_1239_ADD | 1, size=2) ret.reverse() ret[1] = ret[1] & 0x0f # 12-bit ADC return unpack_from('H', ret)[0] if average: raw = 0 for _ in range(average): raw += read_data() raw /= average else: raw = read_data() return raw
def function[_get_adc_value, parameter[self, channel, average]]: constant[Read ADC ] variable[conf] assign[=] binary_operation[binary_operation[name[self].SCAN_OFF <ast.BitOr object at 0x7da2590d6aa0> name[self].SINGLE_ENDED] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[constant[30] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[name[channel] <ast.LShift object at 0x7da2590d69e0> constant[1]]]] call[name[self]._intf.write, parameter[binary_operation[name[self]._base_addr + name[self].MAX_1239_ADD], call[name[array], parameter[constant[B], call[name[pack], parameter[constant[B], name[conf]]]]]]] def function[read_data, parameter[]]: variable[ret] assign[=] call[name[self]._intf.read, parameter[binary_operation[binary_operation[name[self]._base_addr + name[self].MAX_1239_ADD] <ast.BitOr object at 0x7da2590d6aa0> constant[1]]]] call[name[ret].reverse, parameter[]] call[name[ret]][constant[1]] assign[=] binary_operation[call[name[ret]][constant[1]] <ast.BitAnd object at 0x7da2590d6b60> constant[15]] return[call[call[name[unpack_from], parameter[constant[H], name[ret]]]][constant[0]]] if name[average] begin[:] variable[raw] assign[=] constant[0] for taget[name[_]] in starred[call[name[range], parameter[name[average]]]] begin[:] <ast.AugAssign object at 0x7da1b05058d0> <ast.AugAssign object at 0x7da1b05065f0> return[name[raw]]
keyword[def] identifier[_get_adc_value] ( identifier[self] , identifier[channel] , identifier[average] = keyword[None] ): literal[string] identifier[conf] = identifier[self] . identifier[SCAN_OFF] | identifier[self] . identifier[SINGLE_ENDED] |(( literal[int] )&( identifier[channel] << literal[int] )) identifier[self] . identifier[_intf] . identifier[write] ( identifier[self] . identifier[_base_addr] + identifier[self] . identifier[MAX_1239_ADD] , identifier[array] ( literal[string] , identifier[pack] ( literal[string] , identifier[conf] ))) keyword[def] identifier[read_data] (): identifier[ret] = identifier[self] . identifier[_intf] . identifier[read] ( identifier[self] . identifier[_base_addr] + identifier[self] . identifier[MAX_1239_ADD] | literal[int] , identifier[size] = literal[int] ) identifier[ret] . identifier[reverse] () identifier[ret] [ literal[int] ]= identifier[ret] [ literal[int] ]& literal[int] keyword[return] identifier[unpack_from] ( literal[string] , identifier[ret] )[ literal[int] ] keyword[if] identifier[average] : identifier[raw] = literal[int] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[average] ): identifier[raw] += identifier[read_data] () identifier[raw] /= identifier[average] keyword[else] : identifier[raw] = identifier[read_data] () keyword[return] identifier[raw]
def _get_adc_value(self, channel, average=None): """Read ADC """ conf = self.SCAN_OFF | self.SINGLE_ENDED | 30 & channel << 1 self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', conf))) def read_data(): ret = self._intf.read(self._base_addr + self.MAX_1239_ADD | 1, size=2) ret.reverse() ret[1] = ret[1] & 15 # 12-bit ADC return unpack_from('H', ret)[0] if average: raw = 0 for _ in range(average): raw += read_data() # depends on [control=['for'], data=[]] raw /= average # depends on [control=['if'], data=[]] else: raw = read_data() return raw
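# Worked example (illustrative, not from the original class) of the
# configuration-byte layout assumed by _get_adc_value above: the channel
# number is shifted into bits 4..1 and masked with 0x1e before being OR-ed
# with the mode flags. The two flag values below are placeholders, not the
# driver's real MAX1239 constants.
SCAN_OFF = 0b01100000      # placeholder: "convert the selected channel only"
SINGLE_ENDED = 0b10000001  # placeholder: start bit plus single-ended mode

for channel in (0, 5, 11):
    conf = SCAN_OFF | SINGLE_ENDED | (0x1E & (channel << 1))
    print(f"channel {channel:2d} -> conf byte 0b{conf:08b}")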
def get_products(self, product_ids): """ This function (and backend API) is being obsoleted. Don't use it anymore. """ if self.product_set_id is None: raise ValueError('product_set_id must be specified') data = {'ids': product_ids} return self.client.get(self.base_url + '/products', json=data)
def function[get_products, parameter[self, product_ids]]: constant[ This function (and backend API) is being obsoleted. Don't use it anymore. ] if compare[name[self].product_set_id is constant[None]] begin[:] <ast.Raise object at 0x7da1b10e42e0> variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b10e6b30>], [<ast.Name object at 0x7da1b10e73a0>]] return[call[name[self].client.get, parameter[binary_operation[name[self].base_url + constant[/products]]]]]
keyword[def] identifier[get_products] ( identifier[self] , identifier[product_ids] ): literal[string] keyword[if] identifier[self] . identifier[product_set_id] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[data] ={ literal[string] : identifier[product_ids] } keyword[return] identifier[self] . identifier[client] . identifier[get] ( identifier[self] . identifier[base_url] + literal[string] , identifier[json] = identifier[data] )
def get_products(self, product_ids): """ This function (and backend API) is being obsoleted. Don't use it anymore. """ if self.product_set_id is None: raise ValueError('product_set_id must be specified') # depends on [control=['if'], data=[]] data = {'ids': product_ids} return self.client.get(self.base_url + '/products', json=data)
def unmarshall_value(self, value): """ Unmarshalls a Crash object read from the database. @type value: str @param value: Object to convert. @rtype: L{Crash} @return: Converted object. """ value = str(value) if self.escapeValues: value = value.decode('hex') if self.compressValues: value = zlib.decompress(value) value = pickle.loads(value) return value
def function[unmarshall_value, parameter[self, value]]: constant[ Unmarshalls a Crash object read from the database. @type value: str @param value: Object to convert. @rtype: L{Crash} @return: Converted object. ] variable[value] assign[=] call[name[str], parameter[name[value]]] if name[self].escapeValues begin[:] variable[value] assign[=] call[name[value].decode, parameter[constant[hex]]] if name[self].compressValues begin[:] variable[value] assign[=] call[name[zlib].decompress, parameter[name[value]]] variable[value] assign[=] call[name[pickle].loads, parameter[name[value]]] return[name[value]]
keyword[def] identifier[unmarshall_value] ( identifier[self] , identifier[value] ): literal[string] identifier[value] = identifier[str] ( identifier[value] ) keyword[if] identifier[self] . identifier[escapeValues] : identifier[value] = identifier[value] . identifier[decode] ( literal[string] ) keyword[if] identifier[self] . identifier[compressValues] : identifier[value] = identifier[zlib] . identifier[decompress] ( identifier[value] ) identifier[value] = identifier[pickle] . identifier[loads] ( identifier[value] ) keyword[return] identifier[value]
def unmarshall_value(self, value): """ Unmarshalls a Crash object read from the database. @type value: str @param value: Object to convert. @rtype: L{Crash} @return: Converted object. """ value = str(value) if self.escapeValues: value = value.decode('hex') # depends on [control=['if'], data=[]] if self.compressValues: value = zlib.decompress(value) # depends on [control=['if'], data=[]] value = pickle.loads(value) return value
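# Round-trip sketch of the scheme that unmarshall_value reverses above:
# pickle, optionally zlib-compress, optionally hex-escape. Note that
# value.decode('hex') is the Python 2 spelling; bytes.hex()/bytes.fromhex()
# are the Python 3 equivalents used here.
import pickle
import zlib

def marshall(value, compress=True, escape=True):
    data = pickle.dumps(value)
    if compress:
        data = zlib.compress(data)
    if escape:
        data = data.hex()
    return data

def unmarshall(data, compress=True, escape=True):
    if escape:
        data = bytes.fromhex(data)
    if compress:
        data = zlib.decompress(data)
    return pickle.loads(data)

blob = marshall({"signal": "SIGSEGV", "pc": 0xDEADBEEF})
assert unmarshall(blob) == {"signal": "SIGSEGV", "pc": 0xDEADBEEF}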
def _list_vlans_by_name(self, name): """Returns a list of IDs of VLANs which match the given VLAN name. :param string name: a VLAN name :returns: List of matching IDs """ results = self.list_vlans(name=name, mask='id') return [result['id'] for result in results]
def function[_list_vlans_by_name, parameter[self, name]]: constant[Returns a list of IDs of VLANs which match the given VLAN name. :param string name: a VLAN name :returns: List of matching IDs ] variable[results] assign[=] call[name[self].list_vlans, parameter[]] return[<ast.ListComp object at 0x7da18dc06b30>]
keyword[def] identifier[_list_vlans_by_name] ( identifier[self] , identifier[name] ): literal[string] identifier[results] = identifier[self] . identifier[list_vlans] ( identifier[name] = identifier[name] , identifier[mask] = literal[string] ) keyword[return] [ identifier[result] [ literal[string] ] keyword[for] identifier[result] keyword[in] identifier[results] ]
def _list_vlans_by_name(self, name): """Returns a list of IDs of VLANs which match the given VLAN name. :param string name: a VLAN name :returns: List of matching IDs """ results = self.list_vlans(name=name, mask='id') return [result['id'] for result in results]
def calc_delta_c(c200): """Calculate characteristic overdensity from concentration. Parameters ---------- c200 : ndarray or float Cluster concentration parameter. Returns ---------- ndarray or float Cluster characteristic overdensity, of same type as c200. """ top = (200. / 3.) * c200**3. bottom = np.log(1. + c200) - (c200 / (1. + c200)) return (top / bottom)
def function[calc_delta_c, parameter[c200]]: constant[Calculate characteristic overdensity from concentration. Parameters ---------- c200 : ndarray or float Cluster concentration parameter. Returns ---------- ndarray or float Cluster characteristic overdensity, of same type as c200. ] variable[top] assign[=] binary_operation[binary_operation[constant[200.0] / constant[3.0]] * binary_operation[name[c200] ** constant[3.0]]] variable[bottom] assign[=] binary_operation[call[name[np].log, parameter[binary_operation[constant[1.0] + name[c200]]]] - binary_operation[name[c200] / binary_operation[constant[1.0] + name[c200]]]] return[binary_operation[name[top] / name[bottom]]]
keyword[def] identifier[calc_delta_c] ( identifier[c200] ): literal[string] identifier[top] =( literal[int] / literal[int] )* identifier[c200] ** literal[int] identifier[bottom] = identifier[np] . identifier[log] ( literal[int] + identifier[c200] )-( identifier[c200] /( literal[int] + identifier[c200] )) keyword[return] ( identifier[top] / identifier[bottom] )
def calc_delta_c(c200): """Calculate characteristic overdensity from concentration. Parameters ---------- c200 : ndarray or float Cluster concentration parameter. Returns ---------- ndarray or float Cluster characteristic overdensity, of same type as c200. """ top = 200.0 / 3.0 * c200 ** 3.0 bottom = np.log(1.0 + c200) - c200 / (1.0 + c200) return top / bottom
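# Numeric check (added example) of the property that defines calc_delta_c
# above: with this delta_c, the mean density of an NFW halo inside r200 comes
# out to 200 times the critical density (set to 1 here). Assumes scipy is
# available and calls calc_delta_c as defined above.
import numpy as np
from scipy.integrate import quad

c200 = 5.0
rs = 1.0                       # scale radius in arbitrary units; r200 = c200 * rs
delta_c = calc_delta_c(c200)

# NFW density profile in units of the critical density
rho = lambda r: delta_c / ((r / rs) * (1.0 + r / rs) ** 2)
mass, _ = quad(lambda r: 4.0 * np.pi * r ** 2 * rho(r), 0.0, c200 * rs)
mean_overdensity = mass / (4.0 / 3.0 * np.pi * (c200 * rs) ** 3)
assert np.isclose(mean_overdensity, 200.0)
print(f"delta_c = {delta_c:.1f}, mean overdensity = {mean_overdensity:.1f}")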
def destroy(self): """ Destroy this transport """ self.setOnMessageReceivedCallback(None) self.setOnNodeConnectedCallback(None) self.setOnNodeDisconnectedCallback(None) self.setOnReadonlyNodeConnectedCallback(None) self.setOnReadonlyNodeDisconnectedCallback(None) for node in self._nodes | self._readonlyNodes: self.dropNode(node) if self._server is not None: self._server.unbind() for conn in self._unknownConnections: conn.disconnect() self._unknownConnections = set()
def function[destroy, parameter[self]]: constant[ Destroy this transport ] call[name[self].setOnMessageReceivedCallback, parameter[constant[None]]] call[name[self].setOnNodeConnectedCallback, parameter[constant[None]]] call[name[self].setOnNodeDisconnectedCallback, parameter[constant[None]]] call[name[self].setOnReadonlyNodeConnectedCallback, parameter[constant[None]]] call[name[self].setOnReadonlyNodeDisconnectedCallback, parameter[constant[None]]] for taget[name[node]] in starred[binary_operation[name[self]._nodes <ast.BitOr object at 0x7da2590d6aa0> name[self]._readonlyNodes]] begin[:] call[name[self].dropNode, parameter[name[node]]] if compare[name[self]._server is_not constant[None]] begin[:] call[name[self]._server.unbind, parameter[]] for taget[name[conn]] in starred[name[self]._unknownConnections] begin[:] call[name[conn].disconnect, parameter[]] name[self]._unknownConnections assign[=] call[name[set], parameter[]]
keyword[def] identifier[destroy] ( identifier[self] ): literal[string] identifier[self] . identifier[setOnMessageReceivedCallback] ( keyword[None] ) identifier[self] . identifier[setOnNodeConnectedCallback] ( keyword[None] ) identifier[self] . identifier[setOnNodeDisconnectedCallback] ( keyword[None] ) identifier[self] . identifier[setOnReadonlyNodeConnectedCallback] ( keyword[None] ) identifier[self] . identifier[setOnReadonlyNodeDisconnectedCallback] ( keyword[None] ) keyword[for] identifier[node] keyword[in] identifier[self] . identifier[_nodes] | identifier[self] . identifier[_readonlyNodes] : identifier[self] . identifier[dropNode] ( identifier[node] ) keyword[if] identifier[self] . identifier[_server] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_server] . identifier[unbind] () keyword[for] identifier[conn] keyword[in] identifier[self] . identifier[_unknownConnections] : identifier[conn] . identifier[disconnect] () identifier[self] . identifier[_unknownConnections] = identifier[set] ()
def destroy(self): """ Destroy this transport """ self.setOnMessageReceivedCallback(None) self.setOnNodeConnectedCallback(None) self.setOnNodeDisconnectedCallback(None) self.setOnReadonlyNodeConnectedCallback(None) self.setOnReadonlyNodeDisconnectedCallback(None) for node in self._nodes | self._readonlyNodes: self.dropNode(node) # depends on [control=['for'], data=['node']] if self._server is not None: self._server.unbind() # depends on [control=['if'], data=[]] for conn in self._unknownConnections: conn.disconnect() # depends on [control=['for'], data=['conn']] self._unknownConnections = set()
def write_frames(self, channel_id, frames_out):
        """Marshal and write multiple outgoing pamqp frames to the Socket.

        :param int channel_id: Channel ID.
        :param list frames_out: AMQP frames.
        :return:
        """
        data_out = EMPTY_BUFFER
        for single_frame in frames_out:
            data_out += pamqp_frame.marshal(single_frame, channel_id)
        self.heartbeat.register_write()
        self._io.write_to_socket(data_out)
def function[write_frames, parameter[self, channel_id, frames_out]]: constant[Marshal and write multiple outgoing pamqp frames to the Socket. :param int channel_id: Channel ID/ :param list frames_out: Amqp frames. :return: ] variable[data_out] assign[=] name[EMPTY_BUFFER] for taget[name[single_frame]] in starred[name[frames_out]] begin[:] <ast.AugAssign object at 0x7da18f721660> call[name[self].heartbeat.register_write, parameter[]] call[name[self]._io.write_to_socket, parameter[name[data_out]]]
keyword[def] identifier[write_frames] ( identifier[self] , identifier[channel_id] , identifier[frames_out] ): literal[string] identifier[data_out] = identifier[EMPTY_BUFFER] keyword[for] identifier[single_frame] keyword[in] identifier[frames_out] : identifier[data_out] += identifier[pamqp_frame] . identifier[marshal] ( identifier[single_frame] , identifier[channel_id] ) identifier[self] . identifier[heartbeat] . identifier[register_write] () identifier[self] . identifier[_io] . identifier[write_to_socket] ( identifier[data_out] )
def write_frames(self, channel_id, frames_out): """Marshal and write multiple outgoing pamqp frames to the Socket. :param int channel_id: Channel ID/ :param list frames_out: Amqp frames. :return: """ data_out = EMPTY_BUFFER for single_frame in frames_out: data_out += pamqp_frame.marshal(single_frame, channel_id) # depends on [control=['for'], data=['single_frame']] self.heartbeat.register_write() self._io.write_to_socket(data_out)
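# Sketch of the batching idea in write_frames above, with a toy marshaller
# standing in for pamqp: all frames for one channel are concatenated and
# handed to the socket in a single write call, so another writer cannot
# interleave bytes mid-frame. The wire format here is invented for the
# example and is not real AMQP framing.
import struct

EMPTY_BUFFER = b""

def marshal(payload, channel_id):
    # toy framing: 2-byte channel id, 4-byte length, then the payload
    return struct.pack(">HI", channel_id, len(payload)) + payload

def write_frames(sock_write, channel_id, frames_out):
    data_out = EMPTY_BUFFER
    for single_frame in frames_out:
        data_out += marshal(single_frame, channel_id)
    sock_write(data_out)     # one write for the whole batch

sent = []
write_frames(sent.append, 1, [b"method", b"header", b"body"])
print(sent[0])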
def _populate_ranking_payoff_arrays(payoff_arrays, scores, costs): """ Populate the ndarrays in `payoff_arrays` with the payoff values of the ranking game given `scores` and `costs`. Parameters ---------- payoff_arrays : tuple(ndarray(float, ndim=2)) Tuple of 2 ndarrays of shape (n, n). Modified in place. scores : ndarray(int, ndim=2) ndarray of shape (2, n) containing score values corresponding to the effort levels for the two players. costs : ndarray(float, ndim=2) ndarray of shape (2, n-1) containing cost values corresponding to the n-1 positive effort levels for the two players, with the assumption that the cost of the zero effort action is zero. """ n = payoff_arrays[0].shape[0] for p, payoff_array in enumerate(payoff_arrays): payoff_array[0, :] = 0 for i in range(1, n): for j in range(n): payoff_array[i, j] = -costs[p, i-1] prize = 1. for i in range(n): for j in range(n): if scores[0, i] > scores[1, j]: payoff_arrays[0][i, j] += prize elif scores[0, i] < scores[1, j]: payoff_arrays[1][j, i] += prize else: payoff_arrays[0][i, j] += prize / 2 payoff_arrays[1][j, i] += prize / 2
def function[_populate_ranking_payoff_arrays, parameter[payoff_arrays, scores, costs]]: constant[ Populate the ndarrays in `payoff_arrays` with the payoff values of the ranking game given `scores` and `costs`. Parameters ---------- payoff_arrays : tuple(ndarray(float, ndim=2)) Tuple of 2 ndarrays of shape (n, n). Modified in place. scores : ndarray(int, ndim=2) ndarray of shape (2, n) containing score values corresponding to the effort levels for the two players. costs : ndarray(float, ndim=2) ndarray of shape (2, n-1) containing cost values corresponding to the n-1 positive effort levels for the two players, with the assumption that the cost of the zero effort action is zero. ] variable[n] assign[=] call[call[name[payoff_arrays]][constant[0]].shape][constant[0]] for taget[tuple[[<ast.Name object at 0x7da204567af0>, <ast.Name object at 0x7da204565e40>]]] in starred[call[name[enumerate], parameter[name[payoff_arrays]]]] begin[:] call[name[payoff_array]][tuple[[<ast.Constant object at 0x7da204564a30>, <ast.Slice object at 0x7da204566a10>]]] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[n]]]] begin[:] for taget[name[j]] in starred[call[name[range], parameter[name[n]]]] begin[:] call[name[payoff_array]][tuple[[<ast.Name object at 0x7da204565f00>, <ast.Name object at 0x7da204564160>]]] assign[=] <ast.UnaryOp object at 0x7da2045651b0> variable[prize] assign[=] constant[1.0] for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:] for taget[name[j]] in starred[call[name[range], parameter[name[n]]]] begin[:] if compare[call[name[scores]][tuple[[<ast.Constant object at 0x7da204565c00>, <ast.Name object at 0x7da204567b50>]]] greater[>] call[name[scores]][tuple[[<ast.Constant object at 0x7da204564d00>, <ast.Name object at 0x7da204566bf0>]]]] begin[:] <ast.AugAssign object at 0x7da204565ae0>
keyword[def] identifier[_populate_ranking_payoff_arrays] ( identifier[payoff_arrays] , identifier[scores] , identifier[costs] ): literal[string] identifier[n] = identifier[payoff_arrays] [ literal[int] ]. identifier[shape] [ literal[int] ] keyword[for] identifier[p] , identifier[payoff_array] keyword[in] identifier[enumerate] ( identifier[payoff_arrays] ): identifier[payoff_array] [ literal[int] ,:]= literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[n] ): keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[n] ): identifier[payoff_array] [ identifier[i] , identifier[j] ]=- identifier[costs] [ identifier[p] , identifier[i] - literal[int] ] identifier[prize] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ): keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[n] ): keyword[if] identifier[scores] [ literal[int] , identifier[i] ]> identifier[scores] [ literal[int] , identifier[j] ]: identifier[payoff_arrays] [ literal[int] ][ identifier[i] , identifier[j] ]+= identifier[prize] keyword[elif] identifier[scores] [ literal[int] , identifier[i] ]< identifier[scores] [ literal[int] , identifier[j] ]: identifier[payoff_arrays] [ literal[int] ][ identifier[j] , identifier[i] ]+= identifier[prize] keyword[else] : identifier[payoff_arrays] [ literal[int] ][ identifier[i] , identifier[j] ]+= identifier[prize] / literal[int] identifier[payoff_arrays] [ literal[int] ][ identifier[j] , identifier[i] ]+= identifier[prize] / literal[int]
def _populate_ranking_payoff_arrays(payoff_arrays, scores, costs): """ Populate the ndarrays in `payoff_arrays` with the payoff values of the ranking game given `scores` and `costs`. Parameters ---------- payoff_arrays : tuple(ndarray(float, ndim=2)) Tuple of 2 ndarrays of shape (n, n). Modified in place. scores : ndarray(int, ndim=2) ndarray of shape (2, n) containing score values corresponding to the effort levels for the two players. costs : ndarray(float, ndim=2) ndarray of shape (2, n-1) containing cost values corresponding to the n-1 positive effort levels for the two players, with the assumption that the cost of the zero effort action is zero. """ n = payoff_arrays[0].shape[0] for (p, payoff_array) in enumerate(payoff_arrays): payoff_array[0, :] = 0 for i in range(1, n): for j in range(n): payoff_array[i, j] = -costs[p, i - 1] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]] prize = 1.0 for i in range(n): for j in range(n): if scores[0, i] > scores[1, j]: payoff_arrays[0][i, j] += prize # depends on [control=['if'], data=[]] elif scores[0, i] < scores[1, j]: payoff_arrays[1][j, i] += prize # depends on [control=['if'], data=[]] else: payoff_arrays[0][i, j] += prize / 2 payoff_arrays[1][j, i] += prize / 2 # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
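# Worked example using _populate_ranking_payoff_arrays as defined above: two
# effort levels per player, where zero effort scores 0 at no cost and high
# effort scores 1 at cost 0.3, with the prize of 1 split on ties.
import numpy as np

n = 2
payoff_arrays = (np.empty((n, n)), np.empty((n, n)))
scores = np.array([[0, 1], [0, 1]])   # shape (2, n)
costs = np.array([[0.3], [0.3]])      # shape (2, n-1)

_populate_ranking_payoff_arrays(payoff_arrays, scores, costs)
print(payoff_arrays[0])   # [[0.5 0. ] [0.7 0.2]]
print(payoff_arrays[1])   # identical here because the game is symmetric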
def update(client, revision, no_output, siblings, paths): """Update existing files by rerunning their outdated workflow.""" graph = Graph(client) outputs = graph.build(revision=revision, can_be_cwl=no_output, paths=paths) outputs = {node for node in outputs if graph.need_update(node)} if not outputs: click.secho( 'All files were generated from the latest inputs.', fg='green' ) sys.exit(0) # Check or extend siblings of outputs. outputs = siblings(graph, outputs) output_paths = {node.path for node in outputs if _safe_path(node.path)} # Get all clean nodes. input_paths = {node.path for node in graph.nodes} - output_paths # Store the generated workflow used for updating paths. import yaml output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex) workflow = graph.ascwl( input_paths=input_paths, output_paths=output_paths, outputs=outputs, ) # Make sure all inputs are pulled from a storage. client.pull_paths_from_storage( *(path for _, path in workflow.iter_input_files(client.workflow_path)) ) with output_file.open('w') as f: f.write( yaml.dump( ascwl( workflow, filter=lambda _, x: x is not None, basedir=client.workflow_path, ), default_flow_style=False ) ) from ._cwl import execute execute(client, output_file, output_paths=output_paths)
def function[update, parameter[client, revision, no_output, siblings, paths]]: constant[Update existing files by rerunning their outdated workflow.] variable[graph] assign[=] call[name[Graph], parameter[name[client]]] variable[outputs] assign[=] call[name[graph].build, parameter[]] variable[outputs] assign[=] <ast.SetComp object at 0x7da1b0381120> if <ast.UnaryOp object at 0x7da1b0382560> begin[:] call[name[click].secho, parameter[constant[All files were generated from the latest inputs.]]] call[name[sys].exit, parameter[constant[0]]] variable[outputs] assign[=] call[name[siblings], parameter[name[graph], name[outputs]]] variable[output_paths] assign[=] <ast.SetComp object at 0x7da1b0383940> variable[input_paths] assign[=] binary_operation[<ast.SetComp object at 0x7da1b02e4460> - name[output_paths]] import module[yaml] variable[output_file] assign[=] binary_operation[name[client].workflow_path / call[constant[{0}.cwl].format, parameter[call[name[uuid].uuid4, parameter[]].hex]]] variable[workflow] assign[=] call[name[graph].ascwl, parameter[]] call[name[client].pull_paths_from_storage, parameter[<ast.Starred object at 0x7da1b02e6770>]] with call[name[output_file].open, parameter[constant[w]]] begin[:] call[name[f].write, parameter[call[name[yaml].dump, parameter[call[name[ascwl], parameter[name[workflow]]]]]]] from relative_module[_cwl] import module[execute] call[name[execute], parameter[name[client], name[output_file]]]
keyword[def] identifier[update] ( identifier[client] , identifier[revision] , identifier[no_output] , identifier[siblings] , identifier[paths] ): literal[string] identifier[graph] = identifier[Graph] ( identifier[client] ) identifier[outputs] = identifier[graph] . identifier[build] ( identifier[revision] = identifier[revision] , identifier[can_be_cwl] = identifier[no_output] , identifier[paths] = identifier[paths] ) identifier[outputs] ={ identifier[node] keyword[for] identifier[node] keyword[in] identifier[outputs] keyword[if] identifier[graph] . identifier[need_update] ( identifier[node] )} keyword[if] keyword[not] identifier[outputs] : identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[outputs] = identifier[siblings] ( identifier[graph] , identifier[outputs] ) identifier[output_paths] ={ identifier[node] . identifier[path] keyword[for] identifier[node] keyword[in] identifier[outputs] keyword[if] identifier[_safe_path] ( identifier[node] . identifier[path] )} identifier[input_paths] ={ identifier[node] . identifier[path] keyword[for] identifier[node] keyword[in] identifier[graph] . identifier[nodes] }- identifier[output_paths] keyword[import] identifier[yaml] identifier[output_file] = identifier[client] . identifier[workflow_path] / literal[string] . identifier[format] ( identifier[uuid] . identifier[uuid4] (). identifier[hex] ) identifier[workflow] = identifier[graph] . identifier[ascwl] ( identifier[input_paths] = identifier[input_paths] , identifier[output_paths] = identifier[output_paths] , identifier[outputs] = identifier[outputs] , ) identifier[client] . identifier[pull_paths_from_storage] ( *( identifier[path] keyword[for] identifier[_] , identifier[path] keyword[in] identifier[workflow] . identifier[iter_input_files] ( identifier[client] . identifier[workflow_path] )) ) keyword[with] identifier[output_file] . identifier[open] ( literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[yaml] . identifier[dump] ( identifier[ascwl] ( identifier[workflow] , identifier[filter] = keyword[lambda] identifier[_] , identifier[x] : identifier[x] keyword[is] keyword[not] keyword[None] , identifier[basedir] = identifier[client] . identifier[workflow_path] , ), identifier[default_flow_style] = keyword[False] ) ) keyword[from] . identifier[_cwl] keyword[import] identifier[execute] identifier[execute] ( identifier[client] , identifier[output_file] , identifier[output_paths] = identifier[output_paths] )
def update(client, revision, no_output, siblings, paths): """Update existing files by rerunning their outdated workflow.""" graph = Graph(client) outputs = graph.build(revision=revision, can_be_cwl=no_output, paths=paths) outputs = {node for node in outputs if graph.need_update(node)} if not outputs: click.secho('All files were generated from the latest inputs.', fg='green') sys.exit(0) # depends on [control=['if'], data=[]] # Check or extend siblings of outputs. outputs = siblings(graph, outputs) output_paths = {node.path for node in outputs if _safe_path(node.path)} # Get all clean nodes. input_paths = {node.path for node in graph.nodes} - output_paths # Store the generated workflow used for updating paths. import yaml output_file = client.workflow_path / '{0}.cwl'.format(uuid.uuid4().hex) workflow = graph.ascwl(input_paths=input_paths, output_paths=output_paths, outputs=outputs) # Make sure all inputs are pulled from a storage. client.pull_paths_from_storage(*(path for (_, path) in workflow.iter_input_files(client.workflow_path))) with output_file.open('w') as f: f.write(yaml.dump(ascwl(workflow, filter=lambda _, x: x is not None, basedir=client.workflow_path), default_flow_style=False)) # depends on [control=['with'], data=['f']] from ._cwl import execute execute(client, output_file, output_paths=output_paths)
def match_regex(self, regex: Pattern, required: bool = False, meaning: str = "") -> str:
        """Parse input based on a regular expression.

        Args:
            regex: Compiled regular expression object.
            required: Should the exception be raised on unexpected input?
            meaning: Meaning of `regex` (for use in error messages).

        Raises:
            UnexpectedInput: If `required` is true and the input doesn't match.
        """
        mo = regex.match(self.input, self.offset)
        if mo:
            self.offset = mo.end()
            return mo.group()
        if required:
            raise UnexpectedInput(self, meaning)
def function[match_regex, parameter[self, regex, required, meaning]]:
    constant[Parse input based on a regular expression.

    Args:
        regex: Compiled regular expression object.
        required: Should the exception be raised on unexpected input?
        meaning: Meaning of `regex` (for use in error messages).

    Raises:
        UnexpectedInput: If `required` is true and the input doesn't match.
    ]
    variable[mo] assign[=] call[name[regex].match, parameter[name[self].input, name[self].offset]]
    if name[mo] begin[:]
        name[self].offset assign[=] call[name[mo].end, parameter[]]
        return[call[name[mo].group, parameter[]]]
    if name[required] begin[:]
        <ast.Raise object at 0x7da1b05293c0>
keyword[def] identifier[match_regex] ( identifier[self] , identifier[regex] : identifier[Pattern] , identifier[required] : identifier[bool] = keyword[False] , identifier[meaning] : identifier[str] = literal[string] )-> identifier[str] : literal[string] identifier[mo] = identifier[regex] . identifier[match] ( identifier[self] . identifier[input] , identifier[self] . identifier[offset] ) keyword[if] identifier[mo] : identifier[self] . identifier[offset] = identifier[mo] . identifier[end] () keyword[return] identifier[mo] . identifier[group] () keyword[if] identifier[required] : keyword[raise] identifier[UnexpectedInput] ( identifier[self] , identifier[meaning] )
def match_regex(self, regex: Pattern, required: bool=False, meaning: str='') -> str:
    """Parse input based on a regular expression.

        Args:
            regex: Compiled regular expression object.
            required: Should the exception be raised on unexpected input?
            meaning: Meaning of `regex` (for use in error messages).

        Raises:
            UnexpectedInput: If `required` is true and the input doesn't match.
        """
    mo = regex.match(self.input, self.offset)
    if mo:
        self.offset = mo.end()
        return mo.group() # depends on [control=['if'], data=[]]
    if required:
        raise UnexpectedInput(self, meaning) # depends on [control=['if'], data=[]]
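A minimal sketch of driving match_regex outside its original parser class; Parser and UnexpectedInput below are hypothetical stand-ins for whatever the surrounding module actually defines, and the sketch assumes the match_regex function above is available at module level.

import re

class UnexpectedInput(Exception):  # hypothetical stand-in for the real exception
    def __init__(self, parser, meaning=""):
        super().__init__(meaning)

class Parser:  # hypothetical host providing the input/offset state the method expects
    def __init__(self, text):
        self.input = text
        self.offset = 0
    match_regex = match_regex  # reuse the function defined above as a method

p = Parser("foo123bar")
word = re.compile(r"[a-z]+")
print(p.match_regex(word))   # 'foo'; p.offset advances to 3
print(p.match_regex(word))   # None: digits come next and required is False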
async def reload_modules(self, pathlist):
        """
        Reload modules with a full path in the pathlist
        """
        loadedModules = []
        failures = []
        for path in pathlist:
            p, module = findModule(path, False)
            if module is not None and hasattr(module, '_instance') and module._instance.state != ModuleLoadStateChanged.UNLOADED:
                loadedModules.append(module)
        # Unload all modules
        ums = [ModuleLoadStateChanged.createMatcher(m, ModuleLoadStateChanged.UNLOADED) for m in loadedModules]
        for m in loadedModules:
            # Only unload the module itself, not its dependencies, since we will restart the module soon enough
            self.subroutine(self.unloadmodule(m, True), False)
        await self.wait_for_all(*ums)
        # Group modules by package
        grouped = {}
        for path in pathlist:
            dotpos = path.rfind('.')
            if dotpos == -1:
                raise ModuleLoadException('Must specify module with full path, including package name')
            package = path[:dotpos]
            classname = path[dotpos + 1:]
            mlist = grouped.setdefault(package, [])
            p, module = findModule(path, False)
            mlist.append((classname, module))
        for package, mlist in grouped.items():
            # Reload each package only once
            try:
                p = sys.modules[package]
                # Remove cache to ensure a clean import from source file
                removeCache(p)
                p = reload(p)
            except KeyError:
                try:
                    p = __import__(package, fromlist=[m[0] for m in mlist])
                except Exception:
                    self._logger.warning('Failed to import a package: %r, resume others', package, exc_info = True)
                    failures.append('Failed to import: ' + package)
                    continue
            except Exception:
                self._logger.warning('Failed to import a package: %r, resume others', package, exc_info = True)
                failures.append('Failed to import: ' + package)
                continue
            for cn, module in mlist:
                try:
                    module2 = getattr(p, cn)
                except AttributeError:
                    self._logger.warning('Cannot find module %r in package %r, resume others', package, cn)
                    failures.append('Failed to import: ' + package + '.' + cn)
                    continue
                if module is not None and module is not module2:
                    # Update the references
                    try:
                        lpos = loadedModules.index(module)
                        loaded = True
                    except Exception:
                        loaded = False
                    for d in module.depends:
                        # The new reference is automatically added on import, only remove the old reference
                        d.referencedBy.remove(module)
                        if loaded and hasattr(d, '_instance'):
                            try:
                                d._instance.dependedBy.remove(module)
                                d._instance.dependedBy.add(module2)
                            except ValueError:
                                pass
                    if hasattr(module, 'referencedBy'):
                        for d in module.referencedBy:
                            pos = d.depends.index(module)
                            d.depends[pos] = module2
                            if not hasattr(module2, 'referencedBy'):
                                module2.referencedBy = []
                            module2.referencedBy.append(d)
                    if loaded:
                        loadedModules[lpos] = module2
        # Start the reloaded modules
        for m in loadedModules:
            self.subroutine(self.loadmodule(m))
        if failures:
            raise ModuleLoadException('Following errors occurred during reloading, check log for more details:\n' + '\n'.join(failures))
<ast.AsyncFunctionDef object at 0x7da20c7c8e50>
keyword[async] keyword[def] identifier[reload_modules] ( identifier[self] , identifier[pathlist] ): literal[string] identifier[loadedModules] =[] identifier[failures] =[] keyword[for] identifier[path] keyword[in] identifier[pathlist] : identifier[p] , identifier[module] = identifier[findModule] ( identifier[path] , keyword[False] ) keyword[if] identifier[module] keyword[is] keyword[not] keyword[None] keyword[and] identifier[hasattr] ( identifier[module] , literal[string] ) keyword[and] identifier[module] . identifier[_instance] . identifier[state] != identifier[ModuleLoadStateChanged] . identifier[UNLOADED] : identifier[loadedModules] . identifier[append] ( identifier[module] ) identifier[ums] =[ identifier[ModuleLoadStateChanged] . identifier[createMatcher] ( identifier[m] , identifier[ModuleLoadStateChanged] . identifier[UNLOADED] ) keyword[for] identifier[m] keyword[in] identifier[loadedModules] ] keyword[for] identifier[m] keyword[in] identifier[loadedModules] : identifier[self] . identifier[subroutine] ( identifier[self] . identifier[unloadmodule] ( identifier[m] , keyword[True] ), keyword[False] ) keyword[await] identifier[self] . identifier[wait_for_all] (* identifier[ums] ) identifier[grouped] ={} keyword[for] identifier[path] keyword[in] identifier[pathlist] : identifier[dotpos] = identifier[path] . identifier[rfind] ( literal[string] ) keyword[if] identifier[dotpos] ==- literal[int] : keyword[raise] identifier[ModuleLoadException] ( literal[string] ) identifier[package] = identifier[path] [: identifier[dotpos] ] identifier[classname] = identifier[path] [ identifier[dotpos] + literal[int] :] identifier[mlist] = identifier[grouped] . identifier[setdefault] ( identifier[package] ,[]) identifier[p] , identifier[module] = identifier[findModule] ( identifier[path] , keyword[False] ) identifier[mlist] . identifier[append] (( identifier[classname] , identifier[module] )) keyword[for] identifier[package] , identifier[mlist] keyword[in] identifier[grouped] . identifier[items] (): keyword[try] : identifier[p] = identifier[sys] . identifier[modules] [ identifier[package] ] identifier[removeCache] ( identifier[p] ) identifier[p] = identifier[reload] ( identifier[p] ) keyword[except] identifier[KeyError] : keyword[try] : identifier[p] = identifier[__import__] ( identifier[package] , identifier[fromlist] =[ identifier[m] [ literal[int] ] keyword[for] identifier[m] keyword[in] identifier[mlist] ]) keyword[except] identifier[Exception] : identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] , identifier[package] , identifier[exc_info] = keyword[True] ) identifier[failures] . identifier[append] ( literal[string] + identifier[package] ) keyword[continue] keyword[except] identifier[Exception] : identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] , identifier[package] , identifier[exc_info] = keyword[True] ) identifier[failures] . identifier[append] ( literal[string] + identifier[package] ) keyword[continue] keyword[for] identifier[cn] , identifier[module] keyword[in] identifier[mlist] : keyword[try] : identifier[module2] = identifier[getattr] ( identifier[p] , identifier[cn] ) keyword[except] identifier[AttributeError] : identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] , identifier[package] , identifier[cn] ) identifier[failures] . identifier[append] ( literal[string] + identifier[package] + literal[string] + identifier[cn] ) keyword[continue] keyword[if] identifier[module] keyword[is] keyword[not] keyword[None] keyword[and] identifier[module] keyword[is] keyword[not] identifier[module2] : keyword[try] : identifier[lpos] = identifier[loadedModules] . identifier[index] ( identifier[module] ) identifier[loaded] = keyword[True] keyword[except] identifier[Exception] : identifier[loaded] = keyword[False] keyword[for] identifier[d] keyword[in] identifier[module] . identifier[depends] : identifier[d] . identifier[referencedBy] . identifier[remove] ( identifier[module] ) keyword[if] identifier[loaded] keyword[and] identifier[hasattr] ( identifier[d] , literal[string] ): keyword[try] : identifier[d] . identifier[_instance] . identifier[dependedBy] . identifier[remove] ( identifier[module] ) identifier[d] . identifier[_instance] . identifier[dependedBy] . identifier[add] ( identifier[module2] ) keyword[except] identifier[ValueError] : keyword[pass] keyword[if] identifier[hasattr] ( identifier[module] , literal[string] ): keyword[for] identifier[d] keyword[in] identifier[module] . identifier[referencedBy] : identifier[pos] = identifier[d] . identifier[depends] . identifier[index] ( identifier[module] ) identifier[d] . identifier[depends] [ identifier[pos] ]= identifier[module2] keyword[if] keyword[not] identifier[hasattr] ( identifier[module2] , literal[string] ): identifier[module2] . identifier[referencedBy] =[] identifier[module2] . identifier[referencedBy] . identifier[append] ( identifier[d] ) keyword[if] identifier[loaded] : identifier[loadedModules] [ identifier[lpos] ]= identifier[module2] keyword[for] identifier[m] keyword[in] identifier[loadedModules] : identifier[self] . identifier[subroutine] ( identifier[self] . identifier[loadmodule] ( identifier[m] )) keyword[if] identifier[failures] : keyword[raise] identifier[ModuleLoadException] ( literal[string] + literal[string] . identifier[join] ( identifier[failures] ))
async def reload_modules(self, pathlist):
    """ Reload modules with a full path in the pathlist
        """
    loadedModules = []
    failures = []
    for path in pathlist:
        (p, module) = findModule(path, False)
        if module is not None and hasattr(module, '_instance') and (module._instance.state != ModuleLoadStateChanged.UNLOADED):
            loadedModules.append(module) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
    # Unload all modules
    ums = [ModuleLoadStateChanged.createMatcher(m, ModuleLoadStateChanged.UNLOADED) for m in loadedModules]
    for m in loadedModules:
        # Only unload the module itself, not its dependencies, since we will restart the module soon enough
        self.subroutine(self.unloadmodule(m, True), False) # depends on [control=['for'], data=['m']]
    await self.wait_for_all(*ums)
    # Group modules by package
    grouped = {}
    for path in pathlist:
        dotpos = path.rfind('.')
        if dotpos == -1:
            raise ModuleLoadException('Must specify module with full path, including package name') # depends on [control=['if'], data=[]]
        package = path[:dotpos]
        classname = path[dotpos + 1:]
        mlist = grouped.setdefault(package, [])
        (p, module) = findModule(path, False)
        mlist.append((classname, module)) # depends on [control=['for'], data=['path']]
    for (package, mlist) in grouped.items():
        # Reload each package only once
        try:
            p = sys.modules[package]
            # Remove cache to ensure a clean import from source file
            removeCache(p)
            p = reload(p) # depends on [control=['try'], data=[]]
        except KeyError:
            try:
                p = __import__(package, fromlist=[m[0] for m in mlist]) # depends on [control=['try'], data=[]]
            except Exception:
                self._logger.warning('Failed to import a package: %r, resume others', package, exc_info=True)
                failures.append('Failed to import: ' + package)
                continue # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
        except Exception:
            self._logger.warning('Failed to import a package: %r, resume others', package, exc_info=True)
            failures.append('Failed to import: ' + package)
            continue # depends on [control=['except'], data=[]]
        for (cn, module) in mlist:
            try:
                module2 = getattr(p, cn) # depends on [control=['try'], data=[]]
            except AttributeError:
                self._logger.warning('Cannot find module %r in package %r, resume others', package, cn)
                failures.append('Failed to import: ' + package + '.' + cn)
                continue # depends on [control=['except'], data=[]]
            if module is not None and module is not module2:
                # Update the references
                try:
                    lpos = loadedModules.index(module)
                    loaded = True # depends on [control=['try'], data=[]]
                except Exception:
                    loaded = False # depends on [control=['except'], data=[]]
                for d in module.depends:
                    # The new reference is automatically added on import, only remove the old reference
                    d.referencedBy.remove(module)
                    if loaded and hasattr(d, '_instance'):
                        try:
                            d._instance.dependedBy.remove(module)
                            d._instance.dependedBy.add(module2) # depends on [control=['try'], data=[]]
                        except ValueError:
                            pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
                if hasattr(module, 'referencedBy'):
                    for d in module.referencedBy:
                        pos = d.depends.index(module)
                        d.depends[pos] = module2
                        if not hasattr(module2, 'referencedBy'):
                            module2.referencedBy = [] # depends on [control=['if'], data=[]]
                        module2.referencedBy.append(d) # depends on [control=['for'], data=['d']] # depends on [control=['if'], data=[]]
                if loaded:
                    loadedModules[lpos] = module2 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
    # Start the reloaded modules
    for m in loadedModules:
        self.subroutine(self.loadmodule(m)) # depends on [control=['for'], data=['m']]
    if failures:
        raise ModuleLoadException('Following errors occurred during reloading, check log for more details:\n' + '\n'.join(failures)) # depends on [control=['if'], data=[]]
def rgbline(x, y, red, green, blue, alpha=1, linestyles="solid", linewidth=2.5): """Get a RGB coloured line for plotting. Args: x (list): x-axis data. y (list): y-axis data (can be multidimensional array). red (list): Red data (must have same shape as ``y``). green (list): Green data (must have same shape as ``y``). blue (list): blue data (must have same shape as ``y``). alpha (:obj:`list` or :obj:`int`, optional): Alpha (transparency) data (must have same shape as ``y`` or be an :obj:`int`). linestyles (:obj:`str`, optional): Linestyle for plot. Options are ``"solid"`` or ``"dotted"``. """ y = np.array(y) if len(y.shape) == 1: y = np.array([y]) red = np.array([red]) green = np.array([green]) blue = np.array([blue]) alpha = np.array([alpha]) elif isinstance(alpha, int): alpha = [alpha] * len(y) seg = [] colours = [] for yy, rr, gg, bb, aa in zip(y, red, green, blue, alpha): pts = np.array([x, yy]).T.reshape(-1, 1, 2) seg.extend(np.concatenate([pts[:-1], pts[1:]], axis=1)) nseg = len(x) - 1 r = [0.5 * (rr[i] + rr[i + 1]) for i in range(nseg)] g = [0.5 * (gg[i] + gg[i + 1]) for i in range(nseg)] b = [0.5 * (bb[i] + bb[i + 1]) for i in range(nseg)] a = np.ones(nseg, np.float) * aa colours.extend(list(zip(r, g, b, a))) lc = LineCollection(seg, colors=colours, rasterized=True, linewidth=linewidth, linestyles=linestyles) return lc
def function[rgbline, parameter[x, y, red, green, blue, alpha, linestyles, linewidth]]: constant[Get a RGB coloured line for plotting. Args: x (list): x-axis data. y (list): y-axis data (can be multidimensional array). red (list): Red data (must have same shape as ``y``). green (list): Green data (must have same shape as ``y``). blue (list): blue data (must have same shape as ``y``). alpha (:obj:`list` or :obj:`int`, optional): Alpha (transparency) data (must have same shape as ``y`` or be an :obj:`int`). linestyles (:obj:`str`, optional): Linestyle for plot. Options are ``"solid"`` or ``"dotted"``. ] variable[y] assign[=] call[name[np].array, parameter[name[y]]] if compare[call[name[len], parameter[name[y].shape]] equal[==] constant[1]] begin[:] variable[y] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b246f190>]]]] variable[red] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b246f040>]]]] variable[green] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b246eef0>]]]] variable[blue] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b246eda0>]]]] variable[alpha] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b246ec50>]]]] variable[seg] assign[=] list[[]] variable[colours] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b246e620>, <ast.Name object at 0x7da1b246e5f0>, <ast.Name object at 0x7da1b246e5c0>, <ast.Name object at 0x7da1b246e590>, <ast.Name object at 0x7da1b246e560>]]] in starred[call[name[zip], parameter[name[y], name[red], name[green], name[blue], name[alpha]]]] begin[:] variable[pts] assign[=] call[call[name[np].array, parameter[list[[<ast.Name object at 0x7da1b246fdf0>, <ast.Name object at 0x7da18eb55810>]]]].T.reshape, parameter[<ast.UnaryOp object at 0x7da18eb54130>, constant[1], constant[2]]] call[name[seg].extend, parameter[call[name[np].concatenate, parameter[list[[<ast.Subscript object at 0x7da1b2406770>, <ast.Subscript object at 0x7da1b2405690>]]]]]] variable[nseg] assign[=] binary_operation[call[name[len], parameter[name[x]]] - constant[1]] variable[r] assign[=] <ast.ListComp object at 0x7da1b2405b70> variable[g] assign[=] <ast.ListComp object at 0x7da1b2405f90> variable[b] assign[=] <ast.ListComp object at 0x7da1b2405a20> variable[a] assign[=] binary_operation[call[name[np].ones, parameter[name[nseg], name[np].float]] * name[aa]] call[name[colours].extend, parameter[call[name[list], parameter[call[name[zip], parameter[name[r], name[g], name[b], name[a]]]]]]] variable[lc] assign[=] call[name[LineCollection], parameter[name[seg]]] return[name[lc]]
keyword[def] identifier[rgbline] ( identifier[x] , identifier[y] , identifier[red] , identifier[green] , identifier[blue] , identifier[alpha] = literal[int] , identifier[linestyles] = literal[string] , identifier[linewidth] = literal[int] ): literal[string] identifier[y] = identifier[np] . identifier[array] ( identifier[y] ) keyword[if] identifier[len] ( identifier[y] . identifier[shape] )== literal[int] : identifier[y] = identifier[np] . identifier[array] ([ identifier[y] ]) identifier[red] = identifier[np] . identifier[array] ([ identifier[red] ]) identifier[green] = identifier[np] . identifier[array] ([ identifier[green] ]) identifier[blue] = identifier[np] . identifier[array] ([ identifier[blue] ]) identifier[alpha] = identifier[np] . identifier[array] ([ identifier[alpha] ]) keyword[elif] identifier[isinstance] ( identifier[alpha] , identifier[int] ): identifier[alpha] =[ identifier[alpha] ]* identifier[len] ( identifier[y] ) identifier[seg] =[] identifier[colours] =[] keyword[for] identifier[yy] , identifier[rr] , identifier[gg] , identifier[bb] , identifier[aa] keyword[in] identifier[zip] ( identifier[y] , identifier[red] , identifier[green] , identifier[blue] , identifier[alpha] ): identifier[pts] = identifier[np] . identifier[array] ([ identifier[x] , identifier[yy] ]). identifier[T] . identifier[reshape] (- literal[int] , literal[int] , literal[int] ) identifier[seg] . identifier[extend] ( identifier[np] . identifier[concatenate] ([ identifier[pts] [:- literal[int] ], identifier[pts] [ literal[int] :]], identifier[axis] = literal[int] )) identifier[nseg] = identifier[len] ( identifier[x] )- literal[int] identifier[r] =[ literal[int] *( identifier[rr] [ identifier[i] ]+ identifier[rr] [ identifier[i] + literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nseg] )] identifier[g] =[ literal[int] *( identifier[gg] [ identifier[i] ]+ identifier[gg] [ identifier[i] + literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nseg] )] identifier[b] =[ literal[int] *( identifier[bb] [ identifier[i] ]+ identifier[bb] [ identifier[i] + literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nseg] )] identifier[a] = identifier[np] . identifier[ones] ( identifier[nseg] , identifier[np] . identifier[float] )* identifier[aa] identifier[colours] . identifier[extend] ( identifier[list] ( identifier[zip] ( identifier[r] , identifier[g] , identifier[b] , identifier[a] ))) identifier[lc] = identifier[LineCollection] ( identifier[seg] , identifier[colors] = identifier[colours] , identifier[rasterized] = keyword[True] , identifier[linewidth] = identifier[linewidth] , identifier[linestyles] = identifier[linestyles] ) keyword[return] identifier[lc]
def rgbline(x, y, red, green, blue, alpha=1, linestyles='solid', linewidth=2.5): """Get a RGB coloured line for plotting. Args: x (list): x-axis data. y (list): y-axis data (can be multidimensional array). red (list): Red data (must have same shape as ``y``). green (list): Green data (must have same shape as ``y``). blue (list): blue data (must have same shape as ``y``). alpha (:obj:`list` or :obj:`int`, optional): Alpha (transparency) data (must have same shape as ``y`` or be an :obj:`int`). linestyles (:obj:`str`, optional): Linestyle for plot. Options are ``"solid"`` or ``"dotted"``. """ y = np.array(y) if len(y.shape) == 1: y = np.array([y]) red = np.array([red]) green = np.array([green]) blue = np.array([blue]) alpha = np.array([alpha]) # depends on [control=['if'], data=[]] elif isinstance(alpha, int): alpha = [alpha] * len(y) # depends on [control=['if'], data=[]] seg = [] colours = [] for (yy, rr, gg, bb, aa) in zip(y, red, green, blue, alpha): pts = np.array([x, yy]).T.reshape(-1, 1, 2) seg.extend(np.concatenate([pts[:-1], pts[1:]], axis=1)) nseg = len(x) - 1 r = [0.5 * (rr[i] + rr[i + 1]) for i in range(nseg)] g = [0.5 * (gg[i] + gg[i + 1]) for i in range(nseg)] b = [0.5 * (bb[i] + bb[i + 1]) for i in range(nseg)] a = np.ones(nseg, np.float) * aa colours.extend(list(zip(r, g, b, a))) # depends on [control=['for'], data=[]] lc = LineCollection(seg, colors=colours, rasterized=True, linewidth=linewidth, linestyles=linestyles) return lc
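A usage sketch under two assumptions: numpy and matplotlib's LineCollection are already imported where rgbline is defined, and, because np.float was removed in NumPy 1.24, a small shim is installed so the np.ones(nseg, np.float) call above still runs on current NumPy.

import numpy as np
import matplotlib.pyplot as plt

if not hasattr(np, "float"):   # shim for NumPy >= 1.24, where np.float is gone
    np.float = float

x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
red = np.linspace(1, 0, 100)    # fade red out along the curve...
blue = np.linspace(0, 1, 100)   # ...while blue fades in
green = np.zeros(100)

lc = rgbline(x, y, red, green, blue)
fig, ax = plt.subplots()
ax.add_collection(lc)           # LineCollections are not drawn to scale automatically...
ax.autoscale()                  # ...so fit the view to the data explicitly
plt.show()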
def register_handler(self, name, handler, esc_strings): """Register a handler instance by name with esc_strings.""" self._handlers[name] = handler for esc_str in esc_strings: self._esc_handlers[esc_str] = handler
def function[register_handler, parameter[self, name, handler, esc_strings]]: constant[Register a handler instance by name with esc_strings.] call[name[self]._handlers][name[name]] assign[=] name[handler] for taget[name[esc_str]] in starred[name[esc_strings]] begin[:] call[name[self]._esc_handlers][name[esc_str]] assign[=] name[handler]
keyword[def] identifier[register_handler] ( identifier[self] , identifier[name] , identifier[handler] , identifier[esc_strings] ): literal[string] identifier[self] . identifier[_handlers] [ identifier[name] ]= identifier[handler] keyword[for] identifier[esc_str] keyword[in] identifier[esc_strings] : identifier[self] . identifier[_esc_handlers] [ identifier[esc_str] ]= identifier[handler]
def register_handler(self, name, handler, esc_strings): """Register a handler instance by name with esc_strings.""" self._handlers[name] = handler for esc_str in esc_strings: self._esc_handlers[esc_str] = handler # depends on [control=['for'], data=['esc_str']]
def vel_horizontal(HeightWaterCritical):
    """Return the horizontal velocity."""
    # Checking input validity
    ut.check_range([HeightWaterCritical, ">0", "Critical height of water"])
    return np.sqrt(gravity.magnitude * HeightWaterCritical)
def function[vel_horizontal, parameter[HeightWaterCritical]]: constant[Return the horizontal velocity.] call[name[ut].check_range, parameter[list[[<ast.Name object at 0x7da1b23465c0>, <ast.Constant object at 0x7da1b2347b50>, <ast.Constant object at 0x7da1b2347820>]]]] return[call[name[np].sqrt, parameter[binary_operation[name[gravity].magnitude * name[HeightWaterCritical]]]]]
keyword[def] identifier[vel_horizontal] ( identifier[HeightWaterCritical] ): literal[string] identifier[ut] . identifier[check_range] ([ identifier[HeightWaterCritical] , literal[string] , literal[string] ]) keyword[return] identifier[np] . identifier[sqrt] ( identifier[gravity] . identifier[magnitude] * identifier[HeightWaterCritical] )
def vel_horizontal(HeightWaterCritical): """Return the horizontal velocity.""" # Checking input validity ut.check_range([HeightWaterCritical, '>0', 'Critical height of water']) return np.sqrt(gravity.magnitude * HeightWaterCritical)
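The body is the critical-flow relation v = sqrt(g * h); a worked example under the assumption that gravity is a pint-style quantity whose magnitude is 9.80665 m/s^2 (ut and gravity themselves are defined elsewhere in the module):

import numpy as np

g = 9.80665                  # m/s^2, assumed value of gravity.magnitude
h_crit = 0.5                 # m, critical height of water
print(np.sqrt(g * h_crit))   # ~2.214 m/s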
def tell(self): """Return the current position in the stream (ignoring bit position) :returns: int for the position in the stream """ res = self._stream.tell() if len(self._bits) > 0: res -= 1 return res
def function[tell, parameter[self]]: constant[Return the current position in the stream (ignoring bit position) :returns: int for the position in the stream ] variable[res] assign[=] call[name[self]._stream.tell, parameter[]] if compare[call[name[len], parameter[name[self]._bits]] greater[>] constant[0]] begin[:] <ast.AugAssign object at 0x7da204623f70> return[name[res]]
keyword[def] identifier[tell] ( identifier[self] ): literal[string] identifier[res] = identifier[self] . identifier[_stream] . identifier[tell] () keyword[if] identifier[len] ( identifier[self] . identifier[_bits] )> literal[int] : identifier[res] -= literal[int] keyword[return] identifier[res]
def tell(self): """Return the current position in the stream (ignoring bit position) :returns: int for the position in the stream """ res = self._stream.tell() if len(self._bits) > 0: res -= 1 # depends on [control=['if'], data=[]] return res
def delete(self, query_id=None, **kwargs): # pragma: no cover """Delete a query job. Uses the DELETE HTTP method to delete a query job. After calling this endpoint, it is an error to poll for query results using the queryId specified here. Args: query_id (str): Specifies the ID of the query job. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``logging_query.py`` example. """ path = "/logging-service/v1/queries/{}".format(query_id) r = self._httpclient.request( method="DELETE", url=self.url, path=path, **kwargs ) return r
def function[delete, parameter[self, query_id]]: constant[Delete a query job. Uses the DELETE HTTP method to delete a query job. After calling this endpoint, it is an error to poll for query results using the queryId specified here. Args: query_id (str): Specifies the ID of the query job. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``logging_query.py`` example. ] variable[path] assign[=] call[constant[/logging-service/v1/queries/{}].format, parameter[name[query_id]]] variable[r] assign[=] call[name[self]._httpclient.request, parameter[]] return[name[r]]
keyword[def] identifier[delete] ( identifier[self] , identifier[query_id] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[path] = literal[string] . identifier[format] ( identifier[query_id] ) identifier[r] = identifier[self] . identifier[_httpclient] . identifier[request] ( identifier[method] = literal[string] , identifier[url] = identifier[self] . identifier[url] , identifier[path] = identifier[path] , ** identifier[kwargs] ) keyword[return] identifier[r]
def delete(self, query_id=None, **kwargs): # pragma: no cover 'Delete a query job.\n\n Uses the DELETE HTTP method to delete a query job. After calling\n this endpoint, it is an error to poll for query results using\n the queryId specified here.\n\n Args:\n query_id (str): Specifies the ID of the query job.\n **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.\n\n Returns:\n requests.Response: Requests Response() object.\n\n Examples:\n Refer to ``logging_query.py`` example.\n\n ' path = '/logging-service/v1/queries/{}'.format(query_id) r = self._httpclient.request(method='DELETE', url=self.url, path=path, **kwargs) return r
def translations_generator_to_dataframe(translations_generator): """ Given a generator of (Variant, [Translation]) pairs, returns a DataFrame of translated protein fragments with columns for each field of a Translation object (and chr/pos/ref/alt per variant). """ return dataframe_from_generator( element_class=Translation, variant_and_elements_generator=translations_generator, exclude=[], converters={ "untrimmed_variant_sequence": lambda vs: vs.sequence, "variant_sequence_in_reading_frame": ( lambda vs: vs.in_frame_cdna_sequence), "reference_context": ( lambda rc: ";".join([ transcript.name for transcript in rc.transcripts])) }, extra_column_fns={ "untrimmed_variant_sequence_read_count": ( lambda _, t: len(t.untrimmed_variant_sequence.reads)), })
def function[translations_generator_to_dataframe, parameter[translations_generator]]: constant[ Given a generator of (Variant, [Translation]) pairs, returns a DataFrame of translated protein fragments with columns for each field of a Translation object (and chr/pos/ref/alt per variant). ] return[call[name[dataframe_from_generator], parameter[]]]
keyword[def] identifier[translations_generator_to_dataframe] ( identifier[translations_generator] ): literal[string] keyword[return] identifier[dataframe_from_generator] ( identifier[element_class] = identifier[Translation] , identifier[variant_and_elements_generator] = identifier[translations_generator] , identifier[exclude] =[], identifier[converters] ={ literal[string] : keyword[lambda] identifier[vs] : identifier[vs] . identifier[sequence] , literal[string] :( keyword[lambda] identifier[vs] : identifier[vs] . identifier[in_frame_cdna_sequence] ), literal[string] :( keyword[lambda] identifier[rc] : literal[string] . identifier[join] ([ identifier[transcript] . identifier[name] keyword[for] identifier[transcript] keyword[in] identifier[rc] . identifier[transcripts] ])) }, identifier[extra_column_fns] ={ literal[string] :( keyword[lambda] identifier[_] , identifier[t] : identifier[len] ( identifier[t] . identifier[untrimmed_variant_sequence] . identifier[reads] )), })
def translations_generator_to_dataframe(translations_generator): """ Given a generator of (Variant, [Translation]) pairs, returns a DataFrame of translated protein fragments with columns for each field of a Translation object (and chr/pos/ref/alt per variant). """ return dataframe_from_generator(element_class=Translation, variant_and_elements_generator=translations_generator, exclude=[], converters={'untrimmed_variant_sequence': lambda vs: vs.sequence, 'variant_sequence_in_reading_frame': lambda vs: vs.in_frame_cdna_sequence, 'reference_context': lambda rc: ';'.join([transcript.name for transcript in rc.transcripts])}, extra_column_fns={'untrimmed_variant_sequence_read_count': lambda _, t: len(t.untrimmed_variant_sequence.reads)})
def commit(self, *args, **kwargs): """Store changes on current instance in database and index it.""" return super(Deposit, self).commit(*args, **kwargs)
def function[commit, parameter[self]]: constant[Store changes on current instance in database and index it.] return[call[call[name[super], parameter[name[Deposit], name[self]]].commit, parameter[<ast.Starred object at 0x7da1aff1fd60>]]]
keyword[def] identifier[commit] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[super] ( identifier[Deposit] , identifier[self] ). identifier[commit] (* identifier[args] ,** identifier[kwargs] )
def commit(self, *args, **kwargs): """Store changes on current instance in database and index it.""" return super(Deposit, self).commit(*args, **kwargs)
def constraint(self): """Constraint string""" constraint_arr = [] if self._not_null: constraint_arr.append("PRIMARY KEY" if self._pk else "NOT NULL") if self._unique: constraint_arr.append("UNIQUE") return " ".join(constraint_arr)
def function[constraint, parameter[self]]: constant[Constraint string] variable[constraint_arr] assign[=] list[[]] if name[self]._not_null begin[:] call[name[constraint_arr].append, parameter[<ast.IfExp object at 0x7da18bccb040>]] if name[self]._unique begin[:] call[name[constraint_arr].append, parameter[constant[UNIQUE]]] return[call[constant[ ].join, parameter[name[constraint_arr]]]]
keyword[def] identifier[constraint] ( identifier[self] ): literal[string] identifier[constraint_arr] =[] keyword[if] identifier[self] . identifier[_not_null] : identifier[constraint_arr] . identifier[append] ( literal[string] keyword[if] identifier[self] . identifier[_pk] keyword[else] literal[string] ) keyword[if] identifier[self] . identifier[_unique] : identifier[constraint_arr] . identifier[append] ( literal[string] ) keyword[return] literal[string] . identifier[join] ( identifier[constraint_arr] )
def constraint(self): """Constraint string""" constraint_arr = [] if self._not_null: constraint_arr.append('PRIMARY KEY' if self._pk else 'NOT NULL') # depends on [control=['if'], data=[]] if self._unique: constraint_arr.append('UNIQUE') # depends on [control=['if'], data=[]] return ' '.join(constraint_arr)
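A standalone sketch of the same flag-to-SQL mapping with the instance attributes replaced by plain parameters (hypothetical names); note the PRIMARY KEY branch is only reachable when the not-null flag is also set:

def constraint_string(pk, not_null, unique):
    parts = []
    if not_null:
        parts.append("PRIMARY KEY" if pk else "NOT NULL")
    if unique:
        parts.append("UNIQUE")
    return " ".join(parts)

assert constraint_string(pk=True, not_null=True, unique=False) == "PRIMARY KEY"
assert constraint_string(pk=False, not_null=True, unique=True) == "NOT NULL UNIQUE"
assert constraint_string(pk=False, not_null=False, unique=False) == ""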
def set_chuid(ctx, management_key, pin): """ Generate and set a CHUID on the YubiKey. """ controller = ctx.obj['controller'] _ensure_authenticated(ctx, controller, pin, management_key) controller.update_chuid()
def function[set_chuid, parameter[ctx, management_key, pin]]: constant[ Generate and set a CHUID on the YubiKey. ] variable[controller] assign[=] call[name[ctx].obj][constant[controller]] call[name[_ensure_authenticated], parameter[name[ctx], name[controller], name[pin], name[management_key]]] call[name[controller].update_chuid, parameter[]]
keyword[def] identifier[set_chuid] ( identifier[ctx] , identifier[management_key] , identifier[pin] ): literal[string] identifier[controller] = identifier[ctx] . identifier[obj] [ literal[string] ] identifier[_ensure_authenticated] ( identifier[ctx] , identifier[controller] , identifier[pin] , identifier[management_key] ) identifier[controller] . identifier[update_chuid] ()
def set_chuid(ctx, management_key, pin): """ Generate and set a CHUID on the YubiKey. """ controller = ctx.obj['controller'] _ensure_authenticated(ctx, controller, pin, management_key) controller.update_chuid()
def find_nearest_color_index(r, g, b, color_table=None, method='euclid'):
    ''' Given three integers representing R, G, and B,
        return the nearest color index.

        Arguments:
            r:  int - of range 0…255
            g:  int - of range 0…255
            b:  int - of range 0…255

        Returns:
            int, None: index, or None on error.
    '''
    shortest_distance = 257*257*3  # exceeds the max squared distance from #000000 to #ffffff
    index = 0                      # default to black

    if not color_table:
        if not color_table8:
            build_color_tables()
        color_table = color_table8

    for i, values in enumerate(color_table):
        rd = r - values[0]
        gd = g - values[1]
        bd = b - values[2]
        this_distance = (rd * rd) + (gd * gd) + (bd * bd)

        if this_distance < shortest_distance:  # closer
            index = i
            shortest_distance = this_distance

    return index
def function[find_nearest_color_index, parameter[r, g, b, color_table, method]]: constant[ Given three integers representing R, G, and B, return the nearest color index. Arguments: r: int - of range 0…255 g: int - of range 0…255 b: int - of range 0…255 Returns: int, None: index, or None on error. ] variable[shortest_distance] assign[=] binary_operation[binary_operation[constant[257] * constant[257]] * constant[3]] variable[index] assign[=] constant[0] if <ast.UnaryOp object at 0x7da1b2666350> begin[:] if <ast.UnaryOp object at 0x7da1b2666680> begin[:] call[name[build_color_tables], parameter[]] variable[color_table] assign[=] name[color_table8] for taget[tuple[[<ast.Name object at 0x7da1b268cdf0>, <ast.Name object at 0x7da1b268df60>]]] in starred[call[name[enumerate], parameter[name[color_table]]]] begin[:] variable[rd] assign[=] binary_operation[name[r] - call[name[values]][constant[0]]] variable[gd] assign[=] binary_operation[name[g] - call[name[values]][constant[1]]] variable[bd] assign[=] binary_operation[name[b] - call[name[values]][constant[2]]] variable[this_distance] assign[=] binary_operation[binary_operation[binary_operation[name[rd] * name[rd]] + binary_operation[name[gd] * name[gd]]] + binary_operation[name[bd] * name[bd]]] if compare[name[this_distance] less[<] name[shortest_distance]] begin[:] variable[index] assign[=] name[i] variable[shortest_distance] assign[=] name[this_distance] return[name[index]]
keyword[def] identifier[find_nearest_color_index] ( identifier[r] , identifier[g] , identifier[b] , identifier[color_table] = keyword[None] , identifier[method] = literal[string] ): literal[string] identifier[shortest_distance] = literal[int] * literal[int] * literal[int] identifier[index] = literal[int] keyword[if] keyword[not] identifier[color_table] : keyword[if] keyword[not] identifier[color_table8] : identifier[build_color_tables] () identifier[color_table] = identifier[color_table8] keyword[for] identifier[i] , identifier[values] keyword[in] identifier[enumerate] ( identifier[color_table] ): identifier[rd] = identifier[r] - identifier[values] [ literal[int] ] identifier[gd] = identifier[g] - identifier[values] [ literal[int] ] identifier[bd] = identifier[b] - identifier[values] [ literal[int] ] identifier[this_distance] =( identifier[rd] * identifier[rd] )+( identifier[gd] * identifier[gd] )+( identifier[bd] * identifier[bd] ) keyword[if] identifier[this_distance] < identifier[shortest_distance] : identifier[index] = identifier[i] identifier[shortest_distance] = identifier[this_distance] keyword[return] identifier[index]
def find_nearest_color_index(r, g, b, color_table=None, method='euclid'):
    """ Given three integers representing R, G, and B, return the nearest color index.

        Arguments:
            r: int - of range 0…255
            g: int - of range 0…255
            b: int - of range 0…255

        Returns:
            int, None: index, or None on error.
    """
    shortest_distance = 257 * 257 * 3 # exceeds the max squared distance from #000000 to #ffffff
    index = 0 # default to black
    if not color_table:
        if not color_table8:
            build_color_tables() # depends on [control=['if'], data=[]]
        color_table = color_table8 # depends on [control=['if'], data=[]]
    for (i, values) in enumerate(color_table):
        rd = r - values[0]
        gd = g - values[1]
        bd = b - values[2]
        this_distance = rd * rd + gd * gd + bd * bd
        if this_distance < shortest_distance: # closer
            index = i
            shortest_distance = this_distance # depends on [control=['if'], data=['this_distance', 'shortest_distance']] # depends on [control=['for'], data=[]]
    return index
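A usage sketch with an explicit four-entry table, so the module-level color_table8 fallback (built elsewhere by build_color_tables) is never consulted; note the method parameter is accepted but unused by the body shown:

table = [(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255)]
idx = find_nearest_color_index(200, 30, 30, color_table=table)
print(idx)   # 1 -- (200, 30, 30) is closest to pure red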
def detuning_combinations(lists):
    r"""This function receives a list of length Nl
    with the number of transitions each laser induces. It returns the
    cartesian product of all these possibilities as a list of all possible
    combinations.
    """
    Nl = len(lists)
    comb = [[i] for i in range(lists[0])]
    for l in range(1, Nl):
        combn = []
        for c0 in comb:
            for cl in range(lists[l]):
                combn += [c0[:]+[cl]]
        comb = combn[:]

    return comb
def function[detuning_combinations, parameter[lists]]:
    constant[This function receives a list of length Nl
    with the number of transitions each laser induces. It returns the
    cartesian product of all these possibilities as a list of all possible
    combinations.
    ]
    variable[Nl] assign[=] call[name[len], parameter[name[lists]]]
    variable[comb] assign[=] <ast.ListComp object at 0x7da1b1a3c3a0>
    for taget[name[l]] in starred[call[name[range], parameter[constant[1], name[Nl]]]] begin[:]
        variable[combn] assign[=] list[[]]
        for taget[name[c0]] in starred[name[comb]] begin[:]
            for taget[name[cl]] in starred[call[name[range], parameter[call[name[lists]][name[l]]]]] begin[:]
                <ast.AugAssign object at 0x7da18f58e2f0>
        variable[comb] assign[=] call[name[combn]][<ast.Slice object at 0x7da18f58dab0>]
    return[name[comb]]
keyword[def] identifier[detuning_combinations] ( identifier[lists] ): literal[string] identifier[Nl] = identifier[len] ( identifier[lists] ) identifier[comb] =[[ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[lists] [ literal[int] ])] keyword[for] identifier[l] keyword[in] identifier[range] ( literal[int] , identifier[Nl] ): identifier[combn] =[] keyword[for] identifier[c0] keyword[in] identifier[comb] : keyword[for] identifier[cl] keyword[in] identifier[range] ( identifier[lists] [ identifier[l] ]): identifier[combn] +=[ identifier[c0] [:]+[ identifier[cl] ]] identifier[comb] = identifier[combn] [:] keyword[return] identifier[comb]
def detuning_combinations(lists):
    """This function receives a list of length Nl
    with the number of transitions each laser induces. It returns the
    cartesian product of all these possibilities as a list of all possible
    combinations.
    """
    Nl = len(lists)
    comb = [[i] for i in range(lists[0])]
    for l in range(1, Nl):
        combn = []
        for c0 in comb:
            for cl in range(lists[l]):
                combn += [c0[:] + [cl]] # depends on [control=['for'], data=['cl']] # depends on [control=['for'], data=['c0']]
        comb = combn[:] # depends on [control=['for'], data=['l']]
    return comb
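For example, two lasers driving 2 and 3 transitions respectively yield the full 2 x 3 cartesian product, in the same order as itertools.product:

import itertools

combos = detuning_combinations([2, 3])
print(combos)   # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]
assert combos == [list(t) for t in itertools.product(range(2), range(3))]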
def _parse_modes(mode_string, unary_modes=""): """ Parse the mode_string and return a list of triples. If no string is supplied return an empty list. >>> _parse_modes('') [] If no sign is supplied, return an empty list. >>> _parse_modes('ab') [] Discard unused args. >>> _parse_modes('+a foo bar baz') [['+', 'a', None]] Return none for unary args when not provided >>> _parse_modes('+abc foo', unary_modes='abc') [['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]] This function never throws an error: >>> import random >>> def random_text(min_len = 3, max_len = 80): ... len = random.randint(min_len, max_len) ... chars_to_choose = [chr(x) for x in range(0,1024)] ... chars = (random.choice(chars_to_choose) for x in range(len)) ... return ''.join(chars) >>> def random_texts(min_len = 3, max_len = 80): ... while True: ... yield random_text(min_len, max_len) >>> import itertools >>> texts = itertools.islice(random_texts(), 1000) >>> set(type(_parse_modes(text)) for text in texts) == {list} True """ # mode_string must be non-empty and begin with a sign if not mode_string or not mode_string[0] in '+-': return [] modes = [] parts = mode_string.split() mode_part, args = parts[0], parts[1:] for ch in mode_part: if ch in "+-": sign = ch continue arg = args.pop(0) if ch in unary_modes and args else None modes.append([sign, ch, arg]) return modes
def function[_parse_modes, parameter[mode_string, unary_modes]]: constant[ Parse the mode_string and return a list of triples. If no string is supplied return an empty list. >>> _parse_modes('') [] If no sign is supplied, return an empty list. >>> _parse_modes('ab') [] Discard unused args. >>> _parse_modes('+a foo bar baz') [['+', 'a', None]] Return none for unary args when not provided >>> _parse_modes('+abc foo', unary_modes='abc') [['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]] This function never throws an error: >>> import random >>> def random_text(min_len = 3, max_len = 80): ... len = random.randint(min_len, max_len) ... chars_to_choose = [chr(x) for x in range(0,1024)] ... chars = (random.choice(chars_to_choose) for x in range(len)) ... return ''.join(chars) >>> def random_texts(min_len = 3, max_len = 80): ... while True: ... yield random_text(min_len, max_len) >>> import itertools >>> texts = itertools.islice(random_texts(), 1000) >>> set(type(_parse_modes(text)) for text in texts) == {list} True ] if <ast.BoolOp object at 0x7da1b0b44400> begin[:] return[list[[]]] variable[modes] assign[=] list[[]] variable[parts] assign[=] call[name[mode_string].split, parameter[]] <ast.Tuple object at 0x7da1b0b46110> assign[=] tuple[[<ast.Subscript object at 0x7da1b0b44f70>, <ast.Subscript object at 0x7da1b0b47d00>]] for taget[name[ch]] in starred[name[mode_part]] begin[:] if compare[name[ch] in constant[+-]] begin[:] variable[sign] assign[=] name[ch] continue variable[arg] assign[=] <ast.IfExp object at 0x7da1b0b474f0> call[name[modes].append, parameter[list[[<ast.Name object at 0x7da1b0b46dd0>, <ast.Name object at 0x7da1b0b45f30>, <ast.Name object at 0x7da1b0b479a0>]]]] return[name[modes]]
keyword[def] identifier[_parse_modes] ( identifier[mode_string] , identifier[unary_modes] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[mode_string] keyword[or] keyword[not] identifier[mode_string] [ literal[int] ] keyword[in] literal[string] : keyword[return] [] identifier[modes] =[] identifier[parts] = identifier[mode_string] . identifier[split] () identifier[mode_part] , identifier[args] = identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] :] keyword[for] identifier[ch] keyword[in] identifier[mode_part] : keyword[if] identifier[ch] keyword[in] literal[string] : identifier[sign] = identifier[ch] keyword[continue] identifier[arg] = identifier[args] . identifier[pop] ( literal[int] ) keyword[if] identifier[ch] keyword[in] identifier[unary_modes] keyword[and] identifier[args] keyword[else] keyword[None] identifier[modes] . identifier[append] ([ identifier[sign] , identifier[ch] , identifier[arg] ]) keyword[return] identifier[modes]
def _parse_modes(mode_string, unary_modes=''): """ Parse the mode_string and return a list of triples. If no string is supplied return an empty list. >>> _parse_modes('') [] If no sign is supplied, return an empty list. >>> _parse_modes('ab') [] Discard unused args. >>> _parse_modes('+a foo bar baz') [['+', 'a', None]] Return none for unary args when not provided >>> _parse_modes('+abc foo', unary_modes='abc') [['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]] This function never throws an error: >>> import random >>> def random_text(min_len = 3, max_len = 80): ... len = random.randint(min_len, max_len) ... chars_to_choose = [chr(x) for x in range(0,1024)] ... chars = (random.choice(chars_to_choose) for x in range(len)) ... return ''.join(chars) >>> def random_texts(min_len = 3, max_len = 80): ... while True: ... yield random_text(min_len, max_len) >>> import itertools >>> texts = itertools.islice(random_texts(), 1000) >>> set(type(_parse_modes(text)) for text in texts) == {list} True """ # mode_string must be non-empty and begin with a sign if not mode_string or not mode_string[0] in '+-': return [] # depends on [control=['if'], data=[]] modes = [] parts = mode_string.split() (mode_part, args) = (parts[0], parts[1:]) for ch in mode_part: if ch in '+-': sign = ch continue # depends on [control=['if'], data=['ch']] arg = args.pop(0) if ch in unary_modes and args else None modes.append([sign, ch, arg]) # depends on [control=['for'], data=['ch']] return modes
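A usage sketch for an IRC-style mode change; 'o' and 'v' are declared unary, so each consumes one argument, while 'm' takes none:

modes = _parse_modes('+ov-m alice bob', unary_modes='ov')
print(modes)   # [['+', 'o', 'alice'], ['+', 'v', 'bob'], ['-', 'm', None]]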
def getReadGroupSetByName(self, name): """ Returns a ReadGroupSet with the specified name, or raises a ReadGroupSetNameNotFoundException if it does not exist. """ if name not in self._readGroupSetNameMap: raise exceptions.ReadGroupSetNameNotFoundException(name) return self._readGroupSetNameMap[name]
def function[getReadGroupSetByName, parameter[self, name]]: constant[ Returns a ReadGroupSet with the specified name, or raises a ReadGroupSetNameNotFoundException if it does not exist. ] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._readGroupSetNameMap] begin[:] <ast.Raise object at 0x7da18f00d960> return[call[name[self]._readGroupSetNameMap][name[name]]]
keyword[def] identifier[getReadGroupSetByName] ( identifier[self] , identifier[name] ): literal[string] keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_readGroupSetNameMap] : keyword[raise] identifier[exceptions] . identifier[ReadGroupSetNameNotFoundException] ( identifier[name] ) keyword[return] identifier[self] . identifier[_readGroupSetNameMap] [ identifier[name] ]
def getReadGroupSetByName(self, name): """ Returns a ReadGroupSet with the specified name, or raises a ReadGroupSetNameNotFoundException if it does not exist. """ if name not in self._readGroupSetNameMap: raise exceptions.ReadGroupSetNameNotFoundException(name) # depends on [control=['if'], data=['name']] return self._readGroupSetNameMap[name]
def get_url(url: str) -> Union[dict, int, float, str]: '''Perform a GET request for the url and return a dictionary parsed from the JSON response.''' request = Request(url, headers={"User-Agent": "pypeerassets"}) response = cast(HTTPResponse, urlopen(request)) if response.status != 200: raise Exception(response.reason) return json.loads(response.read().decode())
def function[get_url, parameter[url]]: constant[Perform a GET request for the url and return a dictionary parsed from the JSON response.] variable[request] assign[=] call[name[Request], parameter[name[url]]] variable[response] assign[=] call[name[cast], parameter[name[HTTPResponse], call[name[urlopen], parameter[name[request]]]]] if compare[name[response].status not_equal[!=] constant[200]] begin[:] <ast.Raise object at 0x7da1b25ee4a0> return[call[name[json].loads, parameter[call[call[name[response].read, parameter[]].decode, parameter[]]]]]
keyword[def] identifier[get_url] ( identifier[url] : identifier[str] )-> identifier[Union] [ identifier[dict] , identifier[int] , identifier[float] , identifier[str] ]: literal[string] identifier[request] = identifier[Request] ( identifier[url] , identifier[headers] ={ literal[string] : literal[string] }) identifier[response] = identifier[cast] ( identifier[HTTPResponse] , identifier[urlopen] ( identifier[request] )) keyword[if] identifier[response] . identifier[status] != literal[int] : keyword[raise] identifier[Exception] ( identifier[response] . identifier[reason] ) keyword[return] identifier[json] . identifier[loads] ( identifier[response] . identifier[read] (). identifier[decode] ())
def get_url(url: str) -> Union[dict, int, float, str]: """Perform a GET request for the url and return a dictionary parsed from the JSON response.""" request = Request(url, headers={'User-Agent': 'pypeerassets'}) response = cast(HTTPResponse, urlopen(request)) if response.status != 200: raise Exception(response.reason) # depends on [control=['if'], data=[]] return json.loads(response.read().decode())
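The function above assumes the following standard-library imports are in scope; the commented call is purely hypothetical, since running it needs a live endpoint that returns JSON:

import json
from http.client import HTTPResponse
from typing import Union, cast
from urllib.request import Request, urlopen

# data = get_url('https://example.com/api/status.json')  # hypothetical endpoint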
def get_project(self, resource): """ Get attributes of the data model object named by the given resource. Args: resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed. Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure. """ self.project_service.set_auth(self._token_project) return self.project_service.get(resource)
def function[get_project, parameter[self, resource]]: constant[ Get attributes of the data model object named by the given resource. Args: resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed. Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure. ] call[name[self].project_service.set_auth, parameter[name[self]._token_project]] return[call[name[self].project_service.get, parameter[name[resource]]]]
keyword[def] identifier[get_project] ( identifier[self] , identifier[resource] ): literal[string] identifier[self] . identifier[project_service] . identifier[set_auth] ( identifier[self] . identifier[_token_project] ) keyword[return] identifier[self] . identifier[project_service] . identifier[get] ( identifier[resource] )
def get_project(self, resource): """ Get attributes of the data model object named by the given resource. Args: resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed. Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure. """ self.project_service.set_auth(self._token_project) return self.project_service.get(resource)
def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts """ def wrap(f): @wraps(f) def wrapped_f(*args, **kwargs): # Run the original function first f(*args, **kwargs) # Set workload status now that contexts have been # acted on set_os_workload_status(configs, required_interfaces, charm_func) return wrapped_f return wrap
def function[os_workload_status, parameter[configs, required_interfaces, charm_func]]: constant[ Decorator to set workload status based on complete contexts ] def function[wrap, parameter[f]]: def function[wrapped_f, parameter[]]: call[name[f], parameter[<ast.Starred object at 0x7da1b121a350>]] call[name[set_os_workload_status], parameter[name[configs], name[required_interfaces], name[charm_func]]] return[name[wrapped_f]] return[name[wrap]]
keyword[def] identifier[os_workload_status] ( identifier[configs] , identifier[required_interfaces] , identifier[charm_func] = keyword[None] ): literal[string] keyword[def] identifier[wrap] ( identifier[f] ): @ identifier[wraps] ( identifier[f] ) keyword[def] identifier[wrapped_f] (* identifier[args] ,** identifier[kwargs] ): identifier[f] (* identifier[args] ,** identifier[kwargs] ) identifier[set_os_workload_status] ( identifier[configs] , identifier[required_interfaces] , identifier[charm_func] ) keyword[return] identifier[wrapped_f] keyword[return] identifier[wrap]
def os_workload_status(configs, required_interfaces, charm_func=None): """ Decorator to set workload status based on complete contexts """ def wrap(f): @wraps(f) def wrapped_f(*args, **kwargs): # Run the original function first f(*args, **kwargs) # Set workload status now that contexts have been # acted on set_os_workload_status(configs, required_interfaces, charm_func) return wrapped_f return wrap
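A wiring sketch for the decorator; CONFIGS, REQUIRED_INTERFACES and check_optional_relations are hypothetical placeholders for what a real charm would supply, and actually invoking the hook still requires the runtime that defines set_os_workload_status:

CONFIGS = {}   # hypothetical: normally a config-renderer object with complete_contexts()
REQUIRED_INTERFACES = {'identity': ['identity-service']}   # hypothetical interface map

def check_optional_relations(configs):   # hypothetical charm_func
    return 'active', 'Unit is ready'

@os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                    charm_func=check_optional_relations)
def config_changed():
    pass   # hook body runs first; workload status is refreshed afterwards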
def get_regular_expressions(taxonomy_name, rebuild=False, no_cache=False): """Return a list of patterns compiled from the RDF/SKOS ontology. Uses cache if it exists and if the taxonomy hasn't changed. """ # Translate the ontology name into a local path. Check if the name # relates to an existing ontology. onto_name, onto_path, onto_url = _get_ontology(taxonomy_name) if not onto_path: raise TaxonomyError("Unable to locate the taxonomy: '%s'." % taxonomy_name) cache_path = _get_cache_path(onto_name) current_app.logger.debug( 'Taxonomy discovered, now we load it ' '(from cache: %s, onto_path: %s, cache_path: %s)' % (not no_cache, onto_path, cache_path) ) if os.access(cache_path, os.R_OK): if os.access(onto_path, os.R_OK): if rebuild or no_cache: current_app.logger.debug( "Cache generation was manually forced.") return _build_cache(onto_path, skip_cache=no_cache) else: # ontology file not found. Use the cache instead. current_app.logger.warning( "The ontology couldn't be located. However " "a cached version of it is available. Using it as a " "reference." ) return _get_cache(cache_path, source_file=onto_path) if (os.path.getmtime(cache_path) > os.path.getmtime(onto_path)): # Cache is more recent than the ontology: use cache. current_app.logger.debug( "Normal situation, cache is older than ontology," " so we load it from cache" ) return _get_cache(cache_path, source_file=onto_path) else: # Ontology is more recent than the cache: rebuild cache. current_app.logger.warning( "Cache '%s' is older than '%s'. " "We will rebuild the cache" % (cache_path, onto_path) ) return _build_cache(onto_path, skip_cache=no_cache) elif os.access(onto_path, os.R_OK): if not no_cache and\ os.path.exists(cache_path) and\ not os.access(cache_path, os.W_OK): raise TaxonomyError('We cannot read/write into: %s. ' 'Aborting!' % cache_path) elif not no_cache and os.path.exists(cache_path): current_app.logger.warning( 'Cache %s exists, but is not readable!' % cache_path) current_app.logger.info( "Cache not available. Building it now: %s" % onto_path) return _build_cache(onto_path, skip_cache=no_cache) else: raise TaxonomyError("We miss both source and cache" " of the taxonomy: %s" % taxonomy_name)
def function[get_regular_expressions, parameter[taxonomy_name, rebuild, no_cache]]: constant[Return a list of patterns compiled from the RDF/SKOS ontology. Uses cache if it exists and if the taxonomy hasn't changed. ] <ast.Tuple object at 0x7da18eb549d0> assign[=] call[name[_get_ontology], parameter[name[taxonomy_name]]] if <ast.UnaryOp object at 0x7da18eb56980> begin[:] <ast.Raise object at 0x7da18eb57040> variable[cache_path] assign[=] call[name[_get_cache_path], parameter[name[onto_name]]] call[name[current_app].logger.debug, parameter[binary_operation[constant[Taxonomy discovered, now we load it (from cache: %s, onto_path: %s, cache_path: %s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.UnaryOp object at 0x7da18dc986a0>, <ast.Name object at 0x7da18dc9b820>, <ast.Name object at 0x7da18dc9a620>]]]]] if call[name[os].access, parameter[name[cache_path], name[os].R_OK]] begin[:] if call[name[os].access, parameter[name[onto_path], name[os].R_OK]] begin[:] if <ast.BoolOp object at 0x7da18dc99ba0> begin[:] call[name[current_app].logger.debug, parameter[constant[Cache generation was manually forced.]]] return[call[name[_build_cache], parameter[name[onto_path]]]] if compare[call[name[os].path.getmtime, parameter[name[cache_path]]] greater[>] call[name[os].path.getmtime, parameter[name[onto_path]]]] begin[:] call[name[current_app].logger.debug, parameter[constant[Normal situation, cache is older than ontology, so we load it from cache]]] return[call[name[_get_cache], parameter[name[cache_path]]]]
keyword[def] identifier[get_regular_expressions] ( identifier[taxonomy_name] , identifier[rebuild] = keyword[False] , identifier[no_cache] = keyword[False] ): literal[string] identifier[onto_name] , identifier[onto_path] , identifier[onto_url] = identifier[_get_ontology] ( identifier[taxonomy_name] ) keyword[if] keyword[not] identifier[onto_path] : keyword[raise] identifier[TaxonomyError] ( literal[string] % identifier[taxonomy_name] ) identifier[cache_path] = identifier[_get_cache_path] ( identifier[onto_name] ) identifier[current_app] . identifier[logger] . identifier[debug] ( literal[string] literal[string] %( keyword[not] identifier[no_cache] , identifier[onto_path] , identifier[cache_path] ) ) keyword[if] identifier[os] . identifier[access] ( identifier[cache_path] , identifier[os] . identifier[R_OK] ): keyword[if] identifier[os] . identifier[access] ( identifier[onto_path] , identifier[os] . identifier[R_OK] ): keyword[if] identifier[rebuild] keyword[or] identifier[no_cache] : identifier[current_app] . identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[_build_cache] ( identifier[onto_path] , identifier[skip_cache] = identifier[no_cache] ) keyword[else] : identifier[current_app] . identifier[logger] . identifier[warning] ( literal[string] literal[string] literal[string] ) keyword[return] identifier[_get_cache] ( identifier[cache_path] , identifier[source_file] = identifier[onto_path] ) keyword[if] ( identifier[os] . identifier[path] . identifier[getmtime] ( identifier[cache_path] )> identifier[os] . identifier[path] . identifier[getmtime] ( identifier[onto_path] )): identifier[current_app] . identifier[logger] . identifier[debug] ( literal[string] literal[string] ) keyword[return] identifier[_get_cache] ( identifier[cache_path] , identifier[source_file] = identifier[onto_path] ) keyword[else] : identifier[current_app] . identifier[logger] . identifier[warning] ( literal[string] literal[string] % ( identifier[cache_path] , identifier[onto_path] ) ) keyword[return] identifier[_build_cache] ( identifier[onto_path] , identifier[skip_cache] = identifier[no_cache] ) keyword[elif] identifier[os] . identifier[access] ( identifier[onto_path] , identifier[os] . identifier[R_OK] ): keyword[if] keyword[not] identifier[no_cache] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[cache_path] ) keyword[and] keyword[not] identifier[os] . identifier[access] ( identifier[cache_path] , identifier[os] . identifier[W_OK] ): keyword[raise] identifier[TaxonomyError] ( literal[string] literal[string] % identifier[cache_path] ) keyword[elif] keyword[not] identifier[no_cache] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[cache_path] ): identifier[current_app] . identifier[logger] . identifier[warning] ( literal[string] % identifier[cache_path] ) identifier[current_app] . identifier[logger] . identifier[info] ( literal[string] % identifier[onto_path] ) keyword[return] identifier[_build_cache] ( identifier[onto_path] , identifier[skip_cache] = identifier[no_cache] ) keyword[else] : keyword[raise] identifier[TaxonomyError] ( literal[string] literal[string] % identifier[taxonomy_name] )
def get_regular_expressions(taxonomy_name, rebuild=False, no_cache=False): """Return a list of patterns compiled from the RDF/SKOS ontology. Uses cache if it exists and if the taxonomy hasn't changed. """ # Translate the ontology name into a local path. Check if the name # relates to an existing ontology. (onto_name, onto_path, onto_url) = _get_ontology(taxonomy_name) if not onto_path: raise TaxonomyError("Unable to locate the taxonomy: '%s'." % taxonomy_name) # depends on [control=['if'], data=[]] cache_path = _get_cache_path(onto_name) current_app.logger.debug('Taxonomy discovered, now we load it (from cache: %s, onto_path: %s, cache_path: %s)' % (not no_cache, onto_path, cache_path)) if os.access(cache_path, os.R_OK): if os.access(onto_path, os.R_OK): if rebuild or no_cache: current_app.logger.debug('Cache generation was manually forced.') return _build_cache(onto_path, skip_cache=no_cache) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # ontology file not found. Use the cache instead. current_app.logger.warning("The ontology couldn't be located. However a cached version of it is available. Using it as a reference.") return _get_cache(cache_path, source_file=onto_path) if os.path.getmtime(cache_path) > os.path.getmtime(onto_path): # Cache is more recent than the ontology: use cache. current_app.logger.debug('Normal situation, cache is older than ontology, so we load it from cache') return _get_cache(cache_path, source_file=onto_path) # depends on [control=['if'], data=[]] else: # Ontology is more recent than the cache: rebuild cache. current_app.logger.warning("Cache '%s' is older than '%s'. We will rebuild the cache" % (cache_path, onto_path)) return _build_cache(onto_path, skip_cache=no_cache) # depends on [control=['if'], data=[]] elif os.access(onto_path, os.R_OK): if not no_cache and os.path.exists(cache_path) and (not os.access(cache_path, os.W_OK)): raise TaxonomyError('We cannot read/write into: %s. Aborting!' % cache_path) # depends on [control=['if'], data=[]] elif not no_cache and os.path.exists(cache_path): current_app.logger.warning('Cache %s exists, but is not readable!' % cache_path) # depends on [control=['if'], data=[]] current_app.logger.info('Cache not available. Building it now: %s' % onto_path) return _build_cache(onto_path, skip_cache=no_cache) # depends on [control=['if'], data=[]] else: raise TaxonomyError('We miss both source and cache of the taxonomy: %s' % taxonomy_name)
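The get_regular_expressions function above reduces its cache-vs-rebuild decision to a single mtime comparison. A minimal sketch of that freshness test, with hypothetical path arguments:

import os

def cache_is_fresh(cache_path, source_path):
    # The cache is only trusted if it was written after the source
    # ontology was last modified.
    return os.path.getmtime(cache_path) > os.path.getmtime(source_path)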
def get_terms(self):
    ''' GROUP BY is used as a shortcut to keep only the first row of each ilx group '''
    if not self.terms.empty:
        return self.terms
    if self.from_backup:
        self.terms = open_pickle(TERMS_BACKUP_PATH)
        return self.terms
    engine = create_engine(self.db_url)
    data = """
        SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version
        FROM terms t
        GROUP BY t.ilx
    """
    self.terms = pd.read_sql(data, engine)
    create_pickle(self.terms, TERMS_BACKUP_PATH)
    return self.terms
def function[get_terms, parameter[self]]: constant[ GROUP BY is a shortcut to only getting the first in every list of group ] if <ast.UnaryOp object at 0x7da1b1a221a0> begin[:] return[name[self].terms] if name[self].from_backup begin[:] name[self].terms assign[=] call[name[open_pickle], parameter[name[TERMS_BACKUP_PATH]]] return[name[self].terms] variable[engine] assign[=] call[name[create_engine], parameter[name[self].db_url]] variable[data] assign[=] constant[ SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version FROM terms t GROUP BY t.ilx ] name[self].terms assign[=] call[name[pd].read_sql, parameter[name[data], name[engine]]] call[name[create_pickle], parameter[name[self].terms, name[TERMS_BACKUP_PATH]]] return[name[self].terms]
keyword[def] identifier[get_terms] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[terms] . identifier[empty] : keyword[return] identifier[self] . identifier[terms] keyword[if] identifier[self] . identifier[from_backup] : identifier[self] . identifier[terms] = identifier[open_pickle] ( identifier[TERMS_BACKUP_PATH] ) keyword[return] identifier[self] . identifier[terms] identifier[engine] = identifier[create_engine] ( identifier[self] . identifier[db_url] ) identifier[data] = literal[string] identifier[self] . identifier[terms] = identifier[pd] . identifier[read_sql] ( identifier[data] , identifier[engine] ) identifier[create_pickle] ( identifier[self] . identifier[terms] , identifier[TERMS_BACKUP_PATH] ) keyword[return] identifier[self] . identifier[terms]
def get_terms(self): """ GROUP BY is a shortcut to only getting the first in every list of group """ if not self.terms.empty: return self.terms # depends on [control=['if'], data=[]] if self.from_backup: self.terms = open_pickle(TERMS_BACKUP_PATH) return self.terms # depends on [control=['if'], data=[]] engine = create_engine(self.db_url) data = '\n SELECT t.id as tid, t.ilx, t.label, t.definition, t.type, t.comment, t.version\n FROM terms t\n GROUP BY t.ilx\n ' self.terms = pd.read_sql(data, engine) create_pickle(self.terms, TERMS_BACKUP_PATH) return self.terms
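get_terms layers three sources: the in-memory frame, a pickled backup, and the database. A minimal sketch of the same backup pattern, assuming open_pickle/create_pickle behave like the helper below:

import os
import pickle

def load_with_backup(path, compute):
    # Prefer the pickled backup; otherwise recompute and cache the result.
    if os.path.exists(path):
        with open(path, 'rb') as fh:
            return pickle.load(fh)
    result = compute()
    with open(path, 'wb') as fh:
        pickle.dump(result, fh)
    return result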
def coauthor_likelihoods(p1, p2):
    """returns the likelihoods of observing the actual number of coauthors
    shared by c{p1} and c{p2}, conditioned on whether or not p1 and p2
    are a match
    """
    def get_coauthors(p):
        ret = set()
        for m in p.mentions:
            for co_m in article_to_mentions[m.article_id]:
                if m != co_m:
                    co_p = Agglomerator.MENTION_TO_CLUSTER[co_m]
                    ret.add(co_p)
        return ret

    def num_common_coauthors(p1, p2):
        return len(set.intersection(get_coauthors(p1), get_coauthors(p2)))

    num_common = num_common_coauthors(p1, p2)
    if num_common >= len(config.p_coauthor[0]):
        num_common = len(config.p_coauthor[0]) - 1
    likelihood0 = config.p_coauthor[0][num_common]
    likelihood1 = config.p_coauthor[1][num_common]
    return likelihood1, likelihood0
def function[coauthor_likelihoods, parameter[p1, p2]]: constant[returns the likelihoods of observing the actual number coauthors shared by c{p1} and c{p2}, conditioned on whether or not p1 and p2 are a match ] def function[get_coauthors, parameter[p]]: variable[ret] assign[=] call[name[set], parameter[]] for taget[name[m]] in starred[name[p].mentions] begin[:] for taget[name[co_m]] in starred[call[name[article_to_mentions]][name[m].article_id]] begin[:] if compare[name[m] not_equal[!=] name[co_m]] begin[:] variable[co_p] assign[=] call[name[Agglomerator].MENTION_TO_CLUSTER][name[co_m]] call[name[ret].add, parameter[name[co_p]]] return[name[ret]] def function[num_common_coauthors, parameter[p1, p2]]: return[call[name[len], parameter[call[name[set].intersection, parameter[call[name[get_coauthors], parameter[name[p1]]], call[name[get_coauthors], parameter[name[p2]]]]]]]] variable[num_common] assign[=] call[name[num_common_coauthors], parameter[name[p1], name[p2]]] if compare[name[num_common] greater_or_equal[>=] call[name[len], parameter[call[name[config].p_coauthor][constant[0]]]]] begin[:] variable[num_common] assign[=] binary_operation[call[name[len], parameter[call[name[config].p_coauthor][constant[0]]]] - constant[1]] variable[likelihood0] assign[=] call[call[name[config].p_coauthor][constant[0]]][name[num_common]] variable[likelihood1] assign[=] call[call[name[config].p_coauthor][constant[1]]][name[num_common]] return[tuple[[<ast.Name object at 0x7da2043474c0>, <ast.Name object at 0x7da204344670>]]]
keyword[def] identifier[coauthor_likelihoods] ( identifier[p1] , identifier[p2] ): literal[string] keyword[def] identifier[get_coauthors] ( identifier[p] ): identifier[ret] = identifier[set] () keyword[for] identifier[m] keyword[in] identifier[p] . identifier[mentions] : keyword[for] identifier[co_m] keyword[in] identifier[article_to_mentions] [ identifier[m] . identifier[article_id] ]: keyword[if] identifier[m] != identifier[co_m] : identifier[co_p] = identifier[Agglomerator] . identifier[MENTION_TO_CLUSTER] [ identifier[co_m] ] identifier[ret] . identifier[add] ( identifier[co_p] ) keyword[return] identifier[ret] keyword[def] identifier[num_common_coauthors] ( identifier[p1] , identifier[p2] ): keyword[return] identifier[len] ( identifier[set] . identifier[intersection] ( identifier[get_coauthors] ( identifier[p1] ), identifier[get_coauthors] ( identifier[p2] ))) identifier[num_common] = identifier[num_common_coauthors] ( identifier[p1] , identifier[p2] ) keyword[if] identifier[num_common] >= identifier[len] ( identifier[config] . identifier[p_coauthor] [ literal[int] ]): identifier[num_common] = identifier[len] ( identifier[config] . identifier[p_coauthor] [ literal[int] ])- literal[int] identifier[likelihood0] = identifier[config] . identifier[p_coauthor] [ literal[int] ][ identifier[num_common] ] identifier[likelihood1] = identifier[config] . identifier[p_coauthor] [ literal[int] ][ identifier[num_common] ] keyword[return] identifier[likelihood1] , identifier[likelihood0]
def coauthor_likelihoods(p1, p2): """returns the likelihoods of observing the actual number coauthors shared by c{p1} and c{p2}, conditioned on whether or not p1 and p2 are a match """ def get_coauthors(p): ret = set() for m in p.mentions: for co_m in article_to_mentions[m.article_id]: if m != co_m: co_p = Agglomerator.MENTION_TO_CLUSTER[co_m] ret.add(co_p) # depends on [control=['if'], data=['co_m']] # depends on [control=['for'], data=['co_m']] # depends on [control=['for'], data=['m']] return ret def num_common_coauthors(p1, p2): return len(set.intersection(get_coauthors(p1), get_coauthors(p2))) num_common = num_common_coauthors(p1, p2) if num_common >= len(config.p_coauthor[0]): num_common = len(config.p_coauthor[0]) - 1 # depends on [control=['if'], data=['num_common']] likelihood0 = config.p_coauthor[0][num_common] likelihood1 = config.p_coauthor[1][num_common] return (likelihood1, likelihood0)
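A worked example of the clamp-and-lookup step in coauthor_likelihoods, using a hypothetical four-bin likelihood table (counts of three or more shared coauthors fall into the last bin):

p_coauthor = [
    [0.80, 0.15, 0.04, 0.01],  # P(n shared coauthors | not a match)
    [0.30, 0.30, 0.25, 0.15],  # P(n shared coauthors | match)
]

num_common = 7
if num_common >= len(p_coauthor[0]):
    num_common = len(p_coauthor[0]) - 1  # clamp into the last bin
print(p_coauthor[1][num_common], p_coauthor[0][num_common])  # -> 0.15 0.01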
def to_wire(self, origin=None, max_size=65535): """Return a string containing the update in DNS compressed wire format. @rtype: string""" if origin is None: origin = self.origin return super(Update, self).to_wire(origin, max_size)
def function[to_wire, parameter[self, origin, max_size]]: constant[Return a string containing the update in DNS compressed wire format. @rtype: string] if compare[name[origin] is constant[None]] begin[:] variable[origin] assign[=] name[self].origin return[call[call[name[super], parameter[name[Update], name[self]]].to_wire, parameter[name[origin], name[max_size]]]]
keyword[def] identifier[to_wire] ( identifier[self] , identifier[origin] = keyword[None] , identifier[max_size] = literal[int] ): literal[string] keyword[if] identifier[origin] keyword[is] keyword[None] : identifier[origin] = identifier[self] . identifier[origin] keyword[return] identifier[super] ( identifier[Update] , identifier[self] ). identifier[to_wire] ( identifier[origin] , identifier[max_size] )
def to_wire(self, origin=None, max_size=65535): """Return a string containing the update in DNS compressed wire format. @rtype: string""" if origin is None: origin = self.origin # depends on [control=['if'], data=['origin']] return super(Update, self).to_wire(origin, max_size)
def get_band_structure_dict(self):
    """Returns calculated band structures

    Returns
    -------
    dict
        keys: qpoints, distances, frequencies, eigenvectors, and
        group_velocities

        Each dict value is a list containing properties on number of paths.
        The number of q-points on one path can be different from that
        on the other path. Each set of properties on a path is ndarray
        and is explained as below:

        qpoints[i]: ndarray
            q-points in reduced coordinates of reciprocal space without
            2pi.
            shape=(q-points, 3), dtype='double'
        distances[i]: ndarray
            Distances in reciprocal space along paths.
            shape=(q-points,), dtype='double'
        frequencies[i]: ndarray
            Phonon frequencies.
            shape=(q-points, bands), dtype='double'
        eigenvectors[i]: ndarray
            Phonon eigenvectors. None if eigenvectors are not stored.
            shape=(q-points, bands, bands), dtype='complex'
        group_velocities[i]: ndarray
            Phonon group velocities. None if group velocities are not
            calculated.
            shape=(q-points, bands, 3), dtype='double'

    """
    if self._band_structure is None:
        msg = "run_band_structure has to be done."
        raise RuntimeError(msg)

    retdict = {'qpoints': self._band_structure.qpoints,
               'distances': self._band_structure.distances,
               'frequencies': self._band_structure.frequencies,
               'eigenvectors': self._band_structure.eigenvectors,
               'group_velocities': self._band_structure.group_velocities}

    return retdict
def function[get_band_structure_dict, parameter[self]]: constant[Returns calclated band structures Returns ------- dict keys: qpoints, distances, frequencies, eigenvectors, and group_velocities Each dict value is a list containing properties on number of paths. The number of q-points on one path can be different from that on the other path. Each set of properties on a path is ndarray and is explained as below: qpoints[i]: ndarray q-points in reduced coordinates of reciprocal space without 2pi. shape=(q-points, 3), dtype='double' distances[i]: ndarray Distances in reciprocal space along paths. shape=(q-points,), dtype='double' frequencies[i]: ndarray Phonon frequencies shape=(q-points, bands), dtype='double' eigenvectors[i]: ndarray Phonon eigenvectors. None if eigenvectors are not stored. shape=(q-points, bands, bands), dtype='complex' group_velocities[i]: ndarray Phonon group velocities. None if group velocities are not calculated. shape=(q-points, bands, 3), dtype='double' ] if compare[name[self]._band_structure is constant[None]] begin[:] variable[msg] assign[=] constant[run_band_structure has to be done.] <ast.Raise object at 0x7da2044c3430> variable[retdict] assign[=] dictionary[[<ast.Constant object at 0x7da20e74be20>, <ast.Constant object at 0x7da20e74bc70>, <ast.Constant object at 0x7da20e74b070>, <ast.Constant object at 0x7da20e748730>, <ast.Constant object at 0x7da20e74ae60>], [<ast.Attribute object at 0x7da20e748820>, <ast.Attribute object at 0x7da20e748700>, <ast.Attribute object at 0x7da20e74b7f0>, <ast.Attribute object at 0x7da20e749540>, <ast.Attribute object at 0x7da20e74beb0>]] return[name[retdict]]
keyword[def] identifier[get_band_structure_dict] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_band_structure] keyword[is] keyword[None] : identifier[msg] =( literal[string] ) keyword[raise] identifier[RuntimeError] ( identifier[msg] ) identifier[retdict] ={ literal[string] : identifier[self] . identifier[_band_structure] . identifier[qpoints] , literal[string] : identifier[self] . identifier[_band_structure] . identifier[distances] , literal[string] : identifier[self] . identifier[_band_structure] . identifier[frequencies] , literal[string] : identifier[self] . identifier[_band_structure] . identifier[eigenvectors] , literal[string] : identifier[self] . identifier[_band_structure] . identifier[group_velocities] } keyword[return] identifier[retdict]
def get_band_structure_dict(self): """Returns calclated band structures Returns ------- dict keys: qpoints, distances, frequencies, eigenvectors, and group_velocities Each dict value is a list containing properties on number of paths. The number of q-points on one path can be different from that on the other path. Each set of properties on a path is ndarray and is explained as below: qpoints[i]: ndarray q-points in reduced coordinates of reciprocal space without 2pi. shape=(q-points, 3), dtype='double' distances[i]: ndarray Distances in reciprocal space along paths. shape=(q-points,), dtype='double' frequencies[i]: ndarray Phonon frequencies shape=(q-points, bands), dtype='double' eigenvectors[i]: ndarray Phonon eigenvectors. None if eigenvectors are not stored. shape=(q-points, bands, bands), dtype='complex' group_velocities[i]: ndarray Phonon group velocities. None if group velocities are not calculated. shape=(q-points, bands, 3), dtype='double' """ if self._band_structure is None: msg = 'run_band_structure has to be done.' raise RuntimeError(msg) # depends on [control=['if'], data=[]] retdict = {'qpoints': self._band_structure.qpoints, 'distances': self._band_structure.distances, 'frequencies': self._band_structure.frequencies, 'eigenvectors': self._band_structure.eigenvectors, 'group_velocities': self._band_structure.group_velocities} return retdict
def _igamc(a, x):
    """Complemented incomplete Gamma integral.

    SYNOPSIS:

    double a, x, y, igamc();

    y = igamc( a, x );

    DESCRIPTION:

    The function is defined by::

        igamc(a, x) = 1 - igam(a, x)

                    = 1/Gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf)

    In this implementation both arguments must be positive.
    The integral is evaluated by either a power series or
    continued fraction expansion, depending on the relative
    values of a and x.
    """
    # Compute x**a * exp(-x) / Gamma(a)
    ax = math.exp(a * math.log(x) - x - math.lgamma(a))

    # Continued fraction
    y = 1.0 - a
    z = x + y + 1.0
    c = 0.0
    pkm2 = 1.0
    qkm2 = x
    pkm1 = x + 1.0
    qkm1 = z * x
    ans = pkm1 / qkm1
    while True:
        c += 1.0
        y += 1.0
        z += 2.0
        yc = y * c
        pk = pkm1 * z - pkm2 * yc
        qk = qkm1 * z - qkm2 * yc
        if qk != 0:
            r = pk / qk
            t = abs((ans - r) / r)
            ans = r
        else:
            t = 1.0
        pkm2 = pkm1
        pkm1 = pk
        qkm2 = qkm1
        qkm1 = qk
        if abs(pk) > BIG:
            pkm2 *= BIGINV
            pkm1 *= BIGINV
            qkm2 *= BIGINV
            qkm1 *= BIGINV
        if t <= MACHEP:
            return ans * ax
def function[_igamc, parameter[a, x]]: constant[Complemented incomplete Gamma integral. SYNOPSIS: double a, x, y, igamc(); y = igamc( a, x ); DESCRIPTION: The function is defined by:: igamc(a,x) = 1 - igam(a,x) inf. - 1 | | -t a-1 = ----- | e t dt. - | | | (a) - x In this implementation both arguments must be positive. The integral is evaluated by either a power series or continued fraction expansion, depending on the relative values of a and x. ] variable[ax] assign[=] call[name[math].exp, parameter[binary_operation[binary_operation[binary_operation[name[a] * call[name[math].log, parameter[name[x]]]] - name[x]] - call[name[math].lgamma, parameter[name[a]]]]]] variable[y] assign[=] binary_operation[constant[1.0] - name[a]] variable[z] assign[=] binary_operation[binary_operation[name[x] + name[y]] + constant[1.0]] variable[c] assign[=] constant[0.0] variable[pkm2] assign[=] constant[1.0] variable[qkm2] assign[=] name[x] variable[pkm1] assign[=] binary_operation[name[x] + constant[1.0]] variable[qkm1] assign[=] binary_operation[name[z] * name[x]] variable[ans] assign[=] binary_operation[name[pkm1] / name[qkm1]] while constant[True] begin[:] <ast.AugAssign object at 0x7da1b235f9a0> <ast.AugAssign object at 0x7da1b235eb90> <ast.AugAssign object at 0x7da1b235f370> variable[yc] assign[=] binary_operation[name[y] * name[c]] variable[pk] assign[=] binary_operation[binary_operation[name[pkm1] * name[z]] - binary_operation[name[pkm2] * name[yc]]] variable[qk] assign[=] binary_operation[binary_operation[name[qkm1] * name[z]] - binary_operation[name[qkm2] * name[yc]]] if compare[name[qk] not_equal[!=] constant[0]] begin[:] variable[r] assign[=] binary_operation[name[pk] / name[qk]] variable[t] assign[=] call[name[abs], parameter[binary_operation[binary_operation[name[ans] - name[r]] / name[r]]]] variable[ans] assign[=] name[r] variable[pkm2] assign[=] name[pkm1] variable[pkm1] assign[=] name[pk] variable[qkm2] assign[=] name[qkm1] variable[qkm1] assign[=] name[qk] if compare[call[name[abs], parameter[name[pk]]] greater[>] name[BIG]] begin[:] <ast.AugAssign object at 0x7da1b235eb60> <ast.AugAssign object at 0x7da1b2449e70> <ast.AugAssign object at 0x7da1b2449570> <ast.AugAssign object at 0x7da1b24494e0> if compare[name[t] less_or_equal[<=] name[MACHEP]] begin[:] return[binary_operation[name[ans] * name[ax]]]
keyword[def] identifier[_igamc] ( identifier[a] , identifier[x] ): literal[string] identifier[ax] = identifier[math] . identifier[exp] ( identifier[a] * identifier[math] . identifier[log] ( identifier[x] )- identifier[x] - identifier[math] . identifier[lgamma] ( identifier[a] )) identifier[y] = literal[int] - identifier[a] identifier[z] = identifier[x] + identifier[y] + literal[int] identifier[c] = literal[int] identifier[pkm2] = literal[int] identifier[qkm2] = identifier[x] identifier[pkm1] = identifier[x] + literal[int] identifier[qkm1] = identifier[z] * identifier[x] identifier[ans] = identifier[pkm1] / identifier[qkm1] keyword[while] keyword[True] : identifier[c] += literal[int] identifier[y] += literal[int] identifier[z] += literal[int] identifier[yc] = identifier[y] * identifier[c] identifier[pk] = identifier[pkm1] * identifier[z] - identifier[pkm2] * identifier[yc] identifier[qk] = identifier[qkm1] * identifier[z] - identifier[qkm2] * identifier[yc] keyword[if] identifier[qk] != literal[int] : identifier[r] = identifier[pk] / identifier[qk] identifier[t] = identifier[abs] (( identifier[ans] - identifier[r] )/ identifier[r] ) identifier[ans] = identifier[r] keyword[else] : identifier[t] = literal[int] ; identifier[pkm2] = identifier[pkm1] identifier[pkm1] = identifier[pk] identifier[qkm2] = identifier[qkm1] identifier[qkm1] = identifier[qk] keyword[if] identifier[abs] ( identifier[pk] )> identifier[BIG] : identifier[pkm2] *= identifier[BIGINV] ; identifier[pkm1] *= identifier[BIGINV] ; identifier[qkm2] *= identifier[BIGINV] ; identifier[qkm1] *= identifier[BIGINV] ; keyword[if] identifier[t] <= identifier[MACHEP] : keyword[return] identifier[ans] * identifier[ax]
def _igamc(a, x): """Complemented incomplete Gamma integral. SYNOPSIS: double a, x, y, igamc(); y = igamc( a, x ); DESCRIPTION: The function is defined by:: igamc(a,x) = 1 - igam(a,x) inf. - 1 | | -t a-1 = ----- | e t dt. - | | | (a) - x In this implementation both arguments must be positive. The integral is evaluated by either a power series or continued fraction expansion, depending on the relative values of a and x. """ # Compute x**a * exp(-x) / Gamma(a) ax = math.exp(a * math.log(x) - x - math.lgamma(a)) # Continued fraction y = 1.0 - a z = x + y + 1.0 c = 0.0 pkm2 = 1.0 qkm2 = x pkm1 = x + 1.0 qkm1 = z * x ans = pkm1 / qkm1 while True: c += 1.0 y += 1.0 z += 2.0 yc = y * c pk = pkm1 * z - pkm2 * yc qk = qkm1 * z - qkm2 * yc if qk != 0: r = pk / qk t = abs((ans - r) / r) ans = r # depends on [control=['if'], data=['qk']] else: t = 1.0 pkm2 = pkm1 pkm1 = pk qkm2 = qkm1 qkm1 = qk if abs(pk) > BIG: pkm2 *= BIGINV pkm1 *= BIGINV qkm2 *= BIGINV qkm1 *= BIGINV # depends on [control=['if'], data=[]] if t <= MACHEP: return ans * ax # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
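_igamc assumes the module globals BIG, BIGINV and MACHEP; plausible cephes-style double-precision values are sketched below, together with a sanity check against SciPy's regularized upper incomplete gamma:

import math
from scipy.special import gammaincc

MACHEP = 1.11022302462515654042e-16  # 2**-53, relative machine precision
BIG = 4.503599627370496e15           # 2**52, rescaling threshold
BIGINV = 2.22044604925031308085e-16  # 2**-52

# _igamc computes the regularized upper incomplete gamma function Q(a, x),
# so it should agree with scipy.special.gammaincc for x > a.
assert abs(_igamc(2.5, 3.0) - gammaincc(2.5, 3.0)) < 1e-10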
def putTextAlpha(img, text, alpha, org, fontFace, fontScale, color, thickness): # , lineType=None ''' Extends cv2.putText with [alpha] argument ''' x, y = cv2.getTextSize(text, fontFace, fontScale, thickness)[0] ox, oy = org imgcut = img[oy - y - 3:oy, ox:ox + x] if img.ndim == 3: txtarr = np.zeros(shape=(y + 3, x, 3), dtype=np.uint8) else: txtarr = np.zeros(shape=(y + 3, x), dtype=np.uint8) cv2.putText(txtarr, text, (0, y), fontFace, fontScale, color, thickness=thickness #, lineType=lineType ) cv2.addWeighted(txtarr, alpha, imgcut, 1, 0, imgcut, -1) return img
def function[putTextAlpha, parameter[img, text, alpha, org, fontFace, fontScale, color, thickness]]: constant[ Extends cv2.putText with [alpha] argument ] <ast.Tuple object at 0x7da1b1114610> assign[=] call[call[name[cv2].getTextSize, parameter[name[text], name[fontFace], name[fontScale], name[thickness]]]][constant[0]] <ast.Tuple object at 0x7da1b1114970> assign[=] name[org] variable[imgcut] assign[=] call[name[img]][tuple[[<ast.Slice object at 0x7da1b1116890>, <ast.Slice object at 0x7da1b1116710>]]] if compare[name[img].ndim equal[==] constant[3]] begin[:] variable[txtarr] assign[=] call[name[np].zeros, parameter[]] call[name[cv2].putText, parameter[name[txtarr], name[text], tuple[[<ast.Constant object at 0x7da1b11dac80>, <ast.Name object at 0x7da1b11d8400>]], name[fontFace], name[fontScale], name[color]]] call[name[cv2].addWeighted, parameter[name[txtarr], name[alpha], name[imgcut], constant[1], constant[0], name[imgcut], <ast.UnaryOp object at 0x7da1b11d9e70>]] return[name[img]]
keyword[def] identifier[putTextAlpha] ( identifier[img] , identifier[text] , identifier[alpha] , identifier[org] , identifier[fontFace] , identifier[fontScale] , identifier[color] , identifier[thickness] ): literal[string] identifier[x] , identifier[y] = identifier[cv2] . identifier[getTextSize] ( identifier[text] , identifier[fontFace] , identifier[fontScale] , identifier[thickness] )[ literal[int] ] identifier[ox] , identifier[oy] = identifier[org] identifier[imgcut] = identifier[img] [ identifier[oy] - identifier[y] - literal[int] : identifier[oy] , identifier[ox] : identifier[ox] + identifier[x] ] keyword[if] identifier[img] . identifier[ndim] == literal[int] : identifier[txtarr] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[y] + literal[int] , identifier[x] , literal[int] ), identifier[dtype] = identifier[np] . identifier[uint8] ) keyword[else] : identifier[txtarr] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[y] + literal[int] , identifier[x] ), identifier[dtype] = identifier[np] . identifier[uint8] ) identifier[cv2] . identifier[putText] ( identifier[txtarr] , identifier[text] ,( literal[int] , identifier[y] ), identifier[fontFace] , identifier[fontScale] , identifier[color] , identifier[thickness] = identifier[thickness] ) identifier[cv2] . identifier[addWeighted] ( identifier[txtarr] , identifier[alpha] , identifier[imgcut] , literal[int] , literal[int] , identifier[imgcut] ,- literal[int] ) keyword[return] identifier[img]
def putTextAlpha(img, text, alpha, org, fontFace, fontScale, color, thickness): # , lineType=None '\n Extends cv2.putText with [alpha] argument\n ' (x, y) = cv2.getTextSize(text, fontFace, fontScale, thickness)[0] (ox, oy) = org imgcut = img[oy - y - 3:oy, ox:ox + x] if img.ndim == 3: txtarr = np.zeros(shape=(y + 3, x, 3), dtype=np.uint8) # depends on [control=['if'], data=[]] else: txtarr = np.zeros(shape=(y + 3, x), dtype=np.uint8) #, lineType=lineType cv2.putText(txtarr, text, (0, y), fontFace, fontScale, color, thickness=thickness) cv2.addWeighted(txtarr, alpha, imgcut, 1, 0, imgcut, -1) return img
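A minimal usage sketch for putTextAlpha on a synthetic image (canvas size, text and colors are arbitrary); a dark canvas is used because the additive blend saturates light backgrounds:

import cv2
import numpy as np

img = np.zeros((120, 400, 3), dtype=np.uint8)  # black canvas
putTextAlpha(img, 'hello', 0.5, (10, 60),
             cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
cv2.imwrite('text_alpha.png', img)  # 50%-opacity white text over black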
def get_partition_trees(self, p): """ Return the trees associated with a partition, p """ trees = [] for grp in p.get_membership(): try: result = self.get_group_result(grp) trees.append(result['ml_tree']) except ValueError: trees.append(None) logger.error('No tree found for group {}'.format(grp)) return trees
def function[get_partition_trees, parameter[self, p]]: constant[ Return the trees associated with a partition, p ] variable[trees] assign[=] list[[]] for taget[name[grp]] in starred[call[name[p].get_membership, parameter[]]] begin[:] <ast.Try object at 0x7da20c6aa3e0> return[name[trees]]
keyword[def] identifier[get_partition_trees] ( identifier[self] , identifier[p] ): literal[string] identifier[trees] =[] keyword[for] identifier[grp] keyword[in] identifier[p] . identifier[get_membership] (): keyword[try] : identifier[result] = identifier[self] . identifier[get_group_result] ( identifier[grp] ) identifier[trees] . identifier[append] ( identifier[result] [ literal[string] ]) keyword[except] identifier[ValueError] : identifier[trees] . identifier[append] ( keyword[None] ) identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[grp] )) keyword[return] identifier[trees]
def get_partition_trees(self, p): """ Return the trees associated with a partition, p """ trees = [] for grp in p.get_membership(): try: result = self.get_group_result(grp) trees.append(result['ml_tree']) # depends on [control=['try'], data=[]] except ValueError: trees.append(None) logger.error('No tree found for group {}'.format(grp)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['grp']] return trees
def section_strahler_orders(neurites, neurite_type=NeuriteType.all):
    '''Strahler order of each section in the neurites'''
    return map_sections(sectionfunc.strahler_order, neurites, neurite_type)
def function[section_strahler_orders, parameter[neurites, neurite_type]]: constant[Inter-segment opening angles in a section] return[call[name[map_sections], parameter[name[sectionfunc].strahler_order, name[neurites], name[neurite_type]]]]
keyword[def] identifier[section_strahler_orders] ( identifier[neurites] , identifier[neurite_type] = identifier[NeuriteType] . identifier[all] ): literal[string] keyword[return] identifier[map_sections] ( identifier[sectionfunc] . identifier[strahler_order] , identifier[neurites] , identifier[neurite_type] )
def section_strahler_orders(neurites, neurite_type=NeuriteType.all): """Inter-segment opening angles in a section""" return map_sections(sectionfunc.strahler_order, neurites, neurite_type)
def _get_anchor(module_to_name, fullname): """Turn a full member name into an anchor. Args: module_to_name: Dictionary mapping modules to short names. fullname: Fully qualified name of symbol. Returns: HTML anchor string. The longest module name prefix of fullname is removed to make the anchor. Raises: ValueError: If fullname uses characters invalid in an anchor. """ if not _anchor_re.match(fullname): raise ValueError("'%s' is not a valid anchor" % fullname) anchor = fullname for module_name in module_to_name.values(): if fullname.startswith(module_name + "."): rest = fullname[len(module_name)+1:] # Use this prefix iff it is longer than any found before if len(anchor) > len(rest): anchor = rest return anchor
def function[_get_anchor, parameter[module_to_name, fullname]]: constant[Turn a full member name into an anchor. Args: module_to_name: Dictionary mapping modules to short names. fullname: Fully qualified name of symbol. Returns: HTML anchor string. The longest module name prefix of fullname is removed to make the anchor. Raises: ValueError: If fullname uses characters invalid in an anchor. ] if <ast.UnaryOp object at 0x7da18eb55b10> begin[:] <ast.Raise object at 0x7da18eb54670> variable[anchor] assign[=] name[fullname] for taget[name[module_name]] in starred[call[name[module_to_name].values, parameter[]]] begin[:] if call[name[fullname].startswith, parameter[binary_operation[name[module_name] + constant[.]]]] begin[:] variable[rest] assign[=] call[name[fullname]][<ast.Slice object at 0x7da2044c1780>] if compare[call[name[len], parameter[name[anchor]]] greater[>] call[name[len], parameter[name[rest]]]] begin[:] variable[anchor] assign[=] name[rest] return[name[anchor]]
keyword[def] identifier[_get_anchor] ( identifier[module_to_name] , identifier[fullname] ): literal[string] keyword[if] keyword[not] identifier[_anchor_re] . identifier[match] ( identifier[fullname] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[fullname] ) identifier[anchor] = identifier[fullname] keyword[for] identifier[module_name] keyword[in] identifier[module_to_name] . identifier[values] (): keyword[if] identifier[fullname] . identifier[startswith] ( identifier[module_name] + literal[string] ): identifier[rest] = identifier[fullname] [ identifier[len] ( identifier[module_name] )+ literal[int] :] keyword[if] identifier[len] ( identifier[anchor] )> identifier[len] ( identifier[rest] ): identifier[anchor] = identifier[rest] keyword[return] identifier[anchor]
def _get_anchor(module_to_name, fullname): """Turn a full member name into an anchor. Args: module_to_name: Dictionary mapping modules to short names. fullname: Fully qualified name of symbol. Returns: HTML anchor string. The longest module name prefix of fullname is removed to make the anchor. Raises: ValueError: If fullname uses characters invalid in an anchor. """ if not _anchor_re.match(fullname): raise ValueError("'%s' is not a valid anchor" % fullname) # depends on [control=['if'], data=[]] anchor = fullname for module_name in module_to_name.values(): if fullname.startswith(module_name + '.'): rest = fullname[len(module_name) + 1:] # Use this prefix iff it is longer than any found before if len(anchor) > len(rest): anchor = rest # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['module_name']] return anchor
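A worked example for _get_anchor. The module's actual _anchor_re is not shown here, so the pattern below is an assumption (dotted identifiers only), as are the placeholder module keys:

import re

_anchor_re = re.compile(r'^[\w.]+$')  # assumed pattern, not the real one

module_to_name = {'<module tf>': 'tf', '<module tf.nn>': 'tf.nn'}
# The longest matching module prefix ('tf.nn.') is stripped.
print(_get_anchor(module_to_name, 'tf.nn.relu'))  # -> 'relu'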
def p_moduleIdentityClause(self, p): """moduleIdentityClause : LOWERCASE_IDENTIFIER MODULE_IDENTITY SubjectCategoriesPart LAST_UPDATED ExtUTCTime ORGANIZATION Text CONTACT_INFO Text DESCRIPTION Text RevisionPart COLON_COLON_EQUAL '{' objectIdentifier '}'""" p[0] = ('moduleIdentityClause', p[1], # id # p[2], # MODULE_IDENTITY # XXX p[3], # SubjectCategoriesPart (p[4], p[5]), # last updated (p[6], p[7]), # organization (p[8], p[9]), # contact info (p[10], p[11]), # description p[12], # RevisionPart p[15])
def function[p_moduleIdentityClause, parameter[self, p]]: constant[moduleIdentityClause : LOWERCASE_IDENTIFIER MODULE_IDENTITY SubjectCategoriesPart LAST_UPDATED ExtUTCTime ORGANIZATION Text CONTACT_INFO Text DESCRIPTION Text RevisionPart COLON_COLON_EQUAL '{' objectIdentifier '}'] call[name[p]][constant[0]] assign[=] tuple[[<ast.Constant object at 0x7da1b016f3a0>, <ast.Subscript object at 0x7da1b016d570>, <ast.Tuple object at 0x7da1b016ec20>, <ast.Tuple object at 0x7da1b016cf40>, <ast.Tuple object at 0x7da1b010bd30>, <ast.Tuple object at 0x7da1b010bbe0>, <ast.Subscript object at 0x7da1b010ba90>, <ast.Subscript object at 0x7da1b010ba00>]]
keyword[def] identifier[p_moduleIdentityClause] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]=( literal[string] , identifier[p] [ literal[int] ], ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ]), identifier[p] [ literal[int] ], identifier[p] [ literal[int] ])
def p_moduleIdentityClause(self, p): """moduleIdentityClause : LOWERCASE_IDENTIFIER MODULE_IDENTITY SubjectCategoriesPart LAST_UPDATED ExtUTCTime ORGANIZATION Text CONTACT_INFO Text DESCRIPTION Text RevisionPart COLON_COLON_EQUAL '{' objectIdentifier '}'""" # id # p[2], # MODULE_IDENTITY # XXX p[3], # SubjectCategoriesPart # last updated # organization # contact info # description # RevisionPart p[0] = ('moduleIdentityClause', p[1], (p[4], p[5]), (p[6], p[7]), (p[8], p[9]), (p[10], p[11]), p[12], p[15])
def cart_to_polar(arr_c): """Return cartesian vectors in their polar representation. Parameters ---------- arr_c: array, shape (a1, a2, ..., d) Cartesian vectors, with last axis indexing the dimension. Returns ------- arr_p: array, shape of arr_c Polar vectors, using (radius, inclination, azimuth) convention. """ if arr_c.shape[-1] == 1: arr_p = arr_c.copy() elif arr_c.shape[-1] == 2: arr_p = np.empty_like(arr_c) arr_p[..., 0] = vector_mag(arr_c) arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0]) elif arr_c.shape[-1] == 3: arr_p = np.empty_like(arr_c) arr_p[..., 0] = vector_mag(arr_c) arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0]) arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0]) else: raise Exception('Invalid vector for polar representation') return arr_p
def function[cart_to_polar, parameter[arr_c]]: constant[Return cartesian vectors in their polar representation. Parameters ---------- arr_c: array, shape (a1, a2, ..., d) Cartesian vectors, with last axis indexing the dimension. Returns ------- arr_p: array, shape of arr_c Polar vectors, using (radius, inclination, azimuth) convention. ] if compare[call[name[arr_c].shape][<ast.UnaryOp object at 0x7da1b1500f10>] equal[==] constant[1]] begin[:] variable[arr_p] assign[=] call[name[arr_c].copy, parameter[]] return[name[arr_p]]
keyword[def] identifier[cart_to_polar] ( identifier[arr_c] ): literal[string] keyword[if] identifier[arr_c] . identifier[shape] [- literal[int] ]== literal[int] : identifier[arr_p] = identifier[arr_c] . identifier[copy] () keyword[elif] identifier[arr_c] . identifier[shape] [- literal[int] ]== literal[int] : identifier[arr_p] = identifier[np] . identifier[empty_like] ( identifier[arr_c] ) identifier[arr_p] [..., literal[int] ]= identifier[vector_mag] ( identifier[arr_c] ) identifier[arr_p] [..., literal[int] ]= identifier[np] . identifier[arctan2] ( identifier[arr_c] [..., literal[int] ], identifier[arr_c] [..., literal[int] ]) keyword[elif] identifier[arr_c] . identifier[shape] [- literal[int] ]== literal[int] : identifier[arr_p] = identifier[np] . identifier[empty_like] ( identifier[arr_c] ) identifier[arr_p] [..., literal[int] ]= identifier[vector_mag] ( identifier[arr_c] ) identifier[arr_p] [..., literal[int] ]= identifier[np] . identifier[arccos] ( identifier[arr_c] [..., literal[int] ]/ identifier[arr_p] [..., literal[int] ]) identifier[arr_p] [..., literal[int] ]= identifier[np] . identifier[arctan2] ( identifier[arr_c] [..., literal[int] ], identifier[arr_c] [..., literal[int] ]) keyword[else] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[return] identifier[arr_p]
def cart_to_polar(arr_c): """Return cartesian vectors in their polar representation. Parameters ---------- arr_c: array, shape (a1, a2, ..., d) Cartesian vectors, with last axis indexing the dimension. Returns ------- arr_p: array, shape of arr_c Polar vectors, using (radius, inclination, azimuth) convention. """ if arr_c.shape[-1] == 1: arr_p = arr_c.copy() # depends on [control=['if'], data=[]] elif arr_c.shape[-1] == 2: arr_p = np.empty_like(arr_c) arr_p[..., 0] = vector_mag(arr_c) arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0]) # depends on [control=['if'], data=[]] elif arr_c.shape[-1] == 3: arr_p = np.empty_like(arr_c) arr_p[..., 0] = vector_mag(arr_c) arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0]) arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0]) # depends on [control=['if'], data=[]] else: raise Exception('Invalid vector for polar representation') return arr_p
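A quick numeric check for cart_to_polar. The vector_mag helper is not shown in this row, so a plausible Euclidean-norm implementation is assumed:

import numpy as np

def vector_mag(a):
    # Assumed helper: Euclidean norm along the last axis.
    return np.sqrt(np.square(a).sum(axis=-1))

v = np.array([[1.0, 1.0]])   # one 2-D vector
r, phi = cart_to_polar(v)[0]
print(r, phi)                # -> 1.4142... (sqrt(2)), 0.7853... (pi/4)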
def _validate(self, qobj):
    """Semantic validations of the qobj which cannot be done via schemas.
    Some of these may later move to backend schemas.

    1. No shots
    2. No measurements in the middle
    """
    n_qubits = qobj.config.n_qubits
    max_qubits = self.configuration().n_qubits
    if n_qubits > max_qubits:
        raise BasicAerError('Number of qubits {} '.format(n_qubits) +
                            'is greater than maximum ({}) '.format(max_qubits) +
                            'for "{}".'.format(self.name()))
    if hasattr(qobj.config, 'shots') and qobj.config.shots != 1:
        logger.info('"%s" only supports 1 shot. Setting shots=1.',
                    self.name())
        qobj.config.shots = 1
    for experiment in qobj.experiments:
        name = experiment.header.name
        if getattr(experiment.config, 'shots', 1) != 1:
            logger.info('"%s" only supports 1 shot. '
                        'Setting shots=1 for circuit "%s".',
                        self.name(), name)
            experiment.config.shots = 1
        for operation in experiment.instructions:
            if operation.name in ['measure', 'reset']:
                raise BasicAerError(
                    'Unsupported "%s" instruction "%s" in circuit "%s"'
                    % (self.name(), operation.name, name))
def function[_validate, parameter[self, qobj]]: constant[Semantic validations of the qobj which cannot be done via schemas. Some of these may later move to backend schemas. 1. No shots 2. No measurements in the middle ] variable[n_qubits] assign[=] name[qobj].config.n_qubits variable[max_qubits] assign[=] call[name[self].configuration, parameter[]].n_qubits if compare[name[n_qubits] greater[>] name[max_qubits]] begin[:] <ast.Raise object at 0x7da20c993280> if <ast.BoolOp object at 0x7da20c991360> begin[:] call[name[logger].info, parameter[constant["%s" only supports 1 shot. Setting shots=1.], call[name[self].name, parameter[]]]] name[qobj].config.shots assign[=] constant[1] for taget[name[experiment]] in starred[name[qobj].experiments] begin[:] variable[name] assign[=] name[experiment].header.name if compare[call[name[getattr], parameter[name[experiment].config, constant[shots], constant[1]]] not_equal[!=] constant[1]] begin[:] call[name[logger].info, parameter[constant["%s" only supports 1 shot. Setting shots=1 for circuit "%s".], call[name[self].name, parameter[]], name[name]]] name[experiment].config.shots assign[=] constant[1] for taget[name[operation]] in starred[name[experiment].instructions] begin[:] if compare[name[operation].name in list[[<ast.Constant object at 0x7da20c6e7e50>, <ast.Constant object at 0x7da20c6e57b0>]]] begin[:] <ast.Raise object at 0x7da20c6e4bb0>
keyword[def] identifier[_validate] ( identifier[self] , identifier[qobj] ): literal[string] identifier[n_qubits] = identifier[qobj] . identifier[config] . identifier[n_qubits] identifier[max_qubits] = identifier[self] . identifier[configuration] (). identifier[n_qubits] keyword[if] identifier[n_qubits] > identifier[max_qubits] : keyword[raise] identifier[BasicAerError] ( literal[string] . identifier[format] ( identifier[n_qubits] )+ literal[string] . identifier[format] ( identifier[max_qubits] )+ literal[string] . identifier[format] ( identifier[self] . identifier[name] ())) keyword[if] identifier[hasattr] ( identifier[qobj] . identifier[config] , literal[string] ) keyword[and] identifier[qobj] . identifier[config] . identifier[shots] != literal[int] : identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[name] ()) identifier[qobj] . identifier[config] . identifier[shots] = literal[int] keyword[for] identifier[experiment] keyword[in] identifier[qobj] . identifier[experiments] : identifier[name] = identifier[experiment] . identifier[header] . identifier[name] keyword[if] identifier[getattr] ( identifier[experiment] . identifier[config] , literal[string] , literal[int] )!= literal[int] : identifier[logger] . identifier[info] ( literal[string] literal[string] , identifier[self] . identifier[name] (), identifier[name] ) identifier[experiment] . identifier[config] . identifier[shots] = literal[int] keyword[for] identifier[operation] keyword[in] identifier[experiment] . identifier[instructions] : keyword[if] identifier[operation] . identifier[name] keyword[in] [ literal[string] , literal[string] ]: keyword[raise] identifier[BasicAerError] ( literal[string] + literal[string] , identifier[self] . identifier[name] (), identifier[operation] . identifier[name] , identifier[name] )
def _validate(self, qobj): """Semantic validations of the qobj which cannot be done via schemas. Some of these may later move to backend schemas. 1. No shots 2. No measurements in the middle """ n_qubits = qobj.config.n_qubits max_qubits = self.configuration().n_qubits if n_qubits > max_qubits: raise BasicAerError('Number of qubits {} '.format(n_qubits) + 'is greater than maximum ({}) '.format(max_qubits) + 'for "{}".'.format(self.name())) # depends on [control=['if'], data=['n_qubits', 'max_qubits']] if hasattr(qobj.config, 'shots') and qobj.config.shots != 1: logger.info('"%s" only supports 1 shot. Setting shots=1.', self.name()) qobj.config.shots = 1 # depends on [control=['if'], data=[]] for experiment in qobj.experiments: name = experiment.header.name if getattr(experiment.config, 'shots', 1) != 1: logger.info('"%s" only supports 1 shot. Setting shots=1 for circuit "%s".', self.name(), name) experiment.config.shots = 1 # depends on [control=['if'], data=[]] for operation in experiment.instructions: if operation.name in ['measure', 'reset']: raise BasicAerError('Unsupported "%s" instruction "%s" ' + 'in circuit "%s" ', self.name(), operation.name, name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['operation']] # depends on [control=['for'], data=['experiment']]
def dfsmooth (window, df, ucol, k=None): """Smooth a :class:`pandas.DataFrame` according to a window, weighting based on uncertainties. Arguments are: window The smoothing window. df The :class:`pandas.DataFrame`. ucol The name of the column in *df* that contains the uncertainties to weight by. k = None If specified, only every *k*-th point of the results will be kept. If k is None (the default), it is set to ``window.size``, i.e. correlated points will be discarded. Returns: a smoothed data frame. The returned data frame has a default integer index. Example:: sdata = numutil.dfsmooth (np.hamming (7), data, 'u_temp') """ import pandas as pd if k is None: k = window.size conv = lambda q, r: np.convolve (q, r, mode='valid') w = df[ucol] ** -2 invcw = 1. / conv (w, window) # XXX: we're not smoothing the index. res = {} for col in df.columns: if col == ucol: res[col] = np.sqrt (conv (w, window**2)) * invcw else: res[col] = conv (w * df[col], window) * invcw res = pd.DataFrame (res) return res[::k]
def function[dfsmooth, parameter[window, df, ucol, k]]: constant[Smooth a :class:`pandas.DataFrame` according to a window, weighting based on uncertainties. Arguments are: window The smoothing window. df The :class:`pandas.DataFrame`. ucol The name of the column in *df* that contains the uncertainties to weight by. k = None If specified, only every *k*-th point of the results will be kept. If k is None (the default), it is set to ``window.size``, i.e. correlated points will be discarded. Returns: a smoothed data frame. The returned data frame has a default integer index. Example:: sdata = numutil.dfsmooth (np.hamming (7), data, 'u_temp') ] import module[pandas] as alias[pd] if compare[name[k] is constant[None]] begin[:] variable[k] assign[=] name[window].size variable[conv] assign[=] <ast.Lambda object at 0x7da18c4ce710> variable[w] assign[=] binary_operation[call[name[df]][name[ucol]] ** <ast.UnaryOp object at 0x7da18c4cf250>] variable[invcw] assign[=] binary_operation[constant[1.0] / call[name[conv], parameter[name[w], name[window]]]] variable[res] assign[=] dictionary[[], []] for taget[name[col]] in starred[name[df].columns] begin[:] if compare[name[col] equal[==] name[ucol]] begin[:] call[name[res]][name[col]] assign[=] binary_operation[call[name[np].sqrt, parameter[call[name[conv], parameter[name[w], binary_operation[name[window] ** constant[2]]]]]] * name[invcw]] variable[res] assign[=] call[name[pd].DataFrame, parameter[name[res]]] return[call[name[res]][<ast.Slice object at 0x7da1b27a7700>]]
keyword[def] identifier[dfsmooth] ( identifier[window] , identifier[df] , identifier[ucol] , identifier[k] = keyword[None] ): literal[string] keyword[import] identifier[pandas] keyword[as] identifier[pd] keyword[if] identifier[k] keyword[is] keyword[None] : identifier[k] = identifier[window] . identifier[size] identifier[conv] = keyword[lambda] identifier[q] , identifier[r] : identifier[np] . identifier[convolve] ( identifier[q] , identifier[r] , identifier[mode] = literal[string] ) identifier[w] = identifier[df] [ identifier[ucol] ]**- literal[int] identifier[invcw] = literal[int] / identifier[conv] ( identifier[w] , identifier[window] ) identifier[res] ={} keyword[for] identifier[col] keyword[in] identifier[df] . identifier[columns] : keyword[if] identifier[col] == identifier[ucol] : identifier[res] [ identifier[col] ]= identifier[np] . identifier[sqrt] ( identifier[conv] ( identifier[w] , identifier[window] ** literal[int] ))* identifier[invcw] keyword[else] : identifier[res] [ identifier[col] ]= identifier[conv] ( identifier[w] * identifier[df] [ identifier[col] ], identifier[window] )* identifier[invcw] identifier[res] = identifier[pd] . identifier[DataFrame] ( identifier[res] ) keyword[return] identifier[res] [:: identifier[k] ]
def dfsmooth(window, df, ucol, k=None): """Smooth a :class:`pandas.DataFrame` according to a window, weighting based on uncertainties. Arguments are: window The smoothing window. df The :class:`pandas.DataFrame`. ucol The name of the column in *df* that contains the uncertainties to weight by. k = None If specified, only every *k*-th point of the results will be kept. If k is None (the default), it is set to ``window.size``, i.e. correlated points will be discarded. Returns: a smoothed data frame. The returned data frame has a default integer index. Example:: sdata = numutil.dfsmooth (np.hamming (7), data, 'u_temp') """ import pandas as pd if k is None: k = window.size # depends on [control=['if'], data=['k']] conv = lambda q, r: np.convolve(q, r, mode='valid') w = df[ucol] ** (-2) invcw = 1.0 / conv(w, window) # XXX: we're not smoothing the index. res = {} for col in df.columns: if col == ucol: res[col] = np.sqrt(conv(w, window ** 2)) * invcw # depends on [control=['if'], data=['col']] else: res[col] = conv(w * df[col], window) * invcw # depends on [control=['for'], data=['col']] res = pd.DataFrame(res) return res[::k]
def add_role_to_user(self, user, role): """ Adds a role to user """ user.add_role(role) self.save(user) events.user_got_role_event.send(user, role=role)
def function[add_role_to_user, parameter[self, user, role]]: constant[ Adds a role to user ] call[name[user].add_role, parameter[name[role]]] call[name[self].save, parameter[name[user]]] call[name[events].user_got_role_event.send, parameter[name[user]]]
keyword[def] identifier[add_role_to_user] ( identifier[self] , identifier[user] , identifier[role] ): literal[string] identifier[user] . identifier[add_role] ( identifier[role] ) identifier[self] . identifier[save] ( identifier[user] ) identifier[events] . identifier[user_got_role_event] . identifier[send] ( identifier[user] , identifier[role] = identifier[role] )
def add_role_to_user(self, user, role): """ Adds a role to user """ user.add_role(role) self.save(user) events.user_got_role_event.send(user, role=role)
def batch_encode(self, iterator, *args, dim=0, **kwargs): """ Args: iterator (iterator): Batch of labels to encode. *args: Arguments passed to ``Encoder.batch_encode``. dim (int, optional): Dimension along which to concatenate tensors. **kwargs: Keyword arguments passed to ``Encoder.batch_encode``. Returns: torch.Tensor: Tensor of encoded labels. """ return torch.stack(super().batch_encode(iterator, *args, **kwargs), dim=dim)
def function[batch_encode, parameter[self, iterator]]: constant[ Args: iterator (iterator): Batch of labels to encode. *args: Arguments passed to ``Encoder.batch_encode``. dim (int, optional): Dimension along which to concatenate tensors. **kwargs: Keyword arguments passed to ``Encoder.batch_encode``. Returns: torch.Tensor: Tensor of encoded labels. ] return[call[name[torch].stack, parameter[call[call[name[super], parameter[]].batch_encode, parameter[name[iterator], <ast.Starred object at 0x7da18dc04130>]]]]]
keyword[def] identifier[batch_encode] ( identifier[self] , identifier[iterator] ,* identifier[args] , identifier[dim] = literal[int] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[torch] . identifier[stack] ( identifier[super] (). identifier[batch_encode] ( identifier[iterator] ,* identifier[args] ,** identifier[kwargs] ), identifier[dim] = identifier[dim] )
def batch_encode(self, iterator, *args, dim=0, **kwargs): """ Args: iterator (iterator): Batch of labels to encode. *args: Arguments passed to ``Encoder.batch_encode``. dim (int, optional): Dimension along which to concatenate tensors. **kwargs: Keyword arguments passed to ``Encoder.batch_encode``. Returns: torch.Tensor: Tensor of encoded labels. """ return torch.stack(super().batch_encode(iterator, *args, **kwargs), dim=dim)
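The only behavioral change batch_encode adds over the parent encoder is the stacking step; illustratively:

import torch

encoded = [torch.tensor(1), torch.tensor(0), torch.tensor(2)]  # per-label codes
batch = torch.stack(encoded, dim=0)
print(batch)  # tensor([1, 0, 2])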
def _pyfftw_destroys_input(flags, direction, halfcomplex, ndim): """Return ``True`` if FFTW destroys an input array, ``False`` otherwise.""" if any(flag in flags or _pyfftw_to_local(flag) in flags for flag in ('FFTW_MEASURE', 'FFTW_PATIENT', 'FFTW_EXHAUSTIVE', 'FFTW_DESTROY_INPUT')): return True elif (direction in ('backward', 'FFTW_BACKWARD') and halfcomplex and ndim != 1): return True else: return False
def function[_pyfftw_destroys_input, parameter[flags, direction, halfcomplex, ndim]]: constant[Return ``True`` if FFTW destroys an input array, ``False`` otherwise.] if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b1e53be0>]] begin[:] return[constant[True]]
keyword[def] identifier[_pyfftw_destroys_input] ( identifier[flags] , identifier[direction] , identifier[halfcomplex] , identifier[ndim] ): literal[string] keyword[if] identifier[any] ( identifier[flag] keyword[in] identifier[flags] keyword[or] identifier[_pyfftw_to_local] ( identifier[flag] ) keyword[in] identifier[flags] keyword[for] identifier[flag] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] )): keyword[return] keyword[True] keyword[elif] ( identifier[direction] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[halfcomplex] keyword[and] identifier[ndim] != literal[int] ): keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False]
def _pyfftw_destroys_input(flags, direction, halfcomplex, ndim): """Return ``True`` if FFTW destroys an input array, ``False`` otherwise.""" if any((flag in flags or _pyfftw_to_local(flag) in flags for flag in ('FFTW_MEASURE', 'FFTW_PATIENT', 'FFTW_EXHAUSTIVE', 'FFTW_DESTROY_INPUT'))): return True # depends on [control=['if'], data=[]] elif direction in ('backward', 'FFTW_BACKWARD') and halfcomplex and (ndim != 1): return True # depends on [control=['if'], data=[]] else: return False
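A few illustrative calls to _pyfftw_destroys_input. The _pyfftw_to_local helper is not shown in this row, so a plausible stub is assumed:

def _pyfftw_to_local(flag):
    # Assumed helper: 'FFTW_MEASURE' -> 'measure'.
    return flag.replace('FFTW_', '').lower()

# A destructive planner flag triggers the first branch; a multi-dimensional
# halfcomplex backward transform triggers the second.
print(_pyfftw_destroys_input(('FFTW_MEASURE',), 'forward', False, 2))   # True
print(_pyfftw_destroys_input(('FFTW_ESTIMATE',), 'backward', True, 2))  # True
print(_pyfftw_destroys_input(('FFTW_ESTIMATE',), 'forward', False, 1))  # False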
def prefix(self, prefix, lowercase=True): ''' Returns a dictionary of keys with the same prefix. Compat with kr/env, lowercased. > xdg = env.prefix('XDG_') > for key, value in xdg.items(): print('%-20s' % key, value[:6], '…') config_dirs /etc/x… current_desktop MATE data_dirs /usr/s… … ''' env_subset = {} for key in self._envars.keys(): if key.startswith(prefix): new_key = key[len(prefix):] # cut front new_key = new_key.lower() if lowercase else new_key env_subset[new_key] = str(self._envars[key]) # str strips Entry return Environment(environ=env_subset, sensitive=self._sensitive, blankify=self._blankify, noneify=self._noneify, writable=self._writable, )
def function[prefix, parameter[self, prefix, lowercase]]: constant[ Returns a dictionary of keys with the same prefix. Compat with kr/env, lowercased. > xdg = env.prefix('XDG_') > for key, value in xdg.items(): print('%-20s' % key, value[:6], '…') config_dirs /etc/x… current_desktop MATE data_dirs /usr/s… … ] variable[env_subset] assign[=] dictionary[[], []] for taget[name[key]] in starred[call[name[self]._envars.keys, parameter[]]] begin[:] if call[name[key].startswith, parameter[name[prefix]]] begin[:] variable[new_key] assign[=] call[name[key]][<ast.Slice object at 0x7da1b13057e0>] variable[new_key] assign[=] <ast.IfExp object at 0x7da1b1304c40> call[name[env_subset]][name[new_key]] assign[=] call[name[str], parameter[call[name[self]._envars][name[key]]]] return[call[name[Environment], parameter[]]]
keyword[def] identifier[prefix] ( identifier[self] , identifier[prefix] , identifier[lowercase] = keyword[True] ): literal[string] identifier[env_subset] ={} keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_envars] . identifier[keys] (): keyword[if] identifier[key] . identifier[startswith] ( identifier[prefix] ): identifier[new_key] = identifier[key] [ identifier[len] ( identifier[prefix] ):] identifier[new_key] = identifier[new_key] . identifier[lower] () keyword[if] identifier[lowercase] keyword[else] identifier[new_key] identifier[env_subset] [ identifier[new_key] ]= identifier[str] ( identifier[self] . identifier[_envars] [ identifier[key] ]) keyword[return] identifier[Environment] ( identifier[environ] = identifier[env_subset] , identifier[sensitive] = identifier[self] . identifier[_sensitive] , identifier[blankify] = identifier[self] . identifier[_blankify] , identifier[noneify] = identifier[self] . identifier[_noneify] , identifier[writable] = identifier[self] . identifier[_writable] , )
def prefix(self, prefix, lowercase=True): """ Returns a dictionary of keys with the same prefix. Compat with kr/env, lowercased. > xdg = env.prefix('XDG_') > for key, value in xdg.items(): print('%-20s' % key, value[:6], '…') config_dirs /etc/x… current_desktop MATE data_dirs /usr/s… … """ env_subset = {} for key in self._envars.keys(): if key.startswith(prefix): new_key = key[len(prefix):] # cut front new_key = new_key.lower() if lowercase else new_key env_subset[new_key] = str(self._envars[key]) # str strips Entry # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] return Environment(environ=env_subset, sensitive=self._sensitive, blankify=self._blankify, noneify=self._noneify, writable=self._writable)
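The same filtering can be sketched over a plain dict; the Environment wrapper and its sensitive/blankify/noneify/writable flags are left out of this sketch.

environ = {'XDG_DATA_DIRS': '/usr/share', 'XDG_CURRENT_DESKTOP': 'MATE', 'HOME': '/root'}
prefix = 'XDG_'
# Cut the prefix off the front and lowercase the remainder, as the method does.
subset = {key[len(prefix):].lower(): value
          for key, value in environ.items() if key.startswith(prefix)}
print(subset)  # {'data_dirs': '/usr/share', 'current_desktop': 'MATE'}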
def create_hlammagplot(plotman, h, ratio, alpha, options):
    '''Plot the data of the tomodir in one overview plot.
    '''
    sizex, sizez = getfigsize(plotman)
    # create figure
    f, ax = plt.subplots(1, 3, figsize=(3 * sizex, sizez))
    if options.title is not None:
        plt.suptitle(options.title, fontsize=18)
    plt.subplots_adjust(wspace=1, top=0.8)
    # plot magnitude
    if options.cmaglin:
        cidh = plotman.parman.add_data(np.power(10, h))
        cidv = plotman.parman.add_data(
            np.divide(np.power(10, h), np.power(10, ratio)))
        loglin = 'rho'
    else:
        cidh = plotman.parman.add_data(h)
        cidv = plotman.parman.add_data(
            np.log10(np.divide(np.power(10, h), np.power(10, ratio))))
        loglin = 'log_rho'
    cidr = plotman.parman.add_data(np.power(10, ratio))
    plot_mag(cidh, ax[0], plotman, 'horizontal', loglin, alpha,
             options.mag_vmin, options.mag_vmax,
             options.xmin, options.xmax,
             options.zmin, options.zmax,
             options.unit, options.mag_cbtiks,
             options.no_elecs,
             )
    plot_mag(cidv, ax[1], plotman, 'vertical', loglin, alpha,
             options.mag_vmin, options.mag_vmax,
             options.xmin, options.xmax,
             options.zmin, options.zmax,
             options.unit, options.mag_cbtiks,
             options.no_elecs,
             )
    plot_ratio(cidr, ax[2], plotman, 'hor/ver', alpha,
               options.rat_vmin, options.rat_vmax,
               options.xmin, options.xmax,
               options.zmin, options.zmax,
               options.unit, options.mag_cbtiks,
               options.no_elecs,
               )
    f.tight_layout()
    f.savefig('mag_hlam.png', dpi=300)
    return f, ax
def function[create_hlammagplot, parameter[plotman, h, ratio, alpha, options]]: constant[Plot the data of the tomodir in one overview plot. ] <ast.Tuple object at 0x7da1b2248f40> assign[=] call[name[getfigsize], parameter[name[plotman]]] <ast.Tuple object at 0x7da1b224ac20> assign[=] call[name[plt].subplots, parameter[constant[1], constant[3]]] if compare[name[options].title is_not constant[None]] begin[:] call[name[plt].suptitle, parameter[name[options].title]] call[name[plt].subplots_adjust, parameter[]] if name[options].cmaglin begin[:] variable[cidh] assign[=] call[name[plotman].parman.add_data, parameter[call[name[np].power, parameter[constant[10], name[h]]]]] variable[cidv] assign[=] call[name[plotman].parman.add_data, parameter[call[name[np].divide, parameter[call[name[np].power, parameter[constant[10], name[h]]], call[name[np].power, parameter[constant[10], name[ratio]]]]]]] variable[loglin] assign[=] constant[rho] variable[cidr] assign[=] call[name[plotman].parman.add_data, parameter[call[name[np].power, parameter[constant[10], name[ratio]]]]] call[name[plot_mag], parameter[name[cidh], call[name[ax]][constant[0]], name[plotman], constant[horizontal], name[loglin], name[alpha], name[options].mag_vmin, name[options].mag_vmax, name[options].xmin, name[options].xmax, name[options].zmin, name[options].zmax, name[options].unit, name[options].mag_cbtiks, name[options].no_elecs]] call[name[plot_mag], parameter[name[cidv], call[name[ax]][constant[1]], name[plotman], constant[vertical], name[loglin], name[alpha], name[options].mag_vmin, name[options].mag_vmax, name[options].xmin, name[options].xmax, name[options].zmin, name[options].zmax, name[options].unit, name[options].mag_cbtiks, name[options].no_elecs]] call[name[plot_ratio], parameter[name[cidr], call[name[ax]][constant[2]], name[plotman], constant[hor/ver], name[alpha], name[options].rat_vmin, name[options].rat_vmax, name[options].xmin, name[options].xmax, name[options].zmin, name[options].zmax, name[options].unit, name[options].mag_cbtiks, name[options].no_elecs]] call[name[f].tight_layout, parameter[]] call[name[f].savefig, parameter[constant[mag_hlam.png]]] return[tuple[[<ast.Name object at 0x7da1b222ce50>, <ast.Name object at 0x7da1b222ff10>]]]
keyword[def] identifier[create_hlammagplot] ( identifier[plotman] , identifier[h] , identifier[ratio] , identifier[alpha] , identifier[options] ): literal[string] identifier[sizex] , identifier[sizez] = identifier[getfigsize] ( identifier[plotman] ) identifier[f] , identifier[ax] = identifier[plt] . identifier[subplots] ( literal[int] , literal[int] , identifier[figsize] =( literal[int] * identifier[sizex] , identifier[sizez] )) keyword[if] identifier[options] . identifier[title] keyword[is] keyword[not] keyword[None] : identifier[plt] . identifier[suptitle] ( identifier[options] . identifier[title] , identifier[fontsize] = literal[int] ) identifier[plt] . identifier[subplots_adjust] ( identifier[wspace] = literal[int] , identifier[top] = literal[int] ) keyword[if] identifier[options] . identifier[cmaglin] : identifier[cidh] = identifier[plotman] . identifier[parman] . identifier[add_data] ( identifier[np] . identifier[power] ( literal[int] , identifier[h] )) identifier[cidv] = identifier[plotman] . identifier[parman] . identifier[add_data] ( identifier[np] . identifier[divide] ( identifier[np] . identifier[power] ( literal[int] , identifier[h] ), identifier[np] . identifier[power] ( literal[int] , identifier[ratio] ))) identifier[loglin] = literal[string] keyword[else] : identifier[cidh] = identifier[plotman] . identifier[parman] . identifier[add_data] ( identifier[h] ) identifier[cidv] = identifier[plotman] . identifier[parman] . identifier[add_data] ( identifier[np] . identifier[log10] ( identifier[np] . identifier[divide] ( identifier[np] . identifier[power] ( literal[int] , identifier[h] ), identifier[np] . identifier[power] ( literal[int] , identifier[ratio] )))) identifier[loglin] = literal[string] identifier[cidr] = identifier[plotman] . identifier[parman] . identifier[add_data] ( identifier[np] . identifier[power] ( literal[int] , identifier[ratio] )) identifier[plot_mag] ( identifier[cidh] , identifier[ax] [ literal[int] ], identifier[plotman] , literal[string] , identifier[loglin] , identifier[alpha] , identifier[options] . identifier[mag_vmin] , identifier[options] . identifier[mag_vmax] , identifier[options] . identifier[xmin] , identifier[options] . identifier[xmax] , identifier[options] . identifier[zmin] , identifier[options] . identifier[zmax] , identifier[options] . identifier[unit] , identifier[options] . identifier[mag_cbtiks] , identifier[options] . identifier[no_elecs] , ) identifier[plot_mag] ( identifier[cidv] , identifier[ax] [ literal[int] ], identifier[plotman] , literal[string] , identifier[loglin] , identifier[alpha] , identifier[options] . identifier[mag_vmin] , identifier[options] . identifier[mag_vmax] , identifier[options] . identifier[xmin] , identifier[options] . identifier[xmax] , identifier[options] . identifier[zmin] , identifier[options] . identifier[zmax] , identifier[options] . identifier[unit] , identifier[options] . identifier[mag_cbtiks] , identifier[options] . identifier[no_elecs] , ) identifier[plot_ratio] ( identifier[cidr] , identifier[ax] [ literal[int] ], identifier[plotman] , literal[string] , identifier[alpha] , identifier[options] . identifier[rat_vmin] , identifier[options] . identifier[rat_vmax] , identifier[options] . identifier[xmin] , identifier[options] . identifier[xmax] , identifier[options] . identifier[zmin] , identifier[options] . identifier[zmax] , identifier[options] . identifier[unit] , identifier[options] . identifier[mag_cbtiks] , identifier[options] . identifier[no_elecs] , ) identifier[f] . 
identifier[tight_layout] () identifier[f] . identifier[savefig] ( literal[string] , identifier[dpi] = literal[int] ) keyword[return] identifier[f] , identifier[ax]
def create_hlammagplot(plotman, h, ratio, alpha, options): """Plot the data of the tomodir in one overview plot. """ (sizex, sizez) = getfigsize(plotman) # create figure (f, ax) = plt.subplots(1, 3, figsize=(3 * sizex, sizez)) if options.title is not None: plt.suptitle(options.title, fontsize=18) plt.subplots_adjust(wspace=1, top=0.8) # depends on [control=['if'], data=[]] # plot magnitue if options.cmaglin: cidh = plotman.parman.add_data(np.power(10, h)) cidv = plotman.parman.add_data(np.divide(np.power(10, h), np.power(10, ratio))) loglin = 'rho' # depends on [control=['if'], data=[]] else: cidh = plotman.parman.add_data(h) cidv = plotman.parman.add_data(np.log10(np.divide(np.power(10, h), np.power(10, ratio)))) loglin = 'log_rho' cidr = plotman.parman.add_data(np.power(10, ratio)) plot_mag(cidh, ax[0], plotman, 'horizontal', loglin, alpha, options.mag_vmin, options.mag_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs) plot_mag(cidv, ax[1], plotman, 'vertical', loglin, alpha, options.mag_vmin, options.mag_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs) plot_ratio(cidr, ax[2], plotman, 'hor/ver', alpha, options.rat_vmin, options.rat_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs) f.tight_layout() f.savefig('mag_hlam.png', dpi=300) return (f, ax)
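The magnitude arithmetic above is worth spelling out: h and ratio appear to hold log10 resistivities (an inference from the np.power(10, ...) calls), so the vertical magnitude is their linear quotient, or equivalently h - ratio in log space.

import numpy as np

h = np.array([1.0, 2.0])      # log10 horizontal resistivity (assumed)
ratio = np.array([0.5, 0.5])  # log10 horizontal/vertical ratio (assumed)
linear_vertical = np.divide(np.power(10, h), np.power(10, ratio))
log_vertical = np.log10(linear_vertical)
print(np.allclose(log_vertical, h - ratio))  # True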
def get_file_service_properties(self, timeout=None): ''' Gets the properties of a storage account's File service, including Azure Storage Analytics. :param int timeout: The timeout parameter is expressed in seconds. :return: The file service properties. :rtype: :class:`~azure.storage.common.models.ServiceProperties` ''' request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path() request.query = { 'restype': 'service', 'comp': 'properties', 'timeout': _int_to_str(timeout), } return self._perform_request(request, _convert_xml_to_service_properties)
def function[get_file_service_properties, parameter[self, timeout]]: constant[ Gets the properties of a storage account's File service, including Azure Storage Analytics. :param int timeout: The timeout parameter is expressed in seconds. :return: The file service properties. :rtype: :class:`~azure.storage.common.models.ServiceProperties` ] variable[request] assign[=] call[name[HTTPRequest], parameter[]] name[request].method assign[=] constant[GET] name[request].host_locations assign[=] call[name[self]._get_host_locations, parameter[]] name[request].path assign[=] call[name[_get_path], parameter[]] name[request].query assign[=] dictionary[[<ast.Constant object at 0x7da1b1d3bbb0>, <ast.Constant object at 0x7da1b1d3a200>, <ast.Constant object at 0x7da1b1d3b8e0>], [<ast.Constant object at 0x7da1b1d3b9d0>, <ast.Constant object at 0x7da1b1d391e0>, <ast.Call object at 0x7da1b1d391b0>]] return[call[name[self]._perform_request, parameter[name[request], name[_convert_xml_to_service_properties]]]]
keyword[def] identifier[get_file_service_properties] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] identifier[request] = identifier[HTTPRequest] () identifier[request] . identifier[method] = literal[string] identifier[request] . identifier[host_locations] = identifier[self] . identifier[_get_host_locations] () identifier[request] . identifier[path] = identifier[_get_path] () identifier[request] . identifier[query] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[_int_to_str] ( identifier[timeout] ), } keyword[return] identifier[self] . identifier[_perform_request] ( identifier[request] , identifier[_convert_xml_to_service_properties] )
def get_file_service_properties(self, timeout=None): """ Gets the properties of a storage account's File service, including Azure Storage Analytics. :param int timeout: The timeout parameter is expressed in seconds. :return: The file service properties. :rtype: :class:`~azure.storage.common.models.ServiceProperties` """ request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path() request.query = {'restype': 'service', 'comp': 'properties', 'timeout': _int_to_str(timeout)} return self._perform_request(request, _convert_xml_to_service_properties)
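The request it builds boils down to a GET with the query parameters below; _int_to_str is assumed to stringify the timeout and pass None through, so the sketch uses a hypothetical stand-in.

def int_to_str(value):
    # Hypothetical stand-in for the library's _int_to_str helper.
    return str(value) if value is not None else None

query = {
    'restype': 'service',
    'comp': 'properties',
    'timeout': int_to_str(30),
}
print(query)  # {'restype': 'service', 'comp': 'properties', 'timeout': '30'}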
def write(self, chunk): """WSGI callable to write unbuffered data to the client. This method is also used internally by start_response (to write data from the iterable returned by the WSGI application). """ if not self.started_response: raise AssertionError('WSGI write called before start_response.') chunklen = len(chunk) rbo = self.remaining_bytes_out if rbo is not None and chunklen > rbo: if not self.req.sent_headers: # Whew. We can send a 500 to the client. self.req.simple_response( '500 Internal Server Error', 'The requested resource returned more bytes than the ' 'declared Content-Length.', ) else: # Dang. We have probably already sent data. Truncate the chunk # to fit (so the client doesn't hang) and raise an error later. chunk = chunk[:rbo] self.req.ensure_headers_sent() self.req.write(chunk) if rbo is not None: rbo -= chunklen if rbo < 0: raise ValueError( 'Response body exceeds the declared Content-Length.', )
def function[write, parameter[self, chunk]]: constant[WSGI callable to write unbuffered data to the client. This method is also used internally by start_response (to write data from the iterable returned by the WSGI application). ] if <ast.UnaryOp object at 0x7da204623760> begin[:] <ast.Raise object at 0x7da204620340> variable[chunklen] assign[=] call[name[len], parameter[name[chunk]]] variable[rbo] assign[=] name[self].remaining_bytes_out if <ast.BoolOp object at 0x7da204622c80> begin[:] if <ast.UnaryOp object at 0x7da204620c70> begin[:] call[name[self].req.simple_response, parameter[constant[500 Internal Server Error], constant[The requested resource returned more bytes than the declared Content-Length.]]] call[name[self].req.ensure_headers_sent, parameter[]] call[name[self].req.write, parameter[name[chunk]]] if compare[name[rbo] is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da2046211b0> if compare[name[rbo] less[<] constant[0]] begin[:] <ast.Raise object at 0x7da2046214b0>
keyword[def] identifier[write] ( identifier[self] , identifier[chunk] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[started_response] : keyword[raise] identifier[AssertionError] ( literal[string] ) identifier[chunklen] = identifier[len] ( identifier[chunk] ) identifier[rbo] = identifier[self] . identifier[remaining_bytes_out] keyword[if] identifier[rbo] keyword[is] keyword[not] keyword[None] keyword[and] identifier[chunklen] > identifier[rbo] : keyword[if] keyword[not] identifier[self] . identifier[req] . identifier[sent_headers] : identifier[self] . identifier[req] . identifier[simple_response] ( literal[string] , literal[string] literal[string] , ) keyword[else] : identifier[chunk] = identifier[chunk] [: identifier[rbo] ] identifier[self] . identifier[req] . identifier[ensure_headers_sent] () identifier[self] . identifier[req] . identifier[write] ( identifier[chunk] ) keyword[if] identifier[rbo] keyword[is] keyword[not] keyword[None] : identifier[rbo] -= identifier[chunklen] keyword[if] identifier[rbo] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] , )
def write(self, chunk): """WSGI callable to write unbuffered data to the client. This method is also used internally by start_response (to write data from the iterable returned by the WSGI application). """ if not self.started_response: raise AssertionError('WSGI write called before start_response.') # depends on [control=['if'], data=[]] chunklen = len(chunk) rbo = self.remaining_bytes_out if rbo is not None and chunklen > rbo: if not self.req.sent_headers: # Whew. We can send a 500 to the client. self.req.simple_response('500 Internal Server Error', 'The requested resource returned more bytes than the declared Content-Length.') # depends on [control=['if'], data=[]] else: # Dang. We have probably already sent data. Truncate the chunk # to fit (so the client doesn't hang) and raise an error later. chunk = chunk[:rbo] # depends on [control=['if'], data=[]] self.req.ensure_headers_sent() self.req.write(chunk) if rbo is not None: rbo -= chunklen if rbo < 0: raise ValueError('Response body exceeds the declared Content-Length.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['rbo']]
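The Content-Length bookkeeping is easy to miss: the chunk is truncated so the client is not over-fed, but the full chunk length is still subtracted, which drives the counter negative and triggers the ValueError. A standalone sketch:

remaining = 10          # declared Content-Length still owed to the client
chunk = b'x' * 12
chunklen = len(chunk)   # measured before truncation
if chunklen > remaining:
    chunk = chunk[:remaining]  # send only what was promised
remaining -= chunklen
print(remaining)        # -2, the overrun that raises ValueError above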
def next(self): '''Iterator next. Build up count of returned elements during iteration.''' # if iteration has not begun, begin it. if not self._iterator: self.__iter__() next = self._iterator.next() if next is not StopIteration: self._returned_inc(next) return next
def function[next, parameter[self]]: constant[Iterator next. Build up count of returned elements during iteration.] if <ast.UnaryOp object at 0x7da18f09ce50> begin[:] call[name[self].__iter__, parameter[]] variable[next] assign[=] call[name[self]._iterator.next, parameter[]] if compare[name[next] is_not name[StopIteration]] begin[:] call[name[self]._returned_inc, parameter[name[next]]] return[name[next]]
keyword[def] identifier[next] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_iterator] : identifier[self] . identifier[__iter__] () identifier[next] = identifier[self] . identifier[_iterator] . identifier[next] () keyword[if] identifier[next] keyword[is] keyword[not] identifier[StopIteration] : identifier[self] . identifier[_returned_inc] ( identifier[next] ) keyword[return] identifier[next]
def next(self): """Iterator next. Build up count of returned elements during iteration.""" # if iteration has not begun, begin it. if not self._iterator: self.__iter__() # depends on [control=['if'], data=[]] next = self._iterator.next() if next is not StopIteration: self._returned_inc(next) # depends on [control=['if'], data=['next']] return next
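The same bookkeeping with the standard iterator protocol; note the original treats StopIteration as a sentinel return value, which its private _iterator presumably guarantees, and that detail is dropped here.

class CountingIterator:
    def __init__(self, iterable):
        self._it = iter(iterable)
        self.returned = 0

    def __iter__(self):
        return self

    def __next__(self):
        value = next(self._it)  # raises StopIteration when exhausted
        self.returned += 1      # count only elements actually handed out
        return value

it = CountingIterator('abc')
list(it)
print(it.returned)  # 3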
def create_release_branch(self, branch_name): """ Create a new release branch. :param branch_name: The name of the release branch to create (a string). :raises: The following exceptions can be raised: - :exc:`~exceptions.TypeError` when :attr:`release_scheme` isn't set to 'branches'. - :exc:`~exceptions.ValueError` when the branch name doesn't match the configured :attr:`release_filter` or no parent release branches are available. This method automatically checks out the new release branch, but note that the new branch may not actually exist until a commit has been made on the branch. """ # Validate the release scheme. self.ensure_release_scheme('branches') # Validate the name of the release branch. if self.compiled_filter.match(branch_name) is None: msg = "The branch name '%s' doesn't match the release filter!" raise ValueError(msg % branch_name) # Make sure the local repository exists. self.create() # Figure out the correct parent release branch. candidates = natsort([r.revision.branch for r in self.ordered_releases] + [branch_name]) index = candidates.index(branch_name) - 1 if index < 0: msg = "Failed to determine suitable parent branch for release branch '%s'!" raise ValueError(msg % branch_name) parent_branch = candidates[index] self.checkout(parent_branch) self.create_branch(branch_name)
def function[create_release_branch, parameter[self, branch_name]]: constant[ Create a new release branch. :param branch_name: The name of the release branch to create (a string). :raises: The following exceptions can be raised: - :exc:`~exceptions.TypeError` when :attr:`release_scheme` isn't set to 'branches'. - :exc:`~exceptions.ValueError` when the branch name doesn't match the configured :attr:`release_filter` or no parent release branches are available. This method automatically checks out the new release branch, but note that the new branch may not actually exist until a commit has been made on the branch. ] call[name[self].ensure_release_scheme, parameter[constant[branches]]] if compare[call[name[self].compiled_filter.match, parameter[name[branch_name]]] is constant[None]] begin[:] variable[msg] assign[=] constant[The branch name '%s' doesn't match the release filter!] <ast.Raise object at 0x7da1b0aa45e0> call[name[self].create, parameter[]] variable[candidates] assign[=] call[name[natsort], parameter[binary_operation[<ast.ListComp object at 0x7da1b0a06140> + list[[<ast.Name object at 0x7da1b0a05db0>]]]]] variable[index] assign[=] binary_operation[call[name[candidates].index, parameter[name[branch_name]]] - constant[1]] if compare[name[index] less[<] constant[0]] begin[:] variable[msg] assign[=] constant[Failed to determine suitable parent branch for release branch '%s'!] <ast.Raise object at 0x7da1b0a05600> variable[parent_branch] assign[=] call[name[candidates]][name[index]] call[name[self].checkout, parameter[name[parent_branch]]] call[name[self].create_branch, parameter[name[branch_name]]]
keyword[def] identifier[create_release_branch] ( identifier[self] , identifier[branch_name] ): literal[string] identifier[self] . identifier[ensure_release_scheme] ( literal[string] ) keyword[if] identifier[self] . identifier[compiled_filter] . identifier[match] ( identifier[branch_name] ) keyword[is] keyword[None] : identifier[msg] = literal[string] keyword[raise] identifier[ValueError] ( identifier[msg] % identifier[branch_name] ) identifier[self] . identifier[create] () identifier[candidates] = identifier[natsort] ([ identifier[r] . identifier[revision] . identifier[branch] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[ordered_releases] ]+[ identifier[branch_name] ]) identifier[index] = identifier[candidates] . identifier[index] ( identifier[branch_name] )- literal[int] keyword[if] identifier[index] < literal[int] : identifier[msg] = literal[string] keyword[raise] identifier[ValueError] ( identifier[msg] % identifier[branch_name] ) identifier[parent_branch] = identifier[candidates] [ identifier[index] ] identifier[self] . identifier[checkout] ( identifier[parent_branch] ) identifier[self] . identifier[create_branch] ( identifier[branch_name] )
def create_release_branch(self, branch_name): """ Create a new release branch. :param branch_name: The name of the release branch to create (a string). :raises: The following exceptions can be raised: - :exc:`~exceptions.TypeError` when :attr:`release_scheme` isn't set to 'branches'. - :exc:`~exceptions.ValueError` when the branch name doesn't match the configured :attr:`release_filter` or no parent release branches are available. This method automatically checks out the new release branch, but note that the new branch may not actually exist until a commit has been made on the branch. """ # Validate the release scheme. self.ensure_release_scheme('branches') # Validate the name of the release branch. if self.compiled_filter.match(branch_name) is None: msg = "The branch name '%s' doesn't match the release filter!" raise ValueError(msg % branch_name) # depends on [control=['if'], data=[]] # Make sure the local repository exists. self.create() # Figure out the correct parent release branch. candidates = natsort([r.revision.branch for r in self.ordered_releases] + [branch_name]) index = candidates.index(branch_name) - 1 if index < 0: msg = "Failed to determine suitable parent branch for release branch '%s'!" raise ValueError(msg % branch_name) # depends on [control=['if'], data=[]] parent_branch = candidates[index] self.checkout(parent_branch) self.create_branch(branch_name)
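The parent-branch selection is just a natural sort with the new name spliced in; the sketch below uses natsort.natsorted as a stand-in for the natsort helper the module imports, whose exact origin is not shown here.

from natsort import natsorted

releases = ['1.9', '1.10', '2.0']
branch_name = '1.11'
candidates = natsorted(releases + [branch_name])
index = candidates.index(branch_name) - 1
parent = candidates[index] if index >= 0 else None
print(parent)  # '1.10', since natural ordering puts 1.10 < 1.11 < 2.0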
def decorate(self, output_tree, output_tax, unique_names):
    '''
    Decorate a tree with taxonomy. This code does not allow inconsistent
    taxonomy within a clade. If one sequence in a clade has a different
    annotation to the rest, it will split the clade. Paraphyletic group
    names are distinguished if unique_names = True using a simple tally
    of each group (see unique_names below).

    Parameters
    ----------
    output_tree : string
        File to which the decorated tree will be written.
    output_tax : string
        File to which the taxonomy strings for each tip in the tree
        will be written.
    unique_names : boolean
        True indicating that a unique number will be appended to the
        end of a taxonomic rank if it is found more than once in the
        tree (i.e. it is paraphyletic in the tree). If false, multiple
        clades may be assigned with the same name.
    '''
    logging.info("Decorating tree")
    encountered_taxonomies = {}
    tc = TaxonomyCleaner()
    for node in self.tree.preorder_internal_node_iter(exclude_seed_node=True):
        max_tax_string_length = 0
        for tip in node.leaf_nodes():
            tip_label = tip.taxon.label.replace(' ', '_')
            if tip_label in self.taxonomy:
                tax_string_length = len(self.taxonomy[tip.taxon.label.replace(' ', '_')])
                if tax_string_length > max_tax_string_length:
                    max_tax_string_length = tax_string_length
        logging.debug("Number of ranks found for node: %i" % max_tax_string_length)
        tax_string_array = []
        for rank in range(max_tax_string_length):
            rank_tax = []
            for tip in node.leaf_nodes():
                tip_label = tip.taxon.label.replace(' ', '_')
                if tip_label in self.taxonomy:
                    tip_tax = self.taxonomy[tip_label]
                    if len(tip_tax) > rank:
                        tip_rank = tip_tax[rank]
                        if tip_rank not in rank_tax:
                            rank_tax.append(tip_rank)
            consistent_taxonomy = len(rank_tax) == 1
            if consistent_taxonomy:
                tax = rank_tax.pop()
                logging.debug("Consistent taxonomy found for node: %s" % tax)
                if tax not in tc.meaningless_taxonomic_names:
                    if unique_names:
                        if tax in encountered_taxonomies:
                            encountered_taxonomies[tax] += 1
                            tax = "%s_%i" % (tax, encountered_taxonomies[tax])
                        else:
                            encountered_taxonomies[tax] = 0
                    tax_string_array.append(tax)
        if any(tax_string_array):
            index = 0
            for anc in node.ancestor_iter():
                try:
                    index += anc.tax
                except:
                    continue
            tax_string_array = tax_string_array[index:]
            if any(tax_string_array):
                self._rename(node, '; '.join(tax_string_array))
            node.tax = len(tax_string_array)
    logging.info("Writing decorated tree to file: %s" % output_tree)
    if output_tree:
        self.tree.write(path=output_tree, schema="newick")
    if output_tax:
        self._write_consensus_strings(output_tax)
def function[decorate, parameter[self, output_tree, output_tax, unique_names]]: constant[ Decorate a tree with taxonomy. This code does not allow inconsistent taxonomy within a clade. If one sequence in a clade has a different annotation to the rest, it will split the clade. Paraphyletic group names are distinguished if unique_names = True using a simple tally of each group (see unique_names below). Parameters ---------- output_tree : string File to which the decorated tree will be written. output_tax : string File to which the taxonomy strings for each tip in the tree will be written. unique_names : boolean True indicating that a unique number will be appended to the end of a taxonomic rank if it is found more than once in the tree (i.e. it is paraphyletic in the tree). If false, multiple clades may be assigned with the same name. ] call[name[logging].info, parameter[constant[Decorating tree]]] variable[encountered_taxonomies] assign[=] dictionary[[], []] variable[tc] assign[=] call[name[TaxonomyCleaner], parameter[]] for taget[name[node]] in starred[call[name[self].tree.preorder_internal_node_iter, parameter[]]] begin[:] variable[max_tax_string_length] assign[=] constant[0] for taget[name[tip]] in starred[call[name[node].leaf_nodes, parameter[]]] begin[:] variable[tip_label] assign[=] call[name[tip].taxon.label.replace, parameter[constant[ ], constant[_]]] if compare[name[tip_label] in name[self].taxonomy] begin[:] variable[tax_string_length] assign[=] call[name[len], parameter[call[name[self].taxonomy][call[name[tip].taxon.label.replace, parameter[constant[ ], constant[_]]]]]] if compare[name[tax_string_length] greater[>] name[max_tax_string_length]] begin[:] variable[max_tax_string_length] assign[=] name[tax_string_length] call[name[logging].debug, parameter[binary_operation[constant[Number of ranks found for node: %i] <ast.Mod object at 0x7da2590d6920> name[max_tax_string_length]]]] variable[tax_string_array] assign[=] list[[]] for taget[name[rank]] in starred[call[name[range], parameter[name[max_tax_string_length]]]] begin[:] variable[rank_tax] assign[=] list[[]] for taget[name[tip]] in starred[call[name[node].leaf_nodes, parameter[]]] begin[:] variable[tip_label] assign[=] call[name[tip].taxon.label.replace, parameter[constant[ ], constant[_]]] if compare[name[tip_label] in name[self].taxonomy] begin[:] variable[tip_tax] assign[=] call[name[self].taxonomy][name[tip_label]] if compare[call[name[len], parameter[name[tip_tax]]] greater[>] name[rank]] begin[:] variable[tip_rank] assign[=] call[name[tip_tax]][name[rank]] if compare[name[tip_rank] <ast.NotIn object at 0x7da2590d7190> name[rank_tax]] begin[:] call[name[rank_tax].append, parameter[name[tip_rank]]] variable[consistent_taxonomy] assign[=] compare[call[name[len], parameter[name[rank_tax]]] equal[==] constant[1]] if name[consistent_taxonomy] begin[:] variable[tax] assign[=] call[name[rank_tax].pop, parameter[]] call[name[logging].debug, parameter[binary_operation[constant[Consistent taxonomy found for node: %s] <ast.Mod object at 0x7da2590d6920> name[tax]]]] if compare[name[tax] <ast.NotIn object at 0x7da2590d7190> name[tc].meaningless_taxonomic_names] begin[:] if name[unique_names] begin[:] if compare[name[tax] in name[encountered_taxonomies]] begin[:] <ast.AugAssign object at 0x7da204567970> variable[tax] assign[=] binary_operation[constant[%s_%i] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2045679a0>, <ast.Subscript object at 0x7da2045655d0>]]] call[name[tax_string_array].append, parameter[name[tax]]] 
if call[name[any], parameter[name[tax_string_array]]] begin[:] variable[index] assign[=] constant[0] for taget[name[anc]] in starred[call[name[node].ancestor_iter, parameter[]]] begin[:] <ast.Try object at 0x7da204564340> variable[tax_string_array] assign[=] call[name[tax_string_array]][<ast.Slice object at 0x7da204567310>] if call[name[any], parameter[name[tax_string_array]]] begin[:] call[name[self]._rename, parameter[name[node], call[constant[; ].join, parameter[name[tax_string_array]]]]] name[node].tax assign[=] call[name[len], parameter[name[tax_string_array]]] call[name[logging].info, parameter[binary_operation[constant[Writing decorated tree to file: %s] <ast.Mod object at 0x7da2590d6920> name[output_tree]]]] if name[output_tree] begin[:] call[name[self].tree.write, parameter[]] if name[output_tax] begin[:] call[name[self]._write_consensus_strings, parameter[name[output_tax]]]
keyword[def] identifier[decorate] ( identifier[self] , identifier[output_tree] , identifier[output_tax] , identifier[unique_names] ): literal[string] identifier[logging] . identifier[info] ( literal[string] ) identifier[encountered_taxonomies] ={} identifier[tc] = identifier[TaxonomyCleaner] () keyword[for] identifier[node] keyword[in] identifier[self] . identifier[tree] . identifier[preorder_internal_node_iter] ( identifier[exclude_seed_node] = keyword[True] ): identifier[max_tax_string_length] = literal[int] keyword[for] identifier[tip] keyword[in] identifier[node] . identifier[leaf_nodes] (): identifier[tip_label] = identifier[tip] . identifier[taxon] . identifier[label] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[tip_label] keyword[in] identifier[self] . identifier[taxonomy] : identifier[tax_string_length] = identifier[len] ( identifier[self] . identifier[taxonomy] [ identifier[tip] . identifier[taxon] . identifier[label] . identifier[replace] ( literal[string] , literal[string] )]) keyword[if] identifier[tax_string_length] > identifier[max_tax_string_length] : identifier[max_tax_string_length] = identifier[tax_string_length] identifier[logging] . identifier[debug] ( literal[string] % identifier[max_tax_string_length] ) identifier[tax_string_array] =[] keyword[for] identifier[rank] keyword[in] identifier[range] ( identifier[max_tax_string_length] ): identifier[rank_tax] =[] keyword[for] identifier[tip] keyword[in] identifier[node] . identifier[leaf_nodes] (): identifier[tip_label] = identifier[tip] . identifier[taxon] . identifier[label] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[tip_label] keyword[in] identifier[self] . identifier[taxonomy] : identifier[tip_tax] = identifier[self] . identifier[taxonomy] [ identifier[tip_label] ] keyword[if] identifier[len] ( identifier[tip_tax] )> identifier[rank] : identifier[tip_rank] = identifier[tip_tax] [ identifier[rank] ] keyword[if] identifier[tip_rank] keyword[not] keyword[in] identifier[rank_tax] : identifier[rank_tax] . identifier[append] ( identifier[tip_rank] ) identifier[consistent_taxonomy] = identifier[len] ( identifier[rank_tax] )== literal[int] keyword[if] identifier[consistent_taxonomy] : identifier[tax] = identifier[rank_tax] . identifier[pop] () identifier[logging] . identifier[debug] ( literal[string] % identifier[tax] ) keyword[if] identifier[tax] keyword[not] keyword[in] identifier[tc] . identifier[meaningless_taxonomic_names] : keyword[if] identifier[unique_names] : keyword[if] identifier[tax] keyword[in] identifier[encountered_taxonomies] : identifier[encountered_taxonomies] [ identifier[tax] ]+= literal[int] identifier[tax] = literal[string] %( identifier[tax] , identifier[encountered_taxonomies] [ identifier[tax] ]) keyword[else] : identifier[encountered_taxonomies] [ identifier[tax] ]= literal[int] identifier[tax_string_array] . identifier[append] ( identifier[tax] ) keyword[if] identifier[any] ( identifier[tax_string_array] ): identifier[index] = literal[int] keyword[for] identifier[anc] keyword[in] identifier[node] . identifier[ancestor_iter] (): keyword[try] : identifier[index] += identifier[anc] . identifier[tax] keyword[except] : keyword[continue] identifier[tax_string_array] = identifier[tax_string_array] [ identifier[index] :] keyword[if] identifier[any] ( identifier[tax_string_array] ): identifier[self] . identifier[_rename] ( identifier[node] , literal[string] . identifier[join] ( identifier[tax_string_array] )) identifier[node] . 
identifier[tax] = identifier[len] ( identifier[tax_string_array] ) identifier[logging] . identifier[info] ( literal[string] % identifier[output_tree] ) keyword[if] identifier[output_tree] : identifier[self] . identifier[tree] . identifier[write] ( identifier[path] = identifier[output_tree] , identifier[schema] = literal[string] ) keyword[if] identifier[output_tax] : identifier[self] . identifier[_write_consensus_strings] ( identifier[output_tax] )
def decorate(self, output_tree, output_tax, unique_names):
    """
    Decorate a tree with taxonomy. This code does not allow inconsistent
    taxonomy within a clade. If one sequence in a clade has a different
    annotation to the rest, it will split the clade. Paraphyletic group
    names are distinguished if unique_names = True using a simple tally
    of each group (see unique_names below).

    Parameters
    ----------
    output_tree : string
        File to which the decorated tree will be written.
    output_tax : string
        File to which the taxonomy strings for each tip in the tree
        will be written.
    unique_names : boolean
        True indicating that a unique number will be appended to the
        end of a taxonomic rank if it is found more than once in the
        tree (i.e. it is paraphyletic in the tree). If false, multiple
        clades may be assigned with the same name.
    """
    logging.info('Decorating tree')
    encountered_taxonomies = {}
    tc = TaxonomyCleaner()
    for node in self.tree.preorder_internal_node_iter(exclude_seed_node=True):
        max_tax_string_length = 0
        for tip in node.leaf_nodes():
            tip_label = tip.taxon.label.replace(' ', '_')
            if tip_label in self.taxonomy:
                tax_string_length = len(self.taxonomy[tip.taxon.label.replace(' ', '_')])
                if tax_string_length > max_tax_string_length:
                    max_tax_string_length = tax_string_length # depends on [control=['if'], data=['tax_string_length', 'max_tax_string_length']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tip']]
        logging.debug('Number of ranks found for node: %i' % max_tax_string_length)
        tax_string_array = []
        for rank in range(max_tax_string_length):
            rank_tax = []
            for tip in node.leaf_nodes():
                tip_label = tip.taxon.label.replace(' ', '_')
                if tip_label in self.taxonomy:
                    tip_tax = self.taxonomy[tip_label]
                    if len(tip_tax) > rank:
                        tip_rank = tip_tax[rank]
                        if tip_rank not in rank_tax:
                            rank_tax.append(tip_rank) # depends on [control=['if'], data=['tip_rank', 'rank_tax']] # depends on [control=['if'], data=['rank']] # depends on [control=['if'], data=['tip_label']] # depends on [control=['for'], data=['tip']]
            consistent_taxonomy = len(rank_tax) == 1
            if consistent_taxonomy:
                tax = rank_tax.pop()
                logging.debug('Consistent taxonomy found for node: %s' % tax)
                if tax not in tc.meaningless_taxonomic_names:
                    if unique_names:
                        if tax in encountered_taxonomies:
                            encountered_taxonomies[tax] += 1
                            tax = '%s_%i' % (tax, encountered_taxonomies[tax]) # depends on [control=['if'], data=['tax', 'encountered_taxonomies']]
                        else:
                            encountered_taxonomies[tax] = 0 # depends on [control=['if'], data=[]]
                    tax_string_array.append(tax) # depends on [control=['if'], data=['tax']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rank']]
        if any(tax_string_array):
            index = 0
            for anc in node.ancestor_iter():
                try:
                    index += anc.tax # depends on [control=['try'], data=[]]
                except:
                    continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['anc']]
            tax_string_array = tax_string_array[index:]
            if any(tax_string_array):
                self._rename(node, '; '.join(tax_string_array)) # depends on [control=['if'], data=[]]
            node.tax = len(tax_string_array) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
    logging.info('Writing decorated tree to file: %s' % output_tree)
    if output_tree:
        self.tree.write(path=output_tree, schema='newick') # depends on [control=['if'], data=[]]
    if output_tax:
        self._write_consensus_strings(output_tax) # depends on [control=['if'], data=[]]
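Stripped of the dendropy tree walking, the per-rank consensus test and the paraphyly suffixing (with the += 1 tally as fixed above) reduce to this:

tip_taxonomies = [['Bacteria', 'Firmicutes'],
                  ['Bacteria', 'Firmicutes']]
encountered = {}
consensus = []
for rank in range(max(len(t) for t in tip_taxonomies)):
    names = {t[rank] for t in tip_taxonomies if len(t) > rank}
    if len(names) == 1:  # every tip below the node agrees on this rank
        tax = names.pop()
        if tax in encountered:
            encountered[tax] += 1
            tax = '%s_%i' % (tax, encountered[tax])
        else:
            encountered[tax] = 0
        consensus.append(tax)
print('; '.join(consensus))  # Bacteria; Firmicutes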
def parse_dsn(dsn): """Parse dsn string.""" parsed_dsn = urlparse(dsn) parsed_path = parse_path(parsed_dsn.path) return { 'scheme': parsed_dsn.scheme, 'sender': parsed_dsn.username, 'token': parsed_dsn.password, 'domain': parsed_dsn.hostname, 'port': parsed_dsn.port or 80, 'version': parsed_path.get('version'), 'project': parsed_path.get('project'), }
def function[parse_dsn, parameter[dsn]]: constant[Parse dsn string.] variable[parsed_dsn] assign[=] call[name[urlparse], parameter[name[dsn]]] variable[parsed_path] assign[=] call[name[parse_path], parameter[name[parsed_dsn].path]] return[dictionary[[<ast.Constant object at 0x7da20c991480>, <ast.Constant object at 0x7da20c991210>, <ast.Constant object at 0x7da20c991150>, <ast.Constant object at 0x7da20c993790>, <ast.Constant object at 0x7da20c991a50>, <ast.Constant object at 0x7da20c992f80>, <ast.Constant object at 0x7da20c991f60>], [<ast.Attribute object at 0x7da20c992c50>, <ast.Attribute object at 0x7da20c991b70>, <ast.Attribute object at 0x7da20c990790>, <ast.Attribute object at 0x7da20c9907c0>, <ast.BoolOp object at 0x7da20c990730>, <ast.Call object at 0x7da20c9932b0>, <ast.Call object at 0x7da20c9901c0>]]]
keyword[def] identifier[parse_dsn] ( identifier[dsn] ): literal[string] identifier[parsed_dsn] = identifier[urlparse] ( identifier[dsn] ) identifier[parsed_path] = identifier[parse_path] ( identifier[parsed_dsn] . identifier[path] ) keyword[return] { literal[string] : identifier[parsed_dsn] . identifier[scheme] , literal[string] : identifier[parsed_dsn] . identifier[username] , literal[string] : identifier[parsed_dsn] . identifier[password] , literal[string] : identifier[parsed_dsn] . identifier[hostname] , literal[string] : identifier[parsed_dsn] . identifier[port] keyword[or] literal[int] , literal[string] : identifier[parsed_path] . identifier[get] ( literal[string] ), literal[string] : identifier[parsed_path] . identifier[get] ( literal[string] ), }
def parse_dsn(dsn): """Parse dsn string.""" parsed_dsn = urlparse(dsn) parsed_path = parse_path(parsed_dsn.path) return {'scheme': parsed_dsn.scheme, 'sender': parsed_dsn.username, 'token': parsed_dsn.password, 'domain': parsed_dsn.hostname, 'port': parsed_dsn.port or 80, 'version': parsed_path.get('version'), 'project': parsed_path.get('project')}
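For a DSN of the shape scheme://sender:token@host:port/..., urlparse supplies every field except version and project, which the unshown parse_path helper presumably pulls out of the path. The DSN below is purely illustrative.

from urllib.parse import urlparse

parsed = urlparse('https://alice:s3cret@errors.example.com:8080/v1/myproject')
print(parsed.scheme, parsed.username, parsed.password,
      parsed.hostname, parsed.port, parsed.path)
# https alice s3cret errors.example.com 8080 /v1/myproject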
def bounter(size_mb=None, need_iteration=True, need_counts=True, log_counting=None):
    """Factory method for bounter implementation.

    Args:
        size_mb (int): Desired memory footprint of the counter.
        need_iteration (Bool): With `True`, create a `HashTable` implementation which can iterate
            over inserted key/value pairs. With `False`, create a `CountMinSketch` implementation
            which performs better in limited-memory scenarios, but does not support iteration
            over elements.
        need_counts (Bool): With `True`, construct the structure normally.
            With `False`, ignore all remaining parameters and create a minimalistic cardinality
            counter based on hyperloglog which only takes 64KB memory.
        log_counting (int): Counting to use with `CountMinSketch` implementation. Accepted values
            are `None` (default counting with 32-bit integers), 1024 (16-bit), 8 (8-bit). See
            `CountMinSketch` documentation for details.
            Raise ValueError if not `None` and `need_iteration` is `True`.
    """
    if not need_counts:
        return CardinalityEstimator()

    if size_mb is None:
        raise ValueError("Max size in MB must be provided.")

    if need_iteration:
        if log_counting:
            raise ValueError("Log counting is only supported with CMS implementation (need_iteration=False).")
        return HashTable(size_mb=size_mb)
    else:
        return CountMinSketch(size_mb=size_mb, log_counting=log_counting)
def function[bounter, parameter[size_mb, need_iteration, need_counts, log_counting]]: constant[Factory method for bounter implementation. Args: size_mb (int): Desired memory footprint of the counter. need_iteration (Bool): With `True`, create a `HashTable` implementation which can iterate over inserted key/value pairs. With `False`, create a `CountMinSketch` implementation which performs better in limited-memory scenarios, but does not support iteration over elements. need_counts (Bool): With `True`, construct the structure normally. With `False`, ignore all remaining parameters and create a minimalistic cardinality counter based on hyperloglog which only takes 64KB memory. log_counting (int): Counting to use with `CountMinSketch` implementation. Accepted values are `None` (default counting with 32-bit integers), 1024 (16-bit), 8 (8-bit). See `CountMinSketch` documentation for details. Raise ValueError if not `None `and `need_iteration` is `True`. ] if <ast.UnaryOp object at 0x7da20e9b3e80> begin[:] return[call[name[CardinalityEstimator], parameter[]]] if compare[name[size_mb] is constant[None]] begin[:] <ast.Raise object at 0x7da20e9b0550> if name[need_iteration] begin[:] if name[log_counting] begin[:] <ast.Raise object at 0x7da20e9b2110> return[call[name[HashTable], parameter[]]]
keyword[def] identifier[bounter] ( identifier[size_mb] = keyword[None] , identifier[need_iteration] = keyword[True] , identifier[need_counts] = keyword[True] , identifier[log_counting] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[need_counts] : keyword[return] identifier[CardinalityEstimator] () keyword[if] identifier[size_mb] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[need_iteration] : keyword[if] identifier[log_counting] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[HashTable] ( identifier[size_mb] = identifier[size_mb] ) keyword[else] : keyword[return] identifier[CountMinSketch] ( identifier[size_mb] = identifier[size_mb] , identifier[log_counting] = identifier[log_counting] )
def bounter(size_mb=None, need_iteration=True, need_counts=True, log_counting=None):
    """Factory method for bounter implementation.

    Args:
        size_mb (int): Desired memory footprint of the counter.
        need_iteration (Bool): With `True`, create a `HashTable` implementation which can iterate
            over inserted key/value pairs. With `False`, create a `CountMinSketch` implementation
            which performs better in limited-memory scenarios, but does not support iteration
            over elements.
        need_counts (Bool): With `True`, construct the structure normally.
            With `False`, ignore all remaining parameters and create a minimalistic cardinality
            counter based on hyperloglog which only takes 64KB memory.
        log_counting (int): Counting to use with `CountMinSketch` implementation. Accepted values
            are `None` (default counting with 32-bit integers), 1024 (16-bit), 8 (8-bit). See
            `CountMinSketch` documentation for details.
            Raise ValueError if not `None` and `need_iteration` is `True`.
    """
    if not need_counts:
        return CardinalityEstimator() # depends on [control=['if'], data=[]]
    if size_mb is None:
        raise ValueError('Max size in MB must be provided.') # depends on [control=['if'], data=[]]
    if need_iteration:
        if log_counting:
            raise ValueError('Log counting is only supported with CMS implementation (need_iteration=False).') # depends on [control=['if'], data=[]]
        return HashTable(size_mb=size_mb) # depends on [control=['if'], data=[]]
    else:
        return CountMinSketch(size_mb=size_mb, log_counting=log_counting)
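Typical factory calls, following the docstring; the import assumes this is the published bounter package, whose top-level module exposes the factory.

from bounter import bounter

cms = bounter(size_mb=64, need_iteration=False, log_counting=1024)  # 16-bit CMS
table = bounter(size_mb=64)        # HashTable; supports iteration
hll = bounter(need_counts=False)   # ~64KB cardinality-only counter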
def hex(x): ''' x-->bytes | bytearray Returns-->bytes: hex-encoded ''' if isinstance(x, bytearray): x = bytes(x) return encode(x, 'hex')
def function[hex, parameter[x]]: constant[ x-->bytes | bytearray Returns-->bytes: hex-encoded ] if call[name[isinstance], parameter[name[x], name[bytearray]]] begin[:] variable[x] assign[=] call[name[bytes], parameter[name[x]]] return[call[name[encode], parameter[name[x], constant[hex]]]]
keyword[def] identifier[hex] ( identifier[x] ): literal[string] keyword[if] identifier[isinstance] ( identifier[x] , identifier[bytearray] ): identifier[x] = identifier[bytes] ( identifier[x] ) keyword[return] identifier[encode] ( identifier[x] , literal[string] )
def hex(x): """ x-->bytes | bytearray Returns-->bytes: hex-encoded """ if isinstance(x, bytearray): x = bytes(x) # depends on [control=['if'], data=[]] return encode(x, 'hex')
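Assuming `encode` is codecs.encode (the usual source of the 'hex' codec; the import is not shown above), the helper behaves like this:

from codecs import encode

print(encode(b'\x00\xff', 'hex'))               # b'00ff'
print(encode(bytes(bytearray([1, 2])), 'hex'))  # b'0102'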
def toLily(self): ''' Method which converts the object instance and its attributes to a string of lilypond code :return: str of lilypond code ''' lilystring = "" if hasattr(self, "size"): try: size = float(self.size) lilystring += "\\abs-fontsize #" + str(self.size) + " " except: lilystring += "\\" + str(self.size) + " " if hasattr(self, "font"): fonts_available = ["sans", "typewriter", "roman"] if self.font in fonts_available: lilystring += "\\" + self.font + " " else: rand = random.Random() selected = rand.choice(fonts_available) lilystring += "\\" + selected + " " valid = False for char in self.text: if char in string.ascii_letters or char in [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]: if not hasattr(self, "noquotes"): lilystring += "\"" lilystring += self.text if not hasattr(self, "noquotes"): lilystring += "\" " valid = True break else: valid = False if not valid: lilystring = "" return lilystring
def function[toLily, parameter[self]]: constant[ Method which converts the object instance and its attributes to a string of lilypond code :return: str of lilypond code ] variable[lilystring] assign[=] constant[] if call[name[hasattr], parameter[name[self], constant[size]]] begin[:] <ast.Try object at 0x7da1b25dad40> if call[name[hasattr], parameter[name[self], constant[font]]] begin[:] variable[fonts_available] assign[=] list[[<ast.Constant object at 0x7da1b25dbd30>, <ast.Constant object at 0x7da1b25d9c60>, <ast.Constant object at 0x7da1b25d8c10>]] if compare[name[self].font in name[fonts_available]] begin[:] <ast.AugAssign object at 0x7da1b2449870> variable[valid] assign[=] constant[False] for taget[name[char]] in starred[name[self].text] begin[:] if <ast.BoolOp object at 0x7da1b2449d80> begin[:] if <ast.UnaryOp object at 0x7da1b2449d20> begin[:] <ast.AugAssign object at 0x7da1b2449750> <ast.AugAssign object at 0x7da1b2448d30> if <ast.UnaryOp object at 0x7da1b244b0d0> begin[:] <ast.AugAssign object at 0x7da1b2380310> variable[valid] assign[=] constant[True] break if <ast.UnaryOp object at 0x7da1b23815a0> begin[:] variable[lilystring] assign[=] constant[] return[name[lilystring]]
keyword[def] identifier[toLily] ( identifier[self] ): literal[string] identifier[lilystring] = literal[string] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): keyword[try] : identifier[size] = identifier[float] ( identifier[self] . identifier[size] ) identifier[lilystring] += literal[string] + identifier[str] ( identifier[self] . identifier[size] )+ literal[string] keyword[except] : identifier[lilystring] += literal[string] + identifier[str] ( identifier[self] . identifier[size] )+ literal[string] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[fonts_available] =[ literal[string] , literal[string] , literal[string] ] keyword[if] identifier[self] . identifier[font] keyword[in] identifier[fonts_available] : identifier[lilystring] += literal[string] + identifier[self] . identifier[font] + literal[string] keyword[else] : identifier[rand] = identifier[random] . identifier[Random] () identifier[selected] = identifier[rand] . identifier[choice] ( identifier[fonts_available] ) identifier[lilystring] += literal[string] + identifier[selected] + literal[string] identifier[valid] = keyword[False] keyword[for] identifier[char] keyword[in] identifier[self] . identifier[text] : keyword[if] identifier[char] keyword[in] identifier[string] . identifier[ascii_letters] keyword[or] identifier[char] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[lilystring] += literal[string] identifier[lilystring] += identifier[self] . identifier[text] keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[lilystring] += literal[string] identifier[valid] = keyword[True] keyword[break] keyword[else] : identifier[valid] = keyword[False] keyword[if] keyword[not] identifier[valid] : identifier[lilystring] = literal[string] keyword[return] identifier[lilystring]
def toLily(self): """ Method which converts the object instance and its attributes to a string of lilypond code :return: str of lilypond code """ lilystring = '' if hasattr(self, 'size'): try: size = float(self.size) lilystring += '\\abs-fontsize #' + str(self.size) + ' ' # depends on [control=['try'], data=[]] except: lilystring += '\\' + str(self.size) + ' ' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] if hasattr(self, 'font'): fonts_available = ['sans', 'typewriter', 'roman'] if self.font in fonts_available: lilystring += '\\' + self.font + ' ' # depends on [control=['if'], data=[]] else: rand = random.Random() selected = rand.choice(fonts_available) lilystring += '\\' + selected + ' ' # depends on [control=['if'], data=[]] valid = False for char in self.text: if char in string.ascii_letters or char in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']: if not hasattr(self, 'noquotes'): lilystring += '"' # depends on [control=['if'], data=[]] lilystring += self.text if not hasattr(self, 'noquotes'): lilystring += '" ' # depends on [control=['if'], data=[]] valid = True break # depends on [control=['if'], data=[]] else: valid = False # depends on [control=['for'], data=['char']] if not valid: lilystring = '' # depends on [control=['if'], data=[]] return lilystring
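For a hypothetical object with size=12, font='sans' and text='Adagio', the pieces assemble into the markup below; the random font fallback and the no-alphanumeric bail-out are skipped in this sketch.

size, font, text = 12, 'sans', 'Adagio'
lilystring = '\\abs-fontsize #%s \\%s "%s" ' % (size, font, text)
print(lilystring)  # \abs-fontsize #12 \sans "Adagio"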
def _ir_calibrate(header, data, irchn, calib_type, mask=False): """IR calibration *calib_type* in brightness_temperature, radiance, count """ count = data["hrpt"][:, :, irchn + 2].astype(np.float) if calib_type == 0: return count # Mask unnaturally low values mask |= count == 0.0 k1_ = np.expand_dims(data['calir'][:, irchn, 0, 0] / 1.0e9, 1) k2_ = np.expand_dims(data['calir'][:, irchn, 0, 1] / 1.0e6, 1) k3_ = np.expand_dims(data['calir'][:, irchn, 0, 2] / 1.0e6, 1) # Count to radiance conversion: rad = k1_ * count * count + k2_ * count + k3_ all_zero = np.logical_and( np.logical_and( np.equal(k1_, 0), np.equal(k2_, 0)), np.equal(k3_, 0)) idx = np.indices((all_zero.shape[0], )) suspect_line_nums = np.repeat(idx[0], all_zero[:, 0]) if suspect_line_nums.any(): logger.info("Suspicious scan lines: %s", str(suspect_line_nums)) if calib_type == 2: mask |= rad <= 0.0 return np.where(mask, np.nan, rad) # Central wavenumber: cwnum = header['radtempcnv'][0, irchn, 0] if irchn == 0: cwnum = cwnum / 1.0e2 else: cwnum = cwnum / 1.0e3 bandcor_2 = header['radtempcnv'][0, irchn, 1] / 1e5 bandcor_3 = header['radtempcnv'][0, irchn, 2] / 1e6 ir_const_1 = 1.1910659e-5 ir_const_2 = 1.438833 t_planck = (ir_const_2 * cwnum) / \ np.log(1 + ir_const_1 * cwnum * cwnum * cwnum / rad) # Band corrections applied to t_planck to get correct # brightness temperature for channel: if bandcor_2 < 0: # Post AAPP-v4 tb_ = bandcor_2 + bandcor_3 * t_planck else: # AAPP 1 to 4 tb_ = (t_planck - bandcor_2) / bandcor_3 # Mask unnaturally low values # mask |= tb_ < 0.1 return np.where(mask, np.nan, tb_)
def function[_ir_calibrate, parameter[header, data, irchn, calib_type, mask]]: constant[IR calibration *calib_type* in brightness_temperature, radiance, count ] variable[count] assign[=] call[call[call[name[data]][constant[hrpt]]][tuple[[<ast.Slice object at 0x7da1b22a6620>, <ast.Slice object at 0x7da1b22a6920>, <ast.BinOp object at 0x7da1b22a68c0>]]].astype, parameter[name[np].float]] if compare[name[calib_type] equal[==] constant[0]] begin[:] return[name[count]] <ast.AugAssign object at 0x7da1b22a7b50> variable[k1_] assign[=] call[name[np].expand_dims, parameter[binary_operation[call[call[name[data]][constant[calir]]][tuple[[<ast.Slice object at 0x7da1b22a67d0>, <ast.Name object at 0x7da1b22a6740>, <ast.Constant object at 0x7da1b22a6950>, <ast.Constant object at 0x7da1b22a6b00>]]] / constant[1000000000.0]], constant[1]]] variable[k2_] assign[=] call[name[np].expand_dims, parameter[binary_operation[call[call[name[data]][constant[calir]]][tuple[[<ast.Slice object at 0x7da1b22a4880>, <ast.Name object at 0x7da1b22a50f0>, <ast.Constant object at 0x7da1b22a50c0>, <ast.Constant object at 0x7da1b22a4850>]]] / constant[1000000.0]], constant[1]]] variable[k3_] assign[=] call[name[np].expand_dims, parameter[binary_operation[call[call[name[data]][constant[calir]]][tuple[[<ast.Slice object at 0x7da1b22a7970>, <ast.Name object at 0x7da1b22a7ca0>, <ast.Constant object at 0x7da1b22a6590>, <ast.Constant object at 0x7da1b22a7cd0>]]] / constant[1000000.0]], constant[1]]] variable[rad] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[k1_] * name[count]] * name[count]] + binary_operation[name[k2_] * name[count]]] + name[k3_]] variable[all_zero] assign[=] call[name[np].logical_and, parameter[call[name[np].logical_and, parameter[call[name[np].equal, parameter[name[k1_], constant[0]]], call[name[np].equal, parameter[name[k2_], constant[0]]]]], call[name[np].equal, parameter[name[k3_], constant[0]]]]] variable[idx] assign[=] call[name[np].indices, parameter[tuple[[<ast.Subscript object at 0x7da1b2257520>]]]] variable[suspect_line_nums] assign[=] call[name[np].repeat, parameter[call[name[idx]][constant[0]], call[name[all_zero]][tuple[[<ast.Slice object at 0x7da1b2255bd0>, <ast.Constant object at 0x7da1b22540a0>]]]]] if call[name[suspect_line_nums].any, parameter[]] begin[:] call[name[logger].info, parameter[constant[Suspicious scan lines: %s], call[name[str], parameter[name[suspect_line_nums]]]]] if compare[name[calib_type] equal[==] constant[2]] begin[:] <ast.AugAssign object at 0x7da1b2257730> return[call[name[np].where, parameter[name[mask], name[np].nan, name[rad]]]] variable[cwnum] assign[=] call[call[name[header]][constant[radtempcnv]]][tuple[[<ast.Constant object at 0x7da1b22ba650>, <ast.Name object at 0x7da1b22b8610>, <ast.Constant object at 0x7da1b22ba590>]]] if compare[name[irchn] equal[==] constant[0]] begin[:] variable[cwnum] assign[=] binary_operation[name[cwnum] / constant[100.0]] variable[bandcor_2] assign[=] binary_operation[call[call[name[header]][constant[radtempcnv]]][tuple[[<ast.Constant object at 0x7da1b22b9510>, <ast.Name object at 0x7da1b22bb1c0>, <ast.Constant object at 0x7da1b22bad70>]]] / constant[100000.0]] variable[bandcor_3] assign[=] binary_operation[call[call[name[header]][constant[radtempcnv]]][tuple[[<ast.Constant object at 0x7da1b22b9060>, <ast.Name object at 0x7da1b22badd0>, <ast.Constant object at 0x7da1b22b9030>]]] / constant[1000000.0]] variable[ir_const_1] assign[=] constant[1.1910659e-05] variable[ir_const_2] assign[=] constant[1.438833] variable[t_planck] assign[=] binary_operation[binary_operation[name[ir_const_2] * name[cwnum]] / call[name[np].log, parameter[binary_operation[constant[1] + binary_operation[binary_operation[binary_operation[binary_operation[name[ir_const_1] * name[cwnum]] * name[cwnum]] * name[cwnum]] / name[rad]]]]]] if compare[name[bandcor_2] less[<] constant[0]] begin[:] variable[tb_] assign[=] binary_operation[name[bandcor_2] + binary_operation[name[bandcor_3] * name[t_planck]]] return[call[name[np].where, parameter[name[mask], name[np].nan, name[tb_]]]]
keyword[def] identifier[_ir_calibrate] ( identifier[header] , identifier[data] , identifier[irchn] , identifier[calib_type] , identifier[mask] = keyword[False] ): literal[string] identifier[count] = identifier[data] [ literal[string] ][:,:, identifier[irchn] + literal[int] ]. identifier[astype] ( identifier[np] . identifier[float] ) keyword[if] identifier[calib_type] == literal[int] : keyword[return] identifier[count] identifier[mask] |= identifier[count] == literal[int] identifier[k1_] = identifier[np] . identifier[expand_dims] ( identifier[data] [ literal[string] ][:, identifier[irchn] , literal[int] , literal[int] ]/ literal[int] , literal[int] ) identifier[k2_] = identifier[np] . identifier[expand_dims] ( identifier[data] [ literal[string] ][:, identifier[irchn] , literal[int] , literal[int] ]/ literal[int] , literal[int] ) identifier[k3_] = identifier[np] . identifier[expand_dims] ( identifier[data] [ literal[string] ][:, identifier[irchn] , literal[int] , literal[int] ]/ literal[int] , literal[int] ) identifier[rad] = identifier[k1_] * identifier[count] * identifier[count] + identifier[k2_] * identifier[count] + identifier[k3_] identifier[all_zero] = identifier[np] . identifier[logical_and] ( identifier[np] . identifier[logical_and] ( identifier[np] . identifier[equal] ( identifier[k1_] , literal[int] ), identifier[np] . identifier[equal] ( identifier[k2_] , literal[int] )), identifier[np] . identifier[equal] ( identifier[k3_] , literal[int] )) identifier[idx] = identifier[np] . identifier[indices] (( identifier[all_zero] . identifier[shape] [ literal[int] ],)) identifier[suspect_line_nums] = identifier[np] . identifier[repeat] ( identifier[idx] [ literal[int] ], identifier[all_zero] [:, literal[int] ]) keyword[if] identifier[suspect_line_nums] . identifier[any] (): identifier[logger] . identifier[info] ( literal[string] , identifier[str] ( identifier[suspect_line_nums] )) keyword[if] identifier[calib_type] == literal[int] : identifier[mask] |= identifier[rad] <= literal[int] keyword[return] identifier[np] . identifier[where] ( identifier[mask] , identifier[np] . identifier[nan] , identifier[rad] ) identifier[cwnum] = identifier[header] [ literal[string] ][ literal[int] , identifier[irchn] , literal[int] ] keyword[if] identifier[irchn] == literal[int] : identifier[cwnum] = identifier[cwnum] / literal[int] keyword[else] : identifier[cwnum] = identifier[cwnum] / literal[int] identifier[bandcor_2] = identifier[header] [ literal[string] ][ literal[int] , identifier[irchn] , literal[int] ]/ literal[int] identifier[bandcor_3] = identifier[header] [ literal[string] ][ literal[int] , identifier[irchn] , literal[int] ]/ literal[int] identifier[ir_const_1] = literal[int] identifier[ir_const_2] = literal[int] identifier[t_planck] =( identifier[ir_const_2] * identifier[cwnum] )/ identifier[np] . identifier[log] ( literal[int] + identifier[ir_const_1] * identifier[cwnum] * identifier[cwnum] * identifier[cwnum] / identifier[rad] ) keyword[if] identifier[bandcor_2] < literal[int] : identifier[tb_] = identifier[bandcor_2] + identifier[bandcor_3] * identifier[t_planck] keyword[else] : identifier[tb_] =( identifier[t_planck] - identifier[bandcor_2] )/ identifier[bandcor_3] keyword[return] identifier[np] . identifier[where] ( identifier[mask] , identifier[np] . identifier[nan] , identifier[tb_] )
def _ir_calibrate(header, data, irchn, calib_type, mask=False): """IR calibration *calib_type* in brightness_temperature, radiance, count """ count = data['hrpt'][:, :, irchn + 2].astype(np.float) if calib_type == 0: return count # depends on [control=['if'], data=[]] # Mask unnaturally low values mask |= count == 0.0 k1_ = np.expand_dims(data['calir'][:, irchn, 0, 0] / 1000000000.0, 1) k2_ = np.expand_dims(data['calir'][:, irchn, 0, 1] / 1000000.0, 1) k3_ = np.expand_dims(data['calir'][:, irchn, 0, 2] / 1000000.0, 1) # Count to radiance conversion: rad = k1_ * count * count + k2_ * count + k3_ all_zero = np.logical_and(np.logical_and(np.equal(k1_, 0), np.equal(k2_, 0)), np.equal(k3_, 0)) idx = np.indices((all_zero.shape[0],)) suspect_line_nums = np.repeat(idx[0], all_zero[:, 0]) if suspect_line_nums.any(): logger.info('Suspicious scan lines: %s', str(suspect_line_nums)) # depends on [control=['if'], data=[]] if calib_type == 2: mask |= rad <= 0.0 return np.where(mask, np.nan, rad) # depends on [control=['if'], data=[]] # Central wavenumber: cwnum = header['radtempcnv'][0, irchn, 0] if irchn == 0: cwnum = cwnum / 100.0 # depends on [control=['if'], data=[]] else: cwnum = cwnum / 1000.0 bandcor_2 = header['radtempcnv'][0, irchn, 1] / 100000.0 bandcor_3 = header['radtempcnv'][0, irchn, 2] / 1000000.0 ir_const_1 = 1.1910659e-05 ir_const_2 = 1.438833 t_planck = ir_const_2 * cwnum / np.log(1 + ir_const_1 * cwnum * cwnum * cwnum / rad) # Band corrections applied to t_planck to get correct # brightness temperature for channel: if bandcor_2 < 0: # Post AAPP-v4 tb_ = bandcor_2 + bandcor_3 * t_planck # depends on [control=['if'], data=['bandcor_2']] else: # AAPP 1 to 4 tb_ = (t_planck - bandcor_2) / bandcor_3 # Mask unnaturally low values # mask |= tb_ < 0.1 return np.where(mask, np.nan, tb_)
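A minimal numeric sketch of the count-to-radiance-to-brightness-temperature chain implemented above. Every number here is invented for illustration (not real AVHRR calibration data); only the two Planck constants match the function.

import numpy as np

count = np.array([500.0, 600.0])           # raw counts (made up)
k1, k2, k3 = 1.0e-5, -0.2, 160.0           # quadratic count-to-radiance fit (made up)
rad = k1 * count * count + k2 * count + k3

cwnum = 925.0                               # central wavenumber in cm^-1 (made up)
ir_const_1 = 1.1910659e-5
ir_const_2 = 1.438833
t_planck = (ir_const_2 * cwnum) / np.log(1 + ir_const_1 * cwnum ** 3 / rad)
print(t_planck)                             # approx. [265.0 247.3] K before band correction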
def determine_encoding(path, default=None):
    """Determines the encoding of a file based on byte order marks.

    Arguments:
        path (str): The path to the file.
        default (str, optional): The encoding to return if the
            byte-order-mark lookup does not return an answer.

    Returns:
        str: The encoding of the file.

    """
    # Check UTF-32 before UTF-16: the UTF-16-LE BOM (FF FE) is a prefix of
    # the UTF-32-LE BOM (FF FE 00 00), so the longer mark must be tried first.
    byte_order_marks = (
        ('utf-8-sig', (codecs.BOM_UTF8, )),
        ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
        ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
    )

    try:
        with open(path, 'rb') as infile:
            raw = infile.read(4)
    except IOError:
        return default

    for encoding, boms in byte_order_marks:
        if any(raw.startswith(bom) for bom in boms):
            return encoding
    return default
def function[determine_encoding, parameter[path, default]]: constant[Determines the encoding of a file based on byte order marks. Arguments: path (str): The path to the file. default (str, optional): The encoding to return if the byte-order-mark lookup does not return an answer. Returns: str: The encoding of the file. ] variable[byte_order_marks] assign[=] tuple[[<ast.Tuple object at 0x7da1b12947f0>, <ast.Tuple object at 0x7da1b1295e10>, <ast.Tuple object at 0x7da1b1295330>]] <ast.Try object at 0x7da1b1296920> for taget[tuple[[<ast.Name object at 0x7da1b1295240>, <ast.Name object at 0x7da1b1297dc0>]]] in starred[name[byte_order_marks]] begin[:] if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b12963b0>]] begin[:] return[name[encoding]] return[name[default]]
keyword[def] identifier[determine_encoding] ( identifier[path] , identifier[default] = keyword[None] ): literal[string] identifier[byte_order_marks] =( ( literal[string] ,( identifier[codecs] . identifier[BOM_UTF8] ,)), ( literal[string] ,( identifier[codecs] . identifier[BOM_UTF16_LE] , identifier[codecs] . identifier[BOM_UTF16_BE] )), ( literal[string] ,( identifier[codecs] . identifier[BOM_UTF32_LE] , identifier[codecs] . identifier[BOM_UTF32_BE] )), ) keyword[try] : keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[infile] : identifier[raw] = identifier[infile] . identifier[read] ( literal[int] ) keyword[except] identifier[IOError] : keyword[return] identifier[default] keyword[for] identifier[encoding] , identifier[boms] keyword[in] identifier[byte_order_marks] : keyword[if] identifier[any] ( identifier[raw] . identifier[startswith] ( identifier[bom] ) keyword[for] identifier[bom] keyword[in] identifier[boms] ): keyword[return] identifier[encoding] keyword[return] identifier[default]
def determine_encoding(path, default=None): """Determines the encoding of a file based on byte order marks. Arguments: path (str): The path to the file. default (str, optional): The encoding to return if the byte-order-mark lookup does not return an answer. Returns: str: The encoding of the file. """ byte_order_marks = (('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE))) try: with open(path, 'rb') as infile: raw = infile.read(4) # depends on [control=['with'], data=['infile']] # depends on [control=['try'], data=[]] except IOError: return default # depends on [control=['except'], data=[]] for (encoding, boms) in byte_order_marks: if any((raw.startswith(bom) for bom in boms)): return encoding # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return default
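A quick round trip for the BOM lookup; the temp-file name is arbitrary.

import codecs
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "bom_probe.txt")
with open(path, "wb") as f:
    f.write(codecs.BOM_UTF16_LE + "hello".encode("utf-16-le"))

print(determine_encoding(path, default="utf-8"))  # -> 'utf-16'
os.remove(path)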
def convert2geojson(jsonfile, src_srs, dst_srs, src_file): """convert shapefile to geojson file""" if os.path.exists(jsonfile): os.remove(jsonfile) if sysstr == 'Windows': exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix else: exepath = FileClass.get_executable_fullpath('ogr2ogr') # os.system(s) s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % ( exepath, src_srs, dst_srs, jsonfile, src_file) UtilClass.run_command(s)
def function[convert2geojson, parameter[jsonfile, src_srs, dst_srs, src_file]]: constant[convert shapefile to geojson file] if call[name[os].path.exists, parameter[name[jsonfile]]] begin[:] call[name[os].remove, parameter[name[jsonfile]]] if compare[name[sysstr] equal[==] constant[Windows]] begin[:] variable[exepath] assign[=] binary_operation[constant["%s/Lib/site-packages/osgeo/ogr2ogr"] <ast.Mod object at 0x7da2590d6920> name[sys].exec_prefix] variable[s] assign[=] binary_operation[constant[%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b23b2f20>, <ast.Name object at 0x7da1b23b1b70>, <ast.Name object at 0x7da1b23b2d70>, <ast.Name object at 0x7da1b23b0550>, <ast.Name object at 0x7da1b23b1480>]]] call[name[UtilClass].run_command, parameter[name[s]]]
keyword[def] identifier[convert2geojson] ( identifier[jsonfile] , identifier[src_srs] , identifier[dst_srs] , identifier[src_file] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[jsonfile] ): identifier[os] . identifier[remove] ( identifier[jsonfile] ) keyword[if] identifier[sysstr] == literal[string] : identifier[exepath] = literal[string] % identifier[sys] . identifier[exec_prefix] keyword[else] : identifier[exepath] = identifier[FileClass] . identifier[get_executable_fullpath] ( literal[string] ) identifier[s] = literal[string] %( identifier[exepath] , identifier[src_srs] , identifier[dst_srs] , identifier[jsonfile] , identifier[src_file] ) identifier[UtilClass] . identifier[run_command] ( identifier[s] )
def convert2geojson(jsonfile, src_srs, dst_srs, src_file): """convert shapefile to geojson file""" if os.path.exists(jsonfile): os.remove(jsonfile) # depends on [control=['if'], data=[]] if sysstr == 'Windows': exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix # depends on [control=['if'], data=[]] else: exepath = FileClass.get_executable_fullpath('ogr2ogr') # os.system(s) s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % (exepath, src_srs, dst_srs, jsonfile, src_file) UtilClass.run_command(s)
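For reference, the shell command the non-Windows branch assembles. The file paths and the source SRS string below are placeholders; note that ogr2ogr takes the destination dataset before the source, which is why jsonfile precedes src_file.

exepath = "ogr2ogr"                              # non-Windows branch
src_srs = "+proj=utm +zone=48 +ellps=WGS84"      # made-up source SRS
dst_srs = "EPSG:4326"
s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % (
    exepath, src_srs, dst_srs, "out.geojson", "in.shp")
print(s)
# ogr2ogr -f GeoJSON -s_srs "+proj=utm +zone=48 +ellps=WGS84" -t_srs EPSG:4326 out.geojson in.shp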
def get_vnetwork_portgroups_output_vnetwork_pgs_datacenter(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups") config = get_vnetwork_portgroups output = ET.SubElement(get_vnetwork_portgroups, "output") vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs") datacenter = ET.SubElement(vnetwork_pgs, "datacenter") datacenter.text = kwargs.pop('datacenter') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[get_vnetwork_portgroups_output_vnetwork_pgs_datacenter, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[get_vnetwork_portgroups] assign[=] call[name[ET].Element, parameter[constant[get_vnetwork_portgroups]]] variable[config] assign[=] name[get_vnetwork_portgroups] variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_vnetwork_portgroups], constant[output]]] variable[vnetwork_pgs] assign[=] call[name[ET].SubElement, parameter[name[output], constant[vnetwork-pgs]]] variable[datacenter] assign[=] call[name[ET].SubElement, parameter[name[vnetwork_pgs], constant[datacenter]]] name[datacenter].text assign[=] call[name[kwargs].pop, parameter[constant[datacenter]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[get_vnetwork_portgroups_output_vnetwork_pgs_datacenter] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[get_vnetwork_portgroups] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[get_vnetwork_portgroups] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_vnetwork_portgroups] , literal[string] ) identifier[vnetwork_pgs] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[datacenter] = identifier[ET] . identifier[SubElement] ( identifier[vnetwork_pgs] , literal[string] ) identifier[datacenter] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def get_vnetwork_portgroups_output_vnetwork_pgs_datacenter(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') get_vnetwork_portgroups = ET.Element('get_vnetwork_portgroups') config = get_vnetwork_portgroups output = ET.SubElement(get_vnetwork_portgroups, 'output') vnetwork_pgs = ET.SubElement(output, 'vnetwork-pgs') datacenter = ET.SubElement(vnetwork_pgs, 'datacenter') datacenter.text = kwargs.pop('datacenter') callback = kwargs.pop('callback', self._callback) return callback(config)
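The element tree this builder hands to the callback serialises to the following; the datacenter value is invented.

import xml.etree.ElementTree as ET

root = ET.Element("get_vnetwork_portgroups")
output = ET.SubElement(root, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
ET.SubElement(vnetwork_pgs, "datacenter").text = "dc1"
print(ET.tostring(root).decode())
# <get_vnetwork_portgroups><output><vnetwork-pgs><datacenter>dc1</datacenter></vnetwork-pgs></output></get_vnetwork_portgroups>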
def box_score(game_id): """Gets the box score information for the game with matching id.""" # get data data = mlbgame.data.get_box_score(game_id) # parse data parsed = etree.parse(data) root = parsed.getroot() linescore = root.find('linescore') result = dict() result['game_id'] = game_id # loop through innings and add them to output for x in linescore: inning = x.attrib['inning'] home = value_to_int(x.attrib, 'home') away = value_to_int(x.attrib, 'away') result[int(inning)] = {'home': home, 'away': away} return result
def function[box_score, parameter[game_id]]: constant[Gets the box score information for the game with matching id.] variable[data] assign[=] call[name[mlbgame].data.get_box_score, parameter[name[game_id]]] variable[parsed] assign[=] call[name[etree].parse, parameter[name[data]]] variable[root] assign[=] call[name[parsed].getroot, parameter[]] variable[linescore] assign[=] call[name[root].find, parameter[constant[linescore]]] variable[result] assign[=] call[name[dict], parameter[]] call[name[result]][constant[game_id]] assign[=] name[game_id] for taget[name[x]] in starred[name[linescore]] begin[:] variable[inning] assign[=] call[name[x].attrib][constant[inning]] variable[home] assign[=] call[name[value_to_int], parameter[name[x].attrib, constant[home]]] variable[away] assign[=] call[name[value_to_int], parameter[name[x].attrib, constant[away]]] call[name[result]][call[name[int], parameter[name[inning]]]] assign[=] dictionary[[<ast.Constant object at 0x7da18f723eb0>, <ast.Constant object at 0x7da18f720370>], [<ast.Name object at 0x7da18f723850>, <ast.Name object at 0x7da18f722320>]] return[name[result]]
keyword[def] identifier[box_score] ( identifier[game_id] ): literal[string] identifier[data] = identifier[mlbgame] . identifier[data] . identifier[get_box_score] ( identifier[game_id] ) identifier[parsed] = identifier[etree] . identifier[parse] ( identifier[data] ) identifier[root] = identifier[parsed] . identifier[getroot] () identifier[linescore] = identifier[root] . identifier[find] ( literal[string] ) identifier[result] = identifier[dict] () identifier[result] [ literal[string] ]= identifier[game_id] keyword[for] identifier[x] keyword[in] identifier[linescore] : identifier[inning] = identifier[x] . identifier[attrib] [ literal[string] ] identifier[home] = identifier[value_to_int] ( identifier[x] . identifier[attrib] , literal[string] ) identifier[away] = identifier[value_to_int] ( identifier[x] . identifier[attrib] , literal[string] ) identifier[result] [ identifier[int] ( identifier[inning] )]={ literal[string] : identifier[home] , literal[string] : identifier[away] } keyword[return] identifier[result]
def box_score(game_id): """Gets the box score information for the game with matching id.""" # get data data = mlbgame.data.get_box_score(game_id) # parse data parsed = etree.parse(data) root = parsed.getroot() linescore = root.find('linescore') result = dict() result['game_id'] = game_id # loop through innings and add them to output for x in linescore: inning = x.attrib['inning'] home = value_to_int(x.attrib, 'home') away = value_to_int(x.attrib, 'away') result[int(inning)] = {'home': home, 'away': away} # depends on [control=['for'], data=['x']] return result
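The same linescore walk against a tiny in-memory document. The element and attribute names below are invented for the sketch, and value_to_int is replaced by a stand-in with the behaviour the call sites imply.

import xml.etree.ElementTree as ET

def value_to_int(attrib, key):
    # stand-in for mlbgame's helper: int-convert an attribute if present
    try:
        return int(attrib[key])
    except (KeyError, ValueError):
        return None

root = ET.fromstring(
    '<game><linescore>'
    '<inning_line_score inning="1" home="0" away="2"/>'
    '<inning_line_score inning="2" home="1" away="0"/>'
    '</linescore></game>')
result = {}
for x in root.find('linescore'):
    result[int(x.attrib['inning'])] = {'home': value_to_int(x.attrib, 'home'),
                                       'away': value_to_int(x.attrib, 'away')}
print(result)  # {1: {'home': 0, 'away': 2}, 2: {'home': 1, 'away': 0}}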
def _GetInstanceConfig(self): """Get the instance configuration specified in metadata. Returns: string, the instance configuration data. """ try: instance_data = self.metadata_dict['instance']['attributes'] except KeyError: instance_data = {} self.logger.warning('Instance attributes were not found.') try: project_data = self.metadata_dict['project']['attributes'] except KeyError: project_data = {} self.logger.warning('Project attributes were not found.') return (instance_data.get('google-instance-configs') or project_data.get('google-instance-configs'))
def function[_GetInstanceConfig, parameter[self]]: constant[Get the instance configuration specified in metadata. Returns: string, the instance configuration data. ] <ast.Try object at 0x7da2044c1ae0> <ast.Try object at 0x7da2044c0940> return[<ast.BoolOp object at 0x7da2044c1600>]
keyword[def] identifier[_GetInstanceConfig] ( identifier[self] ): literal[string] keyword[try] : identifier[instance_data] = identifier[self] . identifier[metadata_dict] [ literal[string] ][ literal[string] ] keyword[except] identifier[KeyError] : identifier[instance_data] ={} identifier[self] . identifier[logger] . identifier[warning] ( literal[string] ) keyword[try] : identifier[project_data] = identifier[self] . identifier[metadata_dict] [ literal[string] ][ literal[string] ] keyword[except] identifier[KeyError] : identifier[project_data] ={} identifier[self] . identifier[logger] . identifier[warning] ( literal[string] ) keyword[return] ( identifier[instance_data] . identifier[get] ( literal[string] ) keyword[or] identifier[project_data] . identifier[get] ( literal[string] ))
def _GetInstanceConfig(self): """Get the instance configuration specified in metadata. Returns: string, the instance configuration data. """ try: instance_data = self.metadata_dict['instance']['attributes'] # depends on [control=['try'], data=[]] except KeyError: instance_data = {} self.logger.warning('Instance attributes were not found.') # depends on [control=['except'], data=[]] try: project_data = self.metadata_dict['project']['attributes'] # depends on [control=['try'], data=[]] except KeyError: project_data = {} self.logger.warning('Project attributes were not found.') # depends on [control=['except'], data=[]] return instance_data.get('google-instance-configs') or project_data.get('google-instance-configs')
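The final `or` gives instance-level configuration precedence over project-level configuration; a quick check with a fabricated metadata dict.

metadata_dict = {
    'instance': {'attributes': {'google-instance-configs': 'instance-level'}},
    'project': {'attributes': {'google-instance-configs': 'project-level'}},
}
instance_data = metadata_dict['instance']['attributes']
project_data = metadata_dict['project']['attributes']
print(instance_data.get('google-instance-configs')
      or project_data.get('google-instance-configs'))  # -> instance-level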
def search_star(star): ''' It is also possible to query the stars by label, here is an example of querying for the star labeled as Sun. http://star-api.herokuapp.com/api/v1/stars/Sun ''' base_url = "http://star-api.herokuapp.com/api/v1/stars/" if not isinstance(star, str): raise ValueError("The star arg you provided is not the type of str") else: base_url += star return dispatch_http_get(base_url)
def function[search_star, parameter[star]]: constant[ It is also possible to query the stars by label, here is an example of querying for the star labeled as Sun. http://star-api.herokuapp.com/api/v1/stars/Sun ] variable[base_url] assign[=] constant[http://star-api.herokuapp.com/api/v1/stars/] if <ast.UnaryOp object at 0x7da1b25b3be0> begin[:] <ast.Raise object at 0x7da1b25b3c10> return[call[name[dispatch_http_get], parameter[name[base_url]]]]
keyword[def] identifier[search_star] ( identifier[star] ): literal[string] identifier[base_url] = literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[star] , identifier[str] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : identifier[base_url] += identifier[star] keyword[return] identifier[dispatch_http_get] ( identifier[base_url] )
def search_star(star): """ It is also possible to query the stars by label, here is an example of querying for the star labeled as Sun. http://star-api.herokuapp.com/api/v1/stars/Sun """ base_url = 'http://star-api.herokuapp.com/api/v1/stars/' if not isinstance(star, str): raise ValueError('The star arg you provided is not the type of str') # depends on [control=['if'], data=[]] else: base_url += star return dispatch_http_get(base_url)
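Usage is a plain label append onto the base URL, and the type check fires before any request is made (dispatch_http_get does the actual HTTP work).

print("http://star-api.herokuapp.com/api/v1/stars/" + "Sun")
# http://star-api.herokuapp.com/api/v1/stars/Sun

try:
    search_star(42)
except ValueError as err:
    print(err)  # The star arg you provided is not the type of str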
def Schwartzentruber(self, T, full=True, quick=True):
        r'''Method to calculate `a_alpha` and its first and second
        derivatives according to Schwartzentruber et al. (1990) [1]_.
        Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`.
        See `GCEOS.a_alpha_and_derivatives` for more documentation. Four
        coefficients needed.

        .. math::
            \alpha = \left(c_{4} \left(- \sqrt{\frac{T}{Tc}} + 1\right)
            - \left(- \sqrt{\frac{T}{Tc}} + 1\right) \left(\frac{T^{2} c_{3}}
            {Tc^{2}} + \frac{T c_{2}}{Tc} + c_{1}\right) + 1\right)^{2}

        References
        ----------
        .. [1] J. Schwartzentruber, H. Renon, and S. Watanasiri, "K-values for
           Non-Ideal Systems:An Easier Way," Chem. Eng., March 1990, 118-124.
        '''
        # all four coefficients must be unpacked; c4 is used in every branch below
        c1, c2, c3, c4 = self.alpha_function_coeffs
        T, Tc, a = self.T, self.Tc, self.a
        a_alpha = a*((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)**2)
        if not full:
            return a_alpha
        else:
            da_alpha_dT = a*((c4*(-sqrt(T/Tc) + 1) - (-sqrt(T/Tc) + 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)*(-2*(-sqrt(T/Tc) + 1)*(2*T*c3/Tc**2 + c2/Tc) - c4*sqrt(T/Tc)/T + sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T))
            d2a_alpha_dT2 = a*(((-c4*(sqrt(T/Tc) - 1) + (sqrt(T/Tc) - 1)*(T**2*c3/Tc**2 + T*c2/Tc + c1) + 1)*(8*c3*(sqrt(T/Tc) - 1)/Tc**2 + 4*sqrt(T/Tc)*(2*T*c3/Tc + c2)/(T*Tc) + c4*sqrt(T/Tc)/T**2 - sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T**2) + (2*(sqrt(T/Tc) - 1)*(2*T*c3/Tc + c2)/Tc - c4*sqrt(T/Tc)/T + sqrt(T/Tc)*(T**2*c3/Tc**2 + T*c2/Tc + c1)/T)**2)/2)
            return a_alpha, da_alpha_dT, d2a_alpha_dT2
def function[Schwartzentruber, parameter[self, T, full, quick]]: constant[Method to calculate `a_alpha` and its first and second derivatives according to Schwartzentruber et al. (1990) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Three coefficients needed. .. math:: \alpha = \left(c_{4} \left(- \sqrt{\frac{T}{Tc}} + 1\right) - \left(- \sqrt{\frac{T}{Tc}} + 1\right) \left(\frac{T^{2} c_{3}} {Tc^{2}} + \frac{T c_{2}}{Tc} + c_{1}\right) + 1\right)^{2} References ---------- .. [1] J. Schwartzentruber, H. Renon, and S. Watanasiri, "K-values for Non-Ideal Systems:An Easier Way," Chem. Eng., March 1990, 118-124. ] <ast.Tuple object at 0x7da2049616c0> assign[=] name[self].alpha_function_coeffs <ast.Tuple object at 0x7da2049605e0> assign[=] tuple[[<ast.Attribute object at 0x7da2049632e0>, <ast.Attribute object at 0x7da204962e00>, <ast.Attribute object at 0x7da204962830>]] variable[a_alpha] assign[=] binary_operation[name[a] * binary_operation[binary_operation[binary_operation[binary_operation[name[c4] * binary_operation[<ast.UnaryOp object at 0x7da204962fb0> + constant[1]]] - binary_operation[binary_operation[<ast.UnaryOp object at 0x7da204961de0> + constant[1]] * binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[T] ** constant[2]] * name[c3]] / binary_operation[name[Tc] ** constant[2]]] + binary_operation[binary_operation[name[T] * name[c2]] / name[Tc]]] + name[c1]]]] + constant[1]] ** constant[2]]] if <ast.UnaryOp object at 0x7da204962b60> begin[:] return[name[a_alpha]]
keyword[def] identifier[Schwartzentruber] ( identifier[self] , identifier[T] , identifier[full] = keyword[True] , identifier[quick] = keyword[True] ): literal[string] identifier[c1] , identifier[c2] , identifier[c3] = identifier[self] . identifier[alpha_function_coeffs] identifier[T] , identifier[Tc] , identifier[a] = identifier[self] . identifier[T] , identifier[self] . identifier[Tc] , identifier[self] . identifier[a] identifier[a_alpha] = identifier[a] *(( identifier[c4] *(- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )-(- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )*( identifier[T] ** literal[int] * identifier[c3] / identifier[Tc] ** literal[int] + identifier[T] * identifier[c2] / identifier[Tc] + identifier[c1] )+ literal[int] )** literal[int] ) keyword[if] keyword[not] identifier[full] : keyword[return] identifier[a_alpha] keyword[else] : identifier[da_alpha_dT] = identifier[a] *(( identifier[c4] *(- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )-(- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )*( identifier[T] ** literal[int] * identifier[c3] / identifier[Tc] ** literal[int] + identifier[T] * identifier[c2] / identifier[Tc] + identifier[c1] )+ literal[int] )*(- literal[int] *(- identifier[sqrt] ( identifier[T] / identifier[Tc] )+ literal[int] )*( literal[int] * identifier[T] * identifier[c3] / identifier[Tc] ** literal[int] + identifier[c2] / identifier[Tc] )- identifier[c4] * identifier[sqrt] ( identifier[T] / identifier[Tc] )/ identifier[T] + identifier[sqrt] ( identifier[T] / identifier[Tc] )*( identifier[T] ** literal[int] * identifier[c3] / identifier[Tc] ** literal[int] + identifier[T] * identifier[c2] / identifier[Tc] + identifier[c1] )/ identifier[T] )) identifier[d2a_alpha_dT2] = identifier[a] *(((- identifier[c4] *( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )+( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( identifier[T] ** literal[int] * identifier[c3] / identifier[Tc] ** literal[int] + identifier[T] * identifier[c2] / identifier[Tc] + identifier[c1] )+ literal[int] )*( literal[int] * identifier[c3] *( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )/ identifier[Tc] ** literal[int] + literal[int] * identifier[sqrt] ( identifier[T] / identifier[Tc] )*( literal[int] * identifier[T] * identifier[c3] / identifier[Tc] + identifier[c2] )/( identifier[T] * identifier[Tc] )+ identifier[c4] * identifier[sqrt] ( identifier[T] / identifier[Tc] )/ identifier[T] ** literal[int] - identifier[sqrt] ( identifier[T] / identifier[Tc] )*( identifier[T] ** literal[int] * identifier[c3] / identifier[Tc] ** literal[int] + identifier[T] * identifier[c2] / identifier[Tc] + identifier[c1] )/ identifier[T] ** literal[int] )+( literal[int] *( identifier[sqrt] ( identifier[T] / identifier[Tc] )- literal[int] )*( literal[int] * identifier[T] * identifier[c3] / identifier[Tc] + identifier[c2] )/ identifier[Tc] - identifier[c4] * identifier[sqrt] ( identifier[T] / identifier[Tc] )/ identifier[T] + identifier[sqrt] ( identifier[T] / identifier[Tc] )*( identifier[T] ** literal[int] * identifier[c3] / identifier[Tc] ** literal[int] + identifier[T] * identifier[c2] / identifier[Tc] + identifier[c1] )/ identifier[T] )** literal[int] )/ literal[int] ) keyword[return] identifier[a_alpha] , identifier[da_alpha_dT] , identifier[d2a_alpha_dT2]
def Schwartzentruber(self, T, full=True, quick=True): """Method to calculate `a_alpha` and its first and second derivatives according to Schwartzentruber et al. (1990) [1]_. Returns `a_alpha`, `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more documentation. Three coefficients needed. .. math:: \\alpha = \\left(c_{4} \\left(- \\sqrt{\\frac{T}{Tc}} + 1\\right) - \\left(- \\sqrt{\\frac{T}{Tc}} + 1\\right) \\left(\\frac{T^{2} c_{3}} {Tc^{2}} + \\frac{T c_{2}}{Tc} + c_{1}\\right) + 1\\right)^{2} References ---------- .. [1] J. Schwartzentruber, H. Renon, and S. Watanasiri, "K-values for Non-Ideal Systems:An Easier Way," Chem. Eng., March 1990, 118-124. """ (c1, c2, c3) = self.alpha_function_coeffs (T, Tc, a) = (self.T, self.Tc, self.a) a_alpha = a * (c4 * (-sqrt(T / Tc) + 1) - (-sqrt(T / Tc) + 1) * (T ** 2 * c3 / Tc ** 2 + T * c2 / Tc + c1) + 1) ** 2 if not full: return a_alpha # depends on [control=['if'], data=[]] else: da_alpha_dT = a * ((c4 * (-sqrt(T / Tc) + 1) - (-sqrt(T / Tc) + 1) * (T ** 2 * c3 / Tc ** 2 + T * c2 / Tc + c1) + 1) * (-2 * (-sqrt(T / Tc) + 1) * (2 * T * c3 / Tc ** 2 + c2 / Tc) - c4 * sqrt(T / Tc) / T + sqrt(T / Tc) * (T ** 2 * c3 / Tc ** 2 + T * c2 / Tc + c1) / T)) d2a_alpha_dT2 = a * (((-c4 * (sqrt(T / Tc) - 1) + (sqrt(T / Tc) - 1) * (T ** 2 * c3 / Tc ** 2 + T * c2 / Tc + c1) + 1) * (8 * c3 * (sqrt(T / Tc) - 1) / Tc ** 2 + 4 * sqrt(T / Tc) * (2 * T * c3 / Tc + c2) / (T * Tc) + c4 * sqrt(T / Tc) / T ** 2 - sqrt(T / Tc) * (T ** 2 * c3 / Tc ** 2 + T * c2 / Tc + c1) / T ** 2) + (2 * (sqrt(T / Tc) - 1) * (2 * T * c3 / Tc + c2) / Tc - c4 * sqrt(T / Tc) / T + sqrt(T / Tc) * (T ** 2 * c3 / Tc ** 2 + T * c2 / Tc + c1) / T) ** 2) / 2) return (a_alpha, da_alpha_dT, d2a_alpha_dT2)
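A sanity check on the alpha expression: at T = Tc both (1 - sqrt(T/Tc)) factors vanish, so alpha = 1 and a*alpha collapses to a. The coefficients and EOS parameters below are invented.

from math import sqrt

a, Tc = 2.5, 500.0                      # made-up EOS parameters
c1, c2, c3, c4 = 0.5, -0.2, 0.05, 0.8   # made-up alpha coefficients
T = Tc
alpha = (c4 * (-sqrt(T / Tc) + 1)
         - (-sqrt(T / Tc) + 1) * (T ** 2 * c3 / Tc ** 2 + T * c2 / Tc + c1)
         + 1) ** 2
print(alpha, a * alpha)  # 1.0 2.5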
def updatePhysicalInterface(self, physicalInterfaceId, name, schemaId, description=None): """ Update a physical interface. Parameters: - physicalInterfaceId (string) - name (string) - schemaId (string) - description (string, optional) Throws APIException on failure. """ req = ApiClient.onePhysicalInterfacesUrl % (self.host, "/draft", physicalInterfaceId) body = {"name" : name, "schemaId" : schemaId} if description: body["description"] = description resp = requests.put(req, auth=self.credentials, headers={"Content-Type":"application/json"}, data=json.dumps(body), verify=self.verify) if resp.status_code == 200: self.logger.debug("physical interface updated") else: raise ibmiotf.APIException(resp.status_code, "HTTP error updating physical interface", resp) return resp.json()
def function[updatePhysicalInterface, parameter[self, physicalInterfaceId, name, schemaId, description]]: constant[ Update a physical interface. Parameters: - physicalInterfaceId (string) - name (string) - schemaId (string) - description (string, optional) Throws APIException on failure. ] variable[req] assign[=] binary_operation[name[ApiClient].onePhysicalInterfacesUrl <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b26ac430>, <ast.Constant object at 0x7da1b26af9d0>, <ast.Name object at 0x7da1b26ac9d0>]]] variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ae380>, <ast.Constant object at 0x7da1b26ae2f0>], [<ast.Name object at 0x7da1b26af7f0>, <ast.Name object at 0x7da1b26add80>]] if name[description] begin[:] call[name[body]][constant[description]] assign[=] name[description] variable[resp] assign[=] call[name[requests].put, parameter[name[req]]] if compare[name[resp].status_code equal[==] constant[200]] begin[:] call[name[self].logger.debug, parameter[constant[physical interface updated]]] return[call[name[resp].json, parameter[]]]
keyword[def] identifier[updatePhysicalInterface] ( identifier[self] , identifier[physicalInterfaceId] , identifier[name] , identifier[schemaId] , identifier[description] = keyword[None] ): literal[string] identifier[req] = identifier[ApiClient] . identifier[onePhysicalInterfacesUrl] %( identifier[self] . identifier[host] , literal[string] , identifier[physicalInterfaceId] ) identifier[body] ={ literal[string] : identifier[name] , literal[string] : identifier[schemaId] } keyword[if] identifier[description] : identifier[body] [ literal[string] ]= identifier[description] identifier[resp] = identifier[requests] . identifier[put] ( identifier[req] , identifier[auth] = identifier[self] . identifier[credentials] , identifier[headers] ={ literal[string] : literal[string] }, identifier[data] = identifier[json] . identifier[dumps] ( identifier[body] ), identifier[verify] = identifier[self] . identifier[verify] ) keyword[if] identifier[resp] . identifier[status_code] == literal[int] : identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) keyword[else] : keyword[raise] identifier[ibmiotf] . identifier[APIException] ( identifier[resp] . identifier[status_code] , literal[string] , identifier[resp] ) keyword[return] identifier[resp] . identifier[json] ()
def updatePhysicalInterface(self, physicalInterfaceId, name, schemaId, description=None): """ Update a physical interface. Parameters: - physicalInterfaceId (string) - name (string) - schemaId (string) - description (string, optional) Throws APIException on failure. """ req = ApiClient.onePhysicalInterfacesUrl % (self.host, '/draft', physicalInterfaceId) body = {'name': name, 'schemaId': schemaId} if description: body['description'] = description # depends on [control=['if'], data=[]] resp = requests.put(req, auth=self.credentials, headers={'Content-Type': 'application/json'}, data=json.dumps(body), verify=self.verify) if resp.status_code == 200: self.logger.debug('physical interface updated') # depends on [control=['if'], data=[]] else: raise ibmiotf.APIException(resp.status_code, 'HTTP error updating physical interface', resp) return resp.json()
def rm_rf(path): """ Recursively (if needed) delete path. """ if os.path.isdir(path) and not os.path.islink(path): shutil.rmtree(path) elif os.path.lexists(path): os.remove(path)
def function[rm_rf, parameter[path]]: constant[ Recursively (if needed) delete path. ] if <ast.BoolOp object at 0x7da1b26a48b0> begin[:] call[name[shutil].rmtree, parameter[name[path]]]
keyword[def] identifier[rm_rf] ( identifier[path] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[islink] ( identifier[path] ): identifier[shutil] . identifier[rmtree] ( identifier[path] ) keyword[elif] identifier[os] . identifier[path] . identifier[lexists] ( identifier[path] ): identifier[os] . identifier[remove] ( identifier[path] )
def rm_rf(path): """ Recursively (if needed) delete path. """ if os.path.isdir(path) and (not os.path.islink(path)): shutil.rmtree(path) # depends on [control=['if'], data=[]] elif os.path.lexists(path): os.remove(path) # depends on [control=['if'], data=[]]
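The islink guard is what keeps a symlinked directory from being followed: shutil.rmtree refuses symlinks, so links fall through to os.remove and only the link itself goes. A POSIX-only demonstration with throwaway temp paths:

import os
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "real_dir")
link = os.path.join(base, "link_to_dir")
os.mkdir(target)
os.symlink(target, link)

rm_rf(link)                   # deletes the link only
print(os.path.isdir(target))  # True: the target directory survives
rm_rf(base)                   # recursive cleanup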
def should_collect(self, value): """Decide whether a given value should be collected.""" return ( # decorated with @transition isinstance(value, TransitionWrapper) # Relates to a compatible transition and value.trname in self.workflow.transitions # Either not bound to a state field or bound to the current one and (not value.field or value.field == self.state_field))
def function[should_collect, parameter[self, value]]: constant[Decide whether a given value should be collected.] return[<ast.BoolOp object at 0x7da18ede4730>]
keyword[def] identifier[should_collect] ( identifier[self] , identifier[value] ): literal[string] keyword[return] ( identifier[isinstance] ( identifier[value] , identifier[TransitionWrapper] ) keyword[and] identifier[value] . identifier[trname] keyword[in] identifier[self] . identifier[workflow] . identifier[transitions] keyword[and] ( keyword[not] identifier[value] . identifier[field] keyword[or] identifier[value] . identifier[field] == identifier[self] . identifier[state_field] ))
def should_collect(self, value): """Decide whether a given value should be collected.""" # decorated with @transition # Relates to a compatible transition # Either not bound to a state field or bound to the current one return isinstance(value, TransitionWrapper) and value.trname in self.workflow.transitions and (not value.field or value.field == self.state_field)
def asDict( self ): """ Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} """ if PY_3: item_fn = self.items else: item_fn = self.iteritems def toItem(obj): if isinstance(obj, ParseResults): if obj.haskeys(): return obj.asDict() else: return [toItem(v) for v in obj] else: return obj return dict((k,toItem(v)) for k,v in item_fn())
def function[asDict, parameter[self]]: constant[ Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} ] if name[PY_3] begin[:] variable[item_fn] assign[=] name[self].items def function[toItem, parameter[obj]]: if call[name[isinstance], parameter[name[obj], name[ParseResults]]] begin[:] if call[name[obj].haskeys, parameter[]] begin[:] return[call[name[obj].asDict, parameter[]]] return[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da2054a5ab0>]]]
keyword[def] identifier[asDict] ( identifier[self] ): literal[string] keyword[if] identifier[PY_3] : identifier[item_fn] = identifier[self] . identifier[items] keyword[else] : identifier[item_fn] = identifier[self] . identifier[iteritems] keyword[def] identifier[toItem] ( identifier[obj] ): keyword[if] identifier[isinstance] ( identifier[obj] , identifier[ParseResults] ): keyword[if] identifier[obj] . identifier[haskeys] (): keyword[return] identifier[obj] . identifier[asDict] () keyword[else] : keyword[return] [ identifier[toItem] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[obj] ] keyword[else] : keyword[return] identifier[obj] keyword[return] identifier[dict] (( identifier[k] , identifier[toItem] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[item_fn] ())
def asDict(self): """ Returns the named parse results as a nested dictionary. Example:: integer = Word(nums) date_str = integer("year") + '/' + integer("month") + '/' + integer("day") result = date_str.parseString('12/31/1999') print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) result_dict = result.asDict() print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} # even though a ParseResults supports dict-like access, sometime you just need to have a dict import json print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} """ if PY_3: item_fn = self.items # depends on [control=['if'], data=[]] else: item_fn = self.iteritems def toItem(obj): if isinstance(obj, ParseResults): if obj.haskeys(): return obj.asDict() # depends on [control=['if'], data=[]] else: return [toItem(v) for v in obj] # depends on [control=['if'], data=[]] else: return obj return dict(((k, toItem(v)) for (k, v) in item_fn()))
def read(self, dataframe_name: str) -> pandas.DataFrame: """Evaluate and retrieve a Spark dataframe in the managed session. :param dataframe_name: The name of the Spark dataframe to read. """ code = serialise_dataframe_code(dataframe_name, self.kind) output = self._execute(code) output.raise_for_status() if output.text is None: raise RuntimeError("statement had no text output") return deserialise_dataframe(output.text)
def function[read, parameter[self, dataframe_name]]: constant[Evaluate and retrieve a Spark dataframe in the managed session. :param dataframe_name: The name of the Spark dataframe to read. ] variable[code] assign[=] call[name[serialise_dataframe_code], parameter[name[dataframe_name], name[self].kind]] variable[output] assign[=] call[name[self]._execute, parameter[name[code]]] call[name[output].raise_for_status, parameter[]] if compare[name[output].text is constant[None]] begin[:] <ast.Raise object at 0x7da1b0efe7a0> return[call[name[deserialise_dataframe], parameter[name[output].text]]]
keyword[def] identifier[read] ( identifier[self] , identifier[dataframe_name] : identifier[str] )-> identifier[pandas] . identifier[DataFrame] : literal[string] identifier[code] = identifier[serialise_dataframe_code] ( identifier[dataframe_name] , identifier[self] . identifier[kind] ) identifier[output] = identifier[self] . identifier[_execute] ( identifier[code] ) identifier[output] . identifier[raise_for_status] () keyword[if] identifier[output] . identifier[text] keyword[is] keyword[None] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[return] identifier[deserialise_dataframe] ( identifier[output] . identifier[text] )
def read(self, dataframe_name: str) -> pandas.DataFrame: """Evaluate and retrieve a Spark dataframe in the managed session. :param dataframe_name: The name of the Spark dataframe to read. """ code = serialise_dataframe_code(dataframe_name, self.kind) output = self._execute(code) output.raise_for_status() if output.text is None: raise RuntimeError('statement had no text output') # depends on [control=['if'], data=[]] return deserialise_dataframe(output.text)
def rl_force_redisplay() -> None: # pragma: no cover """ Causes readline to display the prompt and input text wherever the cursor is and start reading input from this location. This is the proper way to restore the input line after printing to the screen """ if not sys.stdout.isatty(): return if rl_type == RlType.GNU: readline_lib.rl_forced_update_display() # After manually updating the display, readline asks that rl_display_fixed be set to 1 for efficiency display_fixed = ctypes.c_int.in_dll(readline_lib, "rl_display_fixed") display_fixed.value = 1 elif rl_type == RlType.PYREADLINE: # Call _print_prompt() first to set the new location of the prompt readline.rl.mode._print_prompt() readline.rl.mode._update_line()
def function[rl_force_redisplay, parameter[]]: constant[ Causes readline to display the prompt and input text wherever the cursor is and start reading input from this location. This is the proper way to restore the input line after printing to the screen ] if <ast.UnaryOp object at 0x7da1b26ae470> begin[:] return[None] if compare[name[rl_type] equal[==] name[RlType].GNU] begin[:] call[name[readline_lib].rl_forced_update_display, parameter[]] variable[display_fixed] assign[=] call[name[ctypes].c_int.in_dll, parameter[name[readline_lib], constant[rl_display_fixed]]] name[display_fixed].value assign[=] constant[1]
keyword[def] identifier[rl_force_redisplay] ()-> keyword[None] : literal[string] keyword[if] keyword[not] identifier[sys] . identifier[stdout] . identifier[isatty] (): keyword[return] keyword[if] identifier[rl_type] == identifier[RlType] . identifier[GNU] : identifier[readline_lib] . identifier[rl_forced_update_display] () identifier[display_fixed] = identifier[ctypes] . identifier[c_int] . identifier[in_dll] ( identifier[readline_lib] , literal[string] ) identifier[display_fixed] . identifier[value] = literal[int] keyword[elif] identifier[rl_type] == identifier[RlType] . identifier[PYREADLINE] : identifier[readline] . identifier[rl] . identifier[mode] . identifier[_print_prompt] () identifier[readline] . identifier[rl] . identifier[mode] . identifier[_update_line] ()
def rl_force_redisplay() -> None: # pragma: no cover '\n Causes readline to display the prompt and input text wherever the cursor is and start\n reading input from this location. This is the proper way to restore the input line after\n printing to the screen\n ' if not sys.stdout.isatty(): return # depends on [control=['if'], data=[]] if rl_type == RlType.GNU: readline_lib.rl_forced_update_display() # After manually updating the display, readline asks that rl_display_fixed be set to 1 for efficiency display_fixed = ctypes.c_int.in_dll(readline_lib, 'rl_display_fixed') display_fixed.value = 1 # depends on [control=['if'], data=[]] elif rl_type == RlType.PYREADLINE: # Call _print_prompt() first to set the new location of the prompt readline.rl.mode._print_prompt() readline.rl.mode._update_line() # depends on [control=['if'], data=[]]
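The in_dll construct used for rl_display_fixed is the general ctypes idiom for binding a global variable exported by a shared library. A stand-alone, POSIX-only illustration against glibc's exported `timezone` long (glibc-specific; other libcs may not export it):

import ctypes
import ctypes.util
import time

time.tzset()  # populate the libc time globals (POSIX only)
libc = ctypes.CDLL(ctypes.util.find_library("c"))
tz = ctypes.c_long.in_dll(libc, "timezone")  # same pattern as rl_display_fixed
print(tz.value)                              # seconds west of UTC, per tzset()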
def get_phone_numbers(self): """ : returns: dict of type and phone number list :rtype: dict(str, list(str)) """ phone_dict = {} for child in self.vcard.getChildren(): if child.name == "TEL": # phone types type = helpers.list_to_string( self._get_types_for_vcard_object(child, "voice"), ", ") if type not in phone_dict: phone_dict[type] = [] # phone value # # vcard version 4.0 allows URI scheme "tel" in phone attribute value # Doc: https://tools.ietf.org/html/rfc6350#section-6.4.1 # example: TEL;VALUE=uri;PREF=1;TYPE="voice,home":tel:+1-555-555-5555;ext=5555 if child.value.lower().startswith("tel:"): # cut off the "tel:" uri prefix phone_dict[type].append(child.value[4:]) else: # free text field phone_dict[type].append(child.value) # sort phone number lists for number_list in phone_dict.values(): number_list.sort() return phone_dict
def function[get_phone_numbers, parameter[self]]: constant[ : returns: dict of type and phone number list :rtype: dict(str, list(str)) ] variable[phone_dict] assign[=] dictionary[[], []] for taget[name[child]] in starred[call[name[self].vcard.getChildren, parameter[]]] begin[:] if compare[name[child].name equal[==] constant[TEL]] begin[:] variable[type] assign[=] call[name[helpers].list_to_string, parameter[call[name[self]._get_types_for_vcard_object, parameter[name[child], constant[voice]]], constant[, ]]] if compare[name[type] <ast.NotIn object at 0x7da2590d7190> name[phone_dict]] begin[:] call[name[phone_dict]][name[type]] assign[=] list[[]] if call[call[name[child].value.lower, parameter[]].startswith, parameter[constant[tel:]]] begin[:] call[call[name[phone_dict]][name[type]].append, parameter[call[name[child].value][<ast.Slice object at 0x7da1b05be230>]]] for taget[name[number_list]] in starred[call[name[phone_dict].values, parameter[]]] begin[:] call[name[number_list].sort, parameter[]] return[name[phone_dict]]
keyword[def] identifier[get_phone_numbers] ( identifier[self] ): literal[string] identifier[phone_dict] ={} keyword[for] identifier[child] keyword[in] identifier[self] . identifier[vcard] . identifier[getChildren] (): keyword[if] identifier[child] . identifier[name] == literal[string] : identifier[type] = identifier[helpers] . identifier[list_to_string] ( identifier[self] . identifier[_get_types_for_vcard_object] ( identifier[child] , literal[string] ), literal[string] ) keyword[if] identifier[type] keyword[not] keyword[in] identifier[phone_dict] : identifier[phone_dict] [ identifier[type] ]=[] keyword[if] identifier[child] . identifier[value] . identifier[lower] (). identifier[startswith] ( literal[string] ): identifier[phone_dict] [ identifier[type] ]. identifier[append] ( identifier[child] . identifier[value] [ literal[int] :]) keyword[else] : identifier[phone_dict] [ identifier[type] ]. identifier[append] ( identifier[child] . identifier[value] ) keyword[for] identifier[number_list] keyword[in] identifier[phone_dict] . identifier[values] (): identifier[number_list] . identifier[sort] () keyword[return] identifier[phone_dict]
def get_phone_numbers(self): """ : returns: dict of type and phone number list :rtype: dict(str, list(str)) """ phone_dict = {} for child in self.vcard.getChildren(): if child.name == 'TEL': # phone types type = helpers.list_to_string(self._get_types_for_vcard_object(child, 'voice'), ', ') if type not in phone_dict: phone_dict[type] = [] # depends on [control=['if'], data=['type', 'phone_dict']] # phone value # # vcard version 4.0 allows URI scheme "tel" in phone attribute value # Doc: https://tools.ietf.org/html/rfc6350#section-6.4.1 # example: TEL;VALUE=uri;PREF=1;TYPE="voice,home":tel:+1-555-555-5555;ext=5555 if child.value.lower().startswith('tel:'): # cut off the "tel:" uri prefix phone_dict[type].append(child.value[4:]) # depends on [control=['if'], data=[]] else: # free text field phone_dict[type].append(child.value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # sort phone number lists for number_list in phone_dict.values(): number_list.sort() # depends on [control=['for'], data=['number_list']] return phone_dict
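The four-character slice strips exactly the RFC 3966 "tel:" scheme and keeps everything else verbatim, including any ;ext= parameter:

value = "tel:+1-555-555-5555;ext=5555"
if value.lower().startswith("tel:"):
    value = value[4:]
print(value)  # +1-555-555-5555;ext=5555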
def send_command(self, data, read_delay=None):
        """Write "data" to the port and return the response from it"""
        self._write(data)
        if read_delay:
            time.sleep(read_delay)
        return self._read()
def function[send_command, parameter[self, data, read_delay]]: constant[Write "data" to the port and return the response from it] call[name[self]._write, parameter[name[data]]] if name[read_delay] begin[:] call[name[time].sleep, parameter[name[read_delay]]] return[call[name[self]._read, parameter[]]]
keyword[def] identifier[send_command] ( identifier[self] , identifier[data] , identifier[read_delay] = keyword[None] ): literal[string] identifier[self] . identifier[_write] ( identifier[data] ) keyword[if] identifier[read_delay] : identifier[time] . identifier[sleep] ( identifier[read_delay] ) keyword[return] identifier[self] . identifier[_read] ()
def send_command(self, data, read_delay=None): """Write "data" to the port and return the response from it""" self._write(data) if read_delay: time.sleep(read_delay) # depends on [control=['if'], data=[]] return self._read()
def prepare_denovo_input_narrowpeak(inputfile, params, outdir): """Prepare a narrowPeak file for de novo motif prediction. All regions to same size; split in test and validation set; converted to FASTA. Parameters ---------- inputfile : str BED file with input regions. params : dict Dictionary with parameters. outdir : str Output directory to save files. """ bedfile = os.path.join(outdir, "input.from.narrowpeak.bed") p = re.compile(r'^(#|track|browser)') width = int(params["width"]) logger.info("preparing input (narrowPeak to BED, width %s)", width) warn_no_summit = True with open(bedfile, "w") as f_out: with open(inputfile) as f_in: for line in f_in: if p.search(line): continue vals = line.strip().split("\t") start, end = int(vals[1]), int(vals[2]) summit = int(vals[9]) if summit == -1: if warn_no_summit: logger.warn("No summit present in narrowPeak file, using the peak center.") warn_no_summit = False summit = (end - start) // 2 start = start + summit - (width // 2) end = start + width f_out.write("{}\t{}\t{}\t{}\n".format( vals[0], start, end, vals[6] )) prepare_denovo_input_bed(bedfile, params, outdir)
def function[prepare_denovo_input_narrowpeak, parameter[inputfile, params, outdir]]: constant[Prepare a narrowPeak file for de novo motif prediction. All regions to same size; split in test and validation set; converted to FASTA. Parameters ---------- inputfile : str BED file with input regions. params : dict Dictionary with parameters. outdir : str Output directory to save files. ] variable[bedfile] assign[=] call[name[os].path.join, parameter[name[outdir], constant[input.from.narrowpeak.bed]]] variable[p] assign[=] call[name[re].compile, parameter[constant[^(#|track|browser)]]] variable[width] assign[=] call[name[int], parameter[call[name[params]][constant[width]]]] call[name[logger].info, parameter[constant[preparing input (narrowPeak to BED, width %s)], name[width]]] variable[warn_no_summit] assign[=] constant[True] with call[name[open], parameter[name[bedfile], constant[w]]] begin[:] with call[name[open], parameter[name[inputfile]]] begin[:] for taget[name[line]] in starred[name[f_in]] begin[:] if call[name[p].search, parameter[name[line]]] begin[:] continue variable[vals] assign[=] call[call[name[line].strip, parameter[]].split, parameter[constant[ ]]] <ast.Tuple object at 0x7da18f09f490> assign[=] tuple[[<ast.Call object at 0x7da18f09f2e0>, <ast.Call object at 0x7da18f09ef20>]] variable[summit] assign[=] call[name[int], parameter[call[name[vals]][constant[9]]]] if compare[name[summit] equal[==] <ast.UnaryOp object at 0x7da18f09cb80>] begin[:] if name[warn_no_summit] begin[:] call[name[logger].warn, parameter[constant[No summit present in narrowPeak file, using the peak center.]]] variable[warn_no_summit] assign[=] constant[False] variable[summit] assign[=] binary_operation[binary_operation[name[end] - name[start]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] variable[start] assign[=] binary_operation[binary_operation[name[start] + name[summit]] - binary_operation[name[width] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]] variable[end] assign[=] binary_operation[name[start] + name[width]] call[name[f_out].write, parameter[call[constant[{} {} {} {} ].format, parameter[call[name[vals]][constant[0]], name[start], name[end], call[name[vals]][constant[6]]]]]] call[name[prepare_denovo_input_bed], parameter[name[bedfile], name[params], name[outdir]]]
keyword[def] identifier[prepare_denovo_input_narrowpeak] ( identifier[inputfile] , identifier[params] , identifier[outdir] ): literal[string] identifier[bedfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[outdir] , literal[string] ) identifier[p] = identifier[re] . identifier[compile] ( literal[string] ) identifier[width] = identifier[int] ( identifier[params] [ literal[string] ]) identifier[logger] . identifier[info] ( literal[string] , identifier[width] ) identifier[warn_no_summit] = keyword[True] keyword[with] identifier[open] ( identifier[bedfile] , literal[string] ) keyword[as] identifier[f_out] : keyword[with] identifier[open] ( identifier[inputfile] ) keyword[as] identifier[f_in] : keyword[for] identifier[line] keyword[in] identifier[f_in] : keyword[if] identifier[p] . identifier[search] ( identifier[line] ): keyword[continue] identifier[vals] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] ) identifier[start] , identifier[end] = identifier[int] ( identifier[vals] [ literal[int] ]), identifier[int] ( identifier[vals] [ literal[int] ]) identifier[summit] = identifier[int] ( identifier[vals] [ literal[int] ]) keyword[if] identifier[summit] ==- literal[int] : keyword[if] identifier[warn_no_summit] : identifier[logger] . identifier[warn] ( literal[string] ) identifier[warn_no_summit] = keyword[False] identifier[summit] =( identifier[end] - identifier[start] )// literal[int] identifier[start] = identifier[start] + identifier[summit] -( identifier[width] // literal[int] ) identifier[end] = identifier[start] + identifier[width] identifier[f_out] . identifier[write] ( literal[string] . identifier[format] ( identifier[vals] [ literal[int] ], identifier[start] , identifier[end] , identifier[vals] [ literal[int] ] )) identifier[prepare_denovo_input_bed] ( identifier[bedfile] , identifier[params] , identifier[outdir] )
def prepare_denovo_input_narrowpeak(inputfile, params, outdir): """Prepare a narrowPeak file for de novo motif prediction. All regions to same size; split in test and validation set; converted to FASTA. Parameters ---------- inputfile : str BED file with input regions. params : dict Dictionary with parameters. outdir : str Output directory to save files. """ bedfile = os.path.join(outdir, 'input.from.narrowpeak.bed') p = re.compile('^(#|track|browser)') width = int(params['width']) logger.info('preparing input (narrowPeak to BED, width %s)', width) warn_no_summit = True with open(bedfile, 'w') as f_out: with open(inputfile) as f_in: for line in f_in: if p.search(line): continue # depends on [control=['if'], data=[]] vals = line.strip().split('\t') (start, end) = (int(vals[1]), int(vals[2])) summit = int(vals[9]) if summit == -1: if warn_no_summit: logger.warn('No summit present in narrowPeak file, using the peak center.') warn_no_summit = False # depends on [control=['if'], data=[]] summit = (end - start) // 2 # depends on [control=['if'], data=['summit']] start = start + summit - width // 2 end = start + width f_out.write('{}\t{}\t{}\t{}\n'.format(vals[0], start, end, vals[6])) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f_in']] # depends on [control=['with'], data=['open', 'f_out']] prepare_denovo_input_bed(bedfile, params, outdir)
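Worked numbers for the re-centring arithmetic: a narrowPeak region [1000, 1500) whose summit offset (column 10 of the line) is 120, re-windowed to width 200, becomes a window centred on absolute position 1120:

start, end, width = 1000, 1500, 200
summit = 120                          # offset from `start`, as in the file
start = start + summit - width // 2   # 1000 + 120 - 100 = 1020
end = start + width                   # 1020 + 200 = 1220
print(start, end)                     # 1020 1220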
def max_age(self, value): """ Set the MaxAge of the response. :type value: int :param value: the MaxAge option """ option = Option() option.number = defines.OptionRegistry.MAX_AGE.number option.value = int(value) self.del_option_by_number(defines.OptionRegistry.MAX_AGE.number) self.add_option(option)
def function[max_age, parameter[self, value]]: constant[ Set the MaxAge of the response. :type value: int :param value: the MaxAge option ] variable[option] assign[=] call[name[Option], parameter[]] name[option].number assign[=] name[defines].OptionRegistry.MAX_AGE.number name[option].value assign[=] call[name[int], parameter[name[value]]] call[name[self].del_option_by_number, parameter[name[defines].OptionRegistry.MAX_AGE.number]] call[name[self].add_option, parameter[name[option]]]
keyword[def] identifier[max_age] ( identifier[self] , identifier[value] ): literal[string] identifier[option] = identifier[Option] () identifier[option] . identifier[number] = identifier[defines] . identifier[OptionRegistry] . identifier[MAX_AGE] . identifier[number] identifier[option] . identifier[value] = identifier[int] ( identifier[value] ) identifier[self] . identifier[del_option_by_number] ( identifier[defines] . identifier[OptionRegistry] . identifier[MAX_AGE] . identifier[number] ) identifier[self] . identifier[add_option] ( identifier[option] )
def max_age(self, value): """ Set the MaxAge of the response. :type value: int :param value: the MaxAge option """ option = Option() option.number = defines.OptionRegistry.MAX_AGE.number option.value = int(value) self.del_option_by_number(defines.OptionRegistry.MAX_AGE.number) self.add_option(option)
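`max_age` follows a delete-then-add idiom so that setting the option twice never leaves duplicates. A toy sketch of that idiom; the `Option` and `Message` classes below are stand-ins for illustration, not the library's actual internals (only the Max-Age option number, 14, is taken from RFC 7252):

class Option:
    def __init__(self, number=None, value=None):
        self.number = number
        self.value = value

class Message:
    # Toy message holding a flat list of options.
    def __init__(self):
        self.options = []

    def del_option_by_number(self, number):
        # Drop every existing option with this number so the new value
        # replaces, rather than piles up next to, the old one.
        self.options = [o for o in self.options if o.number != number]

    def add_option(self, option):
        self.options.append(option)

MAX_AGE = 14  # Max-Age option number from RFC 7252

msg = Message()
for age in (60, 120):          # setting twice still leaves a single option
    msg.del_option_by_number(MAX_AGE)
    msg.add_option(Option(MAX_AGE, int(age)))
print([(o.number, o.value) for o in msg.options])   # [(14, 120)]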
def _check_obj_properties(self, pub, name="pub"): """ Make sure, that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When the `pub` is not instance of `obj_type`. """ if not hasattr(pub, "indexes"): raise InvalidType("`%s` doesn't have .indexes property!" % name) if not pub.indexes: raise InvalidType("`%s.indexes` is not set!" % name) if not hasattr(pub, "project_key"): raise InvalidType( "`%s` doesn't have .project_key property!" % name ) if not pub.project_key: raise InvalidType("`%s.project_key` is not set!" % name)
def function[_check_obj_properties, parameter[self, pub, name]]: constant[ Make sure, that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When the `pub` is not instance of `obj_type`. ] if <ast.UnaryOp object at 0x7da1b26ad150> begin[:] <ast.Raise object at 0x7da1b26ad810> if <ast.UnaryOp object at 0x7da1b26ac880> begin[:] <ast.Raise object at 0x7da1b26af400> if <ast.UnaryOp object at 0x7da1b26ac5b0> begin[:] <ast.Raise object at 0x7da1b26aceb0> if <ast.UnaryOp object at 0x7da1b26adbd0> begin[:] <ast.Raise object at 0x7da1b26acf70>
keyword[def] identifier[_check_obj_properties] ( identifier[self] , identifier[pub] , identifier[name] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[pub] , literal[string] ): keyword[raise] identifier[InvalidType] ( literal[string] % identifier[name] ) keyword[if] keyword[not] identifier[pub] . identifier[indexes] : keyword[raise] identifier[InvalidType] ( literal[string] % identifier[name] ) keyword[if] keyword[not] identifier[hasattr] ( identifier[pub] , literal[string] ): keyword[raise] identifier[InvalidType] ( literal[string] % identifier[name] ) keyword[if] keyword[not] identifier[pub] . identifier[project_key] : keyword[raise] identifier[InvalidType] ( literal[string] % identifier[name] )
def _check_obj_properties(self, pub, name='pub'): """ Make sure, that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When the `pub` is not instance of `obj_type`. """ if not hasattr(pub, 'indexes'): raise InvalidType("`%s` doesn't have .indexes property!" % name) # depends on [control=['if'], data=[]] if not pub.indexes: raise InvalidType('`%s.indexes` is not set!' % name) # depends on [control=['if'], data=[]] if not hasattr(pub, 'project_key'): raise InvalidType("`%s` doesn't have .project_key property!" % name) # depends on [control=['if'], data=[]] if not pub.project_key: raise InvalidType('`%s.project_key` is not set!' % name) # depends on [control=['if'], data=[]]
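`_check_obj_properties` validates an interface by duck typing: `hasattr` for presence, then truthiness for "is set". A compressed sketch of the same two-step check; the loop-based variant and the `Pub` stub are illustrative, not the original code:

class InvalidType(TypeError):
    pass

def check_interface(obj, name="pub"):
    for attr in ("indexes", "project_key"):
        if not hasattr(obj, attr):
            raise InvalidType("`%s` doesn't have .%s property!" % (name, attr))
        if not getattr(obj, attr):
            raise InvalidType("`%s.%s` is not set!" % (name, attr))

class Pub:                      # hypothetical conforming object
    indexes = ["title", "isbn"]
    project_key = "proj-1"

check_interface(Pub())          # passes silently
try:
    check_interface(object())   # has no .indexes at all
except InvalidType as e:
    print(e)                    # `pub` doesn't have .indexes property!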
def _compute_equations(self, x, verbose=False): '''Compute the values and the normals (gradients) of active constraints. Arguments: | ``x`` -- The unknowns. ''' # compute the error and the normals. normals = [] values = [] signs = [] error = 0.0 if verbose: print() print(' '.join('% 10.3e' % val for val in x), end=' ') active_str = '' for i, (sign, equation) in enumerate(self.equations): value, normal = equation(x) if (i < len(self.lock) and self.lock[i]) or \ (sign==-1 and value > -self.threshold) or \ (sign==0) or (sign==1 and value < self.threshold): values.append(value) normals.append(normal) signs.append(sign) error += value**2 if verbose: active_str += 'X' if i < len(self.lock): self.lock[i] = True elif verbose: active_str += '-' error = np.sqrt(error) normals = np.array(normals, float) values = np.array(values, float) signs = np.array(signs, int) if verbose: print('[%s]' % active_str, end=' ') if error < self.threshold: print('OK') else: print('%.5e' % error) return normals, values, error, signs
def function[_compute_equations, parameter[self, x, verbose]]: constant[Compute the values and the normals (gradients) of active constraints. Arguments: | ``x`` -- The unknowns. ] variable[normals] assign[=] list[[]] variable[values] assign[=] list[[]] variable[signs] assign[=] list[[]] variable[error] assign[=] constant[0.0] if name[verbose] begin[:] call[name[print], parameter[]] call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da18ede4b80>]]]] variable[active_str] assign[=] constant[] for taget[tuple[[<ast.Name object at 0x7da18ede6440>, <ast.Tuple object at 0x7da18ede5060>]]] in starred[call[name[enumerate], parameter[name[self].equations]]] begin[:] <ast.Tuple object at 0x7da20c6a9540> assign[=] call[name[equation], parameter[name[x]]] if <ast.BoolOp object at 0x7da20c6a9750> begin[:] call[name[values].append, parameter[name[value]]] call[name[normals].append, parameter[name[normal]]] call[name[signs].append, parameter[name[sign]]] <ast.AugAssign object at 0x7da20c6ab070> if name[verbose] begin[:] <ast.AugAssign object at 0x7da20c6a9210> if compare[name[i] less[<] call[name[len], parameter[name[self].lock]]] begin[:] call[name[self].lock][name[i]] assign[=] constant[True] variable[error] assign[=] call[name[np].sqrt, parameter[name[error]]] variable[normals] assign[=] call[name[np].array, parameter[name[normals], name[float]]] variable[values] assign[=] call[name[np].array, parameter[name[values], name[float]]] variable[signs] assign[=] call[name[np].array, parameter[name[signs], name[int]]] if name[verbose] begin[:] call[name[print], parameter[binary_operation[constant[[%s]] <ast.Mod object at 0x7da2590d6920> name[active_str]]]] if compare[name[error] less[<] name[self].threshold] begin[:] call[name[print], parameter[constant[OK]]] return[tuple[[<ast.Name object at 0x7da20c6ab580>, <ast.Name object at 0x7da20c6abfd0>, <ast.Name object at 0x7da20c6a96c0>, <ast.Name object at 0x7da20c6a8e50>]]]
keyword[def] identifier[_compute_equations] ( identifier[self] , identifier[x] , identifier[verbose] = keyword[False] ): literal[string] identifier[normals] =[] identifier[values] =[] identifier[signs] =[] identifier[error] = literal[int] keyword[if] identifier[verbose] : identifier[print] () identifier[print] ( literal[string] . identifier[join] ( literal[string] % identifier[val] keyword[for] identifier[val] keyword[in] identifier[x] ), identifier[end] = literal[string] ) identifier[active_str] = literal[string] keyword[for] identifier[i] ,( identifier[sign] , identifier[equation] ) keyword[in] identifier[enumerate] ( identifier[self] . identifier[equations] ): identifier[value] , identifier[normal] = identifier[equation] ( identifier[x] ) keyword[if] ( identifier[i] < identifier[len] ( identifier[self] . identifier[lock] ) keyword[and] identifier[self] . identifier[lock] [ identifier[i] ]) keyword[or] ( identifier[sign] ==- literal[int] keyword[and] identifier[value] >- identifier[self] . identifier[threshold] ) keyword[or] ( identifier[sign] == literal[int] ) keyword[or] ( identifier[sign] == literal[int] keyword[and] identifier[value] < identifier[self] . identifier[threshold] ): identifier[values] . identifier[append] ( identifier[value] ) identifier[normals] . identifier[append] ( identifier[normal] ) identifier[signs] . identifier[append] ( identifier[sign] ) identifier[error] += identifier[value] ** literal[int] keyword[if] identifier[verbose] : identifier[active_str] += literal[string] keyword[if] identifier[i] < identifier[len] ( identifier[self] . identifier[lock] ): identifier[self] . identifier[lock] [ identifier[i] ]= keyword[True] keyword[elif] identifier[verbose] : identifier[active_str] += literal[string] identifier[error] = identifier[np] . identifier[sqrt] ( identifier[error] ) identifier[normals] = identifier[np] . identifier[array] ( identifier[normals] , identifier[float] ) identifier[values] = identifier[np] . identifier[array] ( identifier[values] , identifier[float] ) identifier[signs] = identifier[np] . identifier[array] ( identifier[signs] , identifier[int] ) keyword[if] identifier[verbose] : identifier[print] ( literal[string] % identifier[active_str] , identifier[end] = literal[string] ) keyword[if] identifier[error] < identifier[self] . identifier[threshold] : identifier[print] ( literal[string] ) keyword[else] : identifier[print] ( literal[string] % identifier[error] ) keyword[return] identifier[normals] , identifier[values] , identifier[error] , identifier[signs]
def _compute_equations(self, x, verbose=False): """Compute the values and the normals (gradients) of active constraints. Arguments: | ``x`` -- The unknowns. """ # compute the error and the normals. normals = [] values = [] signs = [] error = 0.0 if verbose: print() print(' '.join(('% 10.3e' % val for val in x)), end=' ') active_str = '' # depends on [control=['if'], data=[]] for (i, (sign, equation)) in enumerate(self.equations): (value, normal) = equation(x) if i < len(self.lock) and self.lock[i] or (sign == -1 and value > -self.threshold) or sign == 0 or (sign == 1 and value < self.threshold): values.append(value) normals.append(normal) signs.append(sign) error += value ** 2 if verbose: active_str += 'X' # depends on [control=['if'], data=[]] if i < len(self.lock): self.lock[i] = True # depends on [control=['if'], data=['i']] # depends on [control=['if'], data=[]] elif verbose: active_str += '-' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] error = np.sqrt(error) normals = np.array(normals, float) values = np.array(values, float) signs = np.array(signs, int) if verbose: print('[%s]' % active_str, end=' ') if error < self.threshold: print('OK') # depends on [control=['if'], data=[]] else: print('%.5e' % error) # depends on [control=['if'], data=[]] return (normals, values, error, signs)
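`_compute_equations` runs an active-set test: equality constraints (sign 0) always count, inequalities (sign -1 or +1) enter only near or past their boundary, and `self.lock` pins a once-activated inequality. A small sketch of that activity test on one constraint, assuming (as in the original) that each equation callable returns a `(value, gradient)` pair; the sign semantics in the comments are a reading of the condition, not stated in the source:

import numpy as np

THRESHOLD = 1e-6

def circle_constraint(x):
    # Equality constraint x0^2 + x1^2 - 1 = 0 and its gradient (the "normal").
    value = x[0] ** 2 + x[1] ** 2 - 1.0
    normal = np.array([2.0 * x[0], 2.0 * x[1]])
    return value, normal

def is_active(sign, value, locked=False):
    # Same condition as in _compute_equations above:
    #   sign -1: active once value > -threshold (a "<= 0" bound nearly violated)
    #   sign  0: always active (equality)
    #   sign +1: active once value < threshold (a ">= 0" bound nearly violated)
    return locked or (sign == -1 and value > -THRESHOLD) \
        or sign == 0 or (sign == 1 and value < THRESHOLD)

x = np.array([0.8, 0.6])             # a point exactly on the unit circle
value, normal = circle_constraint(x)
print(is_active(0, value))           # True -- equalities always count
print(is_active(-1, -0.5))           # False -- inequality well inside its bound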
def derivatives_ctrlpts(**kwargs): """ Computes the control points of all derivative curves up to and including the {degree}-th derivative. Implementation of Algorithm A3.3 from The NURBS Book by Piegl & Tiller. Output is PK[k][i], i-th control point of the k-th derivative curve where 0 <= k <= degree and r1 <= i <= r2-k. """ # r1 - minimum span, r2 - maximum span r1 = kwargs.get('r1') r2 = kwargs.get('r2') deriv_order = kwargs.get('deriv_order') degree = kwargs.get('degree') knotvector = kwargs.get('knotvector') ctrlpts = kwargs.get('ctrlpts') dimension = kwargs.get('dimension') # Algorithm A3.3 r = r2 - r1 PK = [[[None for _ in range(dimension)] for _ in range(r + 1)] for _ in range(deriv_order + 1)] for i in range(0, r + 1): PK[0][i][:] = [elem for elem in ctrlpts[r1 + i]] for k in range(1, deriv_order + 1): tmp = degree - k + 1 for i in range(0, r - k + 1): PK[k][i][:] = [tmp * (elem1 - elem2) / (knotvector[r1 + i + degree + 1] - knotvector[r1 + i + k]) for elem1, elem2 in zip(PK[k - 1][i + 1], PK[k - 1][i])] # Return a 2-dimensional list of control points return PK
def function[derivatives_ctrlpts, parameter[]]: constant[ Computes the control points of all derivative curves up to and including the {degree}-th derivative. Implementation of Algorithm A3.3 from The NURBS Book by Piegl & Tiller. Output is PK[k][i], i-th control point of the k-th derivative curve where 0 <= k <= degree and r1 <= i <= r2-k. ] variable[r1] assign[=] call[name[kwargs].get, parameter[constant[r1]]] variable[r2] assign[=] call[name[kwargs].get, parameter[constant[r2]]] variable[deriv_order] assign[=] call[name[kwargs].get, parameter[constant[deriv_order]]] variable[degree] assign[=] call[name[kwargs].get, parameter[constant[degree]]] variable[knotvector] assign[=] call[name[kwargs].get, parameter[constant[knotvector]]] variable[ctrlpts] assign[=] call[name[kwargs].get, parameter[constant[ctrlpts]]] variable[dimension] assign[=] call[name[kwargs].get, parameter[constant[dimension]]] variable[r] assign[=] binary_operation[name[r2] - name[r1]] variable[PK] assign[=] <ast.ListComp object at 0x7da1b16e4fa0> for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[name[r] + constant[1]]]]] begin[:] call[call[call[name[PK]][constant[0]]][name[i]]][<ast.Slice object at 0x7da1b16e5ba0>] assign[=] <ast.ListComp object at 0x7da1b16e5c00> for taget[name[k]] in starred[call[name[range], parameter[constant[1], binary_operation[name[deriv_order] + constant[1]]]]] begin[:] variable[tmp] assign[=] binary_operation[binary_operation[name[degree] - name[k]] + constant[1]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[binary_operation[name[r] - name[k]] + constant[1]]]]] begin[:] call[call[call[name[PK]][name[k]]][name[i]]][<ast.Slice object at 0x7da1b16e7460>] assign[=] <ast.ListComp object at 0x7da1b16e74c0> return[name[PK]]
keyword[def] identifier[derivatives_ctrlpts] (** identifier[kwargs] ): literal[string] identifier[r1] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[r2] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[deriv_order] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[degree] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[knotvector] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[ctrlpts] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[dimension] = identifier[kwargs] . identifier[get] ( literal[string] ) identifier[r] = identifier[r2] - identifier[r1] identifier[PK] =[[[ keyword[None] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[dimension] )] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[r] + literal[int] )] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[deriv_order] + literal[int] )] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[r] + literal[int] ): identifier[PK] [ literal[int] ][ identifier[i] ][:]=[ identifier[elem] keyword[for] identifier[elem] keyword[in] identifier[ctrlpts] [ identifier[r1] + identifier[i] ]] keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[deriv_order] + literal[int] ): identifier[tmp] = identifier[degree] - identifier[k] + literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[r] - identifier[k] + literal[int] ): identifier[PK] [ identifier[k] ][ identifier[i] ][:]=[ identifier[tmp] *( identifier[elem1] - identifier[elem2] )/ ( identifier[knotvector] [ identifier[r1] + identifier[i] + identifier[degree] + literal[int] ]- identifier[knotvector] [ identifier[r1] + identifier[i] + identifier[k] ]) keyword[for] identifier[elem1] , identifier[elem2] keyword[in] identifier[zip] ( identifier[PK] [ identifier[k] - literal[int] ][ identifier[i] + literal[int] ], identifier[PK] [ identifier[k] - literal[int] ][ identifier[i] ])] keyword[return] identifier[PK]
def derivatives_ctrlpts(**kwargs): """ Computes the control points of all derivative curves up to and including the {degree}-th derivative. Implementation of Algorithm A3.3 from The NURBS Book by Piegl & Tiller. Output is PK[k][i], i-th control point of the k-th derivative curve where 0 <= k <= degree and r1 <= i <= r2-k. """ # r1 - minimum span, r2 - maximum span r1 = kwargs.get('r1') r2 = kwargs.get('r2') deriv_order = kwargs.get('deriv_order') degree = kwargs.get('degree') knotvector = kwargs.get('knotvector') ctrlpts = kwargs.get('ctrlpts') dimension = kwargs.get('dimension') # Algorithm A3.3 r = r2 - r1 PK = [[[None for _ in range(dimension)] for _ in range(r + 1)] for _ in range(deriv_order + 1)] for i in range(0, r + 1): PK[0][i][:] = [elem for elem in ctrlpts[r1 + i]] # depends on [control=['for'], data=['i']] for k in range(1, deriv_order + 1): tmp = degree - k + 1 for i in range(0, r - k + 1): PK[k][i][:] = [tmp * (elem1 - elem2) / (knotvector[r1 + i + degree + 1] - knotvector[r1 + i + k]) for (elem1, elem2) in zip(PK[k - 1][i + 1], PK[k - 1][i])] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['k']] # Return a 2-dimensional list of control points return PK
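`derivatives_ctrlpts` is Algorithm A3.3 from The NURBS Book: each level k of control points is a divided difference of level k-1, PK[k][i] = (p - k + 1) * (PK[k-1][i+1] - PK[k-1][i]) / (u[r1+i+p+1] - u[r1+i+k]). A hand-checkable call on a clamped quadratic (a Bezier arc), where the first-derivative control points must equal p * (P[i+1] - P[i]); this assumes the `derivatives_ctrlpts` definition above is in scope:

degree = 2
knotvector = [0, 0, 0, 1, 1, 1]                # clamped quadratic: a Bezier arc
ctrlpts = [[0.0, 0.0], [1.0, 2.0], [3.0, 0.0]]

PK = derivatives_ctrlpts(
    r1=0, r2=degree,                           # cover the whole control polygon
    deriv_order=1,
    degree=degree,
    knotvector=knotvector,
    ctrlpts=ctrlpts,
    dimension=2,
)

print(PK[0])           # level 0 echoes the input control points
print(PK[1][:degree])  # [[2.0, 4.0], [4.0, -4.0]] == p * (P[i+1] - P[i])
# Slots past index r - k at level k keep their None placeholders, so only
# the first r - k + 1 rows of PK[k] are meaningful.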
def getCollectionClass(cls, name) : """Return the class object of a collection given its 'name'""" try : return cls.collectionClasses[name] except KeyError : raise KeyError( "There is no Collection Class of type: '%s'; currently supported values: [%s]" % (name, ', '.join(getCollectionClasses().keys())) )
def function[getCollectionClass, parameter[cls, name]]: constant[Return the class object of a collection given its 'name'] <ast.Try object at 0x7da1b0dc0940>
keyword[def] identifier[getCollectionClass] ( identifier[cls] , identifier[name] ): literal[string] keyword[try] : keyword[return] identifier[cls] . identifier[collectionClasses] [ identifier[name] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[KeyError] ( literal[string] %( identifier[name] , literal[string] . identifier[join] ( identifier[getCollectionClasses] (). identifier[keys] ())))
def getCollectionClass(cls, name): """Return the class object of a collection given its 'name'""" try: return cls.collectionClasses[name] # depends on [control=['try'], data=[]] except KeyError: raise KeyError("There is no Collection Class of type: '%s'; currently supported values: [%s]" % (name, ', '.join(getCollectionClasses().keys()))) # depends on [control=['except'], data=[]]
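`getCollectionClass` is a plain registry lookup that upgrades the bare KeyError into one listing the valid choices. A self-contained sketch of the pattern; the `register` decorator and the `Users` class are invented for illustration:

class Registry:
    collectionClasses = {}

    @classmethod
    def register(cls, collection_cls):
        cls.collectionClasses[collection_cls.__name__] = collection_cls
        return collection_cls

    @classmethod
    def getCollectionClass(cls, name):
        try:
            return cls.collectionClasses[name]
        except KeyError:
            # Re-raise with the list of valid names baked into the message.
            raise KeyError(
                "There is no Collection Class of type: '%s'; "
                "currently supported values: [%s]"
                % (name, ", ".join(cls.collectionClasses))
            )

@Registry.register
class Users:                                   # hypothetical collection
    pass

print(Registry.getCollectionClass("Users"))    # <class '__main__.Users'>
try:
    Registry.getCollectionClass("Nope")
except KeyError as e:
    print(e)                                   # ... supported values: [Users]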
def scan_for_spec(keyword): """ Attempt to return some sort of Spec from given keyword value. Returns None if one could not be derived. """ # Both 'spec' formats are wrapped in parens, discard keyword = keyword.lstrip('(').rstrip(')') # First, test for intermediate '1.2+' style matches = release_line_re.findall(keyword) if matches: return Spec(">={}".format(matches[0])) # Failing that, see if Spec can make sense of it try: return Spec(keyword) # I've only ever seen Spec fail with ValueError. except ValueError: return None
def function[scan_for_spec, parameter[keyword]]: constant[ Attempt to return some sort of Spec from given keyword value. Returns None if one could not be derived. ] variable[keyword] assign[=] call[call[name[keyword].lstrip, parameter[constant[(]]].rstrip, parameter[constant[)]]] variable[matches] assign[=] call[name[release_line_re].findall, parameter[name[keyword]]] if name[matches] begin[:] return[call[name[Spec], parameter[call[constant[>={}].format, parameter[call[name[matches]][constant[0]]]]]]] <ast.Try object at 0x7da1b05e2200>
keyword[def] identifier[scan_for_spec] ( identifier[keyword] ): literal[string] identifier[keyword] = identifier[keyword] . identifier[lstrip] ( literal[string] ). identifier[rstrip] ( literal[string] ) identifier[matches] = identifier[release_line_re] . identifier[findall] ( identifier[keyword] ) keyword[if] identifier[matches] : keyword[return] identifier[Spec] ( literal[string] . identifier[format] ( identifier[matches] [ literal[int] ])) keyword[try] : keyword[return] identifier[Spec] ( identifier[keyword] ) keyword[except] identifier[ValueError] : keyword[return] keyword[None]
def scan_for_spec(keyword): """ Attempt to return some sort of Spec from given keyword value. Returns None if one could not be derived. """ # Both 'spec' formats are wrapped in parens, discard keyword = keyword.lstrip('(').rstrip(')') # First, test for intermediate '1.2+' style matches = release_line_re.findall(keyword) if matches: return Spec('>={}'.format(matches[0])) # depends on [control=['if'], data=[]] # Failing that, see if Spec can make sense of it try: return Spec(keyword) # depends on [control=['try'], data=[]] # I've only ever seen Spec fail with ValueError. except ValueError: return None # depends on [control=['except'], data=[]]
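`scan_for_spec` leans on two names defined elsewhere in its module: `release_line_re`, a regex matching "1.2+"-style release lines, and `Spec`, presumably from the `semantic_version` package. A runnable sketch with assumed stand-ins for both; the regex below is a guess at the shape of the real one, not taken from the source:

import re
from semantic_version import Spec   # pip install semantic_version

# Stand-in for the module-level release_line_re: capture "1.2" out of "1.2+".
release_line_re = re.compile(r"^(\d+\.\d+)\+$")

def scan_for_spec(keyword):
    keyword = keyword.lstrip("(").rstrip(")")
    matches = release_line_re.findall(keyword)
    if matches:
        return Spec(">={}".format(matches[0]))
    try:
        return Spec(keyword)
    except ValueError:
        return None

print(scan_for_spec("(1.2+)"))           # >=1.2   (release-line shorthand)
print(scan_for_spec(">=0.9.0,<2.0.0"))   # >=0.9.0,<2.0.0
print(scan_for_spec("not a spec"))       # None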
def clean(self): """Remove services without host object linked to Note that this should not happen! :return: None """ to_del = [] for serv in self: if not serv.host: to_del.append(serv.uuid) for service_uuid in to_del: del self.items[service_uuid]
def function[clean, parameter[self]]: constant[Remove services without host object linked to Note that this should not happen! :return: None ] variable[to_del] assign[=] list[[]] for taget[name[serv]] in starred[name[self]] begin[:] if <ast.UnaryOp object at 0x7da18f722f80> begin[:] call[name[to_del].append, parameter[name[serv].uuid]] for taget[name[service_uuid]] in starred[name[to_del]] begin[:] <ast.Delete object at 0x7da1b0d21ab0>
keyword[def] identifier[clean] ( identifier[self] ): literal[string] identifier[to_del] =[] keyword[for] identifier[serv] keyword[in] identifier[self] : keyword[if] keyword[not] identifier[serv] . identifier[host] : identifier[to_del] . identifier[append] ( identifier[serv] . identifier[uuid] ) keyword[for] identifier[service_uuid] keyword[in] identifier[to_del] : keyword[del] identifier[self] . identifier[items] [ identifier[service_uuid] ]
def clean(self): """Remove services without host object linked to Note that this should not happen! :return: None """ to_del = [] for serv in self: if not serv.host: to_del.append(serv.uuid) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['serv']] for service_uuid in to_del: del self.items[service_uuid] # depends on [control=['for'], data=['service_uuid']]
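`clean` uses the collect-then-delete idiom: mutating `self.items` while iterating over it would fail at runtime, so orphan UUIDs are gathered first and removed afterwards. The same idiom on a plain dict, with made-up service records:

items = {
    "a1": {"host": "web-01"},
    "b2": {"host": None},       # orphan: "should not happen", but clean it up
    "c3": {"host": "db-01"},
}

# Pass 1: collect keys. Deleting inside a loop over the dict itself would
# raise "dictionary changed size during iteration".
to_del = [uuid for uuid, serv in items.items() if not serv["host"]]

# Pass 2: delete outside the iteration.
for uuid in to_del:
    del items[uuid]

print(sorted(items))            # ['a1', 'c3']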