code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def unpack_rsp(cls, rsp_pb):
    """Convert from PLS response to user response.

    Returns (RET_OK, "", accinfo_list) on success, where accinfo_list
    holds a single dict of account-fund fields, or
    (RET_ERROR, error_message, None) when the response reports failure.
    """
    if rsp_pb.retType != RET_OK:
        return RET_ERROR, rsp_pb.retMsg, None

    funds = rsp_pb.s2c.funds
    account_info = {
        'power': funds.power,
        'total_assets': funds.totalAssets,
        'cash': funds.cash,
        'market_val': funds.marketVal,
        'frozen_cash': funds.frozenCash,
        'avl_withdrawal_cash': funds.avlWithdrawalCash,
    }
    return RET_OK, "", [account_info]
def function[unpack_rsp, parameter[cls, rsp_pb]]: constant[Convert from PLS response to user response] if compare[name[rsp_pb].retType not_equal[!=] name[RET_OK]] begin[:] return[tuple[[<ast.Name object at 0x7da1b07fbc70>, <ast.Attribute object at 0x7da1b07fa230>, <ast.Constant object at 0x7da1b07fad40>]]] variable[raw_funds] assign[=] name[rsp_pb].s2c.funds variable[accinfo_list] assign[=] list[[<ast.Dict object at 0x7da1b26ad2d0>]] return[tuple[[<ast.Name object at 0x7da1b07f8730>, <ast.Constant object at 0x7da1b07fbbe0>, <ast.Name object at 0x7da1b07f8520>]]]
keyword[def] identifier[unpack_rsp] ( identifier[cls] , identifier[rsp_pb] ): literal[string] keyword[if] identifier[rsp_pb] . identifier[retType] != identifier[RET_OK] : keyword[return] identifier[RET_ERROR] , identifier[rsp_pb] . identifier[retMsg] , keyword[None] identifier[raw_funds] = identifier[rsp_pb] . identifier[s2c] . identifier[funds] identifier[accinfo_list] =[{ literal[string] : identifier[raw_funds] . identifier[power] , literal[string] : identifier[raw_funds] . identifier[totalAssets] , literal[string] : identifier[raw_funds] . identifier[cash] , literal[string] : identifier[raw_funds] . identifier[marketVal] , literal[string] : identifier[raw_funds] . identifier[frozenCash] , literal[string] : identifier[raw_funds] . identifier[avlWithdrawalCash] , }] keyword[return] identifier[RET_OK] , literal[string] , identifier[accinfo_list]
def unpack_rsp(cls, rsp_pb): """Convert from PLS response to user response""" if rsp_pb.retType != RET_OK: return (RET_ERROR, rsp_pb.retMsg, None) # depends on [control=['if'], data=[]] raw_funds = rsp_pb.s2c.funds accinfo_list = [{'power': raw_funds.power, 'total_assets': raw_funds.totalAssets, 'cash': raw_funds.cash, 'market_val': raw_funds.marketVal, 'frozen_cash': raw_funds.frozenCash, 'avl_withdrawal_cash': raw_funds.avlWithdrawalCash}] return (RET_OK, '', accinfo_list)
def on_taskend(self, task):
    """Play the configured sound when a task ends.

    Chooses the 'timer' sound if the task has elapsed time, otherwise
    the 'end' sound; plays nothing if no file is configured for the key.
    """
    if task.elapsed:
        sound_key = 'timer'
    else:
        sound_key = 'end'
    sound_file = self.files.get(sound_key)
    if sound_file:
        self._play_sound(sound_file)
def function[on_taskend, parameter[self, task]]: constant[ Play sounds at task end. ] variable[key] assign[=] <ast.IfExp object at 0x7da18f8118d0> variable[filename] assign[=] call[name[self].files.get, parameter[name[key]]] if name[filename] begin[:] call[name[self]._play_sound, parameter[name[filename]]]
keyword[def] identifier[on_taskend] ( identifier[self] , identifier[task] ): literal[string] identifier[key] = literal[string] keyword[if] identifier[task] . identifier[elapsed] keyword[else] literal[string] identifier[filename] = identifier[self] . identifier[files] . identifier[get] ( identifier[key] ) keyword[if] identifier[filename] : identifier[self] . identifier[_play_sound] ( identifier[filename] )
def on_taskend(self, task): """ Play sounds at task end. """ key = 'timer' if task.elapsed else 'end' filename = self.files.get(key) if filename: self._play_sound(filename) # depends on [control=['if'], data=[]]
def exec_module(self, module):
    """Execute the module."""
    name = module.__name__
    code = self.get_code(name)
    if code is None:
        # A loader that returns no code object cannot execute the module.
        msg = 'cannot load module {!r} when get_code() returns None'
        raise ImportError(msg.format(name))
    exec(code, module.__dict__)
def function[exec_module, parameter[self, module]]: constant[Execute the module.] variable[code] assign[=] call[name[self].get_code, parameter[name[module].__name__]] if compare[name[code] is constant[None]] begin[:] <ast.Raise object at 0x7da204623b50> call[name[exec], parameter[name[code], name[module].__dict__]]
keyword[def] identifier[exec_module] ( identifier[self] , identifier[module] ): literal[string] identifier[code] = identifier[self] . identifier[get_code] ( identifier[module] . identifier[__name__] ) keyword[if] identifier[code] keyword[is] keyword[None] : keyword[raise] identifier[ImportError] ( literal[string] literal[string] . identifier[format] ( identifier[module] . identifier[__name__] )) identifier[exec] ( identifier[code] , identifier[module] . identifier[__dict__] )
def exec_module(self, module): """Execute the module.""" code = self.get_code(module.__name__) if code is None: raise ImportError('cannot load module {!r} when get_code() returns None'.format(module.__name__)) # depends on [control=['if'], data=[]] exec(code, module.__dict__)
def set_format(self):
    """
    Check if the format has a flexible height, if so check if the ratio
    of the flexible format is closer to the actual ratio of the image.
    If so use that instead of the default values (f.max_width,
    f.max_height).
    """
    fmt = self.fmt
    if not (fmt.flexible_height and fmt.flexible_max_height):
        return
    flex_w = self.fw
    flex_h = fmt.flexible_max_height
    flex_ratio = float(flex_w) / flex_h
    # Adopt the flexible height only when it matches the image better.
    if abs(flex_ratio - self.image_ratio) < abs(self.format_ratio - self.image_ratio):
        self.fh = flex_h
        self.format_ratio = flex_ratio
def function[set_format, parameter[self]]: constant[ Check if the format has a flexible height, if so check if the ratio of the flexible format is closer to the actual ratio of the image. If so use that instead of the default values (f.max_width, f.max_height). ] variable[f] assign[=] name[self].fmt if <ast.BoolOp object at 0x7da18f58ce50> begin[:] <ast.Tuple object at 0x7da18f58c7c0> assign[=] tuple[[<ast.Attribute object at 0x7da18f58c880>, <ast.Attribute object at 0x7da18f58d9f0>]] variable[flex_ratio] assign[=] binary_operation[call[name[float], parameter[name[flexw]]] / name[flexh]] if compare[call[name[abs], parameter[binary_operation[name[flex_ratio] - name[self].image_ratio]]] less[<] call[name[abs], parameter[binary_operation[name[self].format_ratio - name[self].image_ratio]]]] begin[:] name[self].fh assign[=] name[flexh] name[self].format_ratio assign[=] name[flex_ratio]
keyword[def] identifier[set_format] ( identifier[self] ): literal[string] identifier[f] = identifier[self] . identifier[fmt] keyword[if] identifier[f] . identifier[flexible_height] keyword[and] identifier[f] . identifier[flexible_max_height] : identifier[flexw] , identifier[flexh] = identifier[self] . identifier[fw] , identifier[f] . identifier[flexible_max_height] identifier[flex_ratio] = identifier[float] ( identifier[flexw] )/ identifier[flexh] keyword[if] identifier[abs] ( identifier[flex_ratio] - identifier[self] . identifier[image_ratio] )< identifier[abs] ( identifier[self] . identifier[format_ratio] - identifier[self] . identifier[image_ratio] ): identifier[self] . identifier[fh] = identifier[flexh] identifier[self] . identifier[format_ratio] = identifier[flex_ratio]
def set_format(self): """ Check if the format has a flexible height, if so check if the ratio of the flexible format is closer to the actual ratio of the image. If so use that instead of the default values (f.max_width, f.max_height). """ f = self.fmt if f.flexible_height and f.flexible_max_height: (flexw, flexh) = (self.fw, f.flexible_max_height) flex_ratio = float(flexw) / flexh if abs(flex_ratio - self.image_ratio) < abs(self.format_ratio - self.image_ratio): self.fh = flexh self.format_ratio = flex_ratio # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def stream(url, headers, stream_to=None, retry=True):
    '''stream is a get that will stream to file_name. Since this is a worker
       task, it differs from the client provided version in that it requires
       headers.

       Parameters
       ==========
       url: the url to issue the GET against
       headers: request headers (required here, unlike the client version)
       stream_to: path of the file to write the streamed content to
       retry: whether a 401/403 response may be retried once after a
              token refresh

       Returns the path written to on success; exits the process on failure.
    '''
    bot.debug("GET %s" % url)

    if DISABLE_SSL_CHECK is True:
        bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::')

    # Ensure headers are present, update if not
    response = requests.get(url,
                            headers=headers,
                            verify=not DISABLE_SSL_CHECK,
                            stream=True)

    # If we get permissions error, one more try with updated token.
    # BUGFIX: honor the retry flag -- previously it was passed but never
    # checked, so a persistent 401/403 recursed without bound.
    if response.status_code in [401, 403] and retry:
        headers = update_token(headers)
        return stream(url, headers, stream_to, retry=False)

    # Successful Response
    elif response.status_code == 200:

        # Keep user updated with Progress Bar
        content_size = None
        if 'Content-Length' in response.headers:
            progress = 0
            content_size = int(response.headers['Content-Length'])
            bot.show_progress(progress, content_size, length=35)

        chunk_size = 1 << 20
        with open(stream_to, 'wb') as filey:
            for chunk in response.iter_content(chunk_size=chunk_size):
                filey.write(chunk)
                if content_size is not None:
                    # Clamp so the final (short) chunk never reports >100%
                    progress = min(progress + chunk_size, content_size)
                    bot.show_progress(iteration=progress,
                                      total=content_size,
                                      length=35,
                                      carriage_return=False)

        # Newline to finish download
        sys.stdout.write('\n')
        return stream_to

    bot.error("Problem with stream, response %s" % (response.status_code))
    sys.exit(1)
def function[stream, parameter[url, headers, stream_to, retry]]: constant[stream is a get that will stream to file_name. Since this is a worker task, it differs from the client provided version in that it requires headers. ] call[name[bot].debug, parameter[binary_operation[constant[GET %s] <ast.Mod object at 0x7da2590d6920> name[url]]]] if compare[name[DISABLE_SSL_CHECK] is constant[True]] begin[:] call[name[bot].warning, parameter[constant[Verify of certificates disabled! ::TESTING USE ONLY::]]] variable[response] assign[=] call[name[requests].get, parameter[name[url]]] if compare[name[response].status_code in list[[<ast.Constant object at 0x7da1b05bd480>, <ast.Constant object at 0x7da1b05becb0>]]] begin[:] variable[headers] assign[=] call[name[update_token], parameter[name[headers]]] return[call[name[stream], parameter[name[url], name[headers], name[stream_to]]]] call[name[bot].error, parameter[binary_operation[constant[Problem with stream, response %s] <ast.Mod object at 0x7da2590d6920> name[response].status_code]]] call[name[sys].exit, parameter[constant[1]]]
keyword[def] identifier[stream] ( identifier[url] , identifier[headers] , identifier[stream_to] = keyword[None] , identifier[retry] = keyword[True] ): literal[string] identifier[bot] . identifier[debug] ( literal[string] % identifier[url] ) keyword[if] identifier[DISABLE_SSL_CHECK] keyword[is] keyword[True] : identifier[bot] . identifier[warning] ( literal[string] ) identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[verify] = keyword[not] identifier[DISABLE_SSL_CHECK] , identifier[stream] = keyword[True] ) keyword[if] identifier[response] . identifier[status_code] keyword[in] [ literal[int] , literal[int] ]: identifier[headers] = identifier[update_token] ( identifier[headers] ) keyword[return] identifier[stream] ( identifier[url] , identifier[headers] , identifier[stream_to] , identifier[retry] = keyword[False] ) keyword[elif] identifier[response] . identifier[status_code] == literal[int] : identifier[content_size] = keyword[None] keyword[if] literal[string] keyword[in] identifier[response] . identifier[headers] : identifier[progress] = literal[int] identifier[content_size] = identifier[int] ( identifier[response] . identifier[headers] [ literal[string] ]) identifier[bot] . identifier[show_progress] ( identifier[progress] , identifier[content_size] , identifier[length] = literal[int] ) identifier[chunk_size] = literal[int] << literal[int] keyword[with] identifier[open] ( identifier[stream_to] , literal[string] ) keyword[as] identifier[filey] : keyword[for] identifier[chunk] keyword[in] identifier[response] . identifier[iter_content] ( identifier[chunk_size] = identifier[chunk_size] ): identifier[filey] . identifier[write] ( identifier[chunk] ) keyword[if] identifier[content_size] keyword[is] keyword[not] keyword[None] : identifier[progress] += identifier[chunk_size] identifier[bot] . 
identifier[show_progress] ( identifier[iteration] = identifier[progress] , identifier[total] = identifier[content_size] , identifier[length] = literal[int] , identifier[carriage_return] = keyword[False] ) identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] ) keyword[return] identifier[stream_to] identifier[bot] . identifier[error] ( literal[string] %( identifier[response] . identifier[status_code] )) identifier[sys] . identifier[exit] ( literal[int] )
def stream(url, headers, stream_to=None, retry=True): """stream is a get that will stream to file_name. Since this is a worker task, it differs from the client provided version in that it requires headers. """ bot.debug('GET %s' % url) if DISABLE_SSL_CHECK is True: bot.warning('Verify of certificates disabled! ::TESTING USE ONLY::') # depends on [control=['if'], data=[]] # Ensure headers are present, update if not response = requests.get(url, headers=headers, verify=not DISABLE_SSL_CHECK, stream=True) # If we get permissions error, one more try with updated token if response.status_code in [401, 403]: headers = update_token(headers) return stream(url, headers, stream_to, retry=False) # depends on [control=['if'], data=[]] # Successful Response elif response.status_code == 200: # Keep user updated with Progress Bar content_size = None if 'Content-Length' in response.headers: progress = 0 content_size = int(response.headers['Content-Length']) bot.show_progress(progress, content_size, length=35) # depends on [control=['if'], data=[]] chunk_size = 1 << 20 with open(stream_to, 'wb') as filey: for chunk in response.iter_content(chunk_size=chunk_size): filey.write(chunk) if content_size is not None: progress += chunk_size bot.show_progress(iteration=progress, total=content_size, length=35, carriage_return=False) # depends on [control=['if'], data=['content_size']] # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['filey']] # Newline to finish download sys.stdout.write('\n') return stream_to # depends on [control=['if'], data=[]] bot.error('Problem with stream, response %s' % response.status_code) sys.exit(1)
def extract(connection, filename, table_names = None, verbose = False, xsl_file = None):
	"""
	Convert the database at the given connection to a tabular LIGO
	Light-Weight XML document.  The XML document is written to the
	file named filename.  If table_names is not None, it should be a
	sequence of strings and only the tables in that sequence will be
	converted.  If verbose is True then progress messages will be
	printed to stderr.
	"""
	xmldoc = ligolw.Document()
	xmldoc.appendChild(dbtables.get_xml(connection, table_names))
	# gzip the output when the target name ends in ".gz"
	target_name = filename or "stdout"
	ligolw_utils.write_filename(xmldoc, filename, gz = target_name.endswith(".gz"), verbose = verbose, xsl_file = xsl_file)
	# delete cursors
	xmldoc.unlink()
def function[extract, parameter[connection, filename, table_names, verbose, xsl_file]]: constant[ Convert the database at the given connection to a tabular LIGO Light-Weight XML document. The XML document is written to the file named filename. If table_names is not None, it should be a sequence of strings and only the tables in that sequence will be converted. If verbose is True then progress messages will be printed to stderr. ] variable[xmldoc] assign[=] call[name[ligolw].Document, parameter[]] call[name[xmldoc].appendChild, parameter[call[name[dbtables].get_xml, parameter[name[connection], name[table_names]]]]] call[name[ligolw_utils].write_filename, parameter[name[xmldoc], name[filename]]] call[name[xmldoc].unlink, parameter[]]
keyword[def] identifier[extract] ( identifier[connection] , identifier[filename] , identifier[table_names] = keyword[None] , identifier[verbose] = keyword[False] , identifier[xsl_file] = keyword[None] ): literal[string] identifier[xmldoc] = identifier[ligolw] . identifier[Document] () identifier[xmldoc] . identifier[appendChild] ( identifier[dbtables] . identifier[get_xml] ( identifier[connection] , identifier[table_names] )) identifier[ligolw_utils] . identifier[write_filename] ( identifier[xmldoc] , identifier[filename] , identifier[gz] =( identifier[filename] keyword[or] literal[string] ). identifier[endswith] ( literal[string] ), identifier[verbose] = identifier[verbose] , identifier[xsl_file] = identifier[xsl_file] ) identifier[xmldoc] . identifier[unlink] ()
def extract(connection, filename, table_names=None, verbose=False, xsl_file=None): """ Convert the database at the given connection to a tabular LIGO Light-Weight XML document. The XML document is written to the file named filename. If table_names is not None, it should be a sequence of strings and only the tables in that sequence will be converted. If verbose is True then progress messages will be printed to stderr. """ xmldoc = ligolw.Document() xmldoc.appendChild(dbtables.get_xml(connection, table_names)) ligolw_utils.write_filename(xmldoc, filename, gz=(filename or 'stdout').endswith('.gz'), verbose=verbose, xsl_file=xsl_file) # delete cursors xmldoc.unlink()
def vmeasure(reference_intervals, reference_labels, estimated_intervals,
             estimated_labels, frame_size=0.1, beta=1.0):
    """Frame-clustering segmentation: v-measure

    Computes cross-entropy of cluster assignment, normalized by the
    marginal-entropy.  This is equivalent to `nce(..., marginal=True)`.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> # Trim or pad the estimate to match reference timing
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
    ...                                               ref_labels,
    ...                                               t_min=0)
    >>> (est_intervals,
    ...  est_labels) = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
    >>> V_precision, V_recall, V_F = mir_eval.structure.vmeasure(ref_intervals,
    ...                                                          ref_labels,
    ...                                                          est_intervals,
    ...                                                          est_labels)

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    reference_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    frame_size : float > 0
        length (in seconds) of frames for clustering
        (Default value = 0.1)
    beta : float > 0
        beta for F-measure
        (Default value = 1.0)

    Returns
    -------
    V_precision
        Over-clustering score: ``1 - H(y_est | y_ref) / H(y_est)``
        If `|y_est|==1`, then `V_precision` will be 0.
    V_recall
        Under-clustering score: ``1 - H(y_ref | y_est) / H(y_ref)``
        If `|y_ref|==1`, then `V_recall` will be 0.
    V_F
        F-measure for (V_precision, V_recall)
    """
    # v-measure is exactly the marginal-normalized variant of nce.
    options = dict(frame_size=frame_size, beta=beta, marginal=True)
    return nce(reference_intervals, reference_labels,
               estimated_intervals, estimated_labels, **options)
def function[vmeasure, parameter[reference_intervals, reference_labels, estimated_intervals, estimated_labels, frame_size, beta]]: constant[Frame-clustering segmentation: v-measure Computes cross-entropy of cluster assignment, normalized by the marginal-entropy. This is equivalent to `nce(..., marginal=True)`. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> # Trim or pad the estimate to match reference timing >>> (ref_intervals, ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals, ... ref_labels, ... t_min=0) >>> (est_intervals, ... est_labels) = mir_eval.util.adjust_intervals( ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max()) >>> V_precision, V_recall, V_F = mir_eval.structure.vmeasure(ref_intervals, ... ref_labels, ... est_intervals, ... est_labels) Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2) reference segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. reference_labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_intervals : np.ndarray, shape=(m, 2) estimated segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_labels : list, shape=(m,) estimated segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. frame_size : float > 0 length (in seconds) of frames for clustering (Default value = 0.1) beta : float > 0 beta for F-measure (Default value = 1.0) Returns ------- V_precision Over-clustering score: ``1 - H(y_est | y_ref) / H(y_est)`` If `|y_est|==1`, then `V_precision` will be 0. V_recall Under-clustering score: ``1 - H(y_ref | y_est) / H(y_ref)`` If `|y_ref|==1`, then `V_recall` will be 0. 
V_F F-measure for (V_precision, V_recall) ] return[call[name[nce], parameter[name[reference_intervals], name[reference_labels], name[estimated_intervals], name[estimated_labels]]]]
keyword[def] identifier[vmeasure] ( identifier[reference_intervals] , identifier[reference_labels] , identifier[estimated_intervals] , identifier[estimated_labels] , identifier[frame_size] = literal[int] , identifier[beta] = literal[int] ): literal[string] keyword[return] identifier[nce] ( identifier[reference_intervals] , identifier[reference_labels] , identifier[estimated_intervals] , identifier[estimated_labels] , identifier[frame_size] = identifier[frame_size] , identifier[beta] = identifier[beta] , identifier[marginal] = keyword[True] )
def vmeasure(reference_intervals, reference_labels, estimated_intervals, estimated_labels, frame_size=0.1, beta=1.0): """Frame-clustering segmentation: v-measure Computes cross-entropy of cluster assignment, normalized by the marginal-entropy. This is equivalent to `nce(..., marginal=True)`. Examples -------- >>> (ref_intervals, ... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab') >>> (est_intervals, ... est_labels) = mir_eval.io.load_labeled_intervals('est.lab') >>> # Trim or pad the estimate to match reference timing >>> (ref_intervals, ... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals, ... ref_labels, ... t_min=0) >>> (est_intervals, ... est_labels) = mir_eval.util.adjust_intervals( ... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max()) >>> V_precision, V_recall, V_F = mir_eval.structure.vmeasure(ref_intervals, ... ref_labels, ... est_intervals, ... est_labels) Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2) reference segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. reference_labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_intervals : np.ndarray, shape=(m, 2) estimated segment intervals, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. estimated_labels : list, shape=(m,) estimated segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. frame_size : float > 0 length (in seconds) of frames for clustering (Default value = 0.1) beta : float > 0 beta for F-measure (Default value = 1.0) Returns ------- V_precision Over-clustering score: ``1 - H(y_est | y_ref) / H(y_est)`` If `|y_est|==1`, then `V_precision` will be 0. V_recall Under-clustering score: ``1 - H(y_ref | y_est) / H(y_ref)`` If `|y_ref|==1`, then `V_recall` will be 0. 
V_F F-measure for (V_precision, V_recall) """ return nce(reference_intervals, reference_labels, estimated_intervals, estimated_labels, frame_size=frame_size, beta=beta, marginal=True)
def transform_import(self, node, results):
    """Transform for the basic import case.

    Replaces the old import name with a comma separated list of its
    replacements.
    """
    import_mod = results.get("module")
    pref = import_mod.prefix
    replacements = MAPPING[import_mod.value]

    # create a Node list of the replacement modules, comma-separated;
    # the final entry gets no trailing comma.
    names = []
    for entry in replacements[:-1]:
        names.append(Name(entry[0], prefix=pref))
        names.append(Comma())
    names.append(Name(replacements[-1][0], prefix=pref))

    import_mod.replace(names)
def function[transform_import, parameter[self, node, results]]: constant[Transform for the basic import case. Replaces the old import name with a comma separated list of its replacements. ] variable[import_mod] assign[=] call[name[results].get, parameter[constant[module]]] variable[pref] assign[=] name[import_mod].prefix variable[names] assign[=] list[[]] for taget[name[name]] in starred[call[call[name[MAPPING]][name[import_mod].value]][<ast.Slice object at 0x7da1b07d2b30>]] begin[:] call[name[names].extend, parameter[list[[<ast.Call object at 0x7da1b07d0160>, <ast.Call object at 0x7da1b07d0640>]]]] call[name[names].append, parameter[call[name[Name], parameter[call[call[call[name[MAPPING]][name[import_mod].value]][<ast.UnaryOp object at 0x7da1b07d1780>]][constant[0]]]]]] call[name[import_mod].replace, parameter[name[names]]]
keyword[def] identifier[transform_import] ( identifier[self] , identifier[node] , identifier[results] ): literal[string] identifier[import_mod] = identifier[results] . identifier[get] ( literal[string] ) identifier[pref] = identifier[import_mod] . identifier[prefix] identifier[names] =[] keyword[for] identifier[name] keyword[in] identifier[MAPPING] [ identifier[import_mod] . identifier[value] ][:- literal[int] ]: identifier[names] . identifier[extend] ([ identifier[Name] ( identifier[name] [ literal[int] ], identifier[prefix] = identifier[pref] ), identifier[Comma] ()]) identifier[names] . identifier[append] ( identifier[Name] ( identifier[MAPPING] [ identifier[import_mod] . identifier[value] ][- literal[int] ][ literal[int] ], identifier[prefix] = identifier[pref] )) identifier[import_mod] . identifier[replace] ( identifier[names] )
def transform_import(self, node, results): """Transform for the basic import case. Replaces the old import name with a comma separated list of its replacements. """ import_mod = results.get('module') pref = import_mod.prefix names = [] # create a Node list of the replacement modules for name in MAPPING[import_mod.value][:-1]: names.extend([Name(name[0], prefix=pref), Comma()]) # depends on [control=['for'], data=['name']] names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref)) import_mod.replace(names)
def get_data(self):
    """
    attempt to read measurements file in working directory.

    Reads magic_measurements.txt from self.WD via pmag.magic_read and,
    for each measurement record, ensures the location/site/sample/specimen
    hierarchy objects exist (creating and parenting them as needed) and
    adds the measurement itself.  Returns {} if the file is missing or
    unreadable.  NOTE(review): on a 'bad_file' result this only prints an
    error and falls through to iterate meas_data anyway -- confirm that is
    intended.
    """
    meas_file = os.path.join(self.WD, 'magic_measurements.txt')
    if not os.path.isfile(meas_file):
        print("-I- No magic_measurements.txt file")
        return {}
    try:
        meas_data, file_type = pmag.magic_read(meas_file)
    except IOError:
        print("-I- No magic_measurements.txt file")
        return {}
    if file_type == 'bad_file':
        print("-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.")
    old_specimen_name = ''
    #start_time = time.time()
    # Known measurement names, used to skip duplicates cheaply below.
    meas_name_list = [measurement.name for measurement in self.measurements]
    for rec in meas_data:
        # get citation information
        citation = rec.get('er_citation_names', 'This study')
        if 'This study' not in citation:
            # always append 'This study' to any existing citation string
            citation = citation.strip() + ':This study'
        er_data = {'er_citation_names': citation}
        pmag_data = {'er_citation_names': 'This study'}
        specimen_name = rec["er_specimen_name"]
        # ignore measurement if there is no specimen
        if specimen_name == "" or specimen_name == " ":
            continue
        # if we've moved onto a new specimen, make sure a sample/site/location
        # exists for that specimen
        if specimen_name != old_specimen_name:
            sample_name = rec["er_sample_name"]
            site_name = rec["er_site_name"]
            location_name = rec["er_location_name"]
            # add items and parents
            # Each level is looked up first and only created if both the
            # name is non-empty and no object with that name exists yet.
            location = self.find_by_name(location_name, self.locations)
            if location_name and not location:
                location = self.add_location(location_name, er_data=er_data,
                                             pmag_data=pmag_data)
            site = self.find_by_name(site_name, self.sites)
            if site_name and not site:
                site = self.add_site(site_name, location_name, er_data, pmag_data)
            sample = self.find_by_name(sample_name, self.samples)
            if sample_name and not sample:
                sample = self.add_sample(sample_name, site_name, er_data, pmag_data)
            specimen = self.find_by_name(specimen_name, self.specimens)
            if specimen_name and not specimen:
                specimen = self.add_specimen(specimen_name, sample_name,
                                             er_data, pmag_data)
            # add child_items
            # Link each child into its parent's list unless already present.
            if sample and not self.find_by_name(specimen_name, sample.specimens):
                sample.specimens.append(specimen)
            if site and not self.find_by_name(sample_name, site.samples):
                site.samples.append(sample)
            if location and not self.find_by_name(site_name, location.sites):
                location.sites.append(site)
        exp_name = rec['magic_experiment_name']
        meas_num = rec['measurement_number']
        # Measurement names are '<experiment>_<number>'.
        meas_name = exp_name + '_' + str(meas_num)
        measurement = self.find_by_name(meas_name, self.measurements, meas_name_list)
        if not measurement:
            self.add_measurement(exp_name, meas_num, specimen.name, rec)
            meas_name_list.append(meas_name)
        old_specimen_name = specimen_name
def function[get_data, parameter[self]]: constant[ attempt to read measurements file in working directory. ] variable[meas_file] assign[=] call[name[os].path.join, parameter[name[self].WD, constant[magic_measurements.txt]]] if <ast.UnaryOp object at 0x7da1b01233d0> begin[:] call[name[print], parameter[constant[-I- No magic_measurements.txt file]]] return[dictionary[[], []]] <ast.Try object at 0x7da1b0122c50> if compare[name[file_type] equal[==] constant[bad_file]] begin[:] call[name[print], parameter[constant[-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.]]] variable[old_specimen_name] assign[=] constant[] variable[meas_name_list] assign[=] <ast.ListComp object at 0x7da1b0123850> for taget[name[rec]] in starred[name[meas_data]] begin[:] variable[citation] assign[=] call[name[rec].get, parameter[constant[er_citation_names], constant[This study]]] if compare[constant[This study] <ast.NotIn object at 0x7da2590d7190> name[citation]] begin[:] variable[citation] assign[=] binary_operation[call[name[citation].strip, parameter[]] + constant[:This study]] variable[er_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0123e20>], [<ast.Name object at 0x7da1b0123eb0>]] variable[pmag_data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0123fd0>], [<ast.Constant object at 0x7da1b0123ca0>]] variable[specimen_name] assign[=] call[name[rec]][constant[er_specimen_name]] if <ast.BoolOp object at 0x7da1b048eaa0> begin[:] continue if compare[name[specimen_name] not_equal[!=] name[old_specimen_name]] begin[:] variable[sample_name] assign[=] call[name[rec]][constant[er_sample_name]] variable[site_name] assign[=] call[name[rec]][constant[er_site_name]] variable[location_name] assign[=] call[name[rec]][constant[er_location_name]] variable[location] assign[=] call[name[self].find_by_name, parameter[name[location_name], name[self].locations]] if <ast.BoolOp object at 0x7da1b04a4d30> begin[:] variable[location] assign[=] call[name[self].add_location, 
parameter[name[location_name]]] variable[site] assign[=] call[name[self].find_by_name, parameter[name[site_name], name[self].sites]] if <ast.BoolOp object at 0x7da1b04a4c40> begin[:] variable[site] assign[=] call[name[self].add_site, parameter[name[site_name], name[location_name], name[er_data], name[pmag_data]]] variable[sample] assign[=] call[name[self].find_by_name, parameter[name[sample_name], name[self].samples]] if <ast.BoolOp object at 0x7da1b04a4730> begin[:] variable[sample] assign[=] call[name[self].add_sample, parameter[name[sample_name], name[site_name], name[er_data], name[pmag_data]]] variable[specimen] assign[=] call[name[self].find_by_name, parameter[name[specimen_name], name[self].specimens]] if <ast.BoolOp object at 0x7da1b048ead0> begin[:] variable[specimen] assign[=] call[name[self].add_specimen, parameter[name[specimen_name], name[sample_name], name[er_data], name[pmag_data]]] if <ast.BoolOp object at 0x7da1b048eb60> begin[:] call[name[sample].specimens.append, parameter[name[specimen]]] if <ast.BoolOp object at 0x7da1b048e680> begin[:] call[name[site].samples.append, parameter[name[sample]]] if <ast.BoolOp object at 0x7da1b048dd80> begin[:] call[name[location].sites.append, parameter[name[site]]] variable[exp_name] assign[=] call[name[rec]][constant[magic_experiment_name]] variable[meas_num] assign[=] call[name[rec]][constant[measurement_number]] variable[meas_name] assign[=] binary_operation[binary_operation[name[exp_name] + constant[_]] + call[name[str], parameter[name[meas_num]]]] variable[measurement] assign[=] call[name[self].find_by_name, parameter[name[meas_name], name[self].measurements, name[meas_name_list]]] if <ast.UnaryOp object at 0x7da1b048f9d0> begin[:] call[name[self].add_measurement, parameter[name[exp_name], name[meas_num], name[specimen].name, name[rec]]] call[name[meas_name_list].append, parameter[name[meas_name]]] variable[old_specimen_name] assign[=] name[specimen_name]
keyword[def] identifier[get_data] ( identifier[self] ): literal[string] identifier[meas_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[WD] , literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[meas_file] ): identifier[print] ( literal[string] ) keyword[return] {} keyword[try] : identifier[meas_data] , identifier[file_type] = identifier[pmag] . identifier[magic_read] ( identifier[meas_file] ) keyword[except] identifier[IOError] : identifier[print] ( literal[string] ) keyword[return] {} keyword[if] identifier[file_type] == literal[string] : identifier[print] ( literal[string] ) identifier[old_specimen_name] = literal[string] identifier[meas_name_list] =[ identifier[measurement] . identifier[name] keyword[for] identifier[measurement] keyword[in] identifier[self] . identifier[measurements] ] keyword[for] identifier[rec] keyword[in] identifier[meas_data] : identifier[citation] = identifier[rec] . identifier[get] ( literal[string] , literal[string] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[citation] : identifier[citation] = identifier[citation] . identifier[strip] ()+ literal[string] identifier[er_data] ={ literal[string] : identifier[citation] } identifier[pmag_data] ={ literal[string] : literal[string] } identifier[specimen_name] = identifier[rec] [ literal[string] ] keyword[if] identifier[specimen_name] == literal[string] keyword[or] identifier[specimen_name] == literal[string] : keyword[continue] keyword[if] identifier[specimen_name] != identifier[old_specimen_name] : identifier[sample_name] = identifier[rec] [ literal[string] ] identifier[site_name] = identifier[rec] [ literal[string] ] identifier[location_name] = identifier[rec] [ literal[string] ] identifier[location] = identifier[self] . identifier[find_by_name] ( identifier[location_name] , identifier[self] . 
identifier[locations] ) keyword[if] identifier[location_name] keyword[and] keyword[not] identifier[location] : identifier[location] = identifier[self] . identifier[add_location] ( identifier[location_name] , identifier[er_data] = identifier[er_data] , identifier[pmag_data] = identifier[pmag_data] ) identifier[site] = identifier[self] . identifier[find_by_name] ( identifier[site_name] , identifier[self] . identifier[sites] ) keyword[if] identifier[site_name] keyword[and] keyword[not] identifier[site] : identifier[site] = identifier[self] . identifier[add_site] ( identifier[site_name] , identifier[location_name] , identifier[er_data] , identifier[pmag_data] ) identifier[sample] = identifier[self] . identifier[find_by_name] ( identifier[sample_name] , identifier[self] . identifier[samples] ) keyword[if] identifier[sample_name] keyword[and] keyword[not] identifier[sample] : identifier[sample] = identifier[self] . identifier[add_sample] ( identifier[sample_name] , identifier[site_name] , identifier[er_data] , identifier[pmag_data] ) identifier[specimen] = identifier[self] . identifier[find_by_name] ( identifier[specimen_name] , identifier[self] . identifier[specimens] ) keyword[if] identifier[specimen_name] keyword[and] keyword[not] identifier[specimen] : identifier[specimen] = identifier[self] . identifier[add_specimen] ( identifier[specimen_name] , identifier[sample_name] , identifier[er_data] , identifier[pmag_data] ) keyword[if] identifier[sample] keyword[and] keyword[not] identifier[self] . identifier[find_by_name] ( identifier[specimen_name] , identifier[sample] . identifier[specimens] ): identifier[sample] . identifier[specimens] . identifier[append] ( identifier[specimen] ) keyword[if] identifier[site] keyword[and] keyword[not] identifier[self] . identifier[find_by_name] ( identifier[sample_name] , identifier[site] . identifier[samples] ): identifier[site] . identifier[samples] . 
identifier[append] ( identifier[sample] ) keyword[if] identifier[location] keyword[and] keyword[not] identifier[self] . identifier[find_by_name] ( identifier[site_name] , identifier[location] . identifier[sites] ): identifier[location] . identifier[sites] . identifier[append] ( identifier[site] ) identifier[exp_name] = identifier[rec] [ literal[string] ] identifier[meas_num] = identifier[rec] [ literal[string] ] identifier[meas_name] = identifier[exp_name] + literal[string] + identifier[str] ( identifier[meas_num] ) identifier[measurement] = identifier[self] . identifier[find_by_name] ( identifier[meas_name] , identifier[self] . identifier[measurements] , identifier[meas_name_list] ) keyword[if] keyword[not] identifier[measurement] : identifier[self] . identifier[add_measurement] ( identifier[exp_name] , identifier[meas_num] , identifier[specimen] . identifier[name] , identifier[rec] ) identifier[meas_name_list] . identifier[append] ( identifier[meas_name] ) identifier[old_specimen_name] = identifier[specimen_name]
def get_data(self): """ attempt to read measurements file in working directory. """ meas_file = os.path.join(self.WD, 'magic_measurements.txt') if not os.path.isfile(meas_file): print('-I- No magic_measurements.txt file') return {} # depends on [control=['if'], data=[]] try: (meas_data, file_type) = pmag.magic_read(meas_file) # depends on [control=['try'], data=[]] except IOError: print('-I- No magic_measurements.txt file') return {} # depends on [control=['except'], data=[]] if file_type == 'bad_file': print("-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.") # depends on [control=['if'], data=[]] old_specimen_name = '' #start_time = time.time() meas_name_list = [measurement.name for measurement in self.measurements] for rec in meas_data: # get citation information citation = rec.get('er_citation_names', 'This study') if 'This study' not in citation: citation = citation.strip() + ':This study' # depends on [control=['if'], data=['citation']] er_data = {'er_citation_names': citation} pmag_data = {'er_citation_names': 'This study'} specimen_name = rec['er_specimen_name'] # ignore measurement if there is no specimen if specimen_name == '' or specimen_name == ' ': continue # depends on [control=['if'], data=[]] # if we've moved onto a new specimen, make sure a sample/site/location # exists for that specimen if specimen_name != old_specimen_name: sample_name = rec['er_sample_name'] site_name = rec['er_site_name'] location_name = rec['er_location_name'] # add items and parents location = self.find_by_name(location_name, self.locations) if location_name and (not location): location = self.add_location(location_name, er_data=er_data, pmag_data=pmag_data) # depends on [control=['if'], data=[]] site = self.find_by_name(site_name, self.sites) if site_name and (not site): site = self.add_site(site_name, location_name, er_data, pmag_data) # depends on [control=['if'], data=[]] sample = self.find_by_name(sample_name, self.samples) if sample_name and (not 
sample): sample = self.add_sample(sample_name, site_name, er_data, pmag_data) # depends on [control=['if'], data=[]] specimen = self.find_by_name(specimen_name, self.specimens) if specimen_name and (not specimen): specimen = self.add_specimen(specimen_name, sample_name, er_data, pmag_data) # depends on [control=['if'], data=[]] # add child_items if sample and (not self.find_by_name(specimen_name, sample.specimens)): sample.specimens.append(specimen) # depends on [control=['if'], data=[]] if site and (not self.find_by_name(sample_name, site.samples)): site.samples.append(sample) # depends on [control=['if'], data=[]] if location and (not self.find_by_name(site_name, location.sites)): location.sites.append(site) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['specimen_name']] exp_name = rec['magic_experiment_name'] meas_num = rec['measurement_number'] meas_name = exp_name + '_' + str(meas_num) measurement = self.find_by_name(meas_name, self.measurements, meas_name_list) if not measurement: self.add_measurement(exp_name, meas_num, specimen.name, rec) meas_name_list.append(meas_name) # depends on [control=['if'], data=[]] old_specimen_name = specimen_name # depends on [control=['for'], data=['rec']]
async def publish(self, exchange: str, routing: str, message: Union[str, dict], exchange_type: ExchangeType_ = 'topic'): """ Publish a new event message. Connections are created automatically when calling `publish()`, and will attempt to reconnect if connection was lost. For more information on publishing AMQP messages, see https://www.rabbitmq.com/tutorials/tutorial-three-python.html Args: exchange (str): The AMQP message exchange to publish the message to. A new exchange will be created if it does not yet exist. routing (str): The routing identification with which the message should be published. Subscribers use routing information for fine-grained filtering. Routing can be expressed as a '.'-separated path. message (Union[str, dict]): The message body. It will be serialized before transmission. exchange_type (ExchangeType_, optional): When publishing to a previously undeclared exchange, it will be created. `exchange_type` defines how the exchange distributes messages between subscribers. The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'. Raises: aioamqp.exceptions.AioamqpException: * Failed to connect to AMQP host * Failed to send message * `exchange` already exists, but has a different `exchange_type` """ try: await self._ensure_channel() except Exception: # If server has restarted since our last attempt, ensure channel will fail (old connection invalid) # Retry once to check whether a new connection can be made await self._ensure_channel() # json.dumps() also correctly handles strings data = json.dumps(message).encode() await self._channel.exchange_declare( exchange_name=exchange, type_name=exchange_type, auto_delete=True ) await self._channel.basic_publish( payload=data, exchange_name=exchange, routing_key=routing )
<ast.AsyncFunctionDef object at 0x7da204963820>
keyword[async] keyword[def] identifier[publish] ( identifier[self] , identifier[exchange] : identifier[str] , identifier[routing] : identifier[str] , identifier[message] : identifier[Union] [ identifier[str] , identifier[dict] ], identifier[exchange_type] : identifier[ExchangeType_] = literal[string] ): literal[string] keyword[try] : keyword[await] identifier[self] . identifier[_ensure_channel] () keyword[except] identifier[Exception] : keyword[await] identifier[self] . identifier[_ensure_channel] () identifier[data] = identifier[json] . identifier[dumps] ( identifier[message] ). identifier[encode] () keyword[await] identifier[self] . identifier[_channel] . identifier[exchange_declare] ( identifier[exchange_name] = identifier[exchange] , identifier[type_name] = identifier[exchange_type] , identifier[auto_delete] = keyword[True] ) keyword[await] identifier[self] . identifier[_channel] . identifier[basic_publish] ( identifier[payload] = identifier[data] , identifier[exchange_name] = identifier[exchange] , identifier[routing_key] = identifier[routing] )
async def publish(self, exchange: str, routing: str, message: Union[str, dict], exchange_type: ExchangeType_='topic'): """ Publish a new event message. Connections are created automatically when calling `publish()`, and will attempt to reconnect if connection was lost. For more information on publishing AMQP messages, see https://www.rabbitmq.com/tutorials/tutorial-three-python.html Args: exchange (str): The AMQP message exchange to publish the message to. A new exchange will be created if it does not yet exist. routing (str): The routing identification with which the message should be published. Subscribers use routing information for fine-grained filtering. Routing can be expressed as a '.'-separated path. message (Union[str, dict]): The message body. It will be serialized before transmission. exchange_type (ExchangeType_, optional): When publishing to a previously undeclared exchange, it will be created. `exchange_type` defines how the exchange distributes messages between subscribers. The default is 'topic', and acceptable values are: 'topic', 'direct', or 'fanout'. Raises: aioamqp.exceptions.AioamqpException: * Failed to connect to AMQP host * Failed to send message * `exchange` already exists, but has a different `exchange_type` """ try: await self._ensure_channel() # depends on [control=['try'], data=[]] except Exception: # If server has restarted since our last attempt, ensure channel will fail (old connection invalid) # Retry once to check whether a new connection can be made await self._ensure_channel() # depends on [control=['except'], data=[]] # json.dumps() also correctly handles strings data = json.dumps(message).encode() await self._channel.exchange_declare(exchange_name=exchange, type_name=exchange_type, auto_delete=True) await self._channel.basic_publish(payload=data, exchange_name=exchange, routing_key=routing)
def add_tools(self, *tools): ''' Adds tools to the plot. Args: *tools (Tool) : the tools to add to the Plot Returns: None ''' for tool in tools: if not isinstance(tool, Tool): raise ValueError("All arguments to add_tool must be Tool subclasses.") self.toolbar.tools.append(tool)
def function[add_tools, parameter[self]]: constant[ Adds tools to the plot. Args: *tools (Tool) : the tools to add to the Plot Returns: None ] for taget[name[tool]] in starred[name[tools]] begin[:] if <ast.UnaryOp object at 0x7da18f8122f0> begin[:] <ast.Raise object at 0x7da18f813640> call[name[self].toolbar.tools.append, parameter[name[tool]]]
keyword[def] identifier[add_tools] ( identifier[self] ,* identifier[tools] ): literal[string] keyword[for] identifier[tool] keyword[in] identifier[tools] : keyword[if] keyword[not] identifier[isinstance] ( identifier[tool] , identifier[Tool] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[toolbar] . identifier[tools] . identifier[append] ( identifier[tool] )
def add_tools(self, *tools): """ Adds tools to the plot. Args: *tools (Tool) : the tools to add to the Plot Returns: None """ for tool in tools: if not isinstance(tool, Tool): raise ValueError('All arguments to add_tool must be Tool subclasses.') # depends on [control=['if'], data=[]] self.toolbar.tools.append(tool) # depends on [control=['for'], data=['tool']]
def updateCalibration(self): """Updates the current calibration according to intenal values. For example, if the stimulus samplerate changes the calibration needs to be recalculated.""" if self.samplerate() != self._calibration_fs: self.setCalibration(self._attenuationVector, self._calFrequencies, self._calFrange)
def function[updateCalibration, parameter[self]]: constant[Updates the current calibration according to intenal values. For example, if the stimulus samplerate changes the calibration needs to be recalculated.] if compare[call[name[self].samplerate, parameter[]] not_equal[!=] name[self]._calibration_fs] begin[:] call[name[self].setCalibration, parameter[name[self]._attenuationVector, name[self]._calFrequencies, name[self]._calFrange]]
keyword[def] identifier[updateCalibration] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[samplerate] ()!= identifier[self] . identifier[_calibration_fs] : identifier[self] . identifier[setCalibration] ( identifier[self] . identifier[_attenuationVector] , identifier[self] . identifier[_calFrequencies] , identifier[self] . identifier[_calFrange] )
def updateCalibration(self): """Updates the current calibration according to intenal values. For example, if the stimulus samplerate changes the calibration needs to be recalculated.""" if self.samplerate() != self._calibration_fs: self.setCalibration(self._attenuationVector, self._calFrequencies, self._calFrange) # depends on [control=['if'], data=[]]
def read_maxquant(f, header=0, index_col='id', **kwargs): """ Load the quantified table output from MaxQuant run, e.g. - Proteingroups.txt - Phospho (STY)Sites.txt :param f: Source file :return: Pandas dataframe of imported data """ df = pd.read_csv(f, delimiter='\t', header=header, index_col=index_col, **kwargs) return df
def function[read_maxquant, parameter[f, header, index_col]]: constant[ Load the quantified table output from MaxQuant run, e.g. - Proteingroups.txt - Phospho (STY)Sites.txt :param f: Source file :return: Pandas dataframe of imported data ] variable[df] assign[=] call[name[pd].read_csv, parameter[name[f]]] return[name[df]]
keyword[def] identifier[read_maxquant] ( identifier[f] , identifier[header] = literal[int] , identifier[index_col] = literal[string] ,** identifier[kwargs] ): literal[string] identifier[df] = identifier[pd] . identifier[read_csv] ( identifier[f] , identifier[delimiter] = literal[string] , identifier[header] = identifier[header] , identifier[index_col] = identifier[index_col] ,** identifier[kwargs] ) keyword[return] identifier[df]
def read_maxquant(f, header=0, index_col='id', **kwargs): """ Load the quantified table output from MaxQuant run, e.g. - Proteingroups.txt - Phospho (STY)Sites.txt :param f: Source file :return: Pandas dataframe of imported data """ df = pd.read_csv(f, delimiter='\t', header=header, index_col=index_col, **kwargs) return df
def modify_customer(self, handle, address, phone, email=None, vat=None, fax=None, company_name=None, additional_data=None, extension_additional_data=None): """Modify a customer.""" self.request(E.modifyCustomerRequest( E.handle(handle), E.vat(vat or ''), _get_phone_xml('phone', phone), _get_phone_xml('fax', fax), E.address( E.street(address.street), E.number(address.number), E.suffix(address.suffix or ''), E.zipcode(address.zipcode), E.city(address.city), E.state(address.state or ''), E.country(address.country), ), E.email(email or ''), _additional_data(additional_data), _extension_additional_data(extension_additional_data), )) return True
def function[modify_customer, parameter[self, handle, address, phone, email, vat, fax, company_name, additional_data, extension_additional_data]]: constant[Modify a customer.] call[name[self].request, parameter[call[name[E].modifyCustomerRequest, parameter[call[name[E].handle, parameter[name[handle]]], call[name[E].vat, parameter[<ast.BoolOp object at 0x7da204622c80>]], call[name[_get_phone_xml], parameter[constant[phone], name[phone]]], call[name[_get_phone_xml], parameter[constant[fax], name[fax]]], call[name[E].address, parameter[call[name[E].street, parameter[name[address].street]], call[name[E].number, parameter[name[address].number]], call[name[E].suffix, parameter[<ast.BoolOp object at 0x7da204622950>]], call[name[E].zipcode, parameter[name[address].zipcode]], call[name[E].city, parameter[name[address].city]], call[name[E].state, parameter[<ast.BoolOp object at 0x7da204622f50>]], call[name[E].country, parameter[name[address].country]]]], call[name[E].email, parameter[<ast.BoolOp object at 0x7da204621540>]], call[name[_additional_data], parameter[name[additional_data]]], call[name[_extension_additional_data], parameter[name[extension_additional_data]]]]]]] return[constant[True]]
keyword[def] identifier[modify_customer] ( identifier[self] , identifier[handle] , identifier[address] , identifier[phone] , identifier[email] = keyword[None] , identifier[vat] = keyword[None] , identifier[fax] = keyword[None] , identifier[company_name] = keyword[None] , identifier[additional_data] = keyword[None] , identifier[extension_additional_data] = keyword[None] ): literal[string] identifier[self] . identifier[request] ( identifier[E] . identifier[modifyCustomerRequest] ( identifier[E] . identifier[handle] ( identifier[handle] ), identifier[E] . identifier[vat] ( identifier[vat] keyword[or] literal[string] ), identifier[_get_phone_xml] ( literal[string] , identifier[phone] ), identifier[_get_phone_xml] ( literal[string] , identifier[fax] ), identifier[E] . identifier[address] ( identifier[E] . identifier[street] ( identifier[address] . identifier[street] ), identifier[E] . identifier[number] ( identifier[address] . identifier[number] ), identifier[E] . identifier[suffix] ( identifier[address] . identifier[suffix] keyword[or] literal[string] ), identifier[E] . identifier[zipcode] ( identifier[address] . identifier[zipcode] ), identifier[E] . identifier[city] ( identifier[address] . identifier[city] ), identifier[E] . identifier[state] ( identifier[address] . identifier[state] keyword[or] literal[string] ), identifier[E] . identifier[country] ( identifier[address] . identifier[country] ), ), identifier[E] . identifier[email] ( identifier[email] keyword[or] literal[string] ), identifier[_additional_data] ( identifier[additional_data] ), identifier[_extension_additional_data] ( identifier[extension_additional_data] ), )) keyword[return] keyword[True]
def modify_customer(self, handle, address, phone, email=None, vat=None, fax=None, company_name=None, additional_data=None, extension_additional_data=None): """Modify a customer.""" self.request(E.modifyCustomerRequest(E.handle(handle), E.vat(vat or ''), _get_phone_xml('phone', phone), _get_phone_xml('fax', fax), E.address(E.street(address.street), E.number(address.number), E.suffix(address.suffix or ''), E.zipcode(address.zipcode), E.city(address.city), E.state(address.state or ''), E.country(address.country)), E.email(email or ''), _additional_data(additional_data), _extension_additional_data(extension_additional_data))) return True
def transform(self, X): ''' Transform the segmented time series data into feature data. If contextual data is included in X, it is returned with the feature data. Parameters ---------- X : array-like, shape [n_series, ...] Segmented time series data and (optionally) contextual data Returns ------- X_new : array shape [n_series, ...] Feature representation of segmented time series data and contextual data ''' self._check_if_fitted() Xt, Xc = get_ts_data_parts(X) check_array(Xt, dtype='numeric', ensure_2d=False, allow_nd=True) fts = np.column_stack([self.features[f](Xt) for f in self.features]) if Xc is not None: fts = np.column_stack([fts, Xc]) return fts
def function[transform, parameter[self, X]]: constant[ Transform the segmented time series data into feature data. If contextual data is included in X, it is returned with the feature data. Parameters ---------- X : array-like, shape [n_series, ...] Segmented time series data and (optionally) contextual data Returns ------- X_new : array shape [n_series, ...] Feature representation of segmented time series data and contextual data ] call[name[self]._check_if_fitted, parameter[]] <ast.Tuple object at 0x7da1b1577340> assign[=] call[name[get_ts_data_parts], parameter[name[X]]] call[name[check_array], parameter[name[Xt]]] variable[fts] assign[=] call[name[np].column_stack, parameter[<ast.ListComp object at 0x7da1b26ad5a0>]] if compare[name[Xc] is_not constant[None]] begin[:] variable[fts] assign[=] call[name[np].column_stack, parameter[list[[<ast.Name object at 0x7da1b26afeb0>, <ast.Name object at 0x7da1b26ae0b0>]]]] return[name[fts]]
keyword[def] identifier[transform] ( identifier[self] , identifier[X] ): literal[string] identifier[self] . identifier[_check_if_fitted] () identifier[Xt] , identifier[Xc] = identifier[get_ts_data_parts] ( identifier[X] ) identifier[check_array] ( identifier[Xt] , identifier[dtype] = literal[string] , identifier[ensure_2d] = keyword[False] , identifier[allow_nd] = keyword[True] ) identifier[fts] = identifier[np] . identifier[column_stack] ([ identifier[self] . identifier[features] [ identifier[f] ]( identifier[Xt] ) keyword[for] identifier[f] keyword[in] identifier[self] . identifier[features] ]) keyword[if] identifier[Xc] keyword[is] keyword[not] keyword[None] : identifier[fts] = identifier[np] . identifier[column_stack] ([ identifier[fts] , identifier[Xc] ]) keyword[return] identifier[fts]
def transform(self, X): """ Transform the segmented time series data into feature data. If contextual data is included in X, it is returned with the feature data. Parameters ---------- X : array-like, shape [n_series, ...] Segmented time series data and (optionally) contextual data Returns ------- X_new : array shape [n_series, ...] Feature representation of segmented time series data and contextual data """ self._check_if_fitted() (Xt, Xc) = get_ts_data_parts(X) check_array(Xt, dtype='numeric', ensure_2d=False, allow_nd=True) fts = np.column_stack([self.features[f](Xt) for f in self.features]) if Xc is not None: fts = np.column_stack([fts, Xc]) # depends on [control=['if'], data=['Xc']] return fts
def t_multiline_OPTION_AND_VALUE(self, t): r'[^\r\n]+' t.lexer.multiline_newline_seen = False if t.value.endswith('\\'): return t.type = "OPTION_AND_VALUE" t.lexer.begin('INITIAL') value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos + 1] t.lexer.lineno += len(re.findall(r'\r\n|\n|\r', value)) value = value.replace('\\\n', '').replace('\r', '').replace('\n', '') option, value = self._parse_option_value(value) process, option, value = self._pre_parse_value(option, value) if not process: return t.value = option, value return t
def function[t_multiline_OPTION_AND_VALUE, parameter[self, t]]: constant[[^\r\n]+] name[t].lexer.multiline_newline_seen assign[=] constant[False] if call[name[t].value.endswith, parameter[constant[\]]] begin[:] return[None] name[t].type assign[=] constant[OPTION_AND_VALUE] call[name[t].lexer.begin, parameter[constant[INITIAL]]] variable[value] assign[=] call[name[t].lexer.lexdata][<ast.Slice object at 0x7da20c7cb0d0>] <ast.AugAssign object at 0x7da20c7c8a90> variable[value] assign[=] call[call[call[name[value].replace, parameter[constant[\ ], constant[]]].replace, parameter[constant[ ], constant[]]].replace, parameter[constant[ ], constant[]]] <ast.Tuple object at 0x7da20c7cb790> assign[=] call[name[self]._parse_option_value, parameter[name[value]]] <ast.Tuple object at 0x7da20c7c94e0> assign[=] call[name[self]._pre_parse_value, parameter[name[option], name[value]]] if <ast.UnaryOp object at 0x7da20c7c9900> begin[:] return[None] name[t].value assign[=] tuple[[<ast.Name object at 0x7da20c7cb580>, <ast.Name object at 0x7da20c7c9ff0>]] return[name[t]]
keyword[def] identifier[t_multiline_OPTION_AND_VALUE] ( identifier[self] , identifier[t] ): literal[string] identifier[t] . identifier[lexer] . identifier[multiline_newline_seen] = keyword[False] keyword[if] identifier[t] . identifier[value] . identifier[endswith] ( literal[string] ): keyword[return] identifier[t] . identifier[type] = literal[string] identifier[t] . identifier[lexer] . identifier[begin] ( literal[string] ) identifier[value] = identifier[t] . identifier[lexer] . identifier[lexdata] [ identifier[t] . identifier[lexer] . identifier[code_start] : identifier[t] . identifier[lexer] . identifier[lexpos] + literal[int] ] identifier[t] . identifier[lexer] . identifier[lineno] += identifier[len] ( identifier[re] . identifier[findall] ( literal[string] , identifier[value] )) identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) identifier[option] , identifier[value] = identifier[self] . identifier[_parse_option_value] ( identifier[value] ) identifier[process] , identifier[option] , identifier[value] = identifier[self] . identifier[_pre_parse_value] ( identifier[option] , identifier[value] ) keyword[if] keyword[not] identifier[process] : keyword[return] identifier[t] . identifier[value] = identifier[option] , identifier[value] keyword[return] identifier[t]
def t_multiline_OPTION_AND_VALUE(self, t): """[^\\r\\n]+""" t.lexer.multiline_newline_seen = False if t.value.endswith('\\'): return # depends on [control=['if'], data=[]] t.type = 'OPTION_AND_VALUE' t.lexer.begin('INITIAL') value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos + 1] t.lexer.lineno += len(re.findall('\\r\\n|\\n|\\r', value)) value = value.replace('\\\n', '').replace('\r', '').replace('\n', '') (option, value) = self._parse_option_value(value) (process, option, value) = self._pre_parse_value(option, value) if not process: return # depends on [control=['if'], data=[]] t.value = (option, value) return t
def get_last_lineno(node): """Recursively find the last line number of the ast node.""" max_lineno = 0 if hasattr(node, "lineno"): max_lineno = node.lineno for _, field in ast.iter_fields(node): if isinstance(field, list): for value in field: if isinstance(value, ast.AST): max_lineno = max(max_lineno, get_last_lineno(value)) elif isinstance(field, ast.AST): max_lineno = max(max_lineno, get_last_lineno(field)) return max_lineno
def function[get_last_lineno, parameter[node]]: constant[Recursively find the last line number of the ast node.] variable[max_lineno] assign[=] constant[0] if call[name[hasattr], parameter[name[node], constant[lineno]]] begin[:] variable[max_lineno] assign[=] name[node].lineno for taget[tuple[[<ast.Name object at 0x7da20e9634c0>, <ast.Name object at 0x7da20e962dd0>]]] in starred[call[name[ast].iter_fields, parameter[name[node]]]] begin[:] if call[name[isinstance], parameter[name[field], name[list]]] begin[:] for taget[name[value]] in starred[name[field]] begin[:] if call[name[isinstance], parameter[name[value], name[ast].AST]] begin[:] variable[max_lineno] assign[=] call[name[max], parameter[name[max_lineno], call[name[get_last_lineno], parameter[name[value]]]]] return[name[max_lineno]]
keyword[def] identifier[get_last_lineno] ( identifier[node] ): literal[string] identifier[max_lineno] = literal[int] keyword[if] identifier[hasattr] ( identifier[node] , literal[string] ): identifier[max_lineno] = identifier[node] . identifier[lineno] keyword[for] identifier[_] , identifier[field] keyword[in] identifier[ast] . identifier[iter_fields] ( identifier[node] ): keyword[if] identifier[isinstance] ( identifier[field] , identifier[list] ): keyword[for] identifier[value] keyword[in] identifier[field] : keyword[if] identifier[isinstance] ( identifier[value] , identifier[ast] . identifier[AST] ): identifier[max_lineno] = identifier[max] ( identifier[max_lineno] , identifier[get_last_lineno] ( identifier[value] )) keyword[elif] identifier[isinstance] ( identifier[field] , identifier[ast] . identifier[AST] ): identifier[max_lineno] = identifier[max] ( identifier[max_lineno] , identifier[get_last_lineno] ( identifier[field] )) keyword[return] identifier[max_lineno]
def get_last_lineno(node): """Recursively find the last line number of the ast node.""" max_lineno = 0 if hasattr(node, 'lineno'): max_lineno = node.lineno # depends on [control=['if'], data=[]] for (_, field) in ast.iter_fields(node): if isinstance(field, list): for value in field: if isinstance(value, ast.AST): max_lineno = max(max_lineno, get_last_lineno(value)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]] elif isinstance(field, ast.AST): max_lineno = max(max_lineno, get_last_lineno(field)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return max_lineno
def read_text_file(filename, encoding="utf-8"): """ Reads a file under python3 with encoding (default UTF-8). Also works under python2, without encoding. Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp) principle. """ try: with open(filename, 'r', encoding) as f: r = f.read() except TypeError: with open(filename, 'r') as f: r = f.read() return r
def function[read_text_file, parameter[filename, encoding]]: constant[ Reads a file under python3 with encoding (default UTF-8). Also works under python2, without encoding. Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp) principle. ] <ast.Try object at 0x7da1b0618f10> return[name[r]]
keyword[def] identifier[read_text_file] ( identifier[filename] , identifier[encoding] = literal[string] ): literal[string] keyword[try] : keyword[with] identifier[open] ( identifier[filename] , literal[string] , identifier[encoding] ) keyword[as] identifier[f] : identifier[r] = identifier[f] . identifier[read] () keyword[except] identifier[TypeError] : keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : identifier[r] = identifier[f] . identifier[read] () keyword[return] identifier[r]
def read_text_file(filename, encoding='utf-8'): """ Reads a file under python3 with encoding (default UTF-8). Also works under python2, without encoding. Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp) principle. """ try: with open(filename, 'r', encoding) as f: r = f.read() # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]] except TypeError: with open(filename, 'r') as f: r = f.read() # depends on [control=['with'], data=['f']] # depends on [control=['except'], data=[]] return r
def _container_blacklist(self): """Container blacklist.""" if self.__container_blacklist is None: self.__container_blacklist = \ set(self.CLOUD_BROWSER_CONTAINER_BLACKLIST or []) return self.__container_blacklist
def function[_container_blacklist, parameter[self]]: constant[Container blacklist.] if compare[name[self].__container_blacklist is constant[None]] begin[:] name[self].__container_blacklist assign[=] call[name[set], parameter[<ast.BoolOp object at 0x7da18f00cbb0>]] return[name[self].__container_blacklist]
keyword[def] identifier[_container_blacklist] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[__container_blacklist] keyword[is] keyword[None] : identifier[self] . identifier[__container_blacklist] = identifier[set] ( identifier[self] . identifier[CLOUD_BROWSER_CONTAINER_BLACKLIST] keyword[or] []) keyword[return] identifier[self] . identifier[__container_blacklist]
def _container_blacklist(self): """Container blacklist.""" if self.__container_blacklist is None: self.__container_blacklist = set(self.CLOUD_BROWSER_CONTAINER_BLACKLIST or []) # depends on [control=['if'], data=[]] return self.__container_blacklist
def rescale_array_from_z1z2(array_rs, coef_rs=None): """Restore the values in a numpy array rescaled to the [z1,z2] interval. The transformation is carried out following the relation array = (array_rs + c_flux)/b_flux as explained in Appendix B1 of Cardiel (2009, MNRAS, 396, 680) Parameters ---------- array_rs : numpy array Numpy array previously rescaled to the [z1,z2] interval with the function rescale_array_to_z1z2(). coef_rs : tuple, floats Coefficients b_flux and c_flux previously employed in the rescaling operation. This tuple is one of the parameters returned by function_rescale_array_to_z1z2(). Returns ------- array : numpy array Array with restored values. """ if type(array_rs) is not np.ndarray: raise ValueError( "array_rs=" + str(array_rs) + "must be a numpy.ndarray") b_flux, c_flux = coef_rs array = (array_rs + c_flux) / b_flux return array
def function[rescale_array_from_z1z2, parameter[array_rs, coef_rs]]: constant[Restore the values in a numpy array rescaled to the [z1,z2] interval. The transformation is carried out following the relation array = (array_rs + c_flux)/b_flux as explained in Appendix B1 of Cardiel (2009, MNRAS, 396, 680) Parameters ---------- array_rs : numpy array Numpy array previously rescaled to the [z1,z2] interval with the function rescale_array_to_z1z2(). coef_rs : tuple, floats Coefficients b_flux and c_flux previously employed in the rescaling operation. This tuple is one of the parameters returned by function_rescale_array_to_z1z2(). Returns ------- array : numpy array Array with restored values. ] if compare[call[name[type], parameter[name[array_rs]]] is_not name[np].ndarray] begin[:] <ast.Raise object at 0x7da20c7cb640> <ast.Tuple object at 0x7da20c7caa10> assign[=] name[coef_rs] variable[array] assign[=] binary_operation[binary_operation[name[array_rs] + name[c_flux]] / name[b_flux]] return[name[array]]
keyword[def] identifier[rescale_array_from_z1z2] ( identifier[array_rs] , identifier[coef_rs] = keyword[None] ): literal[string] keyword[if] identifier[type] ( identifier[array_rs] ) keyword[is] keyword[not] identifier[np] . identifier[ndarray] : keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[array_rs] )+ literal[string] ) identifier[b_flux] , identifier[c_flux] = identifier[coef_rs] identifier[array] =( identifier[array_rs] + identifier[c_flux] )/ identifier[b_flux] keyword[return] identifier[array]
def rescale_array_from_z1z2(array_rs, coef_rs=None): """Restore the values in a numpy array rescaled to the [z1,z2] interval. The transformation is carried out following the relation array = (array_rs + c_flux)/b_flux as explained in Appendix B1 of Cardiel (2009, MNRAS, 396, 680) Parameters ---------- array_rs : numpy array Numpy array previously rescaled to the [z1,z2] interval with the function rescale_array_to_z1z2(). coef_rs : tuple, floats Coefficients b_flux and c_flux previously employed in the rescaling operation. This tuple is one of the parameters returned by function_rescale_array_to_z1z2(). Returns ------- array : numpy array Array with restored values. """ if type(array_rs) is not np.ndarray: raise ValueError('array_rs=' + str(array_rs) + 'must be a numpy.ndarray') # depends on [control=['if'], data=[]] (b_flux, c_flux) = coef_rs array = (array_rs + c_flux) / b_flux return array
def get_cas_client(self, request, provider, renew=False): """ return a CAS client object matching provider :param django.http.HttpRequest request: The current request object :param cas_server.models.FederatedIendityProvider provider: the user identity provider :return: The user CAS client object :rtype: :class:`federate.CASFederateValidateUser <cas_server.federate.CASFederateValidateUser>` """ # compute the current url, ignoring ticket dans provider GET parameters service_url = utils.get_current_url(request, {"ticket", "provider"}) self.service_url = service_url return CASFederateValidateUser(provider, service_url, renew=renew)
def function[get_cas_client, parameter[self, request, provider, renew]]: constant[ return a CAS client object matching provider :param django.http.HttpRequest request: The current request object :param cas_server.models.FederatedIendityProvider provider: the user identity provider :return: The user CAS client object :rtype: :class:`federate.CASFederateValidateUser <cas_server.federate.CASFederateValidateUser>` ] variable[service_url] assign[=] call[name[utils].get_current_url, parameter[name[request], <ast.Set object at 0x7da1b0d0c3d0>]] name[self].service_url assign[=] name[service_url] return[call[name[CASFederateValidateUser], parameter[name[provider], name[service_url]]]]
keyword[def] identifier[get_cas_client] ( identifier[self] , identifier[request] , identifier[provider] , identifier[renew] = keyword[False] ): literal[string] identifier[service_url] = identifier[utils] . identifier[get_current_url] ( identifier[request] ,{ literal[string] , literal[string] }) identifier[self] . identifier[service_url] = identifier[service_url] keyword[return] identifier[CASFederateValidateUser] ( identifier[provider] , identifier[service_url] , identifier[renew] = identifier[renew] )
def get_cas_client(self, request, provider, renew=False): """ return a CAS client object matching provider :param django.http.HttpRequest request: The current request object :param cas_server.models.FederatedIendityProvider provider: the user identity provider :return: The user CAS client object :rtype: :class:`federate.CASFederateValidateUser <cas_server.federate.CASFederateValidateUser>` """ # compute the current url, ignoring ticket dans provider GET parameters service_url = utils.get_current_url(request, {'ticket', 'provider'}) self.service_url = service_url return CASFederateValidateUser(provider, service_url, renew=renew)
def work(self): """ Start ternya work. First, import customer's service modules. Second, init openstack mq. Third, keep a ternya connection that can auto-reconnect. """ self.init_modules() connection = self.init_mq() TernyaConnection(self, connection).connect()
def function[work, parameter[self]]: constant[ Start ternya work. First, import customer's service modules. Second, init openstack mq. Third, keep a ternya connection that can auto-reconnect. ] call[name[self].init_modules, parameter[]] variable[connection] assign[=] call[name[self].init_mq, parameter[]] call[call[name[TernyaConnection], parameter[name[self], name[connection]]].connect, parameter[]]
keyword[def] identifier[work] ( identifier[self] ): literal[string] identifier[self] . identifier[init_modules] () identifier[connection] = identifier[self] . identifier[init_mq] () identifier[TernyaConnection] ( identifier[self] , identifier[connection] ). identifier[connect] ()
def work(self): """ Start ternya work. First, import customer's service modules. Second, init openstack mq. Third, keep a ternya connection that can auto-reconnect. """ self.init_modules() connection = self.init_mq() TernyaConnection(self, connection).connect()
def next(self, varnum=0): """ Moves the 'Cursor' to & returns the next 'Node'. Raises 'GameTreeEndError' if the end of a branch is exceeded. Raises 'GameTreeNavigationError' if a non-existent variation is accessed. Argument: - varnum : integer, default 0 -- Variation number. Non-zero only valid at a branching, where variations exist.""" if self.index + 1 < len(self.gametree): # more main line? if varnum != 0: raise GameTreeNavigationError("Nonexistent variation.") self.index = self.index + 1 elif self.gametree.variations: # variations exist? if varnum < len(self.gametree.variations): self.stack.append(self.gametree) self.gametree = self.gametree.variations[varnum] self.index = 0 else: raise GameTreeNavigationError("Nonexistent variation.") else: raise GameTreeEndError self.node = self.gametree[self.index] self.nodenum = self.nodenum + 1 self._setChildren() self._setFlags() return self.node
def function[next, parameter[self, varnum]]: constant[ Moves the 'Cursor' to & returns the next 'Node'. Raises 'GameTreeEndError' if the end of a branch is exceeded. Raises 'GameTreeNavigationError' if a non-existent variation is accessed. Argument: - varnum : integer, default 0 -- Variation number. Non-zero only valid at a branching, where variations exist.] if compare[binary_operation[name[self].index + constant[1]] less[<] call[name[len], parameter[name[self].gametree]]] begin[:] if compare[name[varnum] not_equal[!=] constant[0]] begin[:] <ast.Raise object at 0x7da1b0ebf460> name[self].index assign[=] binary_operation[name[self].index + constant[1]] name[self].node assign[=] call[name[self].gametree][name[self].index] name[self].nodenum assign[=] binary_operation[name[self].nodenum + constant[1]] call[name[self]._setChildren, parameter[]] call[name[self]._setFlags, parameter[]] return[name[self].node]
keyword[def] identifier[next] ( identifier[self] , identifier[varnum] = literal[int] ): literal[string] keyword[if] identifier[self] . identifier[index] + literal[int] < identifier[len] ( identifier[self] . identifier[gametree] ): keyword[if] identifier[varnum] != literal[int] : keyword[raise] identifier[GameTreeNavigationError] ( literal[string] ) identifier[self] . identifier[index] = identifier[self] . identifier[index] + literal[int] keyword[elif] identifier[self] . identifier[gametree] . identifier[variations] : keyword[if] identifier[varnum] < identifier[len] ( identifier[self] . identifier[gametree] . identifier[variations] ): identifier[self] . identifier[stack] . identifier[append] ( identifier[self] . identifier[gametree] ) identifier[self] . identifier[gametree] = identifier[self] . identifier[gametree] . identifier[variations] [ identifier[varnum] ] identifier[self] . identifier[index] = literal[int] keyword[else] : keyword[raise] identifier[GameTreeNavigationError] ( literal[string] ) keyword[else] : keyword[raise] identifier[GameTreeEndError] identifier[self] . identifier[node] = identifier[self] . identifier[gametree] [ identifier[self] . identifier[index] ] identifier[self] . identifier[nodenum] = identifier[self] . identifier[nodenum] + literal[int] identifier[self] . identifier[_setChildren] () identifier[self] . identifier[_setFlags] () keyword[return] identifier[self] . identifier[node]
def next(self, varnum=0): """ Moves the 'Cursor' to & returns the next 'Node'. Raises 'GameTreeEndError' if the end of a branch is exceeded. Raises 'GameTreeNavigationError' if a non-existent variation is accessed. Argument: - varnum : integer, default 0 -- Variation number. Non-zero only valid at a branching, where variations exist.""" if self.index + 1 < len(self.gametree): # more main line? if varnum != 0: raise GameTreeNavigationError('Nonexistent variation.') # depends on [control=['if'], data=[]] self.index = self.index + 1 # depends on [control=['if'], data=[]] elif self.gametree.variations: # variations exist? if varnum < len(self.gametree.variations): self.stack.append(self.gametree) self.gametree = self.gametree.variations[varnum] self.index = 0 # depends on [control=['if'], data=['varnum']] else: raise GameTreeNavigationError('Nonexistent variation.') # depends on [control=['if'], data=[]] else: raise GameTreeEndError self.node = self.gametree[self.index] self.nodenum = self.nodenum + 1 self._setChildren() self._setFlags() return self.node
def get_queue_message_counts(self, queue_name): """Get the number of messages in a queue. This method is only meant to be used in unit and integration tests. Parameters: queue_name(str): The queue whose message counts to get. Returns: tuple: A triple representing the number of messages in the queue, its delayed queue and its dead letter queue. """ queue_response = self._declare_queue(queue_name) dq_queue_response = self._declare_dq_queue(queue_name) xq_queue_response = self._declare_xq_queue(queue_name) return ( queue_response.method.message_count, dq_queue_response.method.message_count, xq_queue_response.method.message_count, )
def function[get_queue_message_counts, parameter[self, queue_name]]: constant[Get the number of messages in a queue. This method is only meant to be used in unit and integration tests. Parameters: queue_name(str): The queue whose message counts to get. Returns: tuple: A triple representing the number of messages in the queue, its delayed queue and its dead letter queue. ] variable[queue_response] assign[=] call[name[self]._declare_queue, parameter[name[queue_name]]] variable[dq_queue_response] assign[=] call[name[self]._declare_dq_queue, parameter[name[queue_name]]] variable[xq_queue_response] assign[=] call[name[self]._declare_xq_queue, parameter[name[queue_name]]] return[tuple[[<ast.Attribute object at 0x7da1b1639090>, <ast.Attribute object at 0x7da1b1638880>, <ast.Attribute object at 0x7da1b1639000>]]]
keyword[def] identifier[get_queue_message_counts] ( identifier[self] , identifier[queue_name] ): literal[string] identifier[queue_response] = identifier[self] . identifier[_declare_queue] ( identifier[queue_name] ) identifier[dq_queue_response] = identifier[self] . identifier[_declare_dq_queue] ( identifier[queue_name] ) identifier[xq_queue_response] = identifier[self] . identifier[_declare_xq_queue] ( identifier[queue_name] ) keyword[return] ( identifier[queue_response] . identifier[method] . identifier[message_count] , identifier[dq_queue_response] . identifier[method] . identifier[message_count] , identifier[xq_queue_response] . identifier[method] . identifier[message_count] , )
def get_queue_message_counts(self, queue_name): """Get the number of messages in a queue. This method is only meant to be used in unit and integration tests. Parameters: queue_name(str): The queue whose message counts to get. Returns: tuple: A triple representing the number of messages in the queue, its delayed queue and its dead letter queue. """ queue_response = self._declare_queue(queue_name) dq_queue_response = self._declare_dq_queue(queue_name) xq_queue_response = self._declare_xq_queue(queue_name) return (queue_response.method.message_count, dq_queue_response.method.message_count, xq_queue_response.method.message_count)
def mkdir(path, create_parent=True, check_if_exists=False): """ Generates a unix command line for creating a directory. :param path: Directory path. :type path: unicode | str :param create_parent: Create parent directories, if necessary. Default is ``True``. :type create_parent: bool :param check_if_exists: Prepend a check if the directory exists; in that case, the command is not run. Default is ``False``. :type check_if_exists: bool :return: Unix shell command line. :rtype: unicode | str """ cmd = _format_cmd('mkdir', path, _p=create_parent) if check_if_exists: return 'if [[ ! -d {0} ]]; then {1}; fi'.format(path, cmd) return cmd
def function[mkdir, parameter[path, create_parent, check_if_exists]]: constant[ Generates a unix command line for creating a directory. :param path: Directory path. :type path: unicode | str :param create_parent: Create parent directories, if necessary. Default is ``True``. :type create_parent: bool :param check_if_exists: Prepend a check if the directory exists; in that case, the command is not run. Default is ``False``. :type check_if_exists: bool :return: Unix shell command line. :rtype: unicode | str ] variable[cmd] assign[=] call[name[_format_cmd], parameter[constant[mkdir], name[path]]] if name[check_if_exists] begin[:] return[call[constant[if [[ ! -d {0} ]]; then {1}; fi].format, parameter[name[path], name[cmd]]]] return[name[cmd]]
keyword[def] identifier[mkdir] ( identifier[path] , identifier[create_parent] = keyword[True] , identifier[check_if_exists] = keyword[False] ): literal[string] identifier[cmd] = identifier[_format_cmd] ( literal[string] , identifier[path] , identifier[_p] = identifier[create_parent] ) keyword[if] identifier[check_if_exists] : keyword[return] literal[string] . identifier[format] ( identifier[path] , identifier[cmd] ) keyword[return] identifier[cmd]
def mkdir(path, create_parent=True, check_if_exists=False): """ Generates a unix command line for creating a directory. :param path: Directory path. :type path: unicode | str :param create_parent: Create parent directories, if necessary. Default is ``True``. :type create_parent: bool :param check_if_exists: Prepend a check if the directory exists; in that case, the command is not run. Default is ``False``. :type check_if_exists: bool :return: Unix shell command line. :rtype: unicode | str """ cmd = _format_cmd('mkdir', path, _p=create_parent) if check_if_exists: return 'if [[ ! -d {0} ]]; then {1}; fi'.format(path, cmd) # depends on [control=['if'], data=[]] return cmd
def notify_mail(title, message, recipient=None, sender=None, smtp_host=None, smtp_port=None, **kwargs): """ Mail notification method taking a *title* and a string *message*. *recipient*, *sender*, *smtp_host* and *smtp_port* default to the configuration values in the [notifications] section. """ cfg = Config.instance() if not recipient: recipient = cfg.get_expanded("notifications", "mail_recipient") if not sender: sender = cfg.get_expanded("notifications", "mail_sender") if not smtp_host: smtp_host = cfg.get_expanded("notifications", "mail_smtp_host") if not smtp_port: smtp_port = cfg.get_expanded("notifications", "mail_smtp_port") if not recipient or not sender: logger.warning("cannot send mail notification, recipient ({}) or sender ({}) empty".format( recipient, sender)) return False return send_mail(recipient, sender, title, message, smtp_host=smtp_host, smtp_port=smtp_port)
def function[notify_mail, parameter[title, message, recipient, sender, smtp_host, smtp_port]]: constant[ Mail notification method taking a *title* and a string *message*. *recipient*, *sender*, *smtp_host* and *smtp_port* default to the configuration values in the [notifications] section. ] variable[cfg] assign[=] call[name[Config].instance, parameter[]] if <ast.UnaryOp object at 0x7da1b05f2f80> begin[:] variable[recipient] assign[=] call[name[cfg].get_expanded, parameter[constant[notifications], constant[mail_recipient]]] if <ast.UnaryOp object at 0x7da1b05f1d50> begin[:] variable[sender] assign[=] call[name[cfg].get_expanded, parameter[constant[notifications], constant[mail_sender]]] if <ast.UnaryOp object at 0x7da1b05f3a60> begin[:] variable[smtp_host] assign[=] call[name[cfg].get_expanded, parameter[constant[notifications], constant[mail_smtp_host]]] if <ast.UnaryOp object at 0x7da1b05f1930> begin[:] variable[smtp_port] assign[=] call[name[cfg].get_expanded, parameter[constant[notifications], constant[mail_smtp_port]]] if <ast.BoolOp object at 0x7da1b05f0a90> begin[:] call[name[logger].warning, parameter[call[constant[cannot send mail notification, recipient ({}) or sender ({}) empty].format, parameter[name[recipient], name[sender]]]]] return[constant[False]] return[call[name[send_mail], parameter[name[recipient], name[sender], name[title], name[message]]]]
keyword[def] identifier[notify_mail] ( identifier[title] , identifier[message] , identifier[recipient] = keyword[None] , identifier[sender] = keyword[None] , identifier[smtp_host] = keyword[None] , identifier[smtp_port] = keyword[None] , ** identifier[kwargs] ): literal[string] identifier[cfg] = identifier[Config] . identifier[instance] () keyword[if] keyword[not] identifier[recipient] : identifier[recipient] = identifier[cfg] . identifier[get_expanded] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[sender] : identifier[sender] = identifier[cfg] . identifier[get_expanded] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[smtp_host] : identifier[smtp_host] = identifier[cfg] . identifier[get_expanded] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[smtp_port] : identifier[smtp_port] = identifier[cfg] . identifier[get_expanded] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[recipient] keyword[or] keyword[not] identifier[sender] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[recipient] , identifier[sender] )) keyword[return] keyword[False] keyword[return] identifier[send_mail] ( identifier[recipient] , identifier[sender] , identifier[title] , identifier[message] , identifier[smtp_host] = identifier[smtp_host] , identifier[smtp_port] = identifier[smtp_port] )
def notify_mail(title, message, recipient=None, sender=None, smtp_host=None, smtp_port=None, **kwargs): """ Mail notification method taking a *title* and a string *message*. *recipient*, *sender*, *smtp_host* and *smtp_port* default to the configuration values in the [notifications] section. """ cfg = Config.instance() if not recipient: recipient = cfg.get_expanded('notifications', 'mail_recipient') # depends on [control=['if'], data=[]] if not sender: sender = cfg.get_expanded('notifications', 'mail_sender') # depends on [control=['if'], data=[]] if not smtp_host: smtp_host = cfg.get_expanded('notifications', 'mail_smtp_host') # depends on [control=['if'], data=[]] if not smtp_port: smtp_port = cfg.get_expanded('notifications', 'mail_smtp_port') # depends on [control=['if'], data=[]] if not recipient or not sender: logger.warning('cannot send mail notification, recipient ({}) or sender ({}) empty'.format(recipient, sender)) return False # depends on [control=['if'], data=[]] return send_mail(recipient, sender, title, message, smtp_host=smtp_host, smtp_port=smtp_port)
def _check_guts_toc(attr, old, toc, last_build, pyc=0): """ rebuild is required if either toc content changed if mtimes of files listed in old toc are newer than ast_build if pyc=1, check for .py files, too """ return (_check_guts_eq(attr, old, toc, last_build) or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc))
def function[_check_guts_toc, parameter[attr, old, toc, last_build, pyc]]: constant[ rebuild is required if either toc content changed if mtimes of files listed in old toc are newer than ast_build if pyc=1, check for .py files, too ] return[<ast.BoolOp object at 0x7da1b0ed4fd0>]
keyword[def] identifier[_check_guts_toc] ( identifier[attr] , identifier[old] , identifier[toc] , identifier[last_build] , identifier[pyc] = literal[int] ): literal[string] keyword[return] ( identifier[_check_guts_eq] ( identifier[attr] , identifier[old] , identifier[toc] , identifier[last_build] ) keyword[or] identifier[_check_guts_toc_mtime] ( identifier[attr] , identifier[old] , identifier[toc] , identifier[last_build] , identifier[pyc] = identifier[pyc] ))
def _check_guts_toc(attr, old, toc, last_build, pyc=0): """ rebuild is required if either toc content changed if mtimes of files listed in old toc are newer than ast_build if pyc=1, check for .py files, too """ return _check_guts_eq(attr, old, toc, last_build) or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc)
def _operation_caveat(cond, ops): ''' Helper for allow_caveat and deny_caveat. It checks that all operation names are valid before creating the caveat. ''' for op in ops: if op.find(' ') != -1: return error_caveat('invalid operation name "{}"'.format(op)) return _first_party(cond, ' '.join(ops))
def function[_operation_caveat, parameter[cond, ops]]: constant[ Helper for allow_caveat and deny_caveat. It checks that all operation names are valid before creating the caveat. ] for taget[name[op]] in starred[name[ops]] begin[:] if compare[call[name[op].find, parameter[constant[ ]]] not_equal[!=] <ast.UnaryOp object at 0x7da1b2431ab0>] begin[:] return[call[name[error_caveat], parameter[call[constant[invalid operation name "{}"].format, parameter[name[op]]]]]] return[call[name[_first_party], parameter[name[cond], call[constant[ ].join, parameter[name[ops]]]]]]
keyword[def] identifier[_operation_caveat] ( identifier[cond] , identifier[ops] ): literal[string] keyword[for] identifier[op] keyword[in] identifier[ops] : keyword[if] identifier[op] . identifier[find] ( literal[string] )!=- literal[int] : keyword[return] identifier[error_caveat] ( literal[string] . identifier[format] ( identifier[op] )) keyword[return] identifier[_first_party] ( identifier[cond] , literal[string] . identifier[join] ( identifier[ops] ))
def _operation_caveat(cond, ops): """ Helper for allow_caveat and deny_caveat. It checks that all operation names are valid before creating the caveat. """ for op in ops: if op.find(' ') != -1: return error_caveat('invalid operation name "{}"'.format(op)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['op']] return _first_party(cond, ' '.join(ops))
def randomRow(self): """ Gets a random row from the provider :returns: List """ l = [] for row in self.data: l.append(row) return random.choice(l)
def function[randomRow, parameter[self]]: constant[ Gets a random row from the provider :returns: List ] variable[l] assign[=] list[[]] for taget[name[row]] in starred[name[self].data] begin[:] call[name[l].append, parameter[name[row]]] return[call[name[random].choice, parameter[name[l]]]]
keyword[def] identifier[randomRow] ( identifier[self] ): literal[string] identifier[l] =[] keyword[for] identifier[row] keyword[in] identifier[self] . identifier[data] : identifier[l] . identifier[append] ( identifier[row] ) keyword[return] identifier[random] . identifier[choice] ( identifier[l] )
def randomRow(self): """ Gets a random row from the provider :returns: List """ l = [] for row in self.data: l.append(row) # depends on [control=['for'], data=['row']] return random.choice(l)
def quat_conjugate(quaternion): """Return conjugate of quaternion. >>> q0 = random_quaternion() >>> q1 = quat_conjugate(q0) >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) True """ return np.array( (-quaternion[0], -quaternion[1], -quaternion[2], quaternion[3]), dtype=np.float32, )
def function[quat_conjugate, parameter[quaternion]]: constant[Return conjugate of quaternion. >>> q0 = random_quaternion() >>> q1 = quat_conjugate(q0) >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) True ] return[call[name[np].array, parameter[tuple[[<ast.UnaryOp object at 0x7da20c6c6e00>, <ast.UnaryOp object at 0x7da20c6c6ad0>, <ast.UnaryOp object at 0x7da2045663b0>, <ast.Subscript object at 0x7da2045644f0>]]]]]
keyword[def] identifier[quat_conjugate] ( identifier[quaternion] ): literal[string] keyword[return] identifier[np] . identifier[array] ( (- identifier[quaternion] [ literal[int] ],- identifier[quaternion] [ literal[int] ],- identifier[quaternion] [ literal[int] ], identifier[quaternion] [ literal[int] ]), identifier[dtype] = identifier[np] . identifier[float32] , )
def quat_conjugate(quaternion): """Return conjugate of quaternion. >>> q0 = random_quaternion() >>> q1 = quat_conjugate(q0) >>> q1[3] == q0[3] and all(q1[:3] == -q0[:3]) True """ return np.array((-quaternion[0], -quaternion[1], -quaternion[2], quaternion[3]), dtype=np.float32)
def hpd_threshold(mu_in, post, alpha, tol): ''' For a PDF post over samples mu_in, find a density threshold such that the region having higher density has coverage of at least alpha, and less than alpha plus a given tolerance. ''' norm_post = normalize_pdf(mu_in, post) # initialize bisection search p_minus = 0.0 p_plus = max(post) while abs(hpd_coverage(mu_in, norm_post, p_minus) - hpd_coverage(mu_in, norm_post, p_plus)) >= tol: p_test = (p_minus + p_plus) / 2. if hpd_coverage(mu_in, post, p_test) >= alpha: # test value was too low or just right p_minus = p_test else: # test value was too high p_plus = p_test # p_minus never goes above the required threshold and p_plus never goes below # thus on exiting p_minus is at or below the required threshold and the # difference in coverage is within tolerance return p_minus
def function[hpd_threshold, parameter[mu_in, post, alpha, tol]]: constant[ For a PDF post over samples mu_in, find a density threshold such that the region having higher density has coverage of at least alpha, and less than alpha plus a given tolerance. ] variable[norm_post] assign[=] call[name[normalize_pdf], parameter[name[mu_in], name[post]]] variable[p_minus] assign[=] constant[0.0] variable[p_plus] assign[=] call[name[max], parameter[name[post]]] while compare[call[name[abs], parameter[binary_operation[call[name[hpd_coverage], parameter[name[mu_in], name[norm_post], name[p_minus]]] - call[name[hpd_coverage], parameter[name[mu_in], name[norm_post], name[p_plus]]]]]] greater_or_equal[>=] name[tol]] begin[:] variable[p_test] assign[=] binary_operation[binary_operation[name[p_minus] + name[p_plus]] / constant[2.0]] if compare[call[name[hpd_coverage], parameter[name[mu_in], name[post], name[p_test]]] greater_or_equal[>=] name[alpha]] begin[:] variable[p_minus] assign[=] name[p_test] return[name[p_minus]]
keyword[def] identifier[hpd_threshold] ( identifier[mu_in] , identifier[post] , identifier[alpha] , identifier[tol] ): literal[string] identifier[norm_post] = identifier[normalize_pdf] ( identifier[mu_in] , identifier[post] ) identifier[p_minus] = literal[int] identifier[p_plus] = identifier[max] ( identifier[post] ) keyword[while] identifier[abs] ( identifier[hpd_coverage] ( identifier[mu_in] , identifier[norm_post] , identifier[p_minus] )- identifier[hpd_coverage] ( identifier[mu_in] , identifier[norm_post] , identifier[p_plus] ))>= identifier[tol] : identifier[p_test] =( identifier[p_minus] + identifier[p_plus] )/ literal[int] keyword[if] identifier[hpd_coverage] ( identifier[mu_in] , identifier[post] , identifier[p_test] )>= identifier[alpha] : identifier[p_minus] = identifier[p_test] keyword[else] : identifier[p_plus] = identifier[p_test] keyword[return] identifier[p_minus]
def hpd_threshold(mu_in, post, alpha, tol): """ For a PDF post over samples mu_in, find a density threshold such that the region having higher density has coverage of at least alpha, and less than alpha plus a given tolerance. """ norm_post = normalize_pdf(mu_in, post) # initialize bisection search p_minus = 0.0 p_plus = max(post) while abs(hpd_coverage(mu_in, norm_post, p_minus) - hpd_coverage(mu_in, norm_post, p_plus)) >= tol: p_test = (p_minus + p_plus) / 2.0 if hpd_coverage(mu_in, post, p_test) >= alpha: # test value was too low or just right p_minus = p_test # depends on [control=['if'], data=[]] else: # test value was too high p_plus = p_test # depends on [control=['while'], data=[]] # p_minus never goes above the required threshold and p_plus never goes below # thus on exiting p_minus is at or below the required threshold and the # difference in coverage is within tolerance return p_minus
def auto_decompress_stream(src): """Decompress data from `src` if required. If the first block of `src` appears to be compressed, then the entire stream will be uncompressed. Otherwise the stream will be passed along as-is. Args: src (iterable): iterable that yields blocks of data Yields: blocks of uncompressed data """ block = next(src) compression = guess_compression(block) if compression == 'bz2': src = bz2_decompress_stream(chain([block], src)) elif compression == 'xz': src = xz_decompress_stream(chain([block], src)) else: src = chain([block], src) for block in src: yield block
def function[auto_decompress_stream, parameter[src]]: constant[Decompress data from `src` if required. If the first block of `src` appears to be compressed, then the entire stream will be uncompressed. Otherwise the stream will be passed along as-is. Args: src (iterable): iterable that yields blocks of data Yields: blocks of uncompressed data ] variable[block] assign[=] call[name[next], parameter[name[src]]] variable[compression] assign[=] call[name[guess_compression], parameter[name[block]]] if compare[name[compression] equal[==] constant[bz2]] begin[:] variable[src] assign[=] call[name[bz2_decompress_stream], parameter[call[name[chain], parameter[list[[<ast.Name object at 0x7da20c76f640>]], name[src]]]]] for taget[name[block]] in starred[name[src]] begin[:] <ast.Yield object at 0x7da20c76fbe0>
keyword[def] identifier[auto_decompress_stream] ( identifier[src] ): literal[string] identifier[block] = identifier[next] ( identifier[src] ) identifier[compression] = identifier[guess_compression] ( identifier[block] ) keyword[if] identifier[compression] == literal[string] : identifier[src] = identifier[bz2_decompress_stream] ( identifier[chain] ([ identifier[block] ], identifier[src] )) keyword[elif] identifier[compression] == literal[string] : identifier[src] = identifier[xz_decompress_stream] ( identifier[chain] ([ identifier[block] ], identifier[src] )) keyword[else] : identifier[src] = identifier[chain] ([ identifier[block] ], identifier[src] ) keyword[for] identifier[block] keyword[in] identifier[src] : keyword[yield] identifier[block]
def auto_decompress_stream(src): """Decompress data from `src` if required. If the first block of `src` appears to be compressed, then the entire stream will be uncompressed. Otherwise the stream will be passed along as-is. Args: src (iterable): iterable that yields blocks of data Yields: blocks of uncompressed data """ block = next(src) compression = guess_compression(block) if compression == 'bz2': src = bz2_decompress_stream(chain([block], src)) # depends on [control=['if'], data=[]] elif compression == 'xz': src = xz_decompress_stream(chain([block], src)) # depends on [control=['if'], data=[]] else: src = chain([block], src) for block in src: yield block # depends on [control=['for'], data=['block']]
def change_vlan_id(self, vlan_id): """ Change a VLAN id for an inline interface. :param str vlan_id: New VLAN id. Can be in format '1-2' or a single numerical value. If in '1-2' format, this specifies the vlan ID for the first inline interface and the rightmost for the second. :return: None """ first, second = self.nicid.split('-') firstintf = first.split('.')[0] secondintf = second.split('.')[0] newvlan = str(vlan_id).split('-') self.update(nicid='{}.{}-{}.{}'.format( firstintf, newvlan[0], secondintf, newvlan[-1]))
def function[change_vlan_id, parameter[self, vlan_id]]: constant[ Change a VLAN id for an inline interface. :param str vlan_id: New VLAN id. Can be in format '1-2' or a single numerical value. If in '1-2' format, this specifies the vlan ID for the first inline interface and the rightmost for the second. :return: None ] <ast.Tuple object at 0x7da1b1ba97e0> assign[=] call[name[self].nicid.split, parameter[constant[-]]] variable[firstintf] assign[=] call[call[name[first].split, parameter[constant[.]]]][constant[0]] variable[secondintf] assign[=] call[call[name[second].split, parameter[constant[.]]]][constant[0]] variable[newvlan] assign[=] call[call[name[str], parameter[name[vlan_id]]].split, parameter[constant[-]]] call[name[self].update, parameter[]]
keyword[def] identifier[change_vlan_id] ( identifier[self] , identifier[vlan_id] ): literal[string] identifier[first] , identifier[second] = identifier[self] . identifier[nicid] . identifier[split] ( literal[string] ) identifier[firstintf] = identifier[first] . identifier[split] ( literal[string] )[ literal[int] ] identifier[secondintf] = identifier[second] . identifier[split] ( literal[string] )[ literal[int] ] identifier[newvlan] = identifier[str] ( identifier[vlan_id] ). identifier[split] ( literal[string] ) identifier[self] . identifier[update] ( identifier[nicid] = literal[string] . identifier[format] ( identifier[firstintf] , identifier[newvlan] [ literal[int] ], identifier[secondintf] , identifier[newvlan] [- literal[int] ]))
def change_vlan_id(self, vlan_id): """ Change a VLAN id for an inline interface. :param str vlan_id: New VLAN id. Can be in format '1-2' or a single numerical value. If in '1-2' format, this specifies the vlan ID for the first inline interface and the rightmost for the second. :return: None """ (first, second) = self.nicid.split('-') firstintf = first.split('.')[0] secondintf = second.split('.')[0] newvlan = str(vlan_id).split('-') self.update(nicid='{}.{}-{}.{}'.format(firstintf, newvlan[0], secondintf, newvlan[-1]))
def search_definition(self, module, keyword, arg): """Search for a defintion with `keyword` `name` Search the module and its submodules.""" r = module.search_one(keyword, arg) if r is not None: return r for i in module.search('include'): modulename = i.arg m = self.ctx.search_module(i.pos, modulename) if m is not None: r = m.search_one(keyword, arg) if r is not None: return r return None
def function[search_definition, parameter[self, module, keyword, arg]]: constant[Search for a defintion with `keyword` `name` Search the module and its submodules.] variable[r] assign[=] call[name[module].search_one, parameter[name[keyword], name[arg]]] if compare[name[r] is_not constant[None]] begin[:] return[name[r]] for taget[name[i]] in starred[call[name[module].search, parameter[constant[include]]]] begin[:] variable[modulename] assign[=] name[i].arg variable[m] assign[=] call[name[self].ctx.search_module, parameter[name[i].pos, name[modulename]]] if compare[name[m] is_not constant[None]] begin[:] variable[r] assign[=] call[name[m].search_one, parameter[name[keyword], name[arg]]] if compare[name[r] is_not constant[None]] begin[:] return[name[r]] return[constant[None]]
keyword[def] identifier[search_definition] ( identifier[self] , identifier[module] , identifier[keyword] , identifier[arg] ): literal[string] identifier[r] = identifier[module] . identifier[search_one] ( identifier[keyword] , identifier[arg] ) keyword[if] identifier[r] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[r] keyword[for] identifier[i] keyword[in] identifier[module] . identifier[search] ( literal[string] ): identifier[modulename] = identifier[i] . identifier[arg] identifier[m] = identifier[self] . identifier[ctx] . identifier[search_module] ( identifier[i] . identifier[pos] , identifier[modulename] ) keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] : identifier[r] = identifier[m] . identifier[search_one] ( identifier[keyword] , identifier[arg] ) keyword[if] identifier[r] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[r] keyword[return] keyword[None]
def search_definition(self, module, keyword, arg): """Search for a defintion with `keyword` `name` Search the module and its submodules.""" r = module.search_one(keyword, arg) if r is not None: return r # depends on [control=['if'], data=['r']] for i in module.search('include'): modulename = i.arg m = self.ctx.search_module(i.pos, modulename) if m is not None: r = m.search_one(keyword, arg) if r is not None: return r # depends on [control=['if'], data=['r']] # depends on [control=['if'], data=['m']] # depends on [control=['for'], data=['i']] return None
def select_rect(action, action_space, select_add, screen, screen2): """Select units within a rectangle.""" select = spatial(action, action_space).unit_selection_rect out_rect = select.selection_screen_coord.add() screen_rect = point.Rect(screen, screen2) screen_rect.tl.assign_to(out_rect.p0) screen_rect.br.assign_to(out_rect.p1) select.selection_add = bool(select_add)
def function[select_rect, parameter[action, action_space, select_add, screen, screen2]]: constant[Select units within a rectangle.] variable[select] assign[=] call[name[spatial], parameter[name[action], name[action_space]]].unit_selection_rect variable[out_rect] assign[=] call[name[select].selection_screen_coord.add, parameter[]] variable[screen_rect] assign[=] call[name[point].Rect, parameter[name[screen], name[screen2]]] call[name[screen_rect].tl.assign_to, parameter[name[out_rect].p0]] call[name[screen_rect].br.assign_to, parameter[name[out_rect].p1]] name[select].selection_add assign[=] call[name[bool], parameter[name[select_add]]]
keyword[def] identifier[select_rect] ( identifier[action] , identifier[action_space] , identifier[select_add] , identifier[screen] , identifier[screen2] ): literal[string] identifier[select] = identifier[spatial] ( identifier[action] , identifier[action_space] ). identifier[unit_selection_rect] identifier[out_rect] = identifier[select] . identifier[selection_screen_coord] . identifier[add] () identifier[screen_rect] = identifier[point] . identifier[Rect] ( identifier[screen] , identifier[screen2] ) identifier[screen_rect] . identifier[tl] . identifier[assign_to] ( identifier[out_rect] . identifier[p0] ) identifier[screen_rect] . identifier[br] . identifier[assign_to] ( identifier[out_rect] . identifier[p1] ) identifier[select] . identifier[selection_add] = identifier[bool] ( identifier[select_add] )
def select_rect(action, action_space, select_add, screen, screen2): """Select units within a rectangle.""" select = spatial(action, action_space).unit_selection_rect out_rect = select.selection_screen_coord.add() screen_rect = point.Rect(screen, screen2) screen_rect.tl.assign_to(out_rect.p0) screen_rect.br.assign_to(out_rect.p1) select.selection_add = bool(select_add)
def data_iterator_simple(load_func, num_examples, batch_size, shuffle=False, rng=None, with_memory_cache=True, with_file_cache=True, cache_dir=None, epoch_begin_callbacks=[], epoch_end_callbacks=[]): """A generator that ``yield`` s minibatch data as a tuple, as defined in ``load_func`` . It can unlimitedly yield minibatches at your request, queried from the provided data. Args: load_func (function): Takes a single argument `i`, an index of an example in your dataset to be loaded, and returns a tuple of data. Every call by any index `i` must return a tuple of arrays with the same shape. num_examples (int): Number of examples in your dataset. Random sequence of indexes is generated according to this number. batch_size (int): Size of data unit. shuffle (bool): Indicates whether the dataset is shuffled or not. Default value is False. rng (None or :obj:`numpy.random.RandomState`): Numpy random number generator. with_memory_cache (bool): If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache` to wrap ``data_source``. It is a good idea to set this as true unless data_source provides on-memory data. Default value is True. with_file_cache (bool): If ``True``, use :py:class:`.data_source.DataSourceWithFileCache` to wrap ``data_source``. If ``data_source`` is slow, enabling this option a is good idea. Default value is False. cache_dir (str): Location of file_cache. If this value is None, :py:class:`.data_source.DataSourceWithFileCache` creates file caches implicitly on temporary directory and erases them all when data_iterator is finished. Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache. Default is None. epoch_begin_callbacks (list of functions): An item is a function which takes an epoch index as an argument. These are called at the beginning of an epoch. epoch_end_callbacks (list of functions): An item is a function which takes an epoch index as an argument. These are called at the end of an epoch. 
Returns: :py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`: Instance of DataIterator. Here is an example of `load_func` which returns an image and a label of a classification dataset. .. code-block:: python import numpy as np from nnabla.utils.image_utils import imread image_paths = load_image_paths() labels = load_labels() def my_load_func(i): ''' Returns: image: c x h x w array label: 0-shape array ''' img = imread(image_paths[i]).astype('float32') return np.rollaxis(img, 2), np.array(labels[i]) """ return data_iterator(SimpleDataSource(load_func, num_examples, shuffle=shuffle, rng=rng), batch_size=batch_size, with_memory_cache=with_memory_cache, with_file_cache=with_file_cache, cache_dir=cache_dir, epoch_begin_callbacks=epoch_begin_callbacks, epoch_end_callbacks=epoch_end_callbacks)
def function[data_iterator_simple, parameter[load_func, num_examples, batch_size, shuffle, rng, with_memory_cache, with_file_cache, cache_dir, epoch_begin_callbacks, epoch_end_callbacks]]: constant[A generator that ``yield`` s minibatch data as a tuple, as defined in ``load_func`` . It can unlimitedly yield minibatches at your request, queried from the provided data. Args: load_func (function): Takes a single argument `i`, an index of an example in your dataset to be loaded, and returns a tuple of data. Every call by any index `i` must return a tuple of arrays with the same shape. num_examples (int): Number of examples in your dataset. Random sequence of indexes is generated according to this number. batch_size (int): Size of data unit. shuffle (bool): Indicates whether the dataset is shuffled or not. Default value is False. rng (None or :obj:`numpy.random.RandomState`): Numpy random number generator. with_memory_cache (bool): If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache` to wrap ``data_source``. It is a good idea to set this as true unless data_source provides on-memory data. Default value is True. with_file_cache (bool): If ``True``, use :py:class:`.data_source.DataSourceWithFileCache` to wrap ``data_source``. If ``data_source`` is slow, enabling this option a is good idea. Default value is False. cache_dir (str): Location of file_cache. If this value is None, :py:class:`.data_source.DataSourceWithFileCache` creates file caches implicitly on temporary directory and erases them all when data_iterator is finished. Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache. Default is None. epoch_begin_callbacks (list of functions): An item is a function which takes an epoch index as an argument. These are called at the beginning of an epoch. epoch_end_callbacks (list of functions): An item is a function which takes an epoch index as an argument. These are called at the end of an epoch. 
Returns: :py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`: Instance of DataIterator. Here is an example of `load_func` which returns an image and a label of a classification dataset. .. code-block:: python import numpy as np from nnabla.utils.image_utils import imread image_paths = load_image_paths() labels = load_labels() def my_load_func(i): ''' Returns: image: c x h x w array label: 0-shape array ''' img = imread(image_paths[i]).astype('float32') return np.rollaxis(img, 2), np.array(labels[i]) ] return[call[name[data_iterator], parameter[call[name[SimpleDataSource], parameter[name[load_func], name[num_examples]]]]]]
keyword[def] identifier[data_iterator_simple] ( identifier[load_func] , identifier[num_examples] , identifier[batch_size] , identifier[shuffle] = keyword[False] , identifier[rng] = keyword[None] , identifier[with_memory_cache] = keyword[True] , identifier[with_file_cache] = keyword[True] , identifier[cache_dir] = keyword[None] , identifier[epoch_begin_callbacks] =[], identifier[epoch_end_callbacks] =[]): literal[string] keyword[return] identifier[data_iterator] ( identifier[SimpleDataSource] ( identifier[load_func] , identifier[num_examples] , identifier[shuffle] = identifier[shuffle] , identifier[rng] = identifier[rng] ), identifier[batch_size] = identifier[batch_size] , identifier[with_memory_cache] = identifier[with_memory_cache] , identifier[with_file_cache] = identifier[with_file_cache] , identifier[cache_dir] = identifier[cache_dir] , identifier[epoch_begin_callbacks] = identifier[epoch_begin_callbacks] , identifier[epoch_end_callbacks] = identifier[epoch_end_callbacks] )
def data_iterator_simple(load_func, num_examples, batch_size, shuffle=False, rng=None, with_memory_cache=True, with_file_cache=True, cache_dir=None, epoch_begin_callbacks=[], epoch_end_callbacks=[]): """A generator that ``yield`` s minibatch data as a tuple, as defined in ``load_func`` . It can unlimitedly yield minibatches at your request, queried from the provided data. Args: load_func (function): Takes a single argument `i`, an index of an example in your dataset to be loaded, and returns a tuple of data. Every call by any index `i` must return a tuple of arrays with the same shape. num_examples (int): Number of examples in your dataset. Random sequence of indexes is generated according to this number. batch_size (int): Size of data unit. shuffle (bool): Indicates whether the dataset is shuffled or not. Default value is False. rng (None or :obj:`numpy.random.RandomState`): Numpy random number generator. with_memory_cache (bool): If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache` to wrap ``data_source``. It is a good idea to set this as true unless data_source provides on-memory data. Default value is True. with_file_cache (bool): If ``True``, use :py:class:`.data_source.DataSourceWithFileCache` to wrap ``data_source``. If ``data_source`` is slow, enabling this option a is good idea. Default value is False. cache_dir (str): Location of file_cache. If this value is None, :py:class:`.data_source.DataSourceWithFileCache` creates file caches implicitly on temporary directory and erases them all when data_iterator is finished. Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache. Default is None. epoch_begin_callbacks (list of functions): An item is a function which takes an epoch index as an argument. These are called at the beginning of an epoch. epoch_end_callbacks (list of functions): An item is a function which takes an epoch index as an argument. These are called at the end of an epoch. 
Returns: :py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`: Instance of DataIterator. Here is an example of `load_func` which returns an image and a label of a classification dataset. .. code-block:: python import numpy as np from nnabla.utils.image_utils import imread image_paths = load_image_paths() labels = load_labels() def my_load_func(i): ''' Returns: image: c x h x w array label: 0-shape array ''' img = imread(image_paths[i]).astype('float32') return np.rollaxis(img, 2), np.array(labels[i]) """ return data_iterator(SimpleDataSource(load_func, num_examples, shuffle=shuffle, rng=rng), batch_size=batch_size, with_memory_cache=with_memory_cache, with_file_cache=with_file_cache, cache_dir=cache_dir, epoch_begin_callbacks=epoch_begin_callbacks, epoch_end_callbacks=epoch_end_callbacks)
def run_metrics(command, parser, cl_args, unknown_args): """ run metrics subcommand """ cluster, role, env = cl_args['cluster'], cl_args['role'], cl_args['environ'] topology = cl_args['topology-name'] try: result = tracker_access.get_topology_info(cluster, env, topology, role) spouts = result['physical_plan']['spouts'].keys() bolts = result['physical_plan']['bolts'].keys() components = spouts + bolts cname = cl_args['component'] if cname: if cname in components: components = [cname] else: Log.error('Unknown component: \'%s\'' % cname) raise except Exception: Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"]) return False cresult = [] for comp in components: try: metrics = tracker_access.get_component_metrics(comp, cluster, env, topology, role) except: Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"]) return False stat, header = to_table(metrics) cresult.append((comp, stat, header)) for i, (comp, stat, header) in enumerate(cresult): if i != 0: print('') print('\'%s\' metrics:' % comp) print(tabulate(stat, headers=header)) return True
def function[run_metrics, parameter[command, parser, cl_args, unknown_args]]: constant[ run metrics subcommand ] <ast.Tuple object at 0x7da20c6e7640> assign[=] tuple[[<ast.Subscript object at 0x7da20c6e58a0>, <ast.Subscript object at 0x7da20c6e6050>, <ast.Subscript object at 0x7da20c6e66e0>]] variable[topology] assign[=] call[name[cl_args]][constant[topology-name]] <ast.Try object at 0x7da20c6e5480> variable[cresult] assign[=] list[[]] for taget[name[comp]] in starred[name[components]] begin[:] <ast.Try object at 0x7da20c6e6140> <ast.Tuple object at 0x7da20c6e5300> assign[=] call[name[to_table], parameter[name[metrics]]] call[name[cresult].append, parameter[tuple[[<ast.Name object at 0x7da20c6e64a0>, <ast.Name object at 0x7da20c6e5840>, <ast.Name object at 0x7da20c6e6020>]]]] for taget[tuple[[<ast.Name object at 0x7da20c6e7460>, <ast.Tuple object at 0x7da20c6e7850>]]] in starred[call[name[enumerate], parameter[name[cresult]]]] begin[:] if compare[name[i] not_equal[!=] constant[0]] begin[:] call[name[print], parameter[constant[]]] call[name[print], parameter[binary_operation[constant['%s' metrics:] <ast.Mod object at 0x7da2590d6920> name[comp]]]] call[name[print], parameter[call[name[tabulate], parameter[name[stat]]]]] return[constant[True]]
keyword[def] identifier[run_metrics] ( identifier[command] , identifier[parser] , identifier[cl_args] , identifier[unknown_args] ): literal[string] identifier[cluster] , identifier[role] , identifier[env] = identifier[cl_args] [ literal[string] ], identifier[cl_args] [ literal[string] ], identifier[cl_args] [ literal[string] ] identifier[topology] = identifier[cl_args] [ literal[string] ] keyword[try] : identifier[result] = identifier[tracker_access] . identifier[get_topology_info] ( identifier[cluster] , identifier[env] , identifier[topology] , identifier[role] ) identifier[spouts] = identifier[result] [ literal[string] ][ literal[string] ]. identifier[keys] () identifier[bolts] = identifier[result] [ literal[string] ][ literal[string] ]. identifier[keys] () identifier[components] = identifier[spouts] + identifier[bolts] identifier[cname] = identifier[cl_args] [ literal[string] ] keyword[if] identifier[cname] : keyword[if] identifier[cname] keyword[in] identifier[components] : identifier[components] =[ identifier[cname] ] keyword[else] : identifier[Log] . identifier[error] ( literal[string] % identifier[cname] ) keyword[raise] keyword[except] identifier[Exception] : identifier[Log] . identifier[error] ( literal[string] , identifier[cl_args] [ literal[string] ]) keyword[return] keyword[False] identifier[cresult] =[] keyword[for] identifier[comp] keyword[in] identifier[components] : keyword[try] : identifier[metrics] = identifier[tracker_access] . identifier[get_component_metrics] ( identifier[comp] , identifier[cluster] , identifier[env] , identifier[topology] , identifier[role] ) keyword[except] : identifier[Log] . identifier[error] ( literal[string] , identifier[cl_args] [ literal[string] ]) keyword[return] keyword[False] identifier[stat] , identifier[header] = identifier[to_table] ( identifier[metrics] ) identifier[cresult] . 
identifier[append] (( identifier[comp] , identifier[stat] , identifier[header] )) keyword[for] identifier[i] ,( identifier[comp] , identifier[stat] , identifier[header] ) keyword[in] identifier[enumerate] ( identifier[cresult] ): keyword[if] identifier[i] != literal[int] : identifier[print] ( literal[string] ) identifier[print] ( literal[string] % identifier[comp] ) identifier[print] ( identifier[tabulate] ( identifier[stat] , identifier[headers] = identifier[header] )) keyword[return] keyword[True]
def run_metrics(command, parser, cl_args, unknown_args): """ run metrics subcommand """ (cluster, role, env) = (cl_args['cluster'], cl_args['role'], cl_args['environ']) topology = cl_args['topology-name'] try: result = tracker_access.get_topology_info(cluster, env, topology, role) spouts = result['physical_plan']['spouts'].keys() bolts = result['physical_plan']['bolts'].keys() components = spouts + bolts cname = cl_args['component'] if cname: if cname in components: components = [cname] # depends on [control=['if'], data=['cname', 'components']] else: Log.error("Unknown component: '%s'" % cname) raise # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception: Log.error("Fail to connect to tracker: '%s'", cl_args['tracker_url']) return False # depends on [control=['except'], data=[]] cresult = [] for comp in components: try: metrics = tracker_access.get_component_metrics(comp, cluster, env, topology, role) # depends on [control=['try'], data=[]] except: Log.error("Fail to connect to tracker: '%s'", cl_args['tracker_url']) return False # depends on [control=['except'], data=[]] (stat, header) = to_table(metrics) cresult.append((comp, stat, header)) # depends on [control=['for'], data=['comp']] for (i, (comp, stat, header)) in enumerate(cresult): if i != 0: print('') # depends on [control=['if'], data=[]] print("'%s' metrics:" % comp) print(tabulate(stat, headers=header)) # depends on [control=['for'], data=[]] return True
def validate(self, auth_rest): """Validate user credentials whether format is right for Sha1 :param auth_rest: User credentials' part without auth_type :return: Dict with a hash and a salt part of user credentials :raises ValueError: If credentials' part doesn't contain delimiter between a salt and a hash. """ try: auth_salt, auth_hash = auth_rest.split('$') except ValueError: raise ValueError("Missing '$' in %s" % auth_rest) if len(auth_salt) == 0: raise ValueError("Salt must have non-zero length!") if len(auth_hash) != 40: raise ValueError("Hash must have 40 chars!") if not all(c in string.hexdigits for c in auth_hash): raise ValueError("Hash must be hexadecimal!") return dict(salt=auth_salt, hash=auth_hash)
def function[validate, parameter[self, auth_rest]]: constant[Validate user credentials whether format is right for Sha1 :param auth_rest: User credentials' part without auth_type :return: Dict with a hash and a salt part of user credentials :raises ValueError: If credentials' part doesn't contain delimiter between a salt and a hash. ] <ast.Try object at 0x7da1b04333a0> if compare[call[name[len], parameter[name[auth_salt]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da1b0430d30> if compare[call[name[len], parameter[name[auth_hash]]] not_equal[!=] constant[40]] begin[:] <ast.Raise object at 0x7da1b0431f00> if <ast.UnaryOp object at 0x7da1b0430400> begin[:] <ast.Raise object at 0x7da1b0432f20> return[call[name[dict], parameter[]]]
keyword[def] identifier[validate] ( identifier[self] , identifier[auth_rest] ): literal[string] keyword[try] : identifier[auth_salt] , identifier[auth_hash] = identifier[auth_rest] . identifier[split] ( literal[string] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[auth_rest] ) keyword[if] identifier[len] ( identifier[auth_salt] )== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[auth_hash] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] keyword[not] identifier[all] ( identifier[c] keyword[in] identifier[string] . identifier[hexdigits] keyword[for] identifier[c] keyword[in] identifier[auth_hash] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[dict] ( identifier[salt] = identifier[auth_salt] , identifier[hash] = identifier[auth_hash] )
def validate(self, auth_rest): """Validate user credentials whether format is right for Sha1 :param auth_rest: User credentials' part without auth_type :return: Dict with a hash and a salt part of user credentials :raises ValueError: If credentials' part doesn't contain delimiter between a salt and a hash. """ try: (auth_salt, auth_hash) = auth_rest.split('$') # depends on [control=['try'], data=[]] except ValueError: raise ValueError("Missing '$' in %s" % auth_rest) # depends on [control=['except'], data=[]] if len(auth_salt) == 0: raise ValueError('Salt must have non-zero length!') # depends on [control=['if'], data=[]] if len(auth_hash) != 40: raise ValueError('Hash must have 40 chars!') # depends on [control=['if'], data=[]] if not all((c in string.hexdigits for c in auth_hash)): raise ValueError('Hash must be hexadecimal!') # depends on [control=['if'], data=[]] return dict(salt=auth_salt, hash=auth_hash)
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Batch Remove Absolute (retrain)
    xlabel = "Fraction of features removed"
    ylabel = "1 - R^2"
    transform = "one_minus"
    sort_order = 13
    """
    # Delegate to the shared batch-metric runner, scoring with R^2.
    # NOTE: the docstring above is framework metadata (axis labels, transform,
    # sort order) and must be kept verbatim.
    score_fn = sklearn.metrics.r2_score
    return __run_batch_abs_metric(measures.batch_remove_retrain, X, y,
                                  model_generator, method_name, score_fn,
                                  num_fcounts)
def function[batch_remove_absolute_retrain__r2, parameter[X, y, model_generator, method_name, num_fcounts]]: constant[ Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13 ] return[call[name[__run_batch_abs_metric], parameter[name[measures].batch_remove_retrain, name[X], name[y], name[model_generator], name[method_name], name[sklearn].metrics.r2_score, name[num_fcounts]]]]
keyword[def] identifier[batch_remove_absolute_retrain__r2] ( identifier[X] , identifier[y] , identifier[model_generator] , identifier[method_name] , identifier[num_fcounts] = literal[int] ): literal[string] keyword[return] identifier[__run_batch_abs_metric] ( identifier[measures] . identifier[batch_remove_retrain] , identifier[X] , identifier[y] , identifier[model_generator] , identifier[method_name] , identifier[sklearn] . identifier[metrics] . identifier[r2_score] , identifier[num_fcounts] )
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11): """ Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13 """ return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
def lex_document(self, cli, document):
    """
    Create a lexer function that takes a line number and returns the list
    of (Token, text) tuples as the Pygments lexer returns for that line.

    The returned callable lexes the document lazily: lines are tokenized
    on demand, results are cached per line number, and partially-consumed
    Pygments generators are reused for nearby lines so scrolling does not
    re-lex the whole document from the start.

    :param cli: CommandLineInterface; only passed through to
        ``self.sync_from_start``.
    :param document: Document whose ``lines`` are lexed.
    :returns: callable ``get_line(i)`` returning ``[(Token, text), ...]``
        for line ``i`` (empty list past the end of the document).
    """
    # Cache of already lexed lines: line number -> list of (Token, text).
    cache = {}

    # Pygments generators that are currently lexing.
    line_generators = {}  # Map lexer generator to the line number.

    def get_syntax_sync():
        " The Syntax synchronisation object that we currently use. "
        if self.sync_from_start(cli):
            return SyncFromStart()
        else:
            return self.syntax_sync

    def find_closest_generator(i):
        " Return a generator close to line 'i', or None if none was found. "
        # Only a generator positioned *before* line i and within the reuse
        # distance is worth continuing; otherwise starting fresh is cheaper.
        for generator, lineno in line_generators.items():
            if lineno < i and i - lineno < self.REUSE_GENERATOR_MAX_DISTANCE:
                return generator

    def create_line_generator(start_lineno, column=0):
        """
        Create a generator that yields the lexed lines.
        Each iteration it yields a (line_number, [(token, text), ...]) tuple.
        """
        def get_tokens():
            text = '\n'.join(document.lines[start_lineno:])[column:]

            # We call `get_tokens_unprocessed`, because `get_tokens` will
            # still replace \r\n and \r by \n. (We don't want that,
            # Pygments should return exactly the same amount of text, as we
            # have given as input.)
            for _, t, v in self.pygments_lexer.get_tokens_unprocessed(text):
                yield t, v

        return enumerate(split_lines(get_tokens()), start_lineno)

    def get_generator(i):
        """
        Find an already started generator that is close,
        or create a new one.
        """
        # Find closest line generator.
        generator = find_closest_generator(i)
        if generator:
            return generator

        # No generator found. Determine starting point for the syntax
        # synchronisation first.

        # Go at least x lines back. (Make scrolling upwards more
        # efficient.)
        i = max(0, i - self.MIN_LINES_BACKWARDS)

        if i == 0:
            row = 0
            column = 0
        else:
            row, column = get_syntax_sync().get_sync_start_position(document, i)

        # Find generator close to this point, or otherwise create a new one.
        generator = find_closest_generator(i)
        if generator:
            return generator
        else:
            generator = create_line_generator(row, column)

            # If the column is not 0, ignore the first line. (Which is
            # incomplete. This happens when the synchronisation algorithm tells
            # us to start parsing in the middle of a line.)
            if column:
                next(generator)
                row += 1

            line_generators[generator] = row
            return generator

    def get_line(i):
        " Return the tokens for a given line number. "
        try:
            return cache[i]
        except KeyError:
            generator = get_generator(i)

            # Exhaust the generator, until we find the requested line.
            for num, line in generator:
                cache[num] = line
                if num == i:
                    line_generators[generator] = i

                    # Remove the next item from the cache.
                    # (It could happen that it's already there, because of
                    # another generator that started filling these lines,
                    # but we want to synchronise these lines with the
                    # current lexer's state.)
                    if num + 1 in cache:
                        del cache[num + 1]
                    return cache[num]

        # Generator exhausted without reaching line i: past end of document.
        return []

    return get_line
def function[lex_document, parameter[self, cli, document]]: constant[ Create a lexer function that takes a line number and returns the list of (Token, text) tuples as the Pygments lexer returns for that line. ] variable[cache] assign[=] dictionary[[], []] variable[line_generators] assign[=] dictionary[[], []] def function[get_syntax_sync, parameter[]]: constant[ The Syntax synchronisation objcet that we currently use. ] if call[name[self].sync_from_start, parameter[name[cli]]] begin[:] return[call[name[SyncFromStart], parameter[]]] def function[find_closest_generator, parameter[i]]: constant[ Return a generator close to line 'i', or None if none was fonud. ] for taget[tuple[[<ast.Name object at 0x7da204565720>, <ast.Name object at 0x7da204565300>]]] in starred[call[name[line_generators].items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b26ad420> begin[:] return[name[generator]] def function[create_line_generator, parameter[start_lineno, column]]: constant[ Create a generator that yields the lexed lines. Each iteration it yields a (line_number, [(token, text), ...]) tuple. ] def function[get_tokens, parameter[]]: variable[text] assign[=] call[call[constant[ ].join, parameter[call[name[document].lines][<ast.Slice object at 0x7da1b08a7e20>]]]][<ast.Slice object at 0x7da1b08a4d60>] for taget[tuple[[<ast.Name object at 0x7da1b08a4bb0>, <ast.Name object at 0x7da18bcca8f0>, <ast.Name object at 0x7da18bcc9750>]]] in starred[call[name[self].pygments_lexer.get_tokens_unprocessed, parameter[name[text]]]] begin[:] <ast.Yield object at 0x7da18bcc8490> return[call[name[enumerate], parameter[call[name[split_lines], parameter[call[name[get_tokens], parameter[]]]], name[start_lineno]]]] def function[get_generator, parameter[i]]: constant[ Find an already started generator that is close, or create a new one. 
] variable[generator] assign[=] call[name[find_closest_generator], parameter[name[i]]] if name[generator] begin[:] return[name[generator]] variable[i] assign[=] call[name[max], parameter[constant[0], binary_operation[name[i] - name[self].MIN_LINES_BACKWARDS]]] if compare[name[i] equal[==] constant[0]] begin[:] variable[row] assign[=] constant[0] variable[column] assign[=] constant[0] variable[generator] assign[=] call[name[find_closest_generator], parameter[name[i]]] if name[generator] begin[:] return[name[generator]] if name[column] begin[:] call[name[next], parameter[name[generator]]] <ast.AugAssign object at 0x7da18f09edd0> call[name[line_generators]][name[generator]] assign[=] name[row] return[name[generator]] def function[get_line, parameter[i]]: constant[ Return the tokens for a given line number. ] <ast.Try object at 0x7da18f09d5d0> return[list[[]]] return[name[get_line]]
keyword[def] identifier[lex_document] ( identifier[self] , identifier[cli] , identifier[document] ): literal[string] identifier[cache] ={} identifier[line_generators] ={} keyword[def] identifier[get_syntax_sync] (): literal[string] keyword[if] identifier[self] . identifier[sync_from_start] ( identifier[cli] ): keyword[return] identifier[SyncFromStart] () keyword[else] : keyword[return] identifier[self] . identifier[syntax_sync] keyword[def] identifier[find_closest_generator] ( identifier[i] ): literal[string] keyword[for] identifier[generator] , identifier[lineno] keyword[in] identifier[line_generators] . identifier[items] (): keyword[if] identifier[lineno] < identifier[i] keyword[and] identifier[i] - identifier[lineno] < identifier[self] . identifier[REUSE_GENERATOR_MAX_DISTANCE] : keyword[return] identifier[generator] keyword[def] identifier[create_line_generator] ( identifier[start_lineno] , identifier[column] = literal[int] ): literal[string] keyword[def] identifier[get_tokens] (): identifier[text] = literal[string] . identifier[join] ( identifier[document] . identifier[lines] [ identifier[start_lineno] :])[ identifier[column] :] keyword[for] identifier[_] , identifier[t] , identifier[v] keyword[in] identifier[self] . identifier[pygments_lexer] . identifier[get_tokens_unprocessed] ( identifier[text] ): keyword[yield] identifier[t] , identifier[v] keyword[return] identifier[enumerate] ( identifier[split_lines] ( identifier[get_tokens] ()), identifier[start_lineno] ) keyword[def] identifier[get_generator] ( identifier[i] ): literal[string] identifier[generator] = identifier[find_closest_generator] ( identifier[i] ) keyword[if] identifier[generator] : keyword[return] identifier[generator] identifier[i] = identifier[max] ( literal[int] , identifier[i] - identifier[self] . 
identifier[MIN_LINES_BACKWARDS] ) keyword[if] identifier[i] == literal[int] : identifier[row] = literal[int] identifier[column] = literal[int] keyword[else] : identifier[row] , identifier[column] = identifier[get_syntax_sync] (). identifier[get_sync_start_position] ( identifier[document] , identifier[i] ) identifier[generator] = identifier[find_closest_generator] ( identifier[i] ) keyword[if] identifier[generator] : keyword[return] identifier[generator] keyword[else] : identifier[generator] = identifier[create_line_generator] ( identifier[row] , identifier[column] ) keyword[if] identifier[column] : identifier[next] ( identifier[generator] ) identifier[row] += literal[int] identifier[line_generators] [ identifier[generator] ]= identifier[row] keyword[return] identifier[generator] keyword[def] identifier[get_line] ( identifier[i] ): literal[string] keyword[try] : keyword[return] identifier[cache] [ identifier[i] ] keyword[except] identifier[KeyError] : identifier[generator] = identifier[get_generator] ( identifier[i] ) keyword[for] identifier[num] , identifier[line] keyword[in] identifier[generator] : identifier[cache] [ identifier[num] ]= identifier[line] keyword[if] identifier[num] == identifier[i] : identifier[line_generators] [ identifier[generator] ]= identifier[i] keyword[if] identifier[num] + literal[int] keyword[in] identifier[cache] : keyword[del] identifier[cache] [ identifier[num] + literal[int] ] keyword[return] identifier[cache] [ identifier[num] ] keyword[return] [] keyword[return] identifier[get_line]
def lex_document(self, cli, document): """ Create a lexer function that takes a line number and returns the list of (Token, text) tuples as the Pygments lexer returns for that line. """ # Cache of already lexed lines. cache = {} # Pygments generators that are currently lexing. line_generators = {} # Map lexer generator to the line number. def get_syntax_sync(): """ The Syntax synchronisation objcet that we currently use. """ if self.sync_from_start(cli): return SyncFromStart() # depends on [control=['if'], data=[]] else: return self.syntax_sync def find_closest_generator(i): """ Return a generator close to line 'i', or None if none was fonud. """ for (generator, lineno) in line_generators.items(): if lineno < i and i - lineno < self.REUSE_GENERATOR_MAX_DISTANCE: return generator # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] def create_line_generator(start_lineno, column=0): """ Create a generator that yields the lexed lines. Each iteration it yields a (line_number, [(token, text), ...]) tuple. """ def get_tokens(): text = '\n'.join(document.lines[start_lineno:])[column:] # We call `get_tokens_unprocessed`, because `get_tokens` will # still replace \r\n and \r by \n. (We don't want that, # Pygments should return exactly the same amount of text, as we # have given as input.) for (_, t, v) in self.pygments_lexer.get_tokens_unprocessed(text): yield (t, v) # depends on [control=['for'], data=[]] return enumerate(split_lines(get_tokens()), start_lineno) def get_generator(i): """ Find an already started generator that is close, or create a new one. """ # Find closest line generator. generator = find_closest_generator(i) if generator: return generator # depends on [control=['if'], data=[]] # No generator found. Determine starting point for the syntax # synchronisation first. # Go at least x lines back. (Make scrolling upwards more # efficient.) 
i = max(0, i - self.MIN_LINES_BACKWARDS) if i == 0: row = 0 column = 0 # depends on [control=['if'], data=[]] else: (row, column) = get_syntax_sync().get_sync_start_position(document, i) # Find generator close to this point, or otherwise create a new one. generator = find_closest_generator(i) if generator: return generator # depends on [control=['if'], data=[]] else: generator = create_line_generator(row, column) # If the column is not 0, ignore the first line. (Which is # incomplete. This happens when the synchronisation algorithm tells # us to start parsing in the middle of a line.) if column: next(generator) row += 1 # depends on [control=['if'], data=[]] line_generators[generator] = row return generator def get_line(i): """ Return the tokens for a given line number. """ try: return cache[i] # depends on [control=['try'], data=[]] except KeyError: generator = get_generator(i) # Exhaust the generator, until we find the requested line. for (num, line) in generator: cache[num] = line if num == i: line_generators[generator] = i # Remove the next item from the cache. # (It could happen that it's already there, because of # another generator that started filling these lines, # but we want to synchronise these lines with the # current lexer's state.) if num + 1 in cache: del cache[num + 1] # depends on [control=['if'], data=['cache']] return cache[num] # depends on [control=['if'], data=['num', 'i']] # depends on [control=['for'], data=[]] # depends on [control=['except'], data=[]] return [] return get_line
def dirichlet_covariance(alpha):
    r"""Covariance matrix for Dirichlet distribution.

    The covariance of a Dirichlet distribution with concentration
    parameters :math:`\alpha` and :math:`\alpha_0 = \sum_i \alpha_i` is

    .. math::
        \mathrm{cov}[i, j] = \frac{-\alpha_i \alpha_j}
                                  {\alpha_0^2 (\alpha_0 + 1)}
        \quad (i \neq j), \qquad
        \mathrm{cov}[i, i] = \frac{\alpha_i (\alpha_0 - \alpha_i)}
                                  {\alpha_0^2 (\alpha_0 + 1)}.

    Parameters
    ----------
    alpha : (M, ) array_like
        Parameters of Dirichlet distribution.  Any 1-d array_like
        (list, tuple, ndarray) is accepted.

    Returns
    -------
    cov : (M, M) ndarray
        Covariance matrix
    """
    # Accept plain sequences, not just ndarrays.
    alpha = np.asarray(alpha, dtype=float)
    alpha0 = alpha.sum()
    norm = alpha0 ** 2 * (alpha0 + 1.0)

    # Non normalized covariance: off-diagonal terms -alpha_i * alpha_j.
    Z = -np.outer(alpha, alpha)

    # Correct diagonal: Z_ii becomes alpha_i * (alpha0 - alpha_i).
    ind = np.diag_indices(Z.shape[0])
    Z[ind] += alpha0 * alpha

    # Normalize to obtain the covariance matrix.
    cov = Z / norm
    return cov
def function[dirichlet_covariance, parameter[alpha]]: constant[Covariance matrix for Dirichlet distribution. Parameters ---------- alpha : (M, ) ndarray Parameters of Dirichlet distribution Returns ------- cov : (M, M) ndarray Covariance matrix ] variable[alpha0] assign[=] call[name[alpha].sum, parameter[]] variable[norm] assign[=] binary_operation[binary_operation[name[alpha0] ** constant[2]] * binary_operation[name[alpha0] + constant[1.0]]] constant[Non normalized covariance] variable[Z] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b255e4a0> * call[name[alpha]][tuple[[<ast.Attribute object at 0x7da1b255ce80>, <ast.Slice object at 0x7da1b255ec20>]]]] constant[Correct diagonal] variable[ind] assign[=] call[name[np].diag_indices, parameter[call[name[Z].shape][constant[0]]]] <ast.AugAssign object at 0x7da1b255f5b0> constant[Covariance matrix] variable[cov] assign[=] binary_operation[name[Z] / name[norm]] return[name[cov]]
keyword[def] identifier[dirichlet_covariance] ( identifier[alpha] ): literal[string] identifier[alpha0] = identifier[alpha] . identifier[sum] () identifier[norm] = identifier[alpha0] ** literal[int] *( identifier[alpha0] + literal[int] ) literal[string] identifier[Z] =- identifier[alpha] [:, identifier[np] . identifier[newaxis] ]* identifier[alpha] [ identifier[np] . identifier[newaxis] ,:] literal[string] identifier[ind] = identifier[np] . identifier[diag_indices] ( identifier[Z] . identifier[shape] [ literal[int] ]) identifier[Z] [ identifier[ind] ]+= identifier[alpha0] * identifier[alpha] literal[string] identifier[cov] = identifier[Z] / identifier[norm] keyword[return] identifier[cov]
def dirichlet_covariance(alpha): """Covariance matrix for Dirichlet distribution. Parameters ---------- alpha : (M, ) ndarray Parameters of Dirichlet distribution Returns ------- cov : (M, M) ndarray Covariance matrix """ alpha0 = alpha.sum() norm = alpha0 ** 2 * (alpha0 + 1.0) 'Non normalized covariance' Z = -alpha[:, np.newaxis] * alpha[np.newaxis, :] 'Correct diagonal' ind = np.diag_indices(Z.shape[0]) Z[ind] += alpha0 * alpha 'Covariance matrix' cov = Z / norm return cov
def shell_context_processor(self, func: Callable) -> Callable:
    """Register *func* as a shell context processor.

    Designed for use as a decorator; the function is returned unchanged so
    the decorated name still refers to it. An example usage,

    .. code-block:: python

        @app.shell_context_processor
        def additional_context():
            return context
    """
    registry = self.shell_context_processors
    registry.append(func)
    return func
def function[shell_context_processor, parameter[self, func]]: constant[Add a shell context processor. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.shell_context_processor def additional_context(): return context ] call[name[self].shell_context_processors.append, parameter[name[func]]] return[name[func]]
keyword[def] identifier[shell_context_processor] ( identifier[self] , identifier[func] : identifier[Callable] )-> identifier[Callable] : literal[string] identifier[self] . identifier[shell_context_processors] . identifier[append] ( identifier[func] ) keyword[return] identifier[func]
def shell_context_processor(self, func: Callable) -> Callable: """Add a shell context processor. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.shell_context_processor def additional_context(): return context """ self.shell_context_processors.append(func) return func
def get_number_of_current_players(self, appID, format=None):
    """Request the current number of players for a given app.

    appID: The app ID
    format: Return format. None defaults to json. (json, xml, vdf)
    """
    # Build the query parameters; 'format' is only sent when requested.
    request_params = dict(appid=appID)
    if format is not None:
        request_params.update(format=format)
    request_url = self.create_request_url(self.interface,
                                          'GetNumberOfCurrentPlayers', 1,
                                          request_params)
    response = self.retrieve_request(request_url)
    return self.return_data(response, format=format)
def function[get_number_of_current_players, parameter[self, appID, format]]: constant[Request the current number of players for a given app. appID: The app ID format: Return format. None defaults to json. (json, xml, vdf) ] variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf010>], [<ast.Name object at 0x7da18c4cc040>]] if compare[name[format] is_not constant[None]] begin[:] call[name[parameters]][constant[format]] assign[=] name[format] variable[url] assign[=] call[name[self].create_request_url, parameter[name[self].interface, constant[GetNumberOfCurrentPlayers], constant[1], name[parameters]]] variable[data] assign[=] call[name[self].retrieve_request, parameter[name[url]]] return[call[name[self].return_data, parameter[name[data]]]]
keyword[def] identifier[get_number_of_current_players] ( identifier[self] , identifier[appID] , identifier[format] = keyword[None] ): literal[string] identifier[parameters] ={ literal[string] : identifier[appID] } keyword[if] identifier[format] keyword[is] keyword[not] keyword[None] : identifier[parameters] [ literal[string] ]= identifier[format] identifier[url] = identifier[self] . identifier[create_request_url] ( identifier[self] . identifier[interface] , literal[string] , literal[int] , identifier[parameters] ) identifier[data] = identifier[self] . identifier[retrieve_request] ( identifier[url] ) keyword[return] identifier[self] . identifier[return_data] ( identifier[data] , identifier[format] = identifier[format] )
def get_number_of_current_players(self, appID, format=None): """Request the current number of players for a given app. appID: The app ID format: Return format. None defaults to json. (json, xml, vdf) """ parameters = {'appid': appID} if format is not None: parameters['format'] = format # depends on [control=['if'], data=['format']] url = self.create_request_url(self.interface, 'GetNumberOfCurrentPlayers', 1, parameters) data = self.retrieve_request(url) return self.return_data(data, format=format)
def _plot_formatting(title, est_file, algo_ids, last_bound, N, output_file):
    """Formats the plot with the correct axis labels, title, ticks, and so on.

    Decorates the *current* pyplot figure (title, y ticks/labels, x label
    and limits), then shows it and optionally saves it.  It draws no data
    itself, so it is presumably called after the data has been plotted —
    TODO confirm against the caller.

    Parameters
    ----------
    title : str or None
        Plot title; if None, derived from the basename of ``est_file``
        (everything before the first dot).
    est_file : str
        Path of the estimations file; only used to derive a default title.
    algo_ids : sequence of str
        Labels for the y tick marks — one per algorithm row, so its length
        should match ``N``.
    last_bound : float
        Right limit of the x axis.
    N : int
        Number of algorithm rows; determines the y tick spacing.
    output_file : str or None
        If not None, the figure is also saved to this path before showing.
    """
    import matplotlib.pyplot as plt
    if title is None:
        title = os.path.basename(est_file).split(".")[0]
    plt.title(title)
    # N evenly spaced ticks in [0, 1), shifted by half a slot so each label
    # sits at the vertical center of its row.
    plt.yticks(np.arange(0, 1, 1 / float(N)) + 1 / (float(N) * 2))
    plt.gcf().subplots_adjust(bottom=0.22)  # leave room for the x label
    plt.gca().set_yticklabels(algo_ids)
    plt.xlabel("Time (seconds)")
    plt.xlim((0, last_bound))
    plt.tight_layout()
    if output_file is not None:
        plt.savefig(output_file)
    plt.show()
def function[_plot_formatting, parameter[title, est_file, algo_ids, last_bound, N, output_file]]: constant[Formats the plot with the correct axis labels, title, ticks, and so on.] import module[matplotlib.pyplot] as alias[plt] if compare[name[title] is constant[None]] begin[:] variable[title] assign[=] call[call[call[name[os].path.basename, parameter[name[est_file]]].split, parameter[constant[.]]]][constant[0]] call[name[plt].title, parameter[name[title]]] call[name[plt].yticks, parameter[binary_operation[call[name[np].arange, parameter[constant[0], constant[1], binary_operation[constant[1] / call[name[float], parameter[name[N]]]]]] + binary_operation[constant[1] / binary_operation[call[name[float], parameter[name[N]]] * constant[2]]]]]] call[call[name[plt].gcf, parameter[]].subplots_adjust, parameter[]] call[call[name[plt].gca, parameter[]].set_yticklabels, parameter[name[algo_ids]]] call[name[plt].xlabel, parameter[constant[Time (seconds)]]] call[name[plt].xlim, parameter[tuple[[<ast.Constant object at 0x7da1b02dca60>, <ast.Name object at 0x7da1b02df0d0>]]]] call[name[plt].tight_layout, parameter[]] if compare[name[output_file] is_not constant[None]] begin[:] call[name[plt].savefig, parameter[name[output_file]]] call[name[plt].show, parameter[]]
keyword[def] identifier[_plot_formatting] ( identifier[title] , identifier[est_file] , identifier[algo_ids] , identifier[last_bound] , identifier[N] , identifier[output_file] ): literal[string] keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt] keyword[if] identifier[title] keyword[is] keyword[None] : identifier[title] = identifier[os] . identifier[path] . identifier[basename] ( identifier[est_file] ). identifier[split] ( literal[string] )[ literal[int] ] identifier[plt] . identifier[title] ( identifier[title] ) identifier[plt] . identifier[yticks] ( identifier[np] . identifier[arange] ( literal[int] , literal[int] , literal[int] / identifier[float] ( identifier[N] ))+ literal[int] /( identifier[float] ( identifier[N] )* literal[int] )) identifier[plt] . identifier[gcf] (). identifier[subplots_adjust] ( identifier[bottom] = literal[int] ) identifier[plt] . identifier[gca] (). identifier[set_yticklabels] ( identifier[algo_ids] ) identifier[plt] . identifier[xlabel] ( literal[string] ) identifier[plt] . identifier[xlim] (( literal[int] , identifier[last_bound] )) identifier[plt] . identifier[tight_layout] () keyword[if] identifier[output_file] keyword[is] keyword[not] keyword[None] : identifier[plt] . identifier[savefig] ( identifier[output_file] ) identifier[plt] . identifier[show] ()
def _plot_formatting(title, est_file, algo_ids, last_bound, N, output_file): """Formats the plot with the correct axis labels, title, ticks, and so on.""" import matplotlib.pyplot as plt if title is None: title = os.path.basename(est_file).split('.')[0] # depends on [control=['if'], data=['title']] plt.title(title) plt.yticks(np.arange(0, 1, 1 / float(N)) + 1 / (float(N) * 2)) plt.gcf().subplots_adjust(bottom=0.22) plt.gca().set_yticklabels(algo_ids) plt.xlabel('Time (seconds)') plt.xlim((0, last_bound)) plt.tight_layout() if output_file is not None: plt.savefig(output_file) # depends on [control=['if'], data=['output_file']] plt.show()
def parse_device_information(
        device_info_string: str) -> Mapping[str, str]:
    '''
    Parse the module's device information response.

    Example response from temp-deck:
    "serial:aa11 model:bb22 version:cc33"

    Raises ParseError when the input is empty, not a string, has fewer
    than three space-separated fields, or is missing any of the required
    keys (model / version / serial).
    '''
    error_msg = 'Unexpected argument to parse_device_information: {}'.format(
        device_info_string)

    # Reject non-strings and empty strings up front.
    if not isinstance(device_info_string, str) or not device_info_string:
        raise ParseError(error_msg)

    fields = device_info_string.strip().split(' ')
    if len(fields) < 3:
        log.error(error_msg)
        raise ParseError(error_msg)

    # Only the first three fields carry device information.
    parsed = {}
    for field in fields[:3]:
        parsed[parse_key_from_substring(field)] = \
            parse_string_value_from_substring(field)

    for required_key in ('model', 'version', 'serial'):
        if required_key not in parsed:
            raise ParseError(error_msg)
    return parsed
def function[parse_device_information, parameter[device_info_string]]: constant[ Parse the modules's device information response. Example response from temp-deck: "serial:aa11 model:bb22 version:cc33" ] variable[error_msg] assign[=] call[constant[Unexpected argument to parse_device_information: {}].format, parameter[name[device_info_string]]] if <ast.BoolOp object at 0x7da2043459f0> begin[:] <ast.Raise object at 0x7da2043471f0> variable[parsed_values] assign[=] call[call[name[device_info_string].strip, parameter[]].split, parameter[constant[ ]]] if compare[call[name[len], parameter[name[parsed_values]]] less[<] constant[3]] begin[:] call[name[log].error, parameter[name[error_msg]]] <ast.Raise object at 0x7da2043456f0> variable[res] assign[=] <ast.DictComp object at 0x7da2043467a0> for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da204346c20>, <ast.Constant object at 0x7da2043476d0>, <ast.Constant object at 0x7da204347790>]]] begin[:] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[res]] begin[:] <ast.Raise object at 0x7da20c6c4fa0> return[name[res]]
keyword[def] identifier[parse_device_information] ( identifier[device_info_string] : identifier[str] )-> identifier[Mapping] [ identifier[str] , identifier[str] ]: literal[string] identifier[error_msg] = literal[string] . identifier[format] ( identifier[device_info_string] ) keyword[if] keyword[not] identifier[device_info_string] keyword[or] keyword[not] identifier[isinstance] ( identifier[device_info_string] , identifier[str] ): keyword[raise] identifier[ParseError] ( identifier[error_msg] ) identifier[parsed_values] = identifier[device_info_string] . identifier[strip] (). identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[parsed_values] )< literal[int] : identifier[log] . identifier[error] ( identifier[error_msg] ) keyword[raise] identifier[ParseError] ( identifier[error_msg] ) identifier[res] ={ identifier[parse_key_from_substring] ( identifier[s] ): identifier[parse_string_value_from_substring] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[parsed_values] [: literal[int] ] } keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[if] identifier[key] keyword[not] keyword[in] identifier[res] : keyword[raise] identifier[ParseError] ( identifier[error_msg] ) keyword[return] identifier[res]
def parse_device_information(device_info_string: str) -> Mapping[str, str]: """ Parse the modules's device information response. Example response from temp-deck: "serial:aa11 model:bb22 version:cc33" """ error_msg = 'Unexpected argument to parse_device_information: {}'.format(device_info_string) if not device_info_string or not isinstance(device_info_string, str): raise ParseError(error_msg) # depends on [control=['if'], data=[]] parsed_values = device_info_string.strip().split(' ') if len(parsed_values) < 3: log.error(error_msg) raise ParseError(error_msg) # depends on [control=['if'], data=[]] res = {parse_key_from_substring(s): parse_string_value_from_substring(s) for s in parsed_values[:3]} for key in ['model', 'version', 'serial']: if key not in res: raise ParseError(error_msg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] return res
def get_pelican_cls(settings):
    '''Get the Pelican class requested in settings.

    ``settings['PELICAN_CLASS']`` may be either the class object itself or
    a dotted-path string such as ``"pelican.Pelican"``; strings are
    resolved by importing the module and looking up the attribute.

    :param settings: settings mapping containing ``PELICAN_CLASS``.
    :return: the resolved class object.
    '''
    cls = settings['PELICAN_CLASS']
    if isinstance(cls, six.string_types):
        module_name, cls_name = cls.rsplit('.', 1)
        # BUG FIX: ``__import__('a.b')`` returns the *top-level* package
        # ``a``, so the subsequent getattr failed for any module path with
        # more than one dot (e.g. 'pkg.sub.Cls').  importlib.import_module
        # returns the leaf module itself.
        import importlib
        module = importlib.import_module(module_name)
        cls = getattr(module, cls_name)
    return cls
def function[get_pelican_cls, parameter[settings]]: constant[Get the Pelican class requested in settings] variable[cls] assign[=] call[name[settings]][constant[PELICAN_CLASS]] if call[name[isinstance], parameter[name[cls], name[six].string_types]] begin[:] <ast.Tuple object at 0x7da1b1d21090> assign[=] call[name[cls].rsplit, parameter[constant[.], constant[1]]] variable[module] assign[=] call[name[__import__], parameter[name[module]]] variable[cls] assign[=] call[name[getattr], parameter[name[module], name[cls_name]]] return[name[cls]]
keyword[def] identifier[get_pelican_cls] ( identifier[settings] ): literal[string] identifier[cls] = identifier[settings] [ literal[string] ] keyword[if] identifier[isinstance] ( identifier[cls] , identifier[six] . identifier[string_types] ): identifier[module] , identifier[cls_name] = identifier[cls] . identifier[rsplit] ( literal[string] , literal[int] ) identifier[module] = identifier[__import__] ( identifier[module] ) identifier[cls] = identifier[getattr] ( identifier[module] , identifier[cls_name] ) keyword[return] identifier[cls]
def get_pelican_cls(settings): """Get the Pelican class requested in settings""" cls = settings['PELICAN_CLASS'] if isinstance(cls, six.string_types): (module, cls_name) = cls.rsplit('.', 1) module = __import__(module) cls = getattr(module, cls_name) # depends on [control=['if'], data=[]] return cls
def state(self, statespec=None):
    """
    Modify or inquire widget state.

    When *statespec* is falsy the current state spec is returned.
    Otherwise the state is updated first: disabling the widget suppresses
    checkbox clicks by binding ``<Button-1>`` to ``'break'``, while
    re-enabling restores the ``_box_click`` handler.  The spec of flags
    that changed is returned.

    :param statespec: Widget state is returned if `statespec` is None,
                      otherwise it is set according to the statespec
                      flags and then a new state spec is returned
                      indicating which flags were changed.
    :type statespec: None or sequence[str]
    """
    if not statespec:
        return ttk.Treeview.state(self)
    if "disabled" in statespec:
        # Swallow mouse clicks while the widget is disabled.
        self.bind('<Button-1>', lambda e: 'break')
    elif "!disabled" in statespec:
        # Restore the normal checkbox-toggle click handler.
        self.unbind("<Button-1>")
        self.bind("<Button-1>", self._box_click, True)
    return ttk.Treeview.state(self, statespec)
def function[state, parameter[self, statespec]]: constant[ Modify or inquire widget state. :param statespec: Widget state is returned if `statespec` is None, otherwise it is set according to the statespec flags and then a new state spec is returned indicating which flags were changed. :type statespec: None or sequence[str] ] if name[statespec] begin[:] if compare[constant[disabled] in name[statespec]] begin[:] call[name[self].bind, parameter[constant[<Button-1>], <ast.Lambda object at 0x7da1b236cfa0>]] return[call[name[ttk].Treeview.state, parameter[name[self], name[statespec]]]]
keyword[def] identifier[state] ( identifier[self] , identifier[statespec] = keyword[None] ): literal[string] keyword[if] identifier[statespec] : keyword[if] literal[string] keyword[in] identifier[statespec] : identifier[self] . identifier[bind] ( literal[string] , keyword[lambda] identifier[e] : literal[string] ) keyword[elif] literal[string] keyword[in] identifier[statespec] : identifier[self] . identifier[unbind] ( literal[string] ) identifier[self] . identifier[bind] ( literal[string] , identifier[self] . identifier[_box_click] , keyword[True] ) keyword[return] identifier[ttk] . identifier[Treeview] . identifier[state] ( identifier[self] , identifier[statespec] ) keyword[else] : keyword[return] identifier[ttk] . identifier[Treeview] . identifier[state] ( identifier[self] )
def state(self, statespec=None): """ Modify or inquire widget state. :param statespec: Widget state is returned if `statespec` is None, otherwise it is set according to the statespec flags and then a new state spec is returned indicating which flags were changed. :type statespec: None or sequence[str] """ if statespec: if 'disabled' in statespec: self.bind('<Button-1>', lambda e: 'break') # depends on [control=['if'], data=[]] elif '!disabled' in statespec: self.unbind('<Button-1>') self.bind('<Button-1>', self._box_click, True) # depends on [control=['if'], data=[]] return ttk.Treeview.state(self, statespec) # depends on [control=['if'], data=[]] else: return ttk.Treeview.state(self)
def getWindowPID(self, hwnd): """ Gets the process ID that the specified window belongs to """ pid = ctypes.c_ulong() ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid)) return int(pid.value)
def function[getWindowPID, parameter[self, hwnd]]: constant[ Gets the process ID that the specified window belongs to ] variable[pid] assign[=] call[name[ctypes].c_ulong, parameter[]] call[name[ctypes].windll.user32.GetWindowThreadProcessId, parameter[name[hwnd], call[name[ctypes].byref, parameter[name[pid]]]]] return[call[name[int], parameter[name[pid].value]]]
keyword[def] identifier[getWindowPID] ( identifier[self] , identifier[hwnd] ): literal[string] identifier[pid] = identifier[ctypes] . identifier[c_ulong] () identifier[ctypes] . identifier[windll] . identifier[user32] . identifier[GetWindowThreadProcessId] ( identifier[hwnd] , identifier[ctypes] . identifier[byref] ( identifier[pid] )) keyword[return] identifier[int] ( identifier[pid] . identifier[value] )
def getWindowPID(self, hwnd): """ Gets the process ID that the specified window belongs to """ pid = ctypes.c_ulong() ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid)) return int(pid.value)
def fingerprint_file(workspace, filename): """Given a relative filename located in a workspace, fingerprint the file. Returns a tuple of fingerprint string and size string. """ content = read_file(os.path.join(workspace, filename), binary_mode=True) fingerprint = hashlib.sha256(content) b64_encoded = base64.b64encode(fingerprint.digest()) return 'sha256={}'.format(b64_encoded.decode('utf-8')), str(len(content))
def function[fingerprint_file, parameter[workspace, filename]]: constant[Given a relative filename located in a workspace, fingerprint the file. Returns a tuple of fingerprint string and size string. ] variable[content] assign[=] call[name[read_file], parameter[call[name[os].path.join, parameter[name[workspace], name[filename]]]]] variable[fingerprint] assign[=] call[name[hashlib].sha256, parameter[name[content]]] variable[b64_encoded] assign[=] call[name[base64].b64encode, parameter[call[name[fingerprint].digest, parameter[]]]] return[tuple[[<ast.Call object at 0x7da1b224a470>, <ast.Call object at 0x7da1b224ad10>]]]
keyword[def] identifier[fingerprint_file] ( identifier[workspace] , identifier[filename] ): literal[string] identifier[content] = identifier[read_file] ( identifier[os] . identifier[path] . identifier[join] ( identifier[workspace] , identifier[filename] ), identifier[binary_mode] = keyword[True] ) identifier[fingerprint] = identifier[hashlib] . identifier[sha256] ( identifier[content] ) identifier[b64_encoded] = identifier[base64] . identifier[b64encode] ( identifier[fingerprint] . identifier[digest] ()) keyword[return] literal[string] . identifier[format] ( identifier[b64_encoded] . identifier[decode] ( literal[string] )), identifier[str] ( identifier[len] ( identifier[content] ))
def fingerprint_file(workspace, filename): """Given a relative filename located in a workspace, fingerprint the file. Returns a tuple of fingerprint string and size string. """ content = read_file(os.path.join(workspace, filename), binary_mode=True) fingerprint = hashlib.sha256(content) b64_encoded = base64.b64encode(fingerprint.digest()) return ('sha256={}'.format(b64_encoded.decode('utf-8')), str(len(content)))
def pretty_relpath(path, start): ''' Returns a relative path, but only if it doesn't start with a non-pretty parent directory ".." ''' relpath = os.path.relpath(path, start) if relpath.startswith('..'): return path return relpath
def function[pretty_relpath, parameter[path, start]]: constant[ Returns a relative path, but only if it doesn't start with a non-pretty parent directory ".." ] variable[relpath] assign[=] call[name[os].path.relpath, parameter[name[path], name[start]]] if call[name[relpath].startswith, parameter[constant[..]]] begin[:] return[name[path]] return[name[relpath]]
keyword[def] identifier[pretty_relpath] ( identifier[path] , identifier[start] ): literal[string] identifier[relpath] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[path] , identifier[start] ) keyword[if] identifier[relpath] . identifier[startswith] ( literal[string] ): keyword[return] identifier[path] keyword[return] identifier[relpath]
def pretty_relpath(path, start): """ Returns a relative path, but only if it doesn't start with a non-pretty parent directory ".." """ relpath = os.path.relpath(path, start) if relpath.startswith('..'): return path # depends on [control=['if'], data=[]] return relpath
def lookup(node, name): """Lookup the given special method name in the given *node* If the special method was found, then a list of attributes will be returned. Otherwise, `astroid.AttributeInferenceError` is going to be raised. """ if isinstance( node, (astroid.List, astroid.Tuple, astroid.Const, astroid.Dict, astroid.Set) ): return _builtin_lookup(node, name) if isinstance(node, astroid.Instance): return _lookup_in_mro(node, name) if isinstance(node, astroid.ClassDef): return _class_lookup(node, name) raise exceptions.AttributeInferenceError(attribute=name, target=node)
def function[lookup, parameter[node, name]]: constant[Lookup the given special method name in the given *node* If the special method was found, then a list of attributes will be returned. Otherwise, `astroid.AttributeInferenceError` is going to be raised. ] if call[name[isinstance], parameter[name[node], tuple[[<ast.Attribute object at 0x7da1b1e7bf70>, <ast.Attribute object at 0x7da1b1e78910>, <ast.Attribute object at 0x7da1b1e7b610>, <ast.Attribute object at 0x7da1b1e78c70>, <ast.Attribute object at 0x7da1b1e7b100>]]]] begin[:] return[call[name[_builtin_lookup], parameter[name[node], name[name]]]] if call[name[isinstance], parameter[name[node], name[astroid].Instance]] begin[:] return[call[name[_lookup_in_mro], parameter[name[node], name[name]]]] if call[name[isinstance], parameter[name[node], name[astroid].ClassDef]] begin[:] return[call[name[_class_lookup], parameter[name[node], name[name]]]] <ast.Raise object at 0x7da1b1e7aad0>
keyword[def] identifier[lookup] ( identifier[node] , identifier[name] ): literal[string] keyword[if] identifier[isinstance] ( identifier[node] ,( identifier[astroid] . identifier[List] , identifier[astroid] . identifier[Tuple] , identifier[astroid] . identifier[Const] , identifier[astroid] . identifier[Dict] , identifier[astroid] . identifier[Set] ) ): keyword[return] identifier[_builtin_lookup] ( identifier[node] , identifier[name] ) keyword[if] identifier[isinstance] ( identifier[node] , identifier[astroid] . identifier[Instance] ): keyword[return] identifier[_lookup_in_mro] ( identifier[node] , identifier[name] ) keyword[if] identifier[isinstance] ( identifier[node] , identifier[astroid] . identifier[ClassDef] ): keyword[return] identifier[_class_lookup] ( identifier[node] , identifier[name] ) keyword[raise] identifier[exceptions] . identifier[AttributeInferenceError] ( identifier[attribute] = identifier[name] , identifier[target] = identifier[node] )
def lookup(node, name): """Lookup the given special method name in the given *node* If the special method was found, then a list of attributes will be returned. Otherwise, `astroid.AttributeInferenceError` is going to be raised. """ if isinstance(node, (astroid.List, astroid.Tuple, astroid.Const, astroid.Dict, astroid.Set)): return _builtin_lookup(node, name) # depends on [control=['if'], data=[]] if isinstance(node, astroid.Instance): return _lookup_in_mro(node, name) # depends on [control=['if'], data=[]] if isinstance(node, astroid.ClassDef): return _class_lookup(node, name) # depends on [control=['if'], data=[]] raise exceptions.AttributeInferenceError(attribute=name, target=node)
def init_app(self, app): """Setup the 4 default routes.""" app.add_url_rule(self.request_token_url, view_func=self.request_token, methods=[u'POST']) app.add_url_rule(self.access_token_url, view_func=self.access_token, methods=[u'POST']) app.add_url_rule(self.register_url, view_func=self.register, methods=[u'GET', u'POST']) app.add_url_rule(self.authorize_url, view_func=self.authorize, methods=[u'GET', u'POST'])
def function[init_app, parameter[self, app]]: constant[Setup the 4 default routes.] call[name[app].add_url_rule, parameter[name[self].request_token_url]] call[name[app].add_url_rule, parameter[name[self].access_token_url]] call[name[app].add_url_rule, parameter[name[self].register_url]] call[name[app].add_url_rule, parameter[name[self].authorize_url]]
keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ): literal[string] identifier[app] . identifier[add_url_rule] ( identifier[self] . identifier[request_token_url] , identifier[view_func] = identifier[self] . identifier[request_token] , identifier[methods] =[ literal[string] ]) identifier[app] . identifier[add_url_rule] ( identifier[self] . identifier[access_token_url] , identifier[view_func] = identifier[self] . identifier[access_token] , identifier[methods] =[ literal[string] ]) identifier[app] . identifier[add_url_rule] ( identifier[self] . identifier[register_url] , identifier[view_func] = identifier[self] . identifier[register] , identifier[methods] =[ literal[string] , literal[string] ]) identifier[app] . identifier[add_url_rule] ( identifier[self] . identifier[authorize_url] , identifier[view_func] = identifier[self] . identifier[authorize] , identifier[methods] =[ literal[string] , literal[string] ])
def init_app(self, app): """Setup the 4 default routes.""" app.add_url_rule(self.request_token_url, view_func=self.request_token, methods=[u'POST']) app.add_url_rule(self.access_token_url, view_func=self.access_token, methods=[u'POST']) app.add_url_rule(self.register_url, view_func=self.register, methods=[u'GET', u'POST']) app.add_url_rule(self.authorize_url, view_func=self.authorize, methods=[u'GET', u'POST'])
def show_hist(self, props=[], bins=20, **kwargs): r""" Show a quick plot of key property distributions. Parameters ---------- props : string or list of strings The pore and/or throat properties to be plotted as histograms bins : int or array_like The number of bins to use when generating the histogram. If an array is given they are used as the bin spacing instead. Notes ----- Other keyword arguments are passed to the ``matplotlib.pyplot.hist`` function. """ if type(props) is str: props = [props] N = len(props) if N == 1: r = 1 c = 1 elif N < 4: r = 1 c = N else: r = int(sp.ceil(N**0.5)) c = int(sp.floor(N**0.5)) for i in range(len(props)): plt.subplot(r, c, i+1) plt.hist(self[props[i]], bins=bins, **kwargs)
def function[show_hist, parameter[self, props, bins]]: constant[ Show a quick plot of key property distributions. Parameters ---------- props : string or list of strings The pore and/or throat properties to be plotted as histograms bins : int or array_like The number of bins to use when generating the histogram. If an array is given they are used as the bin spacing instead. Notes ----- Other keyword arguments are passed to the ``matplotlib.pyplot.hist`` function. ] if compare[call[name[type], parameter[name[props]]] is name[str]] begin[:] variable[props] assign[=] list[[<ast.Name object at 0x7da18fe905e0>]] variable[N] assign[=] call[name[len], parameter[name[props]]] if compare[name[N] equal[==] constant[1]] begin[:] variable[r] assign[=] constant[1] variable[c] assign[=] constant[1] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[props]]]]]] begin[:] call[name[plt].subplot, parameter[name[r], name[c], binary_operation[name[i] + constant[1]]]] call[name[plt].hist, parameter[call[name[self]][call[name[props]][name[i]]]]]
keyword[def] identifier[show_hist] ( identifier[self] , identifier[props] =[], identifier[bins] = literal[int] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[type] ( identifier[props] ) keyword[is] identifier[str] : identifier[props] =[ identifier[props] ] identifier[N] = identifier[len] ( identifier[props] ) keyword[if] identifier[N] == literal[int] : identifier[r] = literal[int] identifier[c] = literal[int] keyword[elif] identifier[N] < literal[int] : identifier[r] = literal[int] identifier[c] = identifier[N] keyword[else] : identifier[r] = identifier[int] ( identifier[sp] . identifier[ceil] ( identifier[N] ** literal[int] )) identifier[c] = identifier[int] ( identifier[sp] . identifier[floor] ( identifier[N] ** literal[int] )) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[props] )): identifier[plt] . identifier[subplot] ( identifier[r] , identifier[c] , identifier[i] + literal[int] ) identifier[plt] . identifier[hist] ( identifier[self] [ identifier[props] [ identifier[i] ]], identifier[bins] = identifier[bins] ,** identifier[kwargs] )
def show_hist(self, props=[], bins=20, **kwargs): """ Show a quick plot of key property distributions. Parameters ---------- props : string or list of strings The pore and/or throat properties to be plotted as histograms bins : int or array_like The number of bins to use when generating the histogram. If an array is given they are used as the bin spacing instead. Notes ----- Other keyword arguments are passed to the ``matplotlib.pyplot.hist`` function. """ if type(props) is str: props = [props] # depends on [control=['if'], data=[]] N = len(props) if N == 1: r = 1 c = 1 # depends on [control=['if'], data=[]] elif N < 4: r = 1 c = N # depends on [control=['if'], data=['N']] else: r = int(sp.ceil(N ** 0.5)) c = int(sp.floor(N ** 0.5)) for i in range(len(props)): plt.subplot(r, c, i + 1) plt.hist(self[props[i]], bins=bins, **kwargs) # depends on [control=['for'], data=['i']]
def rotate_right(head, k): """ :type head: ListNode :type k: int :rtype: ListNode """ if not head or not head.next: return head current = head length = 1 # count length of the list while current.next: current = current.next length += 1 # make it circular current.next = head k = k % length # rotate until length-k for i in range(length-k): current = current.next head = current.next current.next = None return head
def function[rotate_right, parameter[head, k]]: constant[ :type head: ListNode :type k: int :rtype: ListNode ] if <ast.BoolOp object at 0x7da1b2032f20> begin[:] return[name[head]] variable[current] assign[=] name[head] variable[length] assign[=] constant[1] while name[current].next begin[:] variable[current] assign[=] name[current].next <ast.AugAssign object at 0x7da1b2033490> name[current].next assign[=] name[head] variable[k] assign[=] binary_operation[name[k] <ast.Mod object at 0x7da2590d6920> name[length]] for taget[name[i]] in starred[call[name[range], parameter[binary_operation[name[length] - name[k]]]]] begin[:] variable[current] assign[=] name[current].next variable[head] assign[=] name[current].next name[current].next assign[=] constant[None] return[name[head]]
keyword[def] identifier[rotate_right] ( identifier[head] , identifier[k] ): literal[string] keyword[if] keyword[not] identifier[head] keyword[or] keyword[not] identifier[head] . identifier[next] : keyword[return] identifier[head] identifier[current] = identifier[head] identifier[length] = literal[int] keyword[while] identifier[current] . identifier[next] : identifier[current] = identifier[current] . identifier[next] identifier[length] += literal[int] identifier[current] . identifier[next] = identifier[head] identifier[k] = identifier[k] % identifier[length] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[length] - identifier[k] ): identifier[current] = identifier[current] . identifier[next] identifier[head] = identifier[current] . identifier[next] identifier[current] . identifier[next] = keyword[None] keyword[return] identifier[head]
def rotate_right(head, k): """ :type head: ListNode :type k: int :rtype: ListNode """ if not head or not head.next: return head # depends on [control=['if'], data=[]] current = head length = 1 # count length of the list while current.next: current = current.next length += 1 # depends on [control=['while'], data=[]] # make it circular current.next = head k = k % length # rotate until length-k for i in range(length - k): current = current.next # depends on [control=['for'], data=[]] head = current.next current.next = None return head
def _embed_state(embedding, state): """Embed a single state/sample by spreading it's values over the chains in the embedding""" return {u: state[v] for v, chain in embedding.items() for u in chain}
def function[_embed_state, parameter[embedding, state]]: constant[Embed a single state/sample by spreading it's values over the chains in the embedding] return[<ast.DictComp object at 0x7da1b0f3acb0>]
keyword[def] identifier[_embed_state] ( identifier[embedding] , identifier[state] ): literal[string] keyword[return] { identifier[u] : identifier[state] [ identifier[v] ] keyword[for] identifier[v] , identifier[chain] keyword[in] identifier[embedding] . identifier[items] () keyword[for] identifier[u] keyword[in] identifier[chain] }
def _embed_state(embedding, state): """Embed a single state/sample by spreading it's values over the chains in the embedding""" return {u: state[v] for (v, chain) in embedding.items() for u in chain}
def get_pattern_formatter(cls, location): """ Fragment from aiohttp.web_urldispatcher.UrlDispatcher#add_resource :param location: :return: """ pattern = '' formatter = '' canon = '' for part in cls.ROUTE_RE.split(location): match = cls.DYN.match(part) if match: pattern += '(?P<{}>{})'.format(match.group('var'), cls.GOOD) formatter += '{' + match.group('var') + '}' continue match = cls.DYN_WITH_RE.match(part) if match: pattern += '(?P<{var}>{re})'.format(**match.groupdict()) formatter += '{' + match.group('var') + '}' canon += match.group('re') continue if '{' in part or '}' in part: raise ValueError("Invalid path '{}'['{}']".format( location, part)) formatter += part pattern += re.escape(part) canon += part try: return re.compile(pattern), formatter, canon except re.error as exc: raise ValueError( "Bad pattern '{}': {}".format(pattern, exc)) from None
def function[get_pattern_formatter, parameter[cls, location]]: constant[ Fragment from aiohttp.web_urldispatcher.UrlDispatcher#add_resource :param location: :return: ] variable[pattern] assign[=] constant[] variable[formatter] assign[=] constant[] variable[canon] assign[=] constant[] for taget[name[part]] in starred[call[name[cls].ROUTE_RE.split, parameter[name[location]]]] begin[:] variable[match] assign[=] call[name[cls].DYN.match, parameter[name[part]]] if name[match] begin[:] <ast.AugAssign object at 0x7da1b0b50100> <ast.AugAssign object at 0x7da1b0b82d40> continue variable[match] assign[=] call[name[cls].DYN_WITH_RE.match, parameter[name[part]]] if name[match] begin[:] <ast.AugAssign object at 0x7da1b0b824a0> <ast.AugAssign object at 0x7da1b0b81990> <ast.AugAssign object at 0x7da1b0b819c0> continue if <ast.BoolOp object at 0x7da1b0b82380> begin[:] <ast.Raise object at 0x7da1b0b82fb0> <ast.AugAssign object at 0x7da1b0b81750> <ast.AugAssign object at 0x7da1b0b81210> <ast.AugAssign object at 0x7da1b0b80a00> <ast.Try object at 0x7da1b0b839a0>
keyword[def] identifier[get_pattern_formatter] ( identifier[cls] , identifier[location] ): literal[string] identifier[pattern] = literal[string] identifier[formatter] = literal[string] identifier[canon] = literal[string] keyword[for] identifier[part] keyword[in] identifier[cls] . identifier[ROUTE_RE] . identifier[split] ( identifier[location] ): identifier[match] = identifier[cls] . identifier[DYN] . identifier[match] ( identifier[part] ) keyword[if] identifier[match] : identifier[pattern] += literal[string] . identifier[format] ( identifier[match] . identifier[group] ( literal[string] ), identifier[cls] . identifier[GOOD] ) identifier[formatter] += literal[string] + identifier[match] . identifier[group] ( literal[string] )+ literal[string] keyword[continue] identifier[match] = identifier[cls] . identifier[DYN_WITH_RE] . identifier[match] ( identifier[part] ) keyword[if] identifier[match] : identifier[pattern] += literal[string] . identifier[format] (** identifier[match] . identifier[groupdict] ()) identifier[formatter] += literal[string] + identifier[match] . identifier[group] ( literal[string] )+ literal[string] identifier[canon] += identifier[match] . identifier[group] ( literal[string] ) keyword[continue] keyword[if] literal[string] keyword[in] identifier[part] keyword[or] literal[string] keyword[in] identifier[part] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[location] , identifier[part] )) identifier[formatter] += identifier[part] identifier[pattern] += identifier[re] . identifier[escape] ( identifier[part] ) identifier[canon] += identifier[part] keyword[try] : keyword[return] identifier[re] . identifier[compile] ( identifier[pattern] ), identifier[formatter] , identifier[canon] keyword[except] identifier[re] . identifier[error] keyword[as] identifier[exc] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[pattern] , identifier[exc] )) keyword[from] keyword[None]
def get_pattern_formatter(cls, location): """ Fragment from aiohttp.web_urldispatcher.UrlDispatcher#add_resource :param location: :return: """ pattern = '' formatter = '' canon = '' for part in cls.ROUTE_RE.split(location): match = cls.DYN.match(part) if match: pattern += '(?P<{}>{})'.format(match.group('var'), cls.GOOD) formatter += '{' + match.group('var') + '}' continue # depends on [control=['if'], data=[]] match = cls.DYN_WITH_RE.match(part) if match: pattern += '(?P<{var}>{re})'.format(**match.groupdict()) formatter += '{' + match.group('var') + '}' canon += match.group('re') continue # depends on [control=['if'], data=[]] if '{' in part or '}' in part: raise ValueError("Invalid path '{}'['{}']".format(location, part)) # depends on [control=['if'], data=[]] formatter += part pattern += re.escape(part) canon += part # depends on [control=['for'], data=['part']] try: return (re.compile(pattern), formatter, canon) # depends on [control=['try'], data=[]] except re.error as exc: raise ValueError("Bad pattern '{}': {}".format(pattern, exc)) from None # depends on [control=['except'], data=['exc']]
def file_source(self, filename): """ Return a list of namedtuple `Line` for each line of code found in the source file with the given `filename`. """ lines = [] try: with self.filesystem.open(filename) as f: line_statuses = dict(self.line_statuses(filename)) for lineno, source in enumerate(f, start=1): line_status = line_statuses.get(lineno) line = Line(lineno, source, line_status, None) lines.append(line) except self.filesystem.FileNotFound as file_not_found: lines.append( Line(0, '%s not found' % file_not_found.path, None, None) ) return lines
def function[file_source, parameter[self, filename]]: constant[ Return a list of namedtuple `Line` for each line of code found in the source file with the given `filename`. ] variable[lines] assign[=] list[[]] <ast.Try object at 0x7da1b0f3e860> return[name[lines]]
keyword[def] identifier[file_source] ( identifier[self] , identifier[filename] ): literal[string] identifier[lines] =[] keyword[try] : keyword[with] identifier[self] . identifier[filesystem] . identifier[open] ( identifier[filename] ) keyword[as] identifier[f] : identifier[line_statuses] = identifier[dict] ( identifier[self] . identifier[line_statuses] ( identifier[filename] )) keyword[for] identifier[lineno] , identifier[source] keyword[in] identifier[enumerate] ( identifier[f] , identifier[start] = literal[int] ): identifier[line_status] = identifier[line_statuses] . identifier[get] ( identifier[lineno] ) identifier[line] = identifier[Line] ( identifier[lineno] , identifier[source] , identifier[line_status] , keyword[None] ) identifier[lines] . identifier[append] ( identifier[line] ) keyword[except] identifier[self] . identifier[filesystem] . identifier[FileNotFound] keyword[as] identifier[file_not_found] : identifier[lines] . identifier[append] ( identifier[Line] ( literal[int] , literal[string] % identifier[file_not_found] . identifier[path] , keyword[None] , keyword[None] ) ) keyword[return] identifier[lines]
def file_source(self, filename): """ Return a list of namedtuple `Line` for each line of code found in the source file with the given `filename`. """ lines = [] try: with self.filesystem.open(filename) as f: line_statuses = dict(self.line_statuses(filename)) for (lineno, source) in enumerate(f, start=1): line_status = line_statuses.get(lineno) line = Line(lineno, source, line_status, None) lines.append(line) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]] except self.filesystem.FileNotFound as file_not_found: lines.append(Line(0, '%s not found' % file_not_found.path, None, None)) # depends on [control=['except'], data=['file_not_found']] return lines
def run_with_tornado(self): """ runs the tornado/websockets based test server """ from zengine.tornado_server.server import runserver runserver(self.manager.args.addr, int(self.manager.args.port))
def function[run_with_tornado, parameter[self]]: constant[ runs the tornado/websockets based test server ] from relative_module[zengine.tornado_server.server] import module[runserver] call[name[runserver], parameter[name[self].manager.args.addr, call[name[int], parameter[name[self].manager.args.port]]]]
keyword[def] identifier[run_with_tornado] ( identifier[self] ): literal[string] keyword[from] identifier[zengine] . identifier[tornado_server] . identifier[server] keyword[import] identifier[runserver] identifier[runserver] ( identifier[self] . identifier[manager] . identifier[args] . identifier[addr] , identifier[int] ( identifier[self] . identifier[manager] . identifier[args] . identifier[port] ))
def run_with_tornado(self): """ runs the tornado/websockets based test server """ from zengine.tornado_server.server import runserver runserver(self.manager.args.addr, int(self.manager.args.port))
def GetLayerFromFeatureService(self, fs, layerName="", returnURLOnly=False): """Obtains a layer from a feature service by feature service reference. Args: fs (FeatureService): The feature service from which to obtain the layer. layerName (str): The name of the layer. Defaults to ``""``. returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``. Returns: When ``returnURLOnly`` is ``True``, the URL of the layer is returned. When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. """ layers = None table = None layer = None sublayer = None try: layers = fs.layers if (layers is None or len(layers) == 0) and fs.url is not None: fs = arcrest.ags.FeatureService( url=fs.url) layers = fs.layers if layers is not None: for layer in layers: if layer.name == layerName: if returnURLOnly: return fs.url + '/' + str(layer.id) else: return layer elif not layer.subLayers is None: for sublayer in layer.subLayers: if sublayer == layerName: return sublayer if fs.tables is not None: for table in fs.tables: if table.name == layerName: if returnURLOnly: return fs.url + '/' + str(layer.id) else: return table return None except: line, filename, synerror = trace() raise common.ArcRestHelperError({ "function": "GetLayerFromFeatureService", "line": line, "filename": filename, "synerror": synerror, } ) finally: layers = None table = None layer = None sublayer = None del layers del table del layer del sublayer gc.collect()
def function[GetLayerFromFeatureService, parameter[self, fs, layerName, returnURLOnly]]: constant[Obtains a layer from a feature service by feature service reference. Args: fs (FeatureService): The feature service from which to obtain the layer. layerName (str): The name of the layer. Defaults to ``""``. returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``. Returns: When ``returnURLOnly`` is ``True``, the URL of the layer is returned. When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. ] variable[layers] assign[=] constant[None] variable[table] assign[=] constant[None] variable[layer] assign[=] constant[None] variable[sublayer] assign[=] constant[None] <ast.Try object at 0x7da1b124f4c0>
keyword[def] identifier[GetLayerFromFeatureService] ( identifier[self] , identifier[fs] , identifier[layerName] = literal[string] , identifier[returnURLOnly] = keyword[False] ): literal[string] identifier[layers] = keyword[None] identifier[table] = keyword[None] identifier[layer] = keyword[None] identifier[sublayer] = keyword[None] keyword[try] : identifier[layers] = identifier[fs] . identifier[layers] keyword[if] ( identifier[layers] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[layers] )== literal[int] ) keyword[and] identifier[fs] . identifier[url] keyword[is] keyword[not] keyword[None] : identifier[fs] = identifier[arcrest] . identifier[ags] . identifier[FeatureService] ( identifier[url] = identifier[fs] . identifier[url] ) identifier[layers] = identifier[fs] . identifier[layers] keyword[if] identifier[layers] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[layer] keyword[in] identifier[layers] : keyword[if] identifier[layer] . identifier[name] == identifier[layerName] : keyword[if] identifier[returnURLOnly] : keyword[return] identifier[fs] . identifier[url] + literal[string] + identifier[str] ( identifier[layer] . identifier[id] ) keyword[else] : keyword[return] identifier[layer] keyword[elif] keyword[not] identifier[layer] . identifier[subLayers] keyword[is] keyword[None] : keyword[for] identifier[sublayer] keyword[in] identifier[layer] . identifier[subLayers] : keyword[if] identifier[sublayer] == identifier[layerName] : keyword[return] identifier[sublayer] keyword[if] identifier[fs] . identifier[tables] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[table] keyword[in] identifier[fs] . identifier[tables] : keyword[if] identifier[table] . identifier[name] == identifier[layerName] : keyword[if] identifier[returnURLOnly] : keyword[return] identifier[fs] . identifier[url] + literal[string] + identifier[str] ( identifier[layer] . 
identifier[id] ) keyword[else] : keyword[return] identifier[table] keyword[return] keyword[None] keyword[except] : identifier[line] , identifier[filename] , identifier[synerror] = identifier[trace] () keyword[raise] identifier[common] . identifier[ArcRestHelperError] ({ literal[string] : literal[string] , literal[string] : identifier[line] , literal[string] : identifier[filename] , literal[string] : identifier[synerror] , } ) keyword[finally] : identifier[layers] = keyword[None] identifier[table] = keyword[None] identifier[layer] = keyword[None] identifier[sublayer] = keyword[None] keyword[del] identifier[layers] keyword[del] identifier[table] keyword[del] identifier[layer] keyword[del] identifier[sublayer] identifier[gc] . identifier[collect] ()
def GetLayerFromFeatureService(self, fs, layerName='', returnURLOnly=False): """Obtains a layer from a feature service by feature service reference. Args: fs (FeatureService): The feature service from which to obtain the layer. layerName (str): The name of the layer. Defaults to ``""``. returnURLOnly (bool): A boolean value to return the URL of the layer. Defaults to ``False``. Returns: When ``returnURLOnly`` is ``True``, the URL of the layer is returned. When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`. """ layers = None table = None layer = None sublayer = None try: layers = fs.layers if (layers is None or len(layers) == 0) and fs.url is not None: fs = arcrest.ags.FeatureService(url=fs.url) layers = fs.layers # depends on [control=['if'], data=[]] if layers is not None: for layer in layers: if layer.name == layerName: if returnURLOnly: return fs.url + '/' + str(layer.id) # depends on [control=['if'], data=[]] else: return layer # depends on [control=['if'], data=[]] elif not layer.subLayers is None: for sublayer in layer.subLayers: if sublayer == layerName: return sublayer # depends on [control=['if'], data=['sublayer']] # depends on [control=['for'], data=['sublayer']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['layer']] # depends on [control=['if'], data=['layers']] if fs.tables is not None: for table in fs.tables: if table.name == layerName: if returnURLOnly: return fs.url + '/' + str(layer.id) # depends on [control=['if'], data=[]] else: return table # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['table']] # depends on [control=['if'], data=[]] return None # depends on [control=['try'], data=[]] except: (line, filename, synerror) = trace() raise common.ArcRestHelperError({'function': 'GetLayerFromFeatureService', 'line': line, 'filename': filename, 'synerror': synerror}) # depends on [control=['except'], data=[]] 
finally: layers = None table = None layer = None sublayer = None del layers del table del layer del sublayer gc.collect()
def env_float(name: str, required: bool=False, default: Union[Type[empty], float]=empty) -> float: """Pulls an environment variable out of the environment and casts it to an float. If the name is not present in the environment and no default is specified then a ``ValueError`` will be raised. Similarly, if the environment value is not castable to an float, a ``ValueError`` will be raised. :param name: The name of the environment variable be pulled :type name: str :param required: Whether the environment variable is required. If ``True`` and the variable is not present, a ``KeyError`` is raised. :type required: bool :param default: The value to return if the environment variable is not present. (Providing a default alongside setting ``required=True`` will raise a ``ValueError``) :type default: bool """ value = get_env_value(name, required=required, default=default) if value is empty: raise ValueError( "`env_float` requires either a default value to be specified, or for " "the variable to be present in the environment" ) return float(value)
def function[env_float, parameter[name, required, default]]: constant[Pulls an environment variable out of the environment and casts it to an float. If the name is not present in the environment and no default is specified then a ``ValueError`` will be raised. Similarly, if the environment value is not castable to an float, a ``ValueError`` will be raised. :param name: The name of the environment variable be pulled :type name: str :param required: Whether the environment variable is required. If ``True`` and the variable is not present, a ``KeyError`` is raised. :type required: bool :param default: The value to return if the environment variable is not present. (Providing a default alongside setting ``required=True`` will raise a ``ValueError``) :type default: bool ] variable[value] assign[=] call[name[get_env_value], parameter[name[name]]] if compare[name[value] is name[empty]] begin[:] <ast.Raise object at 0x7da1b17f8ac0> return[call[name[float], parameter[name[value]]]]
keyword[def] identifier[env_float] ( identifier[name] : identifier[str] , identifier[required] : identifier[bool] = keyword[False] , identifier[default] : identifier[Union] [ identifier[Type] [ identifier[empty] ], identifier[float] ]= identifier[empty] )-> identifier[float] : literal[string] identifier[value] = identifier[get_env_value] ( identifier[name] , identifier[required] = identifier[required] , identifier[default] = identifier[default] ) keyword[if] identifier[value] keyword[is] identifier[empty] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[return] identifier[float] ( identifier[value] )
def env_float(name: str, required: bool=False, default: Union[Type[empty], float]=empty) -> float: """Pulls an environment variable out of the environment and casts it to an float. If the name is not present in the environment and no default is specified then a ``ValueError`` will be raised. Similarly, if the environment value is not castable to an float, a ``ValueError`` will be raised. :param name: The name of the environment variable be pulled :type name: str :param required: Whether the environment variable is required. If ``True`` and the variable is not present, a ``KeyError`` is raised. :type required: bool :param default: The value to return if the environment variable is not present. (Providing a default alongside setting ``required=True`` will raise a ``ValueError``) :type default: bool """ value = get_env_value(name, required=required, default=default) if value is empty: raise ValueError('`env_float` requires either a default value to be specified, or for the variable to be present in the environment') # depends on [control=['if'], data=[]] return float(value)
def tag_path(cls, project, incident, tag): """Return a fully-qualified tag string.""" return google.api_core.path_template.expand( "projects/{project}/incidents/{incident}/tags/{tag}", project=project, incident=incident, tag=tag, )
def function[tag_path, parameter[cls, project, incident, tag]]: constant[Return a fully-qualified tag string.] return[call[name[google].api_core.path_template.expand, parameter[constant[projects/{project}/incidents/{incident}/tags/{tag}]]]]
keyword[def] identifier[tag_path] ( identifier[cls] , identifier[project] , identifier[incident] , identifier[tag] ): literal[string] keyword[return] identifier[google] . identifier[api_core] . identifier[path_template] . identifier[expand] ( literal[string] , identifier[project] = identifier[project] , identifier[incident] = identifier[incident] , identifier[tag] = identifier[tag] , )
def tag_path(cls, project, incident, tag): """Return a fully-qualified tag string.""" return google.api_core.path_template.expand('projects/{project}/incidents/{incident}/tags/{tag}', project=project, incident=incident, tag=tag)
def retrieve(self, uri, payload, headers): """Retrieve a raw item from the archive. The method will return the `data` content corresponding to the hascode derived from the given parameters. :param uri: request URI :param payload: request payload :param headers: request headers :returns: the archived data :raises ArchiveError: when an error occurs retrieving data """ hashcode = self.make_hashcode(uri, payload, headers) logger.debug("Retrieving entry %s with %s %s %s in %s", hashcode, uri, payload, headers, self.archive_path) self._db.row_factory = sqlite3.Row try: cursor = self._db.cursor() select_stmt = "SELECT data " \ "FROM " + self.ARCHIVE_TABLE + " " \ "WHERE hashcode = ?" cursor.execute(select_stmt, (hashcode,)) row = cursor.fetchone() cursor.close() except sqlite3.DatabaseError as e: msg = "data retrieval error; cause: %s" % str(e) raise ArchiveError(cause=msg) if row: found = pickle.loads(row['data']) else: msg = "entry %s not found in archive %s" % (hashcode, self.archive_path) raise ArchiveError(cause=msg) return found
def function[retrieve, parameter[self, uri, payload, headers]]: constant[Retrieve a raw item from the archive. The method will return the `data` content corresponding to the hascode derived from the given parameters. :param uri: request URI :param payload: request payload :param headers: request headers :returns: the archived data :raises ArchiveError: when an error occurs retrieving data ] variable[hashcode] assign[=] call[name[self].make_hashcode, parameter[name[uri], name[payload], name[headers]]] call[name[logger].debug, parameter[constant[Retrieving entry %s with %s %s %s in %s], name[hashcode], name[uri], name[payload], name[headers], name[self].archive_path]] name[self]._db.row_factory assign[=] name[sqlite3].Row <ast.Try object at 0x7da1b0285ba0> if name[row] begin[:] variable[found] assign[=] call[name[pickle].loads, parameter[call[name[row]][constant[data]]]] return[name[found]]
keyword[def] identifier[retrieve] ( identifier[self] , identifier[uri] , identifier[payload] , identifier[headers] ): literal[string] identifier[hashcode] = identifier[self] . identifier[make_hashcode] ( identifier[uri] , identifier[payload] , identifier[headers] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[hashcode] , identifier[uri] , identifier[payload] , identifier[headers] , identifier[self] . identifier[archive_path] ) identifier[self] . identifier[_db] . identifier[row_factory] = identifier[sqlite3] . identifier[Row] keyword[try] : identifier[cursor] = identifier[self] . identifier[_db] . identifier[cursor] () identifier[select_stmt] = literal[string] literal[string] + identifier[self] . identifier[ARCHIVE_TABLE] + literal[string] literal[string] identifier[cursor] . identifier[execute] ( identifier[select_stmt] ,( identifier[hashcode] ,)) identifier[row] = identifier[cursor] . identifier[fetchone] () identifier[cursor] . identifier[close] () keyword[except] identifier[sqlite3] . identifier[DatabaseError] keyword[as] identifier[e] : identifier[msg] = literal[string] % identifier[str] ( identifier[e] ) keyword[raise] identifier[ArchiveError] ( identifier[cause] = identifier[msg] ) keyword[if] identifier[row] : identifier[found] = identifier[pickle] . identifier[loads] ( identifier[row] [ literal[string] ]) keyword[else] : identifier[msg] = literal[string] %( identifier[hashcode] , identifier[self] . identifier[archive_path] ) keyword[raise] identifier[ArchiveError] ( identifier[cause] = identifier[msg] ) keyword[return] identifier[found]
def retrieve(self, uri, payload, headers): """Retrieve a raw item from the archive. The method will return the `data` content corresponding to the hascode derived from the given parameters. :param uri: request URI :param payload: request payload :param headers: request headers :returns: the archived data :raises ArchiveError: when an error occurs retrieving data """ hashcode = self.make_hashcode(uri, payload, headers) logger.debug('Retrieving entry %s with %s %s %s in %s', hashcode, uri, payload, headers, self.archive_path) self._db.row_factory = sqlite3.Row try: cursor = self._db.cursor() select_stmt = 'SELECT data FROM ' + self.ARCHIVE_TABLE + ' WHERE hashcode = ?' cursor.execute(select_stmt, (hashcode,)) row = cursor.fetchone() cursor.close() # depends on [control=['try'], data=[]] except sqlite3.DatabaseError as e: msg = 'data retrieval error; cause: %s' % str(e) raise ArchiveError(cause=msg) # depends on [control=['except'], data=['e']] if row: found = pickle.loads(row['data']) # depends on [control=['if'], data=[]] else: msg = 'entry %s not found in archive %s' % (hashcode, self.archive_path) raise ArchiveError(cause=msg) return found
def split(self): """Split the phase. When a phase is exhausted, it gets split into a pair of phases to be further solved. The split happens like so: 1) Select the first unsolved package scope. 2) Find some common dependency in the first N variants of the scope. 3) Split the scope into two: [:N] and [N:]. 4) Create two copies of the phase, containing each half of the split scope. The result of this split is that we have a new phase (the first phase), which contains a package scope with a common dependency. This dependency can now be intersected with the current resolve, thus progressing it. Returns: A 2-tuple of _ResolvePhase objects, where the first phase is the best contender for resolving. """ assert(self.status == SolverStatus.exhausted) scopes = [] next_scopes = [] split_i = None for i, scope in enumerate(self.scopes): if split_i is None: r = scope.split() if r is not None: scope_, next_scope = r scopes.append(scope_) next_scopes.append(next_scope) split_i = i continue scopes.append(scope) next_scopes.append(scope) assert split_i is not None phase = copy.copy(self) phase.scopes = scopes phase.status = SolverStatus.pending phase.changed_scopes_i = set([split_i]) # because a scope was narrowed by a split, other scopes need to be # reduced against it #for i in range(len(phase.scopes)): # if i != split_i: # phase.pending_reducts.add((i, split_i)) next_phase = copy.copy(phase) next_phase.scopes = next_scopes return (phase, next_phase)
def function[split, parameter[self]]: constant[Split the phase. When a phase is exhausted, it gets split into a pair of phases to be further solved. The split happens like so: 1) Select the first unsolved package scope. 2) Find some common dependency in the first N variants of the scope. 3) Split the scope into two: [:N] and [N:]. 4) Create two copies of the phase, containing each half of the split scope. The result of this split is that we have a new phase (the first phase), which contains a package scope with a common dependency. This dependency can now be intersected with the current resolve, thus progressing it. Returns: A 2-tuple of _ResolvePhase objects, where the first phase is the best contender for resolving. ] assert[compare[name[self].status equal[==] name[SolverStatus].exhausted]] variable[scopes] assign[=] list[[]] variable[next_scopes] assign[=] list[[]] variable[split_i] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da207f98760>, <ast.Name object at 0x7da207f9b700>]]] in starred[call[name[enumerate], parameter[name[self].scopes]]] begin[:] if compare[name[split_i] is constant[None]] begin[:] variable[r] assign[=] call[name[scope].split, parameter[]] if compare[name[r] is_not constant[None]] begin[:] <ast.Tuple object at 0x7da207f98e20> assign[=] name[r] call[name[scopes].append, parameter[name[scope_]]] call[name[next_scopes].append, parameter[name[next_scope]]] variable[split_i] assign[=] name[i] continue call[name[scopes].append, parameter[name[scope]]] call[name[next_scopes].append, parameter[name[scope]]] assert[compare[name[split_i] is_not constant[None]]] variable[phase] assign[=] call[name[copy].copy, parameter[name[self]]] name[phase].scopes assign[=] name[scopes] name[phase].status assign[=] name[SolverStatus].pending name[phase].changed_scopes_i assign[=] call[name[set], parameter[list[[<ast.Name object at 0x7da1b17ec1c0>]]]] variable[next_phase] assign[=] call[name[copy].copy, parameter[name[phase]]] 
name[next_phase].scopes assign[=] name[next_scopes] return[tuple[[<ast.Name object at 0x7da1b17efc70>, <ast.Name object at 0x7da1b17efa90>]]]
keyword[def] identifier[split] ( identifier[self] ): literal[string] keyword[assert] ( identifier[self] . identifier[status] == identifier[SolverStatus] . identifier[exhausted] ) identifier[scopes] =[] identifier[next_scopes] =[] identifier[split_i] = keyword[None] keyword[for] identifier[i] , identifier[scope] keyword[in] identifier[enumerate] ( identifier[self] . identifier[scopes] ): keyword[if] identifier[split_i] keyword[is] keyword[None] : identifier[r] = identifier[scope] . identifier[split] () keyword[if] identifier[r] keyword[is] keyword[not] keyword[None] : identifier[scope_] , identifier[next_scope] = identifier[r] identifier[scopes] . identifier[append] ( identifier[scope_] ) identifier[next_scopes] . identifier[append] ( identifier[next_scope] ) identifier[split_i] = identifier[i] keyword[continue] identifier[scopes] . identifier[append] ( identifier[scope] ) identifier[next_scopes] . identifier[append] ( identifier[scope] ) keyword[assert] identifier[split_i] keyword[is] keyword[not] keyword[None] identifier[phase] = identifier[copy] . identifier[copy] ( identifier[self] ) identifier[phase] . identifier[scopes] = identifier[scopes] identifier[phase] . identifier[status] = identifier[SolverStatus] . identifier[pending] identifier[phase] . identifier[changed_scopes_i] = identifier[set] ([ identifier[split_i] ]) identifier[next_phase] = identifier[copy] . identifier[copy] ( identifier[phase] ) identifier[next_phase] . identifier[scopes] = identifier[next_scopes] keyword[return] ( identifier[phase] , identifier[next_phase] )
def split(self): """Split the phase. When a phase is exhausted, it gets split into a pair of phases to be further solved. The split happens like so: 1) Select the first unsolved package scope. 2) Find some common dependency in the first N variants of the scope. 3) Split the scope into two: [:N] and [N:]. 4) Create two copies of the phase, containing each half of the split scope. The result of this split is that we have a new phase (the first phase), which contains a package scope with a common dependency. This dependency can now be intersected with the current resolve, thus progressing it. Returns: A 2-tuple of _ResolvePhase objects, where the first phase is the best contender for resolving. """ assert self.status == SolverStatus.exhausted scopes = [] next_scopes = [] split_i = None for (i, scope) in enumerate(self.scopes): if split_i is None: r = scope.split() if r is not None: (scope_, next_scope) = r scopes.append(scope_) next_scopes.append(next_scope) split_i = i continue # depends on [control=['if'], data=['r']] # depends on [control=['if'], data=['split_i']] scopes.append(scope) next_scopes.append(scope) # depends on [control=['for'], data=[]] assert split_i is not None phase = copy.copy(self) phase.scopes = scopes phase.status = SolverStatus.pending phase.changed_scopes_i = set([split_i]) # because a scope was narrowed by a split, other scopes need to be # reduced against it #for i in range(len(phase.scopes)): # if i != split_i: # phase.pending_reducts.add((i, split_i)) next_phase = copy.copy(phase) next_phase.scopes = next_scopes return (phase, next_phase)
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None): '''Van der Waerden's test for pairwise multiple comparisons between group levels. See references for additional information [1]_, [2]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. sort : bool, optional If True, sort data by block and group columns. p_adjust : str, optional Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are: 'bonferroni' : one-step correction 'sidak' : one-step correction 'holm-sidak' : step-down method using Sidak adjustments 'holm' : step-down method using Bonferroni adjustments 'simes-hochberg' : step-up method (independent) 'hommel' : closed method based on Simes tests (non-negative) 'fdr_bh' : Benjamini/Hochberg (non-negative) 'fdr_by' : Benjamini/Yekutieli (negative) 'fdr_tsbh' : two stage fdr correction (non-negative) 'fdr_tsbky' : two stage fdr correction (non-negative) Returns ------- result : pandas DataFrame P values. Notes ----- For one-factorial designs with samples that do not meet the assumptions for one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using normal scores can be employed. Provided that significant differences were detected by this global test, one may be interested in applying post hoc tests according to van der Waerden for pairwise multiple comparisons of the group levels. There is no tie correction applied in this function. 
References ---------- .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory. .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and their power, Indagationes Mathematicae, 14, 453-458. Examples -------- >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']]) >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1) ''' x, _val_col, _group_col = __convert_to_df(a, val_col, group_col) if not sort: x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True) x.sort_values(by=[_group_col], ascending=True, inplace=True) groups = x[_group_col].unique() n = x[_val_col].size k = groups.size r = ss.rankdata(x[_val_col]) x['z_scores'] = ss.norm.ppf(r / (n + 1)) aj = x.groupby(_group_col)['z_scores'].sum() nj = x.groupby(_group_col)['z_scores'].count() s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum() sts = (1. / s2) * np.sum(aj ** 2. / nj) param = k - 1 A = aj / nj vs = np.zeros((k, k), dtype=np.float) combs = it.combinations(range(k), 2) tri_upper = np.triu_indices(vs.shape[0], 1) tri_lower = np.tril_indices(vs.shape[0], -1) vs[:,:] = 0 def compare_stats(i, j): dif = np.abs(A[groups[i]] - A[groups[j]]) B = 1. / nj[groups[i]] + 1. / nj[groups[j]] tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B) pval = 2. * ss.t.sf(np.abs(tval), df = n - k) return pval for i, j in combs: vs[i, j] = compare_stats(i, j) if p_adjust: vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1] vs[tri_lower] = vs.T[tri_lower] np.fill_diagonal(vs, -1) return DataFrame(vs, index=groups, columns=groups)
def function[posthoc_vanwaerden, parameter[a, val_col, group_col, sort, p_adjust]]: constant[Van der Waerden's test for pairwise multiple comparisons between group levels. See references for additional information [1]_, [2]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. sort : bool, optional If True, sort data by block and group columns. p_adjust : str, optional Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are: 'bonferroni' : one-step correction 'sidak' : one-step correction 'holm-sidak' : step-down method using Sidak adjustments 'holm' : step-down method using Bonferroni adjustments 'simes-hochberg' : step-up method (independent) 'hommel' : closed method based on Simes tests (non-negative) 'fdr_bh' : Benjamini/Hochberg (non-negative) 'fdr_by' : Benjamini/Yekutieli (negative) 'fdr_tsbh' : two stage fdr correction (non-negative) 'fdr_tsbky' : two stage fdr correction (non-negative) Returns ------- result : pandas DataFrame P values. Notes ----- For one-factorial designs with samples that do not meet the assumptions for one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using normal scores can be employed. Provided that significant differences were detected by this global test, one may be interested in applying post hoc tests according to van der Waerden for pairwise multiple comparisons of the group levels. There is no tie correction applied in this function. 
References ---------- .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory. .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and their power, Indagationes Mathematicae, 14, 453-458. Examples -------- >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']]) >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1) ] <ast.Tuple object at 0x7da1b12dab30> assign[=] call[name[__convert_to_df], parameter[name[a], name[val_col], name[group_col]]] if <ast.UnaryOp object at 0x7da1b12d9480> begin[:] call[name[x]][name[_group_col]] assign[=] call[name[Categorical], parameter[call[name[x]][name[_group_col]]]] call[name[x].sort_values, parameter[]] variable[groups] assign[=] call[call[name[x]][name[_group_col]].unique, parameter[]] variable[n] assign[=] call[name[x]][name[_val_col]].size variable[k] assign[=] name[groups].size variable[r] assign[=] call[name[ss].rankdata, parameter[call[name[x]][name[_val_col]]]] call[name[x]][constant[z_scores]] assign[=] call[name[ss].norm.ppf, parameter[binary_operation[name[r] / binary_operation[name[n] + constant[1]]]]] variable[aj] assign[=] call[call[call[name[x].groupby, parameter[name[_group_col]]]][constant[z_scores]].sum, parameter[]] variable[nj] assign[=] call[call[call[name[x].groupby, parameter[name[_group_col]]]][constant[z_scores]].count, parameter[]] variable[s2] assign[=] binary_operation[binary_operation[constant[1.0] / binary_operation[name[n] - constant[1.0]]] * call[binary_operation[call[name[x]][constant[z_scores]] ** constant[2.0]].sum, parameter[]]] variable[sts] assign[=] binary_operation[binary_operation[constant[1.0] / name[s2]] * call[name[np].sum, parameter[binary_operation[binary_operation[name[aj] ** constant[2.0]] / name[nj]]]]] variable[param] assign[=] binary_operation[name[k] - constant[1]] variable[A] assign[=] binary_operation[name[aj] / name[nj]] variable[vs] assign[=] call[name[np].zeros, 
parameter[tuple[[<ast.Name object at 0x7da1b1195930>, <ast.Name object at 0x7da1b1195ff0>]]]] variable[combs] assign[=] call[name[it].combinations, parameter[call[name[range], parameter[name[k]]], constant[2]]] variable[tri_upper] assign[=] call[name[np].triu_indices, parameter[call[name[vs].shape][constant[0]], constant[1]]] variable[tri_lower] assign[=] call[name[np].tril_indices, parameter[call[name[vs].shape][constant[0]], <ast.UnaryOp object at 0x7da18f00e5f0>]] call[name[vs]][tuple[[<ast.Slice object at 0x7da18f00e830>, <ast.Slice object at 0x7da18f00d6f0>]]] assign[=] constant[0] def function[compare_stats, parameter[i, j]]: variable[dif] assign[=] call[name[np].abs, parameter[binary_operation[call[name[A]][call[name[groups]][name[i]]] - call[name[A]][call[name[groups]][name[j]]]]]] variable[B] assign[=] binary_operation[binary_operation[constant[1.0] / call[name[nj]][call[name[groups]][name[i]]]] + binary_operation[constant[1.0] / call[name[nj]][call[name[groups]][name[j]]]]] variable[tval] assign[=] binary_operation[name[dif] / call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[name[s2] * binary_operation[binary_operation[name[n] - constant[1.0]] - name[sts]]] / binary_operation[name[n] - name[k]]] * name[B]]]]] variable[pval] assign[=] binary_operation[constant[2.0] * call[name[ss].t.sf, parameter[call[name[np].abs, parameter[name[tval]]]]]] return[name[pval]] for taget[tuple[[<ast.Name object at 0x7da1b12b4700>, <ast.Name object at 0x7da1b12b5e10>]]] in starred[name[combs]] begin[:] call[name[vs]][tuple[[<ast.Name object at 0x7da1b12b5ed0>, <ast.Name object at 0x7da1b12b6020>]]] assign[=] call[name[compare_stats], parameter[name[i], name[j]]] if name[p_adjust] begin[:] call[name[vs]][name[tri_upper]] assign[=] call[call[name[multipletests], parameter[call[name[vs]][name[tri_upper]]]]][constant[1]] call[name[vs]][name[tri_lower]] assign[=] call[name[vs].T][name[tri_lower]] call[name[np].fill_diagonal, parameter[name[vs], 
<ast.UnaryOp object at 0x7da1b12b4730>]] return[call[name[DataFrame], parameter[name[vs]]]]
keyword[def] identifier[posthoc_vanwaerden] ( identifier[a] , identifier[val_col] = keyword[None] , identifier[group_col] = keyword[None] , identifier[sort] = keyword[True] , identifier[p_adjust] = keyword[None] ): literal[string] identifier[x] , identifier[_val_col] , identifier[_group_col] = identifier[__convert_to_df] ( identifier[a] , identifier[val_col] , identifier[group_col] ) keyword[if] keyword[not] identifier[sort] : identifier[x] [ identifier[_group_col] ]= identifier[Categorical] ( identifier[x] [ identifier[_group_col] ], identifier[categories] = identifier[x] [ identifier[_group_col] ]. identifier[unique] (), identifier[ordered] = keyword[True] ) identifier[x] . identifier[sort_values] ( identifier[by] =[ identifier[_group_col] ], identifier[ascending] = keyword[True] , identifier[inplace] = keyword[True] ) identifier[groups] = identifier[x] [ identifier[_group_col] ]. identifier[unique] () identifier[n] = identifier[x] [ identifier[_val_col] ]. identifier[size] identifier[k] = identifier[groups] . identifier[size] identifier[r] = identifier[ss] . identifier[rankdata] ( identifier[x] [ identifier[_val_col] ]) identifier[x] [ literal[string] ]= identifier[ss] . identifier[norm] . identifier[ppf] ( identifier[r] /( identifier[n] + literal[int] )) identifier[aj] = identifier[x] . identifier[groupby] ( identifier[_group_col] )[ literal[string] ]. identifier[sum] () identifier[nj] = identifier[x] . identifier[groupby] ( identifier[_group_col] )[ literal[string] ]. identifier[count] () identifier[s2] =( literal[int] /( identifier[n] - literal[int] ))*( identifier[x] [ literal[string] ]** literal[int] ). identifier[sum] () identifier[sts] =( literal[int] / identifier[s2] )* identifier[np] . identifier[sum] ( identifier[aj] ** literal[int] / identifier[nj] ) identifier[param] = identifier[k] - literal[int] identifier[A] = identifier[aj] / identifier[nj] identifier[vs] = identifier[np] . 
identifier[zeros] (( identifier[k] , identifier[k] ), identifier[dtype] = identifier[np] . identifier[float] ) identifier[combs] = identifier[it] . identifier[combinations] ( identifier[range] ( identifier[k] ), literal[int] ) identifier[tri_upper] = identifier[np] . identifier[triu_indices] ( identifier[vs] . identifier[shape] [ literal[int] ], literal[int] ) identifier[tri_lower] = identifier[np] . identifier[tril_indices] ( identifier[vs] . identifier[shape] [ literal[int] ],- literal[int] ) identifier[vs] [:,:]= literal[int] keyword[def] identifier[compare_stats] ( identifier[i] , identifier[j] ): identifier[dif] = identifier[np] . identifier[abs] ( identifier[A] [ identifier[groups] [ identifier[i] ]]- identifier[A] [ identifier[groups] [ identifier[j] ]]) identifier[B] = literal[int] / identifier[nj] [ identifier[groups] [ identifier[i] ]]+ literal[int] / identifier[nj] [ identifier[groups] [ identifier[j] ]] identifier[tval] = identifier[dif] / identifier[np] . identifier[sqrt] ( identifier[s2] *( identifier[n] - literal[int] - identifier[sts] )/( identifier[n] - identifier[k] )* identifier[B] ) identifier[pval] = literal[int] * identifier[ss] . identifier[t] . identifier[sf] ( identifier[np] . identifier[abs] ( identifier[tval] ), identifier[df] = identifier[n] - identifier[k] ) keyword[return] identifier[pval] keyword[for] identifier[i] , identifier[j] keyword[in] identifier[combs] : identifier[vs] [ identifier[i] , identifier[j] ]= identifier[compare_stats] ( identifier[i] , identifier[j] ) keyword[if] identifier[p_adjust] : identifier[vs] [ identifier[tri_upper] ]= identifier[multipletests] ( identifier[vs] [ identifier[tri_upper] ], identifier[method] = identifier[p_adjust] )[ literal[int] ] identifier[vs] [ identifier[tri_lower] ]= identifier[vs] . identifier[T] [ identifier[tri_lower] ] identifier[np] . 
identifier[fill_diagonal] ( identifier[vs] ,- literal[int] ) keyword[return] identifier[DataFrame] ( identifier[vs] , identifier[index] = identifier[groups] , identifier[columns] = identifier[groups] )
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=True, p_adjust=None): """Van der Waerden's test for pairwise multiple comparisons between group levels. See references for additional information [1]_, [2]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. sort : bool, optional If True, sort data by block and group columns. p_adjust : str, optional Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are: 'bonferroni' : one-step correction 'sidak' : one-step correction 'holm-sidak' : step-down method using Sidak adjustments 'holm' : step-down method using Bonferroni adjustments 'simes-hochberg' : step-up method (independent) 'hommel' : closed method based on Simes tests (non-negative) 'fdr_bh' : Benjamini/Hochberg (non-negative) 'fdr_by' : Benjamini/Yekutieli (negative) 'fdr_tsbh' : two stage fdr correction (non-negative) 'fdr_tsbky' : two stage fdr correction (non-negative) Returns ------- result : pandas DataFrame P values. Notes ----- For one-factorial designs with samples that do not meet the assumptions for one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using normal scores can be employed. Provided that significant differences were detected by this global test, one may be interested in applying post hoc tests according to van der Waerden for pairwise multiple comparisons of the group levels. There is no tie correction applied in this function. 
References ---------- .. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures, Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory. .. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and their power, Indagationes Mathematicae, 14, 453-458. Examples -------- >>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']]) >>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1) """ (x, _val_col, _group_col) = __convert_to_df(a, val_col, group_col) if not sort: x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True) # depends on [control=['if'], data=[]] x.sort_values(by=[_group_col], ascending=True, inplace=True) groups = x[_group_col].unique() n = x[_val_col].size k = groups.size r = ss.rankdata(x[_val_col]) x['z_scores'] = ss.norm.ppf(r / (n + 1)) aj = x.groupby(_group_col)['z_scores'].sum() nj = x.groupby(_group_col)['z_scores'].count() s2 = 1.0 / (n - 1.0) * (x['z_scores'] ** 2.0).sum() sts = 1.0 / s2 * np.sum(aj ** 2.0 / nj) param = k - 1 A = aj / nj vs = np.zeros((k, k), dtype=np.float) combs = it.combinations(range(k), 2) tri_upper = np.triu_indices(vs.shape[0], 1) tri_lower = np.tril_indices(vs.shape[0], -1) vs[:, :] = 0 def compare_stats(i, j): dif = np.abs(A[groups[i]] - A[groups[j]]) B = 1.0 / nj[groups[i]] + 1.0 / nj[groups[j]] tval = dif / np.sqrt(s2 * (n - 1.0 - sts) / (n - k) * B) pval = 2.0 * ss.t.sf(np.abs(tval), df=n - k) return pval for (i, j) in combs: vs[i, j] = compare_stats(i, j) # depends on [control=['for'], data=[]] if p_adjust: vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1] # depends on [control=['if'], data=[]] vs[tri_lower] = vs.T[tri_lower] np.fill_diagonal(vs, -1) return DataFrame(vs, index=groups, columns=groups)
def get_likes(self) -> Iterator['Profile']: """Iterate over all likes of the post. A :class:`Profile` instance of each likee is yielded.""" if self.likes == 0: # Avoid doing additional requests if there are no comments return likes_edges = self._field('edge_media_preview_like', 'edges') if self.likes == len(likes_edges): # If the Post's metadata already contains all likes, don't do GraphQL requests to obtain them yield from (Profile(self._context, like['node']) for like in likes_edges) return yield from (Profile(self._context, node) for node in self._context.graphql_node_list("1cb6ec562846122743b61e492c85999f", {'shortcode': self.shortcode}, 'https://www.instagram.com/p/' + self.shortcode + '/', lambda d: d['data']['shortcode_media']['edge_liked_by'], self._rhx_gis))
def function[get_likes, parameter[self]]: constant[Iterate over all likes of the post. A :class:`Profile` instance of each likee is yielded.] if compare[name[self].likes equal[==] constant[0]] begin[:] return[None] variable[likes_edges] assign[=] call[name[self]._field, parameter[constant[edge_media_preview_like], constant[edges]]] if compare[name[self].likes equal[==] call[name[len], parameter[name[likes_edges]]]] begin[:] <ast.YieldFrom object at 0x7da18f09e290> return[None] <ast.YieldFrom object at 0x7da18f09df90>
keyword[def] identifier[get_likes] ( identifier[self] )-> identifier[Iterator] [ literal[string] ]: literal[string] keyword[if] identifier[self] . identifier[likes] == literal[int] : keyword[return] identifier[likes_edges] = identifier[self] . identifier[_field] ( literal[string] , literal[string] ) keyword[if] identifier[self] . identifier[likes] == identifier[len] ( identifier[likes_edges] ): keyword[yield] keyword[from] ( identifier[Profile] ( identifier[self] . identifier[_context] , identifier[like] [ literal[string] ]) keyword[for] identifier[like] keyword[in] identifier[likes_edges] ) keyword[return] keyword[yield] keyword[from] ( identifier[Profile] ( identifier[self] . identifier[_context] , identifier[node] ) keyword[for] identifier[node] keyword[in] identifier[self] . identifier[_context] . identifier[graphql_node_list] ( literal[string] ,{ literal[string] : identifier[self] . identifier[shortcode] }, literal[string] + identifier[self] . identifier[shortcode] + literal[string] , keyword[lambda] identifier[d] : identifier[d] [ literal[string] ][ literal[string] ][ literal[string] ], identifier[self] . identifier[_rhx_gis] ))
def get_likes(self) -> Iterator['Profile']: """Iterate over all likes of the post. A :class:`Profile` instance of each likee is yielded.""" if self.likes == 0: # Avoid doing additional requests if there are no comments return # depends on [control=['if'], data=[]] likes_edges = self._field('edge_media_preview_like', 'edges') if self.likes == len(likes_edges): # If the Post's metadata already contains all likes, don't do GraphQL requests to obtain them yield from (Profile(self._context, like['node']) for like in likes_edges) return # depends on [control=['if'], data=[]] yield from (Profile(self._context, node) for node in self._context.graphql_node_list('1cb6ec562846122743b61e492c85999f', {'shortcode': self.shortcode}, 'https://www.instagram.com/p/' + self.shortcode + '/', lambda d: d['data']['shortcode_media']['edge_liked_by'], self._rhx_gis))
def change_filename(filehandle, meta): """Changes the filename to reflect the conversion from PDF to JPG. This method will preserve the original filename in the meta dictionary. """ filename = secure_filename(meta.get('filename', filehandle.filename)) basename, _ = os.path.splitext(filename) meta['original_filename'] = filehandle.filename filehandle.filename = filename + '.jpg' return filehandle
def function[change_filename, parameter[filehandle, meta]]: constant[Changes the filename to reflect the conversion from PDF to JPG. This method will preserve the original filename in the meta dictionary. ] variable[filename] assign[=] call[name[secure_filename], parameter[call[name[meta].get, parameter[constant[filename], name[filehandle].filename]]]] <ast.Tuple object at 0x7da1b2607ee0> assign[=] call[name[os].path.splitext, parameter[name[filename]]] call[name[meta]][constant[original_filename]] assign[=] name[filehandle].filename name[filehandle].filename assign[=] binary_operation[name[filename] + constant[.jpg]] return[name[filehandle]]
keyword[def] identifier[change_filename] ( identifier[filehandle] , identifier[meta] ): literal[string] identifier[filename] = identifier[secure_filename] ( identifier[meta] . identifier[get] ( literal[string] , identifier[filehandle] . identifier[filename] )) identifier[basename] , identifier[_] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] ) identifier[meta] [ literal[string] ]= identifier[filehandle] . identifier[filename] identifier[filehandle] . identifier[filename] = identifier[filename] + literal[string] keyword[return] identifier[filehandle]
def change_filename(filehandle, meta): """Changes the filename to reflect the conversion from PDF to JPG. This method will preserve the original filename in the meta dictionary. """ filename = secure_filename(meta.get('filename', filehandle.filename)) (basename, _) = os.path.splitext(filename) meta['original_filename'] = filehandle.filename filehandle.filename = filename + '.jpg' return filehandle
def calculate_incorrect_name_dict(graph: BELGraph) -> Mapping[str, List[str]]: """Get missing names grouped by namespace.""" missing = defaultdict(list) for namespace, name in _iterate_namespace_name(graph): missing[namespace].append(name) return dict(missing)
def function[calculate_incorrect_name_dict, parameter[graph]]: constant[Get missing names grouped by namespace.] variable[missing] assign[=] call[name[defaultdict], parameter[name[list]]] for taget[tuple[[<ast.Name object at 0x7da207f9a7a0>, <ast.Name object at 0x7da207f9a7d0>]]] in starred[call[name[_iterate_namespace_name], parameter[name[graph]]]] begin[:] call[call[name[missing]][name[namespace]].append, parameter[name[name]]] return[call[name[dict], parameter[name[missing]]]]
keyword[def] identifier[calculate_incorrect_name_dict] ( identifier[graph] : identifier[BELGraph] )-> identifier[Mapping] [ identifier[str] , identifier[List] [ identifier[str] ]]: literal[string] identifier[missing] = identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[namespace] , identifier[name] keyword[in] identifier[_iterate_namespace_name] ( identifier[graph] ): identifier[missing] [ identifier[namespace] ]. identifier[append] ( identifier[name] ) keyword[return] identifier[dict] ( identifier[missing] )
def calculate_incorrect_name_dict(graph: BELGraph) -> Mapping[str, List[str]]: """Get missing names grouped by namespace.""" missing = defaultdict(list) for (namespace, name) in _iterate_namespace_name(graph): missing[namespace].append(name) # depends on [control=['for'], data=[]] return dict(missing)
def next_sibling(self): """ Returns the next sibling of the current node. The next sibling is searched in the parent node if we are not considering a top-level node. Otherwise it is searched inside the list of nodes (which should be sorted by tree ID) that is associated with the considered tree instance. """ if self.parent: nodes = self.parent.children index = nodes.index(self) sibling = nodes[index + 1] if index < len(nodes) - 1 else None else: nodes = self.tree.nodes index = nodes.index(self) sibling = ( next((n for n in nodes[index + 1:] if n.level == self.level), None) if index < len(nodes) - 1 else None ) return sibling
def function[next_sibling, parameter[self]]: constant[ Returns the next sibling of the current node. The next sibling is searched in the parent node if we are not considering a top-level node. Otherwise it is searched inside the list of nodes (which should be sorted by tree ID) that is associated with the considered tree instance. ] if name[self].parent begin[:] variable[nodes] assign[=] name[self].parent.children variable[index] assign[=] call[name[nodes].index, parameter[name[self]]] variable[sibling] assign[=] <ast.IfExp object at 0x7da18f8119f0> return[name[sibling]]
keyword[def] identifier[next_sibling] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[parent] : identifier[nodes] = identifier[self] . identifier[parent] . identifier[children] identifier[index] = identifier[nodes] . identifier[index] ( identifier[self] ) identifier[sibling] = identifier[nodes] [ identifier[index] + literal[int] ] keyword[if] identifier[index] < identifier[len] ( identifier[nodes] )- literal[int] keyword[else] keyword[None] keyword[else] : identifier[nodes] = identifier[self] . identifier[tree] . identifier[nodes] identifier[index] = identifier[nodes] . identifier[index] ( identifier[self] ) identifier[sibling] =( identifier[next] (( identifier[n] keyword[for] identifier[n] keyword[in] identifier[nodes] [ identifier[index] + literal[int] :] keyword[if] identifier[n] . identifier[level] == identifier[self] . identifier[level] ), keyword[None] ) keyword[if] identifier[index] < identifier[len] ( identifier[nodes] )- literal[int] keyword[else] keyword[None] ) keyword[return] identifier[sibling]
def next_sibling(self): """ Returns the next sibling of the current node. The next sibling is searched in the parent node if we are not considering a top-level node. Otherwise it is searched inside the list of nodes (which should be sorted by tree ID) that is associated with the considered tree instance. """ if self.parent: nodes = self.parent.children index = nodes.index(self) sibling = nodes[index + 1] if index < len(nodes) - 1 else None # depends on [control=['if'], data=[]] else: nodes = self.tree.nodes index = nodes.index(self) sibling = next((n for n in nodes[index + 1:] if n.level == self.level), None) if index < len(nodes) - 1 else None return sibling
def get_all_keys(self, headers=None, **params): """ A lower-level method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type marker: string :param marker: The "marker" of where you are in the result set :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested """ return self._get_all([('Contents', self.key_class), ('CommonPrefixes', Prefix)], '', headers, **params)
def function[get_all_keys, parameter[self, headers]]: constant[ A lower-level method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type marker: string :param marker: The "marker" of where you are in the result set :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested ] return[call[name[self]._get_all, parameter[list[[<ast.Tuple object at 0x7da1b26a7ee0>, <ast.Tuple object at 0x7da1b26a69e0>]], constant[], name[headers]]]]
keyword[def] identifier[get_all_keys] ( identifier[self] , identifier[headers] = keyword[None] ,** identifier[params] ): literal[string] keyword[return] identifier[self] . identifier[_get_all] ([( literal[string] , identifier[self] . identifier[key_class] ), ( literal[string] , identifier[Prefix] )], literal[string] , identifier[headers] ,** identifier[params] )
def get_all_keys(self, headers=None, **params): """ A lower-level method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type marker: string :param marker: The "marker" of where you are in the result set :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested """ return self._get_all([('Contents', self.key_class), ('CommonPrefixes', Prefix)], '', headers, **params)
def get_all(self, **kwargs): """ Gets all Licenses This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: LicensePage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_with_http_info(**kwargs) else: (data) = self.get_all_with_http_info(**kwargs) return data
def function[get_all, parameter[self]]: constant[ Gets all Licenses This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: LicensePage If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[callback]]] begin[:] return[call[name[self].get_all_with_http_info, parameter[]]]
keyword[def] identifier[get_all] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[get_all_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[get_all_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def get_all(self, **kwargs): """ Gets all Licenses This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: LicensePage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_with_http_info(**kwargs) # depends on [control=['if'], data=[]] else: data = self.get_all_with_http_info(**kwargs) return data
def profile_setting_default_args(ij): """Build the default args for this profile. Args: ij (dict): The install.json contents. Returns: dict: The default args for a Job or Playbook App. """ # build default args profile_default_args = OrderedDict() profile_default_args['api_default_org'] = '$env.API_DEFAULT_ORG' profile_default_args['api_access_id'] = '$env.API_ACCESS_ID' profile_default_args['api_secret_key'] = '$envs.API_SECRET_KEY' profile_default_args['tc_api_path'] = '$env.TC_API_PATH' profile_default_args['tc_docker'] = False profile_default_args['tc_in_path'] = 'log' profile_default_args['tc_log_level'] = 'debug' profile_default_args['tc_log_path'] = 'log' profile_default_args['tc_log_to_api'] = False profile_default_args['tc_out_path'] = 'log' profile_default_args['tc_proxy_external'] = False profile_default_args['tc_proxy_host'] = '$env.TC_PROXY_HOST' profile_default_args['tc_proxy_port'] = '$env.TC_PROXY_PORT' profile_default_args['tc_proxy_password'] = '$envs.TC_PROXY_PASSWORD' profile_default_args['tc_proxy_tc'] = False profile_default_args['tc_proxy_username'] = '$env.TC_PROXY_USERNAME' profile_default_args['tc_temp_path'] = 'log' if ij.get('runtimeLevel') == 'Playbook': profile_default_args['tc_playbook_db_type'] = 'Redis' profile_default_args['tc_playbook_db_context'] = str(uuid4()) profile_default_args['tc_playbook_db_path'] = '$env.DB_PATH' profile_default_args['tc_playbook_db_port'] = '$env.DB_PORT' profile_default_args['tc_playbook_out_variables'] = '' return profile_default_args
def function[profile_setting_default_args, parameter[ij]]: constant[Build the default args for this profile. Args: ij (dict): The install.json contents. Returns: dict: The default args for a Job or Playbook App. ] variable[profile_default_args] assign[=] call[name[OrderedDict], parameter[]] call[name[profile_default_args]][constant[api_default_org]] assign[=] constant[$env.API_DEFAULT_ORG] call[name[profile_default_args]][constant[api_access_id]] assign[=] constant[$env.API_ACCESS_ID] call[name[profile_default_args]][constant[api_secret_key]] assign[=] constant[$envs.API_SECRET_KEY] call[name[profile_default_args]][constant[tc_api_path]] assign[=] constant[$env.TC_API_PATH] call[name[profile_default_args]][constant[tc_docker]] assign[=] constant[False] call[name[profile_default_args]][constant[tc_in_path]] assign[=] constant[log] call[name[profile_default_args]][constant[tc_log_level]] assign[=] constant[debug] call[name[profile_default_args]][constant[tc_log_path]] assign[=] constant[log] call[name[profile_default_args]][constant[tc_log_to_api]] assign[=] constant[False] call[name[profile_default_args]][constant[tc_out_path]] assign[=] constant[log] call[name[profile_default_args]][constant[tc_proxy_external]] assign[=] constant[False] call[name[profile_default_args]][constant[tc_proxy_host]] assign[=] constant[$env.TC_PROXY_HOST] call[name[profile_default_args]][constant[tc_proxy_port]] assign[=] constant[$env.TC_PROXY_PORT] call[name[profile_default_args]][constant[tc_proxy_password]] assign[=] constant[$envs.TC_PROXY_PASSWORD] call[name[profile_default_args]][constant[tc_proxy_tc]] assign[=] constant[False] call[name[profile_default_args]][constant[tc_proxy_username]] assign[=] constant[$env.TC_PROXY_USERNAME] call[name[profile_default_args]][constant[tc_temp_path]] assign[=] constant[log] if compare[call[name[ij].get, parameter[constant[runtimeLevel]]] equal[==] constant[Playbook]] begin[:] call[name[profile_default_args]][constant[tc_playbook_db_type]] 
assign[=] constant[Redis] call[name[profile_default_args]][constant[tc_playbook_db_context]] assign[=] call[name[str], parameter[call[name[uuid4], parameter[]]]] call[name[profile_default_args]][constant[tc_playbook_db_path]] assign[=] constant[$env.DB_PATH] call[name[profile_default_args]][constant[tc_playbook_db_port]] assign[=] constant[$env.DB_PORT] call[name[profile_default_args]][constant[tc_playbook_out_variables]] assign[=] constant[] return[name[profile_default_args]]
keyword[def] identifier[profile_setting_default_args] ( identifier[ij] ): literal[string] identifier[profile_default_args] = identifier[OrderedDict] () identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= keyword[False] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= keyword[False] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= keyword[False] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= keyword[False] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] keyword[if] identifier[ij] . identifier[get] ( literal[string] )== literal[string] : identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= identifier[str] ( identifier[uuid4] ()) identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] identifier[profile_default_args] [ literal[string] ]= literal[string] keyword[return] identifier[profile_default_args]
def profile_setting_default_args(ij): """Build the default args for this profile. Args: ij (dict): The install.json contents. Returns: dict: The default args for a Job or Playbook App. """ # build default args profile_default_args = OrderedDict() profile_default_args['api_default_org'] = '$env.API_DEFAULT_ORG' profile_default_args['api_access_id'] = '$env.API_ACCESS_ID' profile_default_args['api_secret_key'] = '$envs.API_SECRET_KEY' profile_default_args['tc_api_path'] = '$env.TC_API_PATH' profile_default_args['tc_docker'] = False profile_default_args['tc_in_path'] = 'log' profile_default_args['tc_log_level'] = 'debug' profile_default_args['tc_log_path'] = 'log' profile_default_args['tc_log_to_api'] = False profile_default_args['tc_out_path'] = 'log' profile_default_args['tc_proxy_external'] = False profile_default_args['tc_proxy_host'] = '$env.TC_PROXY_HOST' profile_default_args['tc_proxy_port'] = '$env.TC_PROXY_PORT' profile_default_args['tc_proxy_password'] = '$envs.TC_PROXY_PASSWORD' profile_default_args['tc_proxy_tc'] = False profile_default_args['tc_proxy_username'] = '$env.TC_PROXY_USERNAME' profile_default_args['tc_temp_path'] = 'log' if ij.get('runtimeLevel') == 'Playbook': profile_default_args['tc_playbook_db_type'] = 'Redis' profile_default_args['tc_playbook_db_context'] = str(uuid4()) profile_default_args['tc_playbook_db_path'] = '$env.DB_PATH' profile_default_args['tc_playbook_db_port'] = '$env.DB_PORT' profile_default_args['tc_playbook_out_variables'] = '' # depends on [control=['if'], data=[]] return profile_default_args
def _nbaSeason(x): """Takes in 4-digit year for first half of season and returns API appropriate formatted code Input Values: YYYY Used in: _Draft.Anthro(), _Draft.Agility(), _Draft.NonStationaryShooting(), _Draft.SpotUpShooting(), _Draft.Combine() """ if len(str(x)) == 4: try: return '{0}-{1}'.format(x, str(int(x) % 100 + 1)[-2:].zfill(2)) except ValueError: raise ValueError("Enter the four digit year for the first half of the desired season") else: raise ValueError("Enter the four digit year for the first half of the desired season")
def function[_nbaSeason, parameter[x]]: constant[Takes in 4-digit year for first half of season and returns API appropriate formatted code Input Values: YYYY Used in: _Draft.Anthro(), _Draft.Agility(), _Draft.NonStationaryShooting(), _Draft.SpotUpShooting(), _Draft.Combine() ] if compare[call[name[len], parameter[call[name[str], parameter[name[x]]]]] equal[==] constant[4]] begin[:] <ast.Try object at 0x7da204622c50>
keyword[def] identifier[_nbaSeason] ( identifier[x] ): literal[string] keyword[if] identifier[len] ( identifier[str] ( identifier[x] ))== literal[int] : keyword[try] : keyword[return] literal[string] . identifier[format] ( identifier[x] , identifier[str] ( identifier[int] ( identifier[x] )% literal[int] + literal[int] )[- literal[int] :]. identifier[zfill] ( literal[int] )) keyword[except] identifier[ValueError] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
def _nbaSeason(x): """Takes in 4-digit year for first half of season and returns API appropriate formatted code Input Values: YYYY Used in: _Draft.Anthro(), _Draft.Agility(), _Draft.NonStationaryShooting(), _Draft.SpotUpShooting(), _Draft.Combine() """ if len(str(x)) == 4: try: return '{0}-{1}'.format(x, str(int(x) % 100 + 1)[-2:].zfill(2)) # depends on [control=['try'], data=[]] except ValueError: raise ValueError('Enter the four digit year for the first half of the desired season') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: raise ValueError('Enter the four digit year for the first half of the desired season')
def _GetNormalizedTimestamp(self): """Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined. """ if self._normalized_timestamp is None: if self._number_of_seconds is not None and self._number_of_seconds >= 0: self._normalized_timestamp = ( decimal.Decimal(self._number_of_seconds) + self._FAT_DATE_TO_POSIX_BASE) return self._normalized_timestamp
def function[_GetNormalizedTimestamp, parameter[self]]: constant[Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined. ] if compare[name[self]._normalized_timestamp is constant[None]] begin[:] if <ast.BoolOp object at 0x7da18f721810> begin[:] name[self]._normalized_timestamp assign[=] binary_operation[call[name[decimal].Decimal, parameter[name[self]._number_of_seconds]] + name[self]._FAT_DATE_TO_POSIX_BASE] return[name[self]._normalized_timestamp]
keyword[def] identifier[_GetNormalizedTimestamp] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_normalized_timestamp] keyword[is] keyword[None] : keyword[if] identifier[self] . identifier[_number_of_seconds] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_number_of_seconds] >= literal[int] : identifier[self] . identifier[_normalized_timestamp] =( identifier[decimal] . identifier[Decimal] ( identifier[self] . identifier[_number_of_seconds] )+ identifier[self] . identifier[_FAT_DATE_TO_POSIX_BASE] ) keyword[return] identifier[self] . identifier[_normalized_timestamp]
def _GetNormalizedTimestamp(self): """Retrieves the normalized timestamp. Returns: decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined. """ if self._normalized_timestamp is None: if self._number_of_seconds is not None and self._number_of_seconds >= 0: self._normalized_timestamp = decimal.Decimal(self._number_of_seconds) + self._FAT_DATE_TO_POSIX_BASE # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return self._normalized_timestamp
def branchings_segments(self): """Detect branchings and partition the data into corresponding segments. Detect all branchings up to `n_branchings`. Writes ------ segs : np.ndarray Array of dimension (number of segments) × (number of data points). Each row stores a mask array that defines a segment. segs_tips : np.ndarray Array of dimension (number of segments) × 2. Each row stores the indices of the two tip points of each segment. segs_names : np.ndarray Array of dimension (number of data points). Stores an integer label for each segment. """ self.detect_branchings() self.postprocess_segments() self.set_segs_names() self.order_pseudotime()
def function[branchings_segments, parameter[self]]: constant[Detect branchings and partition the data into corresponding segments. Detect all branchings up to `n_branchings`. Writes ------ segs : np.ndarray Array of dimension (number of segments) × (number of data points). Each row stores a mask array that defines a segment. segs_tips : np.ndarray Array of dimension (number of segments) × 2. Each row stores the indices of the two tip points of each segment. segs_names : np.ndarray Array of dimension (number of data points). Stores an integer label for each segment. ] call[name[self].detect_branchings, parameter[]] call[name[self].postprocess_segments, parameter[]] call[name[self].set_segs_names, parameter[]] call[name[self].order_pseudotime, parameter[]]
keyword[def] identifier[branchings_segments] ( identifier[self] ): literal[string] identifier[self] . identifier[detect_branchings] () identifier[self] . identifier[postprocess_segments] () identifier[self] . identifier[set_segs_names] () identifier[self] . identifier[order_pseudotime] ()
def branchings_segments(self): """Detect branchings and partition the data into corresponding segments. Detect all branchings up to `n_branchings`. Writes ------ segs : np.ndarray Array of dimension (number of segments) × (number of data points). Each row stores a mask array that defines a segment. segs_tips : np.ndarray Array of dimension (number of segments) × 2. Each row stores the indices of the two tip points of each segment. segs_names : np.ndarray Array of dimension (number of data points). Stores an integer label for each segment. """ self.detect_branchings() self.postprocess_segments() self.set_segs_names() self.order_pseudotime()
def get_client_rights(self, right_name=None, product_version=None, edition=None, rel_type=None, include_certificate=None, canary=None, machine_id=None): """GetClientRights. [Preview API] :param str right_name: :param str product_version: :param str edition: :param str rel_type: :param bool include_certificate: :param str canary: :param str machine_id: :rtype: :class:`<ClientRightsContainer> <azure.devops.v5_0.licensing.models.ClientRightsContainer>` """ route_values = {} if right_name is not None: route_values['rightName'] = self._serialize.url('right_name', right_name, 'str') query_parameters = {} if product_version is not None: query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str') if edition is not None: query_parameters['edition'] = self._serialize.query('edition', edition, 'str') if rel_type is not None: query_parameters['relType'] = self._serialize.query('rel_type', rel_type, 'str') if include_certificate is not None: query_parameters['includeCertificate'] = self._serialize.query('include_certificate', include_certificate, 'bool') if canary is not None: query_parameters['canary'] = self._serialize.query('canary', canary, 'str') if machine_id is not None: query_parameters['machineId'] = self._serialize.query('machine_id', machine_id, 'str') response = self._send(http_method='GET', location_id='643c72da-eaee-4163-9f07-d748ef5c2a0c', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ClientRightsContainer', response)
def function[get_client_rights, parameter[self, right_name, product_version, edition, rel_type, include_certificate, canary, machine_id]]: constant[GetClientRights. [Preview API] :param str right_name: :param str product_version: :param str edition: :param str rel_type: :param bool include_certificate: :param str canary: :param str machine_id: :rtype: :class:`<ClientRightsContainer> <azure.devops.v5_0.licensing.models.ClientRightsContainer>` ] variable[route_values] assign[=] dictionary[[], []] if compare[name[right_name] is_not constant[None]] begin[:] call[name[route_values]][constant[rightName]] assign[=] call[name[self]._serialize.url, parameter[constant[right_name], name[right_name], constant[str]]] variable[query_parameters] assign[=] dictionary[[], []] if compare[name[product_version] is_not constant[None]] begin[:] call[name[query_parameters]][constant[productVersion]] assign[=] call[name[self]._serialize.query, parameter[constant[product_version], name[product_version], constant[str]]] if compare[name[edition] is_not constant[None]] begin[:] call[name[query_parameters]][constant[edition]] assign[=] call[name[self]._serialize.query, parameter[constant[edition], name[edition], constant[str]]] if compare[name[rel_type] is_not constant[None]] begin[:] call[name[query_parameters]][constant[relType]] assign[=] call[name[self]._serialize.query, parameter[constant[rel_type], name[rel_type], constant[str]]] if compare[name[include_certificate] is_not constant[None]] begin[:] call[name[query_parameters]][constant[includeCertificate]] assign[=] call[name[self]._serialize.query, parameter[constant[include_certificate], name[include_certificate], constant[bool]]] if compare[name[canary] is_not constant[None]] begin[:] call[name[query_parameters]][constant[canary]] assign[=] call[name[self]._serialize.query, parameter[constant[canary], name[canary], constant[str]]] if compare[name[machine_id] is_not constant[None]] begin[:] 
call[name[query_parameters]][constant[machineId]] assign[=] call[name[self]._serialize.query, parameter[constant[machine_id], name[machine_id], constant[str]]] variable[response] assign[=] call[name[self]._send, parameter[]] return[call[name[self]._deserialize, parameter[constant[ClientRightsContainer], name[response]]]]
keyword[def] identifier[get_client_rights] ( identifier[self] , identifier[right_name] = keyword[None] , identifier[product_version] = keyword[None] , identifier[edition] = keyword[None] , identifier[rel_type] = keyword[None] , identifier[include_certificate] = keyword[None] , identifier[canary] = keyword[None] , identifier[machine_id] = keyword[None] ): literal[string] identifier[route_values] ={} keyword[if] identifier[right_name] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[right_name] , literal[string] ) identifier[query_parameters] ={} keyword[if] identifier[product_version] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[product_version] , literal[string] ) keyword[if] identifier[edition] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[edition] , literal[string] ) keyword[if] identifier[rel_type] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[rel_type] , literal[string] ) keyword[if] identifier[include_certificate] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[include_certificate] , literal[string] ) keyword[if] identifier[canary] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . 
identifier[query] ( literal[string] , identifier[canary] , literal[string] ) keyword[if] identifier[machine_id] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[machine_id] , literal[string] ) identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] , identifier[location_id] = literal[string] , identifier[version] = literal[string] , identifier[route_values] = identifier[route_values] , identifier[query_parameters] = identifier[query_parameters] ) keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] )
def get_client_rights(self, right_name=None, product_version=None, edition=None, rel_type=None, include_certificate=None, canary=None, machine_id=None): """GetClientRights. [Preview API] :param str right_name: :param str product_version: :param str edition: :param str rel_type: :param bool include_certificate: :param str canary: :param str machine_id: :rtype: :class:`<ClientRightsContainer> <azure.devops.v5_0.licensing.models.ClientRightsContainer>` """ route_values = {} if right_name is not None: route_values['rightName'] = self._serialize.url('right_name', right_name, 'str') # depends on [control=['if'], data=['right_name']] query_parameters = {} if product_version is not None: query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str') # depends on [control=['if'], data=['product_version']] if edition is not None: query_parameters['edition'] = self._serialize.query('edition', edition, 'str') # depends on [control=['if'], data=['edition']] if rel_type is not None: query_parameters['relType'] = self._serialize.query('rel_type', rel_type, 'str') # depends on [control=['if'], data=['rel_type']] if include_certificate is not None: query_parameters['includeCertificate'] = self._serialize.query('include_certificate', include_certificate, 'bool') # depends on [control=['if'], data=['include_certificate']] if canary is not None: query_parameters['canary'] = self._serialize.query('canary', canary, 'str') # depends on [control=['if'], data=['canary']] if machine_id is not None: query_parameters['machineId'] = self._serialize.query('machine_id', machine_id, 'str') # depends on [control=['if'], data=['machine_id']] response = self._send(http_method='GET', location_id='643c72da-eaee-4163-9f07-d748ef5c2a0c', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('ClientRightsContainer', response)
def get_project_root(): """Get the project root folder as a string.""" cfg = get_project_configuration() # At this point it can be sure that the configuration file exists # Now make sure the project structure exists for dirname in ["raw-datasets", "preprocessed", "feature-files", "models", "reports"]: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) raw_yml_path = pkg_resources.resource_filename('hwrt', 'misc/') # TODO: How to check for updates if it already exists? raw_data_dst = os.path.join(cfg['root'], "raw-datasets/info.yml") if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, "info.yml") shutil.copy(raw_yml_pkg_src, raw_data_dst) # Make sure small-baseline folders exists for dirname in ["models/small-baseline", "feature-files/small-baseline", "preprocessed/small-baseline"]: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) # Make sure small-baseline yml files exist paths = [("preprocessed/small-baseline/", "preprocessing-small-info.yml"), ("feature-files/small-baseline/", "feature-small-info.yml"), ("models/small-baseline/", "model-small-info.yml")] for dest, src in paths: raw_data_dst = os.path.join(cfg['root'], "%s/info.yml" % dest) if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, src) shutil.copy(raw_yml_pkg_src, raw_data_dst) return cfg['root']
def function[get_project_root, parameter[]]: constant[Get the project root folder as a string.] variable[cfg] assign[=] call[name[get_project_configuration], parameter[]] for taget[name[dirname]] in starred[list[[<ast.Constant object at 0x7da1b28d6770>, <ast.Constant object at 0x7da1b28d6aa0>, <ast.Constant object at 0x7da1b28d6e60>, <ast.Constant object at 0x7da1b28d6c50>, <ast.Constant object at 0x7da1b28d6a10>]]] begin[:] variable[directory] assign[=] call[name[os].path.join, parameter[call[name[cfg]][constant[root]], name[dirname]]] if <ast.UnaryOp object at 0x7da1b28d5cc0> begin[:] call[name[os].makedirs, parameter[name[directory]]] variable[raw_yml_path] assign[=] call[name[pkg_resources].resource_filename, parameter[constant[hwrt], constant[misc/]]] variable[raw_data_dst] assign[=] call[name[os].path.join, parameter[call[name[cfg]][constant[root]], constant[raw-datasets/info.yml]]] if <ast.UnaryOp object at 0x7da1b28d68f0> begin[:] variable[raw_yml_pkg_src] assign[=] call[name[os].path.join, parameter[name[raw_yml_path], constant[info.yml]]] call[name[shutil].copy, parameter[name[raw_yml_pkg_src], name[raw_data_dst]]] for taget[name[dirname]] in starred[list[[<ast.Constant object at 0x7da1b28c6d70>, <ast.Constant object at 0x7da1b28c6e00>, <ast.Constant object at 0x7da1b28c55a0>]]] begin[:] variable[directory] assign[=] call[name[os].path.join, parameter[call[name[cfg]][constant[root]], name[dirname]]] if <ast.UnaryOp object at 0x7da1b2866cb0> begin[:] call[name[os].makedirs, parameter[name[directory]]] variable[paths] assign[=] list[[<ast.Tuple object at 0x7da1b28641c0>, <ast.Tuple object at 0x7da1b28655d0>, <ast.Tuple object at 0x7da1b2867a30>]] for taget[tuple[[<ast.Name object at 0x7da1b2865a20>, <ast.Name object at 0x7da1b28650f0>]]] in starred[name[paths]] begin[:] variable[raw_data_dst] assign[=] call[name[os].path.join, parameter[call[name[cfg]][constant[root]], binary_operation[constant[%s/info.yml] <ast.Mod object at 0x7da2590d6920> name[dest]]]] 
if <ast.UnaryOp object at 0x7da1b2867820> begin[:] variable[raw_yml_pkg_src] assign[=] call[name[os].path.join, parameter[name[raw_yml_path], name[src]]] call[name[shutil].copy, parameter[name[raw_yml_pkg_src], name[raw_data_dst]]] return[call[name[cfg]][constant[root]]]
keyword[def] identifier[get_project_root] (): literal[string] identifier[cfg] = identifier[get_project_configuration] () keyword[for] identifier[dirname] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: identifier[directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[cfg] [ literal[string] ], identifier[dirname] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[directory] ): identifier[os] . identifier[makedirs] ( identifier[directory] ) identifier[raw_yml_path] = identifier[pkg_resources] . identifier[resource_filename] ( literal[string] , literal[string] ) identifier[raw_data_dst] = identifier[os] . identifier[path] . identifier[join] ( identifier[cfg] [ literal[string] ], literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[raw_data_dst] ): identifier[raw_yml_pkg_src] = identifier[os] . identifier[path] . identifier[join] ( identifier[raw_yml_path] , literal[string] ) identifier[shutil] . identifier[copy] ( identifier[raw_yml_pkg_src] , identifier[raw_data_dst] ) keyword[for] identifier[dirname] keyword[in] [ literal[string] , literal[string] , literal[string] ]: identifier[directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[cfg] [ literal[string] ], identifier[dirname] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[directory] ): identifier[os] . identifier[makedirs] ( identifier[directory] ) identifier[paths] =[( literal[string] , literal[string] ), ( literal[string] , literal[string] ), ( literal[string] , literal[string] )] keyword[for] identifier[dest] , identifier[src] keyword[in] identifier[paths] : identifier[raw_data_dst] = identifier[os] . identifier[path] . identifier[join] ( identifier[cfg] [ literal[string] ], literal[string] % identifier[dest] ) keyword[if] keyword[not] identifier[os] . 
identifier[path] . identifier[isfile] ( identifier[raw_data_dst] ): identifier[raw_yml_pkg_src] = identifier[os] . identifier[path] . identifier[join] ( identifier[raw_yml_path] , identifier[src] ) identifier[shutil] . identifier[copy] ( identifier[raw_yml_pkg_src] , identifier[raw_data_dst] ) keyword[return] identifier[cfg] [ literal[string] ]
def get_project_root(): """Get the project root folder as a string.""" cfg = get_project_configuration() # At this point it can be sure that the configuration file exists # Now make sure the project structure exists for dirname in ['raw-datasets', 'preprocessed', 'feature-files', 'models', 'reports']: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dirname']] raw_yml_path = pkg_resources.resource_filename('hwrt', 'misc/') # TODO: How to check for updates if it already exists? raw_data_dst = os.path.join(cfg['root'], 'raw-datasets/info.yml') if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, 'info.yml') shutil.copy(raw_yml_pkg_src, raw_data_dst) # depends on [control=['if'], data=[]] # Make sure small-baseline folders exists for dirname in ['models/small-baseline', 'feature-files/small-baseline', 'preprocessed/small-baseline']: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dirname']] # Make sure small-baseline yml files exist paths = [('preprocessed/small-baseline/', 'preprocessing-small-info.yml'), ('feature-files/small-baseline/', 'feature-small-info.yml'), ('models/small-baseline/', 'model-small-info.yml')] for (dest, src) in paths: raw_data_dst = os.path.join(cfg['root'], '%s/info.yml' % dest) if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, src) shutil.copy(raw_yml_pkg_src, raw_data_dst) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return cfg['root']
def discovery(self, compute_resource): """An example that discovers hosts and VMs in the inventory.""" # Find the first ClusterComputeResource if compute_resource is None: cr_list = ComputeResource.all(self.client) print("ERROR: You must specify a ComputeResource.") print("Available ComputeResource's:") for cr in cr_list: print(cr.name) sys.exit(1) try: ccr = ComputeResource.get(self.client, name=compute_resource) except ObjectNotFoundError: print("ERROR: Could not find ComputeResource with name %s" % compute_resource) sys.exit(1) print('Cluster: %s (%s hosts)' % (ccr.name, len(ccr.host))) ccr.preload("host", properties=["name", "vm"]) for host in ccr.host: print(' Host: %s (%s VMs)' % (host.name, len(host.vm))) # Get the vm views in one fell swoop host.preload("vm", properties=["name"]) for vm in host.vm: print(' VM: %s' % vm.name)
def function[discovery, parameter[self, compute_resource]]: constant[An example that discovers hosts and VMs in the inventory.] if compare[name[compute_resource] is constant[None]] begin[:] variable[cr_list] assign[=] call[name[ComputeResource].all, parameter[name[self].client]] call[name[print], parameter[constant[ERROR: You must specify a ComputeResource.]]] call[name[print], parameter[constant[Available ComputeResource's:]]] for taget[name[cr]] in starred[name[cr_list]] begin[:] call[name[print], parameter[name[cr].name]] call[name[sys].exit, parameter[constant[1]]] <ast.Try object at 0x7da18dc98550> call[name[print], parameter[binary_operation[constant[Cluster: %s (%s hosts)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18dc9a530>, <ast.Call object at 0x7da18dc986a0>]]]]] call[name[ccr].preload, parameter[constant[host]]] for taget[name[host]] in starred[name[ccr].host] begin[:] call[name[print], parameter[binary_operation[constant[ Host: %s (%s VMs)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18dc98250>, <ast.Call object at 0x7da18dc99060>]]]]] call[name[host].preload, parameter[constant[vm]]] for taget[name[vm]] in starred[name[host].vm] begin[:] call[name[print], parameter[binary_operation[constant[ VM: %s] <ast.Mod object at 0x7da2590d6920> name[vm].name]]]
keyword[def] identifier[discovery] ( identifier[self] , identifier[compute_resource] ): literal[string] keyword[if] identifier[compute_resource] keyword[is] keyword[None] : identifier[cr_list] = identifier[ComputeResource] . identifier[all] ( identifier[self] . identifier[client] ) identifier[print] ( literal[string] ) identifier[print] ( literal[string] ) keyword[for] identifier[cr] keyword[in] identifier[cr_list] : identifier[print] ( identifier[cr] . identifier[name] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[try] : identifier[ccr] = identifier[ComputeResource] . identifier[get] ( identifier[self] . identifier[client] , identifier[name] = identifier[compute_resource] ) keyword[except] identifier[ObjectNotFoundError] : identifier[print] ( literal[string] % identifier[compute_resource] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[print] ( literal[string] %( identifier[ccr] . identifier[name] , identifier[len] ( identifier[ccr] . identifier[host] ))) identifier[ccr] . identifier[preload] ( literal[string] , identifier[properties] =[ literal[string] , literal[string] ]) keyword[for] identifier[host] keyword[in] identifier[ccr] . identifier[host] : identifier[print] ( literal[string] %( identifier[host] . identifier[name] , identifier[len] ( identifier[host] . identifier[vm] ))) identifier[host] . identifier[preload] ( literal[string] , identifier[properties] =[ literal[string] ]) keyword[for] identifier[vm] keyword[in] identifier[host] . identifier[vm] : identifier[print] ( literal[string] % identifier[vm] . identifier[name] )
def discovery(self, compute_resource): """An example that discovers hosts and VMs in the inventory.""" # Find the first ClusterComputeResource if compute_resource is None: cr_list = ComputeResource.all(self.client) print('ERROR: You must specify a ComputeResource.') print("Available ComputeResource's:") for cr in cr_list: print(cr.name) # depends on [control=['for'], data=['cr']] sys.exit(1) # depends on [control=['if'], data=[]] try: ccr = ComputeResource.get(self.client, name=compute_resource) # depends on [control=['try'], data=[]] except ObjectNotFoundError: print('ERROR: Could not find ComputeResource with name %s' % compute_resource) sys.exit(1) # depends on [control=['except'], data=[]] print('Cluster: %s (%s hosts)' % (ccr.name, len(ccr.host))) ccr.preload('host', properties=['name', 'vm']) for host in ccr.host: print(' Host: %s (%s VMs)' % (host.name, len(host.vm))) # Get the vm views in one fell swoop host.preload('vm', properties=['name']) for vm in host.vm: print(' VM: %s' % vm.name) # depends on [control=['for'], data=['vm']] # depends on [control=['for'], data=['host']]
def get_key_by_value(dictionary, search_value): """ searchs a value in a dicionary and returns the key of the first occurrence :param dictionary: dictionary to search in :param search_value: value to search for """ for key, value in dictionary.iteritems(): if value == search_value: return ugettext(key)
def function[get_key_by_value, parameter[dictionary, search_value]]: constant[ searchs a value in a dicionary and returns the key of the first occurrence :param dictionary: dictionary to search in :param search_value: value to search for ] for taget[tuple[[<ast.Name object at 0x7da1b1800220>, <ast.Name object at 0x7da1b1800e50>]]] in starred[call[name[dictionary].iteritems, parameter[]]] begin[:] if compare[name[value] equal[==] name[search_value]] begin[:] return[call[name[ugettext], parameter[name[key]]]]
keyword[def] identifier[get_key_by_value] ( identifier[dictionary] , identifier[search_value] ): literal[string] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[dictionary] . identifier[iteritems] (): keyword[if] identifier[value] == identifier[search_value] : keyword[return] identifier[ugettext] ( identifier[key] )
def get_key_by_value(dictionary, search_value): """ searchs a value in a dicionary and returns the key of the first occurrence :param dictionary: dictionary to search in :param search_value: value to search for """ for (key, value) in dictionary.iteritems(): if value == search_value: return ugettext(key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def delete_types_s(s, types): """ Delete the given types from a string Same as :meth:`delete_types` but does not use the :attr:`params` dictionary Parameters ---------- s: str The string of the returns like section types: list of str The type identifiers to delete Returns ------- str The modified string `s` without the descriptions of `types` """ patt = '(?s)' + '|'.join( '(?<=\n)' + s + '\n.+?\n(?=\S+|$)' for s in types) return re.sub(patt, '', '\n' + s.strip() + '\n',).strip()
def function[delete_types_s, parameter[s, types]]: constant[ Delete the given types from a string Same as :meth:`delete_types` but does not use the :attr:`params` dictionary Parameters ---------- s: str The string of the returns like section types: list of str The type identifiers to delete Returns ------- str The modified string `s` without the descriptions of `types` ] variable[patt] assign[=] binary_operation[constant[(?s)] + call[constant[|].join, parameter[<ast.GeneratorExp object at 0x7da1b1b69960>]]] return[call[call[name[re].sub, parameter[name[patt], constant[], binary_operation[binary_operation[constant[ ] + call[name[s].strip, parameter[]]] + constant[ ]]]].strip, parameter[]]]
keyword[def] identifier[delete_types_s] ( identifier[s] , identifier[types] ): literal[string] identifier[patt] = literal[string] + literal[string] . identifier[join] ( literal[string] + identifier[s] + literal[string] keyword[for] identifier[s] keyword[in] identifier[types] ) keyword[return] identifier[re] . identifier[sub] ( identifier[patt] , literal[string] , literal[string] + identifier[s] . identifier[strip] ()+ literal[string] ,). identifier[strip] ()
def delete_types_s(s, types): """ Delete the given types from a string Same as :meth:`delete_types` but does not use the :attr:`params` dictionary Parameters ---------- s: str The string of the returns like section types: list of str The type identifiers to delete Returns ------- str The modified string `s` without the descriptions of `types` """ patt = '(?s)' + '|'.join(('(?<=\n)' + s + '\n.+?\n(?=\\S+|$)' for s in types)) return re.sub(patt, '', '\n' + s.strip() + '\n').strip()
def _set_priv(self, v, load=False): """ Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_priv is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priv() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """priv must be of a type compatible with enumeration""", 'defined-type': "brocade-snmp:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True)""", }) self.__priv = t if hasattr(self, '_set'): self._set()
def function[_set_priv, parameter[self, v, load]]: constant[ Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_priv is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priv() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18fe938e0> name[self].__priv assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_priv] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[RestrictedClassType] ( identifier[base_type] = identifier[unicode] , identifier[restriction_type] = literal[string] , identifier[restriction_arg] ={ literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }, literal[string] :{ literal[string] : literal[int] }},), identifier[default] = identifier[unicode] ( literal[string] ), identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__priv] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_priv(self, v, load=False): """ Setter method for priv, mapped from YANG variable /rbridge_id/snmp_server/user/priv (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_priv is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priv() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type='dict_key', restriction_arg={u'AES128': {'value': 2}, u'DES': {'value': 0}, u'nopriv': {'value': 1}}), default=unicode('nopriv'), is_leaf=True, yang_name='priv', rest_name='priv', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Privacy protocol for username (Default=nopriv)'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='enumeration', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'priv must be of a type compatible with enumeration', 'defined-type': 'brocade-snmp:enumeration', 'generated-type': 'YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u\'AES128\': {\'value\': 2}, u\'DES\': {\'value\': 0}, u\'nopriv\': {\'value\': 1}},), default=unicode("nopriv"), is_leaf=True, yang_name="priv", rest_name="priv", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Privacy protocol for username (Default=nopriv)\'}}, namespace=\'urn:brocade.com:mgmt:brocade-snmp\', defining_module=\'brocade-snmp\', yang_type=\'enumeration\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__priv = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def spans_columns(self, column_names): """ Checks if this index exactly spans the given column names in the correct order. :type column_names: list :rtype: bool """ columns = self.get_columns() number_of_columns = len(columns) same_columns = True for i in range(number_of_columns): column = self._trim_quotes(columns[i].lower()) if i >= len(column_names) or column != self._trim_quotes( column_names[i].lower() ): same_columns = False return same_columns
def function[spans_columns, parameter[self, column_names]]: constant[ Checks if this index exactly spans the given column names in the correct order. :type column_names: list :rtype: bool ] variable[columns] assign[=] call[name[self].get_columns, parameter[]] variable[number_of_columns] assign[=] call[name[len], parameter[name[columns]]] variable[same_columns] assign[=] constant[True] for taget[name[i]] in starred[call[name[range], parameter[name[number_of_columns]]]] begin[:] variable[column] assign[=] call[name[self]._trim_quotes, parameter[call[call[name[columns]][name[i]].lower, parameter[]]]] if <ast.BoolOp object at 0x7da18eb54670> begin[:] variable[same_columns] assign[=] constant[False] return[name[same_columns]]
keyword[def] identifier[spans_columns] ( identifier[self] , identifier[column_names] ): literal[string] identifier[columns] = identifier[self] . identifier[get_columns] () identifier[number_of_columns] = identifier[len] ( identifier[columns] ) identifier[same_columns] = keyword[True] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[number_of_columns] ): identifier[column] = identifier[self] . identifier[_trim_quotes] ( identifier[columns] [ identifier[i] ]. identifier[lower] ()) keyword[if] identifier[i] >= identifier[len] ( identifier[column_names] ) keyword[or] identifier[column] != identifier[self] . identifier[_trim_quotes] ( identifier[column_names] [ identifier[i] ]. identifier[lower] () ): identifier[same_columns] = keyword[False] keyword[return] identifier[same_columns]
def spans_columns(self, column_names): """ Checks if this index exactly spans the given column names in the correct order. :type column_names: list :rtype: bool """ columns = self.get_columns() number_of_columns = len(columns) same_columns = True for i in range(number_of_columns): column = self._trim_quotes(columns[i].lower()) if i >= len(column_names) or column != self._trim_quotes(column_names[i].lower()): same_columns = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return same_columns
def init_db(self): '''init_db for the filesystem ensures that the base folder (named according to the studyid) exists. ''' self.session = None if not os.path.exists(self.data_base): mkdir_p(self.data_base) self.database = "%s/%s" %(self.data_base, self.study_id) if not os.path.exists(self.database): mkdir_p(self.database)
def function[init_db, parameter[self]]: constant[init_db for the filesystem ensures that the base folder (named according to the studyid) exists. ] name[self].session assign[=] constant[None] if <ast.UnaryOp object at 0x7da1b10d6770> begin[:] call[name[mkdir_p], parameter[name[self].data_base]] name[self].database assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18fe91540>, <ast.Attribute object at 0x7da18fe91d20>]]] if <ast.UnaryOp object at 0x7da18fe926b0> begin[:] call[name[mkdir_p], parameter[name[self].database]]
keyword[def] identifier[init_db] ( identifier[self] ): literal[string] identifier[self] . identifier[session] = keyword[None] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[data_base] ): identifier[mkdir_p] ( identifier[self] . identifier[data_base] ) identifier[self] . identifier[database] = literal[string] %( identifier[self] . identifier[data_base] , identifier[self] . identifier[study_id] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[database] ): identifier[mkdir_p] ( identifier[self] . identifier[database] )
def init_db(self): """init_db for the filesystem ensures that the base folder (named according to the studyid) exists. """ self.session = None if not os.path.exists(self.data_base): mkdir_p(self.data_base) # depends on [control=['if'], data=[]] self.database = '%s/%s' % (self.data_base, self.study_id) if not os.path.exists(self.database): mkdir_p(self.database) # depends on [control=['if'], data=[]]
def Connect(self): '''Connect a device ''' device_path = self.path if device_path not in mockobject.objects: raise dbus.exceptions.DBusException('No such device.', name='org.bluez.Error.NoSuchDevice') device = mockobject.objects[device_path] device.props[AUDIO_IFACE]['State'] = dbus.String("connected", variant_level=1) device.EmitSignal(AUDIO_IFACE, 'PropertyChanged', 'sv', [ 'State', dbus.String("connected", variant_level=1), ]) device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(True, variant_level=1) device.EmitSignal(DEVICE_IFACE, 'PropertyChanged', 'sv', [ 'Connected', dbus.Boolean(True, variant_level=1), ])
def function[Connect, parameter[self]]: constant[Connect a device ] variable[device_path] assign[=] name[self].path if compare[name[device_path] <ast.NotIn object at 0x7da2590d7190> name[mockobject].objects] begin[:] <ast.Raise object at 0x7da20e9b0700> variable[device] assign[=] call[name[mockobject].objects][name[device_path]] call[call[name[device].props][name[AUDIO_IFACE]]][constant[State]] assign[=] call[name[dbus].String, parameter[constant[connected]]] call[name[device].EmitSignal, parameter[name[AUDIO_IFACE], constant[PropertyChanged], constant[sv], list[[<ast.Constant object at 0x7da20e9b2e60>, <ast.Call object at 0x7da20e9b38b0>]]]] call[call[name[device].props][name[DEVICE_IFACE]]][constant[Connected]] assign[=] call[name[dbus].Boolean, parameter[constant[True]]] call[name[device].EmitSignal, parameter[name[DEVICE_IFACE], constant[PropertyChanged], constant[sv], list[[<ast.Constant object at 0x7da20e9b22f0>, <ast.Call object at 0x7da20e9b10f0>]]]]
keyword[def] identifier[Connect] ( identifier[self] ): literal[string] identifier[device_path] = identifier[self] . identifier[path] keyword[if] identifier[device_path] keyword[not] keyword[in] identifier[mockobject] . identifier[objects] : keyword[raise] identifier[dbus] . identifier[exceptions] . identifier[DBusException] ( literal[string] , identifier[name] = literal[string] ) identifier[device] = identifier[mockobject] . identifier[objects] [ identifier[device_path] ] identifier[device] . identifier[props] [ identifier[AUDIO_IFACE] ][ literal[string] ]= identifier[dbus] . identifier[String] ( literal[string] , identifier[variant_level] = literal[int] ) identifier[device] . identifier[EmitSignal] ( identifier[AUDIO_IFACE] , literal[string] , literal[string] ,[ literal[string] , identifier[dbus] . identifier[String] ( literal[string] , identifier[variant_level] = literal[int] ), ]) identifier[device] . identifier[props] [ identifier[DEVICE_IFACE] ][ literal[string] ]= identifier[dbus] . identifier[Boolean] ( keyword[True] , identifier[variant_level] = literal[int] ) identifier[device] . identifier[EmitSignal] ( identifier[DEVICE_IFACE] , literal[string] , literal[string] ,[ literal[string] , identifier[dbus] . identifier[Boolean] ( keyword[True] , identifier[variant_level] = literal[int] ), ])
def Connect(self): """Connect a device """ device_path = self.path if device_path not in mockobject.objects: raise dbus.exceptions.DBusException('No such device.', name='org.bluez.Error.NoSuchDevice') # depends on [control=['if'], data=[]] device = mockobject.objects[device_path] device.props[AUDIO_IFACE]['State'] = dbus.String('connected', variant_level=1) device.EmitSignal(AUDIO_IFACE, 'PropertyChanged', 'sv', ['State', dbus.String('connected', variant_level=1)]) device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(True, variant_level=1) device.EmitSignal(DEVICE_IFACE, 'PropertyChanged', 'sv', ['Connected', dbus.Boolean(True, variant_level=1)])
def get_fragment(self, **kwargs): """ Return a complete fragment. :param gp: :return: """ gen, namespaces, plan = self.get_fragment_generator(**kwargs) graph = ConjunctiveGraph() [graph.bind(prefix, u) for (prefix, u) in namespaces] [graph.add((s, p, o)) for (_, s, p, o) in gen] return graph
def function[get_fragment, parameter[self]]: constant[ Return a complete fragment. :param gp: :return: ] <ast.Tuple object at 0x7da18f00d2d0> assign[=] call[name[self].get_fragment_generator, parameter[]] variable[graph] assign[=] call[name[ConjunctiveGraph], parameter[]] <ast.ListComp object at 0x7da18f00e050> <ast.ListComp object at 0x7da18f00ded0> return[name[graph]]
keyword[def] identifier[get_fragment] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[gen] , identifier[namespaces] , identifier[plan] = identifier[self] . identifier[get_fragment_generator] (** identifier[kwargs] ) identifier[graph] = identifier[ConjunctiveGraph] () [ identifier[graph] . identifier[bind] ( identifier[prefix] , identifier[u] ) keyword[for] ( identifier[prefix] , identifier[u] ) keyword[in] identifier[namespaces] ] [ identifier[graph] . identifier[add] (( identifier[s] , identifier[p] , identifier[o] )) keyword[for] ( identifier[_] , identifier[s] , identifier[p] , identifier[o] ) keyword[in] identifier[gen] ] keyword[return] identifier[graph]
def get_fragment(self, **kwargs): """ Return a complete fragment. :param gp: :return: """ (gen, namespaces, plan) = self.get_fragment_generator(**kwargs) graph = ConjunctiveGraph() [graph.bind(prefix, u) for (prefix, u) in namespaces] [graph.add((s, p, o)) for (_, s, p, o) in gen] return graph
def __get_wbfmt_usrfld(self, data_nt): """Return format for text cell from namedtuple field specified by 'ntfld_wbfmt'""" if self.ntfld_wbfmt is not None: if isinstance(self.ntfld_wbfmt, str): ntval = getattr(data_nt, self.ntfld_wbfmt, None) # Ex: 'section' if ntval is not None: return self.fmtname2wbfmtobj.get(ntval, None)
def function[__get_wbfmt_usrfld, parameter[self, data_nt]]: constant[Return format for text cell from namedtuple field specified by 'ntfld_wbfmt'] if compare[name[self].ntfld_wbfmt is_not constant[None]] begin[:] if call[name[isinstance], parameter[name[self].ntfld_wbfmt, name[str]]] begin[:] variable[ntval] assign[=] call[name[getattr], parameter[name[data_nt], name[self].ntfld_wbfmt, constant[None]]] if compare[name[ntval] is_not constant[None]] begin[:] return[call[name[self].fmtname2wbfmtobj.get, parameter[name[ntval], constant[None]]]]
keyword[def] identifier[__get_wbfmt_usrfld] ( identifier[self] , identifier[data_nt] ): literal[string] keyword[if] identifier[self] . identifier[ntfld_wbfmt] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[isinstance] ( identifier[self] . identifier[ntfld_wbfmt] , identifier[str] ): identifier[ntval] = identifier[getattr] ( identifier[data_nt] , identifier[self] . identifier[ntfld_wbfmt] , keyword[None] ) keyword[if] identifier[ntval] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[fmtname2wbfmtobj] . identifier[get] ( identifier[ntval] , keyword[None] )
def __get_wbfmt_usrfld(self, data_nt): """Return format for text cell from namedtuple field specified by 'ntfld_wbfmt'""" if self.ntfld_wbfmt is not None: if isinstance(self.ntfld_wbfmt, str): ntval = getattr(data_nt, self.ntfld_wbfmt, None) # Ex: 'section' if ntval is not None: return self.fmtname2wbfmtobj.get(ntval, None) # depends on [control=['if'], data=['ntval']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def add(self,keybind,kbname,handler,mod=True): """ Adds a keybind to the internal registry. Keybind names should be of the format ``namespace:category.subcategory.name``\ e.g. ``peng3d:actor.player.controls.forward`` for the forward key combo for the player actor. :param str keybind: Keybind string, as described above :param str kbname: Name of the keybind, may be used to later change the keybinding without re-registering :param function handler: Function or any other callable called with the positional arguments ``(symbol,modifiers,release)`` if the keybind is pressed or released :param int mod: If the keybind should respect modifiers """ keybind = keybind.lower() if mod: if keybind not in self.keybinds: self.keybinds[keybind]=[] self.keybinds[keybind].append(kbname) else: if keybind not in self.keybinds_nm: self.keybinds_nm[keybind]=[] self.keybinds_nm[keybind].append(kbname) self.kbname[kbname]=handler self.peng.sendEvent("peng3d:keybind.add",{"peng":self.peng,"keybind":keybind,"kbname":kbname,"handler":handler,"mod":mod})
def function[add, parameter[self, keybind, kbname, handler, mod]]: constant[ Adds a keybind to the internal registry. Keybind names should be of the format ``namespace:category.subcategory.name``\ e.g. ``peng3d:actor.player.controls.forward`` for the forward key combo for the player actor. :param str keybind: Keybind string, as described above :param str kbname: Name of the keybind, may be used to later change the keybinding without re-registering :param function handler: Function or any other callable called with the positional arguments ``(symbol,modifiers,release)`` if the keybind is pressed or released :param int mod: If the keybind should respect modifiers ] variable[keybind] assign[=] call[name[keybind].lower, parameter[]] if name[mod] begin[:] if compare[name[keybind] <ast.NotIn object at 0x7da2590d7190> name[self].keybinds] begin[:] call[name[self].keybinds][name[keybind]] assign[=] list[[]] call[call[name[self].keybinds][name[keybind]].append, parameter[name[kbname]]] call[name[self].kbname][name[kbname]] assign[=] name[handler] call[name[self].peng.sendEvent, parameter[constant[peng3d:keybind.add], dictionary[[<ast.Constant object at 0x7da1b012cd00>, <ast.Constant object at 0x7da1b012cf40>, <ast.Constant object at 0x7da1b012cbe0>, <ast.Constant object at 0x7da1b012d330>, <ast.Constant object at 0x7da1b012c7f0>], [<ast.Attribute object at 0x7da1b012d5d0>, <ast.Name object at 0x7da1b012c2b0>, <ast.Name object at 0x7da1b012d660>, <ast.Name object at 0x7da1b012d510>, <ast.Name object at 0x7da1b012c6d0>]]]]
keyword[def] identifier[add] ( identifier[self] , identifier[keybind] , identifier[kbname] , identifier[handler] , identifier[mod] = keyword[True] ): literal[string] identifier[keybind] = identifier[keybind] . identifier[lower] () keyword[if] identifier[mod] : keyword[if] identifier[keybind] keyword[not] keyword[in] identifier[self] . identifier[keybinds] : identifier[self] . identifier[keybinds] [ identifier[keybind] ]=[] identifier[self] . identifier[keybinds] [ identifier[keybind] ]. identifier[append] ( identifier[kbname] ) keyword[else] : keyword[if] identifier[keybind] keyword[not] keyword[in] identifier[self] . identifier[keybinds_nm] : identifier[self] . identifier[keybinds_nm] [ identifier[keybind] ]=[] identifier[self] . identifier[keybinds_nm] [ identifier[keybind] ]. identifier[append] ( identifier[kbname] ) identifier[self] . identifier[kbname] [ identifier[kbname] ]= identifier[handler] identifier[self] . identifier[peng] . identifier[sendEvent] ( literal[string] ,{ literal[string] : identifier[self] . identifier[peng] , literal[string] : identifier[keybind] , literal[string] : identifier[kbname] , literal[string] : identifier[handler] , literal[string] : identifier[mod] })
def add(self, keybind, kbname, handler, mod=True): """ Adds a keybind to the internal registry. Keybind names should be of the format ``namespace:category.subcategory.name``\\ e.g. ``peng3d:actor.player.controls.forward`` for the forward key combo for the player actor. :param str keybind: Keybind string, as described above :param str kbname: Name of the keybind, may be used to later change the keybinding without re-registering :param function handler: Function or any other callable called with the positional arguments ``(symbol,modifiers,release)`` if the keybind is pressed or released :param int mod: If the keybind should respect modifiers """ keybind = keybind.lower() if mod: if keybind not in self.keybinds: self.keybinds[keybind] = [] # depends on [control=['if'], data=['keybind']] self.keybinds[keybind].append(kbname) # depends on [control=['if'], data=[]] else: if keybind not in self.keybinds_nm: self.keybinds_nm[keybind] = [] # depends on [control=['if'], data=['keybind']] self.keybinds_nm[keybind].append(kbname) self.kbname[kbname] = handler self.peng.sendEvent('peng3d:keybind.add', {'peng': self.peng, 'keybind': keybind, 'kbname': kbname, 'handler': handler, 'mod': mod})
def _input_as_multiline_string(self, data): """Writes data to tempfile and sets -i parameter data -- list of lines """ if data: self.Parameters['-i']\ .on(super(CD_HIT,self)._input_as_multiline_string(data)) return ''
def function[_input_as_multiline_string, parameter[self, data]]: constant[Writes data to tempfile and sets -i parameter data -- list of lines ] if name[data] begin[:] call[call[name[self].Parameters][constant[-i]].on, parameter[call[call[name[super], parameter[name[CD_HIT], name[self]]]._input_as_multiline_string, parameter[name[data]]]]] return[constant[]]
keyword[def] identifier[_input_as_multiline_string] ( identifier[self] , identifier[data] ): literal[string] keyword[if] identifier[data] : identifier[self] . identifier[Parameters] [ literal[string] ]. identifier[on] ( identifier[super] ( identifier[CD_HIT] , identifier[self] ). identifier[_input_as_multiline_string] ( identifier[data] )) keyword[return] literal[string]
def _input_as_multiline_string(self, data): """Writes data to tempfile and sets -i parameter data -- list of lines """ if data: self.Parameters['-i'].on(super(CD_HIT, self)._input_as_multiline_string(data)) # depends on [control=['if'], data=[]] return ''
async def query_handler(service, action_type, payload, props, **kwds): """ This action handler interprets the payload as a query to be executed by the api gateway service. """ # check that the action type indicates a query if action_type == query_action_type(): print('encountered query event {!r} '.format(payload)) # perform the query result = await parse_string(payload, service.object_resolver, service.connection_resolver, service.mutation_resolver, obey_auth=False ) # the props for the reply message reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {} # publish the success event await service.event_broker.send( payload=result, action_type=change_action_status(action_type, success_status()), **reply_props )
<ast.AsyncFunctionDef object at 0x7da18f00f370>
keyword[async] keyword[def] identifier[query_handler] ( identifier[service] , identifier[action_type] , identifier[payload] , identifier[props] ,** identifier[kwds] ): literal[string] keyword[if] identifier[action_type] == identifier[query_action_type] (): identifier[print] ( literal[string] . identifier[format] ( identifier[payload] )) identifier[result] = keyword[await] identifier[parse_string] ( identifier[payload] , identifier[service] . identifier[object_resolver] , identifier[service] . identifier[connection_resolver] , identifier[service] . identifier[mutation_resolver] , identifier[obey_auth] = keyword[False] ) identifier[reply_props] ={ literal[string] : identifier[props] [ literal[string] ]} keyword[if] literal[string] keyword[in] identifier[props] keyword[else] {} keyword[await] identifier[service] . identifier[event_broker] . identifier[send] ( identifier[payload] = identifier[result] , identifier[action_type] = identifier[change_action_status] ( identifier[action_type] , identifier[success_status] ()), ** identifier[reply_props] )
async def query_handler(service, action_type, payload, props, **kwds): """ This action handler interprets the payload as a query to be executed by the api gateway service. """ # check that the action type indicates a query if action_type == query_action_type(): print('encountered query event {!r} '.format(payload)) # perform the query result = await parse_string(payload, service.object_resolver, service.connection_resolver, service.mutation_resolver, obey_auth=False) # the props for the reply message reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {} # publish the success event await service.event_broker.send(payload=result, action_type=change_action_status(action_type, success_status()), **reply_props) # depends on [control=['if'], data=['action_type']]
def sendline(self, s=''): '''Wraps send(), sending string ``s`` to child process, with os.linesep automatically appended. Returns number of bytes written. ''' n = self.send(s) return n + self.send(self.linesep)
def function[sendline, parameter[self, s]]: constant[Wraps send(), sending string ``s`` to child process, with os.linesep automatically appended. Returns number of bytes written. ] variable[n] assign[=] call[name[self].send, parameter[name[s]]] return[binary_operation[name[n] + call[name[self].send, parameter[name[self].linesep]]]]
keyword[def] identifier[sendline] ( identifier[self] , identifier[s] = literal[string] ): literal[string] identifier[n] = identifier[self] . identifier[send] ( identifier[s] ) keyword[return] identifier[n] + identifier[self] . identifier[send] ( identifier[self] . identifier[linesep] )
def sendline(self, s=''): """Wraps send(), sending string ``s`` to child process, with os.linesep automatically appended. Returns number of bytes written. """ n = self.send(s) return n + self.send(self.linesep)
def byaxis(self): """Return the subspace defined along one or several dimensions. Examples -------- Indexing with integers or slices: >>> space = odl.rn((2, 3, 4)) >>> space.byaxis[0] rn(2) >>> space.byaxis[1:] rn((3, 4)) Lists can be used to stack spaces arbitrarily: >>> space.byaxis[[2, 1, 2]] rn((4, 3, 4)) """ space = self class NpyTensorSpacebyaxis(object): """Helper class for indexing by axis.""" def __getitem__(self, indices): """Return ``self[indices]``.""" try: iter(indices) except TypeError: newshape = space.shape[indices] else: newshape = tuple(space.shape[i] for i in indices) if isinstance(space.weighting, ArrayWeighting): new_array = np.asarray(space.weighting.array[indices]) weighting = NumpyTensorSpaceArrayWeighting( new_array, space.weighting.exponent) else: weighting = space.weighting return type(space)(newshape, space.dtype, weighting=weighting) def __repr__(self): """Return ``repr(self)``.""" return repr(space) + '.byaxis' return NpyTensorSpacebyaxis()
def function[byaxis, parameter[self]]: constant[Return the subspace defined along one or several dimensions. Examples -------- Indexing with integers or slices: >>> space = odl.rn((2, 3, 4)) >>> space.byaxis[0] rn(2) >>> space.byaxis[1:] rn((3, 4)) Lists can be used to stack spaces arbitrarily: >>> space.byaxis[[2, 1, 2]] rn((4, 3, 4)) ] variable[space] assign[=] name[self] class class[NpyTensorSpacebyaxis, parameter[]] begin[:] constant[Helper class for indexing by axis.] def function[__getitem__, parameter[self, indices]]: constant[Return ``self[indices]``.] <ast.Try object at 0x7da18f58d810> if call[name[isinstance], parameter[name[space].weighting, name[ArrayWeighting]]] begin[:] variable[new_array] assign[=] call[name[np].asarray, parameter[call[name[space].weighting.array][name[indices]]]] variable[weighting] assign[=] call[name[NumpyTensorSpaceArrayWeighting], parameter[name[new_array], name[space].weighting.exponent]] return[call[call[name[type], parameter[name[space]]], parameter[name[newshape], name[space].dtype]]] def function[__repr__, parameter[self]]: constant[Return ``repr(self)``.] return[binary_operation[call[name[repr], parameter[name[space]]] + constant[.byaxis]]] return[call[name[NpyTensorSpacebyaxis], parameter[]]]
keyword[def] identifier[byaxis] ( identifier[self] ): literal[string] identifier[space] = identifier[self] keyword[class] identifier[NpyTensorSpacebyaxis] ( identifier[object] ): literal[string] keyword[def] identifier[__getitem__] ( identifier[self] , identifier[indices] ): literal[string] keyword[try] : identifier[iter] ( identifier[indices] ) keyword[except] identifier[TypeError] : identifier[newshape] = identifier[space] . identifier[shape] [ identifier[indices] ] keyword[else] : identifier[newshape] = identifier[tuple] ( identifier[space] . identifier[shape] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[indices] ) keyword[if] identifier[isinstance] ( identifier[space] . identifier[weighting] , identifier[ArrayWeighting] ): identifier[new_array] = identifier[np] . identifier[asarray] ( identifier[space] . identifier[weighting] . identifier[array] [ identifier[indices] ]) identifier[weighting] = identifier[NumpyTensorSpaceArrayWeighting] ( identifier[new_array] , identifier[space] . identifier[weighting] . identifier[exponent] ) keyword[else] : identifier[weighting] = identifier[space] . identifier[weighting] keyword[return] identifier[type] ( identifier[space] )( identifier[newshape] , identifier[space] . identifier[dtype] , identifier[weighting] = identifier[weighting] ) keyword[def] identifier[__repr__] ( identifier[self] ): literal[string] keyword[return] identifier[repr] ( identifier[space] )+ literal[string] keyword[return] identifier[NpyTensorSpacebyaxis] ()
def byaxis(self): """Return the subspace defined along one or several dimensions. Examples -------- Indexing with integers or slices: >>> space = odl.rn((2, 3, 4)) >>> space.byaxis[0] rn(2) >>> space.byaxis[1:] rn((3, 4)) Lists can be used to stack spaces arbitrarily: >>> space.byaxis[[2, 1, 2]] rn((4, 3, 4)) """ space = self class NpyTensorSpacebyaxis(object): """Helper class for indexing by axis.""" def __getitem__(self, indices): """Return ``self[indices]``.""" try: iter(indices) # depends on [control=['try'], data=[]] except TypeError: newshape = space.shape[indices] # depends on [control=['except'], data=[]] else: newshape = tuple((space.shape[i] for i in indices)) if isinstance(space.weighting, ArrayWeighting): new_array = np.asarray(space.weighting.array[indices]) weighting = NumpyTensorSpaceArrayWeighting(new_array, space.weighting.exponent) # depends on [control=['if'], data=[]] else: weighting = space.weighting return type(space)(newshape, space.dtype, weighting=weighting) def __repr__(self): """Return ``repr(self)``.""" return repr(space) + '.byaxis' return NpyTensorSpacebyaxis()
def load_script(zap_helper, **options): """Load a script from a file.""" with zap_error_handler(): if not os.path.isfile(options['file_path']): raise ZAPError('No file found at "{0}", cannot load script.'.format(options['file_path'])) if not _is_valid_script_engine(zap_helper.zap, options['engine']): engines = zap_helper.zap.script.list_engines raise ZAPError('Invalid script engine provided. Valid engines are: {0}'.format(', '.join(engines))) console.debug('Loading script "{0}" from "{1}"'.format(options['name'], options['file_path'])) result = zap_helper.zap.script.load(options['name'], options['script_type'], options['engine'], options['file_path'], scriptdescription=options['description']) if result != 'OK': raise ZAPError('Error loading script: {0}'.format(result)) console.info('Script "{0}" loaded'.format(options['name']))
def function[load_script, parameter[zap_helper]]: constant[Load a script from a file.] with call[name[zap_error_handler], parameter[]] begin[:] if <ast.UnaryOp object at 0x7da1b084f010> begin[:] <ast.Raise object at 0x7da1b084c160> if <ast.UnaryOp object at 0x7da2054a6230> begin[:] variable[engines] assign[=] name[zap_helper].zap.script.list_engines <ast.Raise object at 0x7da2054a5570> call[name[console].debug, parameter[call[constant[Loading script "{0}" from "{1}"].format, parameter[call[name[options]][constant[name]], call[name[options]][constant[file_path]]]]]] variable[result] assign[=] call[name[zap_helper].zap.script.load, parameter[call[name[options]][constant[name]], call[name[options]][constant[script_type]], call[name[options]][constant[engine]], call[name[options]][constant[file_path]]]] if compare[name[result] not_equal[!=] constant[OK]] begin[:] <ast.Raise object at 0x7da2054a4c10> call[name[console].info, parameter[call[constant[Script "{0}" loaded].format, parameter[call[name[options]][constant[name]]]]]]
keyword[def] identifier[load_script] ( identifier[zap_helper] ,** identifier[options] ): literal[string] keyword[with] identifier[zap_error_handler] (): keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[options] [ literal[string] ]): keyword[raise] identifier[ZAPError] ( literal[string] . identifier[format] ( identifier[options] [ literal[string] ])) keyword[if] keyword[not] identifier[_is_valid_script_engine] ( identifier[zap_helper] . identifier[zap] , identifier[options] [ literal[string] ]): identifier[engines] = identifier[zap_helper] . identifier[zap] . identifier[script] . identifier[list_engines] keyword[raise] identifier[ZAPError] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[engines] ))) identifier[console] . identifier[debug] ( literal[string] . identifier[format] ( identifier[options] [ literal[string] ], identifier[options] [ literal[string] ])) identifier[result] = identifier[zap_helper] . identifier[zap] . identifier[script] . identifier[load] ( identifier[options] [ literal[string] ], identifier[options] [ literal[string] ], identifier[options] [ literal[string] ], identifier[options] [ literal[string] ], identifier[scriptdescription] = identifier[options] [ literal[string] ]) keyword[if] identifier[result] != literal[string] : keyword[raise] identifier[ZAPError] ( literal[string] . identifier[format] ( identifier[result] )) identifier[console] . identifier[info] ( literal[string] . identifier[format] ( identifier[options] [ literal[string] ]))
def load_script(zap_helper, **options): """Load a script from a file.""" with zap_error_handler(): if not os.path.isfile(options['file_path']): raise ZAPError('No file found at "{0}", cannot load script.'.format(options['file_path'])) # depends on [control=['if'], data=[]] if not _is_valid_script_engine(zap_helper.zap, options['engine']): engines = zap_helper.zap.script.list_engines raise ZAPError('Invalid script engine provided. Valid engines are: {0}'.format(', '.join(engines))) # depends on [control=['if'], data=[]] console.debug('Loading script "{0}" from "{1}"'.format(options['name'], options['file_path'])) result = zap_helper.zap.script.load(options['name'], options['script_type'], options['engine'], options['file_path'], scriptdescription=options['description']) if result != 'OK': raise ZAPError('Error loading script: {0}'.format(result)) # depends on [control=['if'], data=['result']] # depends on [control=['with'], data=[]] console.info('Script "{0}" loaded'.format(options['name']))
def update_simulation_runtime(self): """ Updates the total simulation duration from the m3 file (Vlat_file) and the time step (ZS_TauR). .. warning:: You need to set the m3 file (Vlat_file) and the time step (ZS_TauR) before runnning this function. Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( Vlat_file='../rapid-io/input/m3_riv.csv', ZS_TauR=3*3600, ) rapid_manager.update_simulation_runtime() """ if not self.Vlat_file or not os.path.exists(self.Vlat_file): log("Need Vlat_file to proceed ...", "ERROR") if self.ZS_TauR <= 0: log("Missing routing time step ...", "ERROR") try: self.ZS_TauR = int(self.ZS_TauR) except ValueError: log("Invalid routing time step: {0} ...".format(self.ZS_TauR), "ERROR") with RAPIDDataset(self.Vlat_file) as m3_nc: self.ZS_TauM = m3_nc.size_time*self.ZS_TauR self.ZS_TauO = m3_nc.size_time*self.ZS_TauR
def function[update_simulation_runtime, parameter[self]]: constant[ Updates the total simulation duration from the m3 file (Vlat_file) and the time step (ZS_TauR). .. warning:: You need to set the m3 file (Vlat_file) and the time step (ZS_TauR) before runnning this function. Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( Vlat_file='../rapid-io/input/m3_riv.csv', ZS_TauR=3*3600, ) rapid_manager.update_simulation_runtime() ] if <ast.BoolOp object at 0x7da204620040> begin[:] call[name[log], parameter[constant[Need Vlat_file to proceed ...], constant[ERROR]]] if compare[name[self].ZS_TauR less_or_equal[<=] constant[0]] begin[:] call[name[log], parameter[constant[Missing routing time step ...], constant[ERROR]]] <ast.Try object at 0x7da2041dbc70> with call[name[RAPIDDataset], parameter[name[self].Vlat_file]] begin[:] name[self].ZS_TauM assign[=] binary_operation[name[m3_nc].size_time * name[self].ZS_TauR] name[self].ZS_TauO assign[=] binary_operation[name[m3_nc].size_time * name[self].ZS_TauR]
keyword[def] identifier[update_simulation_runtime] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[Vlat_file] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[Vlat_file] ): identifier[log] ( literal[string] , literal[string] ) keyword[if] identifier[self] . identifier[ZS_TauR] <= literal[int] : identifier[log] ( literal[string] , literal[string] ) keyword[try] : identifier[self] . identifier[ZS_TauR] = identifier[int] ( identifier[self] . identifier[ZS_TauR] ) keyword[except] identifier[ValueError] : identifier[log] ( literal[string] . identifier[format] ( identifier[self] . identifier[ZS_TauR] ), literal[string] ) keyword[with] identifier[RAPIDDataset] ( identifier[self] . identifier[Vlat_file] ) keyword[as] identifier[m3_nc] : identifier[self] . identifier[ZS_TauM] = identifier[m3_nc] . identifier[size_time] * identifier[self] . identifier[ZS_TauR] identifier[self] . identifier[ZS_TauO] = identifier[m3_nc] . identifier[size_time] * identifier[self] . identifier[ZS_TauR]
def update_simulation_runtime(self): """ Updates the total simulation duration from the m3 file (Vlat_file) and the time step (ZS_TauR). .. warning:: You need to set the m3 file (Vlat_file) and the time step (ZS_TauR) before runnning this function. Example: .. code:: python from RAPIDpy import RAPID rapid_manager = RAPID( Vlat_file='../rapid-io/input/m3_riv.csv', ZS_TauR=3*3600, ) rapid_manager.update_simulation_runtime() """ if not self.Vlat_file or not os.path.exists(self.Vlat_file): log('Need Vlat_file to proceed ...', 'ERROR') # depends on [control=['if'], data=[]] if self.ZS_TauR <= 0: log('Missing routing time step ...', 'ERROR') # depends on [control=['if'], data=[]] try: self.ZS_TauR = int(self.ZS_TauR) # depends on [control=['try'], data=[]] except ValueError: log('Invalid routing time step: {0} ...'.format(self.ZS_TauR), 'ERROR') # depends on [control=['except'], data=[]] with RAPIDDataset(self.Vlat_file) as m3_nc: self.ZS_TauM = m3_nc.size_time * self.ZS_TauR self.ZS_TauO = m3_nc.size_time * self.ZS_TauR # depends on [control=['with'], data=['m3_nc']]
def truth(message, expected=None): """ Convenience decorator that applies [`Check`](#check) to a callable. ```python from good import truth @truth(u'Must be an existing directory') def isDir(v): return os.path.isdir(v) ``` :param message: Validation error message :type message: unicode :param expected: Expected value string representation, or `None` to get it from the wrapped callable :type expected: None|str|unicode :return: decorator :rtype: callable """ def decorator(func): return update_wrapper(Check(func, message, expected), func) return decorator
def function[truth, parameter[message, expected]]: constant[ Convenience decorator that applies [`Check`](#check) to a callable. ```python from good import truth @truth(u'Must be an existing directory') def isDir(v): return os.path.isdir(v) ``` :param message: Validation error message :type message: unicode :param expected: Expected value string representation, or `None` to get it from the wrapped callable :type expected: None|str|unicode :return: decorator :rtype: callable ] def function[decorator, parameter[func]]: return[call[name[update_wrapper], parameter[call[name[Check], parameter[name[func], name[message], name[expected]]], name[func]]]] return[name[decorator]]
keyword[def] identifier[truth] ( identifier[message] , identifier[expected] = keyword[None] ): literal[string] keyword[def] identifier[decorator] ( identifier[func] ): keyword[return] identifier[update_wrapper] ( identifier[Check] ( identifier[func] , identifier[message] , identifier[expected] ), identifier[func] ) keyword[return] identifier[decorator]
def truth(message, expected=None): """ Convenience decorator that applies [`Check`](#check) to a callable. ```python from good import truth @truth(u'Must be an existing directory') def isDir(v): return os.path.isdir(v) ``` :param message: Validation error message :type message: unicode :param expected: Expected value string representation, or `None` to get it from the wrapped callable :type expected: None|str|unicode :return: decorator :rtype: callable """ def decorator(func): return update_wrapper(Check(func, message, expected), func) return decorator
def _kill(self, kill_sig): """Send a signal to the current process.""" if self.pid: os.kill(self.pid, kill_sig)
def function[_kill, parameter[self, kill_sig]]: constant[Send a signal to the current process.] if name[self].pid begin[:] call[name[os].kill, parameter[name[self].pid, name[kill_sig]]]
keyword[def] identifier[_kill] ( identifier[self] , identifier[kill_sig] ): literal[string] keyword[if] identifier[self] . identifier[pid] : identifier[os] . identifier[kill] ( identifier[self] . identifier[pid] , identifier[kill_sig] )
def _kill(self, kill_sig): """Send a signal to the current process.""" if self.pid: os.kill(self.pid, kill_sig) # depends on [control=['if'], data=[]]
def url(self): '''Current or base URL. Can be redefined via keyword argument on initialization. Returns `iktomi.web.URL object. `''' return URL.from_url(self.request.url, show_host=self.show_host)
def function[url, parameter[self]]: constant[Current or base URL. Can be redefined via keyword argument on initialization. Returns `iktomi.web.URL object. `] return[call[name[URL].from_url, parameter[name[self].request.url]]]
keyword[def] identifier[url] ( identifier[self] ): literal[string] keyword[return] identifier[URL] . identifier[from_url] ( identifier[self] . identifier[request] . identifier[url] , identifier[show_host] = identifier[self] . identifier[show_host] )
def url(self): """Current or base URL. Can be redefined via keyword argument on initialization. Returns `iktomi.web.URL object. `""" return URL.from_url(self.request.url, show_host=self.show_host)
def find_missing_projections(label_list, projections): """ Finds all combinations of labels in `label_list` that are not covered by an entry in the dictionary of `projections`. Returns a list containing tuples of uncovered label combinations or en empty list if there are none. All uncovered label combinations are naturally sorted. Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key) to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` is is not required to specify a projection for every single combination of labels. Args: label_list (audiomate.annotations.LabelList): The label list to relabel projections (dict): A dictionary that maps tuples of label combinations to string labels. Returns: List: List of combinations of labels that are not covered by any projection Example: >>> ll = annotations.LabelList(labels=[ ... annotations.Label('b', 3.2, 4.5), ... annotations.Label('a', 4.0, 4.9), ... annotations.Label('c', 4.2, 5.1) ... ]) >>> find_missing_projections(ll, {('b',): 'new_label'}) [('a', 'b'), ('a', 'b', 'c'), ('a', 'c'), ('c',)] """ unmapped_combinations = set() if WILDCARD_COMBINATION in projections: return [] for labeled_segment in label_list.ranges(): combination = tuple(sorted([label.value for label in labeled_segment[2]])) if combination not in projections: unmapped_combinations.add(combination) return sorted(unmapped_combinations)
def function[find_missing_projections, parameter[label_list, projections]]: constant[ Finds all combinations of labels in `label_list` that are not covered by an entry in the dictionary of `projections`. Returns a list containing tuples of uncovered label combinations or en empty list if there are none. All uncovered label combinations are naturally sorted. Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key) to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` is is not required to specify a projection for every single combination of labels. Args: label_list (audiomate.annotations.LabelList): The label list to relabel projections (dict): A dictionary that maps tuples of label combinations to string labels. Returns: List: List of combinations of labels that are not covered by any projection Example: >>> ll = annotations.LabelList(labels=[ ... annotations.Label('b', 3.2, 4.5), ... annotations.Label('a', 4.0, 4.9), ... annotations.Label('c', 4.2, 5.1) ... ]) >>> find_missing_projections(ll, {('b',): 'new_label'}) [('a', 'b'), ('a', 'b', 'c'), ('a', 'c'), ('c',)] ] variable[unmapped_combinations] assign[=] call[name[set], parameter[]] if compare[name[WILDCARD_COMBINATION] in name[projections]] begin[:] return[list[[]]] for taget[name[labeled_segment]] in starred[call[name[label_list].ranges, parameter[]]] begin[:] variable[combination] assign[=] call[name[tuple], parameter[call[name[sorted], parameter[<ast.ListComp object at 0x7da1b0b810f0>]]]] if compare[name[combination] <ast.NotIn object at 0x7da2590d7190> name[projections]] begin[:] call[name[unmapped_combinations].add, parameter[name[combination]]] return[call[name[sorted], parameter[name[unmapped_combinations]]]]
keyword[def] identifier[find_missing_projections] ( identifier[label_list] , identifier[projections] ): literal[string] identifier[unmapped_combinations] = identifier[set] () keyword[if] identifier[WILDCARD_COMBINATION] keyword[in] identifier[projections] : keyword[return] [] keyword[for] identifier[labeled_segment] keyword[in] identifier[label_list] . identifier[ranges] (): identifier[combination] = identifier[tuple] ( identifier[sorted] ([ identifier[label] . identifier[value] keyword[for] identifier[label] keyword[in] identifier[labeled_segment] [ literal[int] ]])) keyword[if] identifier[combination] keyword[not] keyword[in] identifier[projections] : identifier[unmapped_combinations] . identifier[add] ( identifier[combination] ) keyword[return] identifier[sorted] ( identifier[unmapped_combinations] )
def find_missing_projections(label_list, projections): """ Finds all combinations of labels in `label_list` that are not covered by an entry in the dictionary of `projections`. Returns a list containing tuples of uncovered label combinations or en empty list if there are none. All uncovered label combinations are naturally sorted. Each entry in the dictionary of projections represents a single projection that maps a combination of labels (key) to a single new label (value). The combination of labels to be mapped is a tuple of naturally sorted labels that apply to one or more segments simultaneously. By defining a special wildcard projection using `('**',)` is is not required to specify a projection for every single combination of labels. Args: label_list (audiomate.annotations.LabelList): The label list to relabel projections (dict): A dictionary that maps tuples of label combinations to string labels. Returns: List: List of combinations of labels that are not covered by any projection Example: >>> ll = annotations.LabelList(labels=[ ... annotations.Label('b', 3.2, 4.5), ... annotations.Label('a', 4.0, 4.9), ... annotations.Label('c', 4.2, 5.1) ... ]) >>> find_missing_projections(ll, {('b',): 'new_label'}) [('a', 'b'), ('a', 'b', 'c'), ('a', 'c'), ('c',)] """ unmapped_combinations = set() if WILDCARD_COMBINATION in projections: return [] # depends on [control=['if'], data=[]] for labeled_segment in label_list.ranges(): combination = tuple(sorted([label.value for label in labeled_segment[2]])) if combination not in projections: unmapped_combinations.add(combination) # depends on [control=['if'], data=['combination']] # depends on [control=['for'], data=['labeled_segment']] return sorted(unmapped_combinations)
def logs(self, container, tail='all', follow=False, timestamps=False): """ :param container: 'web', 'solr' or 'postgres' :param tail: number of lines to show :param follow: True to return generator instead of list :param timestamps: True to include timestamps """ return container_logs( self._get_container_name(container), tail, follow, timestamps)
def function[logs, parameter[self, container, tail, follow, timestamps]]: constant[ :param container: 'web', 'solr' or 'postgres' :param tail: number of lines to show :param follow: True to return generator instead of list :param timestamps: True to include timestamps ] return[call[name[container_logs], parameter[call[name[self]._get_container_name, parameter[name[container]]], name[tail], name[follow], name[timestamps]]]]
keyword[def] identifier[logs] ( identifier[self] , identifier[container] , identifier[tail] = literal[string] , identifier[follow] = keyword[False] , identifier[timestamps] = keyword[False] ): literal[string] keyword[return] identifier[container_logs] ( identifier[self] . identifier[_get_container_name] ( identifier[container] ), identifier[tail] , identifier[follow] , identifier[timestamps] )
def logs(self, container, tail='all', follow=False, timestamps=False): """ :param container: 'web', 'solr' or 'postgres' :param tail: number of lines to show :param follow: True to return generator instead of list :param timestamps: True to include timestamps """ return container_logs(self._get_container_name(container), tail, follow, timestamps)
def open(cls, blob, username, password): """Creates a vault from a blob object""" return cls(blob, blob.encryption_key(username, password))
def function[open, parameter[cls, blob, username, password]]: constant[Creates a vault from a blob object] return[call[name[cls], parameter[name[blob], call[name[blob].encryption_key, parameter[name[username], name[password]]]]]]
keyword[def] identifier[open] ( identifier[cls] , identifier[blob] , identifier[username] , identifier[password] ): literal[string] keyword[return] identifier[cls] ( identifier[blob] , identifier[blob] . identifier[encryption_key] ( identifier[username] , identifier[password] ))
def open(cls, blob, username, password): """Creates a vault from a blob object""" return cls(blob, blob.encryption_key(username, password))
def kill_process_on_host( hostname, pattern ): """ Kill the process matching pattern at ip :param hostname: the hostname or ip address of the host on which the process will be killed :param pattern: a regular expression matching the name of the process to kill """ status, stdout = run_command_on_agent(hostname, "ps aux | grep -v grep | grep '{}'".format(pattern)) pids = [p.strip().split()[1] for p in stdout.splitlines()] for pid in pids: status, stdout = run_command_on_agent(hostname, "sudo kill -9 {}".format(pid)) if status: print("Killed pid: {}".format(pid)) else: print("Unable to killed pid: {}".format(pid))
def function[kill_process_on_host, parameter[hostname, pattern]]: constant[ Kill the process matching pattern at ip :param hostname: the hostname or ip address of the host on which the process will be killed :param pattern: a regular expression matching the name of the process to kill ] <ast.Tuple object at 0x7da1b1a7da80> assign[=] call[name[run_command_on_agent], parameter[name[hostname], call[constant[ps aux | grep -v grep | grep '{}'].format, parameter[name[pattern]]]]] variable[pids] assign[=] <ast.ListComp object at 0x7da1b1a7d3f0> for taget[name[pid]] in starred[name[pids]] begin[:] <ast.Tuple object at 0x7da1b1a7f580> assign[=] call[name[run_command_on_agent], parameter[name[hostname], call[constant[sudo kill -9 {}].format, parameter[name[pid]]]]] if name[status] begin[:] call[name[print], parameter[call[constant[Killed pid: {}].format, parameter[name[pid]]]]]
keyword[def] identifier[kill_process_on_host] ( identifier[hostname] , identifier[pattern] ): literal[string] identifier[status] , identifier[stdout] = identifier[run_command_on_agent] ( identifier[hostname] , literal[string] . identifier[format] ( identifier[pattern] )) identifier[pids] =[ identifier[p] . identifier[strip] (). identifier[split] ()[ literal[int] ] keyword[for] identifier[p] keyword[in] identifier[stdout] . identifier[splitlines] ()] keyword[for] identifier[pid] keyword[in] identifier[pids] : identifier[status] , identifier[stdout] = identifier[run_command_on_agent] ( identifier[hostname] , literal[string] . identifier[format] ( identifier[pid] )) keyword[if] identifier[status] : identifier[print] ( literal[string] . identifier[format] ( identifier[pid] )) keyword[else] : identifier[print] ( literal[string] . identifier[format] ( identifier[pid] ))
def kill_process_on_host(hostname, pattern): """ Kill the process matching pattern at ip :param hostname: the hostname or ip address of the host on which the process will be killed :param pattern: a regular expression matching the name of the process to kill """ (status, stdout) = run_command_on_agent(hostname, "ps aux | grep -v grep | grep '{}'".format(pattern)) pids = [p.strip().split()[1] for p in stdout.splitlines()] for pid in pids: (status, stdout) = run_command_on_agent(hostname, 'sudo kill -9 {}'.format(pid)) if status: print('Killed pid: {}'.format(pid)) # depends on [control=['if'], data=[]] else: print('Unable to killed pid: {}'.format(pid)) # depends on [control=['for'], data=['pid']]
def pack(o, default=encode, encoding='utf-8', unicode_errors='strict', use_single_float=False, autoreset=1, use_bin_type=1): """ Pack an object and return the packed bytes. """ return Packer(default=default, encoding=encoding, unicode_errors=unicode_errors, use_single_float=use_single_float, autoreset=autoreset, use_bin_type=use_bin_type).pack(o)
def function[pack, parameter[o, default, encoding, unicode_errors, use_single_float, autoreset, use_bin_type]]: constant[ Pack an object and return the packed bytes. ] return[call[call[name[Packer], parameter[]].pack, parameter[name[o]]]]
keyword[def] identifier[pack] ( identifier[o] , identifier[default] = identifier[encode] , identifier[encoding] = literal[string] , identifier[unicode_errors] = literal[string] , identifier[use_single_float] = keyword[False] , identifier[autoreset] = literal[int] , identifier[use_bin_type] = literal[int] ): literal[string] keyword[return] identifier[Packer] ( identifier[default] = identifier[default] , identifier[encoding] = identifier[encoding] , identifier[unicode_errors] = identifier[unicode_errors] , identifier[use_single_float] = identifier[use_single_float] , identifier[autoreset] = identifier[autoreset] , identifier[use_bin_type] = identifier[use_bin_type] ). identifier[pack] ( identifier[o] )
def pack(o, default=encode, encoding='utf-8', unicode_errors='strict', use_single_float=False, autoreset=1, use_bin_type=1): """ Pack an object and return the packed bytes. """ return Packer(default=default, encoding=encoding, unicode_errors=unicode_errors, use_single_float=use_single_float, autoreset=autoreset, use_bin_type=use_bin_type).pack(o)