code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def _rndPointDisposition(dx, dy): """Return random disposition point.""" x = int(random.uniform(-dx, dx)) y = int(random.uniform(-dy, dy)) return (x, y)
def function[_rndPointDisposition, parameter[dx, dy]]: constant[Return random disposition point.] variable[x] assign[=] call[name[int], parameter[call[name[random].uniform, parameter[<ast.UnaryOp object at 0x7da1b1b84d60>, name[dx]]]]] variable[y] assign[=] call[name[int], parameter[call[name[random].uniform, parameter[<ast.UnaryOp object at 0x7da1b1b86170>, name[dy]]]]] return[tuple[[<ast.Name object at 0x7da1b1b85090>, <ast.Name object at 0x7da1b1b87520>]]]
keyword[def] identifier[_rndPointDisposition] ( identifier[dx] , identifier[dy] ): literal[string] identifier[x] = identifier[int] ( identifier[random] . identifier[uniform] (- identifier[dx] , identifier[dx] )) identifier[y] = identifier[int] ( identifier[random] . identifier[uniform] (- identifier[dy] , identifier[dy] )) keyword[return] ( identifier[x] , identifier[y] )
def _rndPointDisposition(dx, dy): """Return random disposition point.""" x = int(random.uniform(-dx, dx)) y = int(random.uniform(-dy, dy)) return (x, y)
def setGameScore(self, user_id, score, game_message_identifier, force=None, disable_edit_message=None): """ See: https://core.telegram.org/bots/api#setgamescore :param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText` """ p = _strip(locals(), more=['game_message_identifier']) p.update(_dismantle_message_identifier(game_message_identifier)) return self._api_request('setGameScore', _rectify(p))
def function[setGameScore, parameter[self, user_id, score, game_message_identifier, force, disable_edit_message]]: constant[ See: https://core.telegram.org/bots/api#setgamescore :param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText` ] variable[p] assign[=] call[name[_strip], parameter[call[name[locals], parameter[]]]] call[name[p].update, parameter[call[name[_dismantle_message_identifier], parameter[name[game_message_identifier]]]]] return[call[name[self]._api_request, parameter[constant[setGameScore], call[name[_rectify], parameter[name[p]]]]]]
keyword[def] identifier[setGameScore] ( identifier[self] , identifier[user_id] , identifier[score] , identifier[game_message_identifier] , identifier[force] = keyword[None] , identifier[disable_edit_message] = keyword[None] ): literal[string] identifier[p] = identifier[_strip] ( identifier[locals] (), identifier[more] =[ literal[string] ]) identifier[p] . identifier[update] ( identifier[_dismantle_message_identifier] ( identifier[game_message_identifier] )) keyword[return] identifier[self] . identifier[_api_request] ( literal[string] , identifier[_rectify] ( identifier[p] ))
def setGameScore(self, user_id, score, game_message_identifier, force=None, disable_edit_message=None): """ See: https://core.telegram.org/bots/api#setgamescore :param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText` """ p = _strip(locals(), more=['game_message_identifier']) p.update(_dismantle_message_identifier(game_message_identifier)) return self._api_request('setGameScore', _rectify(p))
def main(): """Entry point. - Execute notebook - Save output to either file or display it in stderr - Display errors during the run if they exist """ parser = argparse.ArgumentParser() parser.add_argument('-i', dest='path_to_notebook_json', required=True, help='Zeppelin notebook input file (.json)') parser.add_argument('-o', dest='output_path', default=sys.stdout, help='Path to save rendered output file (.json) (optional)') parser.add_argument('-u', dest='zeppelin_url', default='localhost:8890', help='Zeppelin URL (optional)') args = parser.parse_args() with open(args.path_to_notebook_json, 'rb') as notebook: try: t = json.load(notebook) notebook_name = os.path.basename(args.path_to_notebook_json) if args.output_path is sys.stdout: args.output_path = '' elif not os.path.isdir(args.output_path): raise ValueError('Output path given is not valid directory.') output_path = os.path.join(args.output_path, '') notebook_executor = NotebookExecutor(notebook_name, output_path, args.zeppelin_url) notebook_executor.execute_notebook(t) except ValueError as err: print(err) sys.exit(1)
def function[main, parameter[]]: constant[Entry point. - Execute notebook - Save output to either file or display it in stderr - Display errors during the run if they exist ] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[parser].add_argument, parameter[constant[-i]]] call[name[parser].add_argument, parameter[constant[-o]]] call[name[parser].add_argument, parameter[constant[-u]]] variable[args] assign[=] call[name[parser].parse_args, parameter[]] with call[name[open], parameter[name[args].path_to_notebook_json, constant[rb]]] begin[:] <ast.Try object at 0x7da18f720310>
keyword[def] identifier[main] (): literal[string] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] () identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = identifier[sys] . identifier[stdout] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[dest] = literal[string] , identifier[default] = literal[string] , identifier[help] = literal[string] ) identifier[args] = identifier[parser] . identifier[parse_args] () keyword[with] identifier[open] ( identifier[args] . identifier[path_to_notebook_json] , literal[string] ) keyword[as] identifier[notebook] : keyword[try] : identifier[t] = identifier[json] . identifier[load] ( identifier[notebook] ) identifier[notebook_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[args] . identifier[path_to_notebook_json] ) keyword[if] identifier[args] . identifier[output_path] keyword[is] identifier[sys] . identifier[stdout] : identifier[args] . identifier[output_path] = literal[string] keyword[elif] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[args] . identifier[output_path] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[output_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[args] . identifier[output_path] , literal[string] ) identifier[notebook_executor] = identifier[NotebookExecutor] ( identifier[notebook_name] , identifier[output_path] , identifier[args] . identifier[zeppelin_url] ) identifier[notebook_executor] . identifier[execute_notebook] ( identifier[t] ) keyword[except] identifier[ValueError] keyword[as] identifier[err] : identifier[print] ( identifier[err] ) identifier[sys] . 
identifier[exit] ( literal[int] )
def main(): """Entry point. - Execute notebook - Save output to either file or display it in stderr - Display errors during the run if they exist """ parser = argparse.ArgumentParser() parser.add_argument('-i', dest='path_to_notebook_json', required=True, help='Zeppelin notebook input file (.json)') parser.add_argument('-o', dest='output_path', default=sys.stdout, help='Path to save rendered output file (.json) (optional)') parser.add_argument('-u', dest='zeppelin_url', default='localhost:8890', help='Zeppelin URL (optional)') args = parser.parse_args() with open(args.path_to_notebook_json, 'rb') as notebook: try: t = json.load(notebook) notebook_name = os.path.basename(args.path_to_notebook_json) if args.output_path is sys.stdout: args.output_path = '' # depends on [control=['if'], data=[]] elif not os.path.isdir(args.output_path): raise ValueError('Output path given is not valid directory.') # depends on [control=['if'], data=[]] output_path = os.path.join(args.output_path, '') notebook_executor = NotebookExecutor(notebook_name, output_path, args.zeppelin_url) notebook_executor.execute_notebook(t) # depends on [control=['try'], data=[]] except ValueError as err: print(err) sys.exit(1) # depends on [control=['except'], data=['err']] # depends on [control=['with'], data=['notebook']]
def cmd_dhcp_discover(iface, timeout, verbose): """Send a DHCP request and show what devices has replied. Note: Using '-v' you can see all the options (like DNS servers) included on the responses. \b # habu.dhcp_discover Ether / IP / UDP 192.168.0.1:bootps > 192.168.0.5:bootpc / BOOTP / DHCP """ conf.verb = False if iface: conf.iface = iface conf.checkIPaddr = False hw = get_if_raw_hwaddr(conf.iface) ether = Ether(dst="ff:ff:ff:ff:ff:ff") ip = IP(src="0.0.0.0",dst="255.255.255.255") udp = UDP(sport=68,dport=67) bootp = BOOTP(chaddr=hw) dhcp = DHCP(options=[("message-type","discover"),"end"]) dhcp_discover = ether / ip / udp / bootp / dhcp ans, unans = srp(dhcp_discover, multi=True, timeout=5) # Press CTRL-C after several seconds for _, pkt in ans: if verbose: print(pkt.show()) else: print(pkt.summary())
def function[cmd_dhcp_discover, parameter[iface, timeout, verbose]]: constant[Send a DHCP request and show what devices has replied. Note: Using '-v' you can see all the options (like DNS servers) included on the responses.  # habu.dhcp_discover Ether / IP / UDP 192.168.0.1:bootps > 192.168.0.5:bootpc / BOOTP / DHCP ] name[conf].verb assign[=] constant[False] if name[iface] begin[:] name[conf].iface assign[=] name[iface] name[conf].checkIPaddr assign[=] constant[False] variable[hw] assign[=] call[name[get_if_raw_hwaddr], parameter[name[conf].iface]] variable[ether] assign[=] call[name[Ether], parameter[]] variable[ip] assign[=] call[name[IP], parameter[]] variable[udp] assign[=] call[name[UDP], parameter[]] variable[bootp] assign[=] call[name[BOOTP], parameter[]] variable[dhcp] assign[=] call[name[DHCP], parameter[]] variable[dhcp_discover] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[ether] / name[ip]] / name[udp]] / name[bootp]] / name[dhcp]] <ast.Tuple object at 0x7da2044c15d0> assign[=] call[name[srp], parameter[name[dhcp_discover]]] for taget[tuple[[<ast.Name object at 0x7da2044c3d30>, <ast.Name object at 0x7da2044c24d0>]]] in starred[name[ans]] begin[:] if name[verbose] begin[:] call[name[print], parameter[call[name[pkt].show, parameter[]]]]
keyword[def] identifier[cmd_dhcp_discover] ( identifier[iface] , identifier[timeout] , identifier[verbose] ): literal[string] identifier[conf] . identifier[verb] = keyword[False] keyword[if] identifier[iface] : identifier[conf] . identifier[iface] = identifier[iface] identifier[conf] . identifier[checkIPaddr] = keyword[False] identifier[hw] = identifier[get_if_raw_hwaddr] ( identifier[conf] . identifier[iface] ) identifier[ether] = identifier[Ether] ( identifier[dst] = literal[string] ) identifier[ip] = identifier[IP] ( identifier[src] = literal[string] , identifier[dst] = literal[string] ) identifier[udp] = identifier[UDP] ( identifier[sport] = literal[int] , identifier[dport] = literal[int] ) identifier[bootp] = identifier[BOOTP] ( identifier[chaddr] = identifier[hw] ) identifier[dhcp] = identifier[DHCP] ( identifier[options] =[( literal[string] , literal[string] ), literal[string] ]) identifier[dhcp_discover] = identifier[ether] / identifier[ip] / identifier[udp] / identifier[bootp] / identifier[dhcp] identifier[ans] , identifier[unans] = identifier[srp] ( identifier[dhcp_discover] , identifier[multi] = keyword[True] , identifier[timeout] = literal[int] ) keyword[for] identifier[_] , identifier[pkt] keyword[in] identifier[ans] : keyword[if] identifier[verbose] : identifier[print] ( identifier[pkt] . identifier[show] ()) keyword[else] : identifier[print] ( identifier[pkt] . identifier[summary] ())
def cmd_dhcp_discover(iface, timeout, verbose): """Send a DHCP request and show what devices has replied. Note: Using '-v' you can see all the options (like DNS servers) included on the responses. \x08 # habu.dhcp_discover Ether / IP / UDP 192.168.0.1:bootps > 192.168.0.5:bootpc / BOOTP / DHCP """ conf.verb = False if iface: conf.iface = iface # depends on [control=['if'], data=[]] conf.checkIPaddr = False hw = get_if_raw_hwaddr(conf.iface) ether = Ether(dst='ff:ff:ff:ff:ff:ff') ip = IP(src='0.0.0.0', dst='255.255.255.255') udp = UDP(sport=68, dport=67) bootp = BOOTP(chaddr=hw) dhcp = DHCP(options=[('message-type', 'discover'), 'end']) dhcp_discover = ether / ip / udp / bootp / dhcp (ans, unans) = srp(dhcp_discover, multi=True, timeout=5) # Press CTRL-C after several seconds for (_, pkt) in ans: if verbose: print(pkt.show()) # depends on [control=['if'], data=[]] else: print(pkt.summary()) # depends on [control=['for'], data=[]]
def get_user(prompt=None): """ Prompts the user for his login name, defaulting to the USER environment variable. Returns a string containing the username. May throw an exception if EOF is given by the user. :type prompt: str|None :param prompt: The user prompt or the default one if None. :rtype: string :return: A username. """ # Read username and password. try: env_user = getpass.getuser() except KeyError: env_user = '' if prompt is None: prompt = "Please enter your user name" if env_user is None or env_user == '': user = input('%s: ' % prompt) else: user = input('%s [%s]: ' % (prompt, env_user)) if user == '': user = env_user return user
def function[get_user, parameter[prompt]]: constant[ Prompts the user for his login name, defaulting to the USER environment variable. Returns a string containing the username. May throw an exception if EOF is given by the user. :type prompt: str|None :param prompt: The user prompt or the default one if None. :rtype: string :return: A username. ] <ast.Try object at 0x7da1b07af2b0> if compare[name[prompt] is constant[None]] begin[:] variable[prompt] assign[=] constant[Please enter your user name] if <ast.BoolOp object at 0x7da1b06a3520> begin[:] variable[user] assign[=] call[name[input], parameter[binary_operation[constant[%s: ] <ast.Mod object at 0x7da2590d6920> name[prompt]]]] return[name[user]]
keyword[def] identifier[get_user] ( identifier[prompt] = keyword[None] ): literal[string] keyword[try] : identifier[env_user] = identifier[getpass] . identifier[getuser] () keyword[except] identifier[KeyError] : identifier[env_user] = literal[string] keyword[if] identifier[prompt] keyword[is] keyword[None] : identifier[prompt] = literal[string] keyword[if] identifier[env_user] keyword[is] keyword[None] keyword[or] identifier[env_user] == literal[string] : identifier[user] = identifier[input] ( literal[string] % identifier[prompt] ) keyword[else] : identifier[user] = identifier[input] ( literal[string] %( identifier[prompt] , identifier[env_user] )) keyword[if] identifier[user] == literal[string] : identifier[user] = identifier[env_user] keyword[return] identifier[user]
def get_user(prompt=None): """ Prompts the user for his login name, defaulting to the USER environment variable. Returns a string containing the username. May throw an exception if EOF is given by the user. :type prompt: str|None :param prompt: The user prompt or the default one if None. :rtype: string :return: A username. """ # Read username and password. try: env_user = getpass.getuser() # depends on [control=['try'], data=[]] except KeyError: env_user = '' # depends on [control=['except'], data=[]] if prompt is None: prompt = 'Please enter your user name' # depends on [control=['if'], data=['prompt']] if env_user is None or env_user == '': user = input('%s: ' % prompt) # depends on [control=['if'], data=[]] else: user = input('%s [%s]: ' % (prompt, env_user)) if user == '': user = env_user # depends on [control=['if'], data=['user']] return user
def str_to_datetime(ts): """Format a string to a datetime object. This functions supports several date formats like YYYY-MM-DD, MM-DD-YYYY and YY-MM-DD. When the given data is None or an empty string, the function returns None. :param ts: string to convert :returns: a datetime object :raises IvalidDateError: when the given string cannot be converted into a valid date """ if not ts: return None try: return dateutil.parser.parse(ts).replace(tzinfo=None) except Exception: raise InvalidDateError(date=str(ts))
def function[str_to_datetime, parameter[ts]]: constant[Format a string to a datetime object. This functions supports several date formats like YYYY-MM-DD, MM-DD-YYYY and YY-MM-DD. When the given data is None or an empty string, the function returns None. :param ts: string to convert :returns: a datetime object :raises IvalidDateError: when the given string cannot be converted into a valid date ] if <ast.UnaryOp object at 0x7da1b0e14640> begin[:] return[constant[None]] <ast.Try object at 0x7da1b0e155d0>
keyword[def] identifier[str_to_datetime] ( identifier[ts] ): literal[string] keyword[if] keyword[not] identifier[ts] : keyword[return] keyword[None] keyword[try] : keyword[return] identifier[dateutil] . identifier[parser] . identifier[parse] ( identifier[ts] ). identifier[replace] ( identifier[tzinfo] = keyword[None] ) keyword[except] identifier[Exception] : keyword[raise] identifier[InvalidDateError] ( identifier[date] = identifier[str] ( identifier[ts] ))
def str_to_datetime(ts): """Format a string to a datetime object. This functions supports several date formats like YYYY-MM-DD, MM-DD-YYYY and YY-MM-DD. When the given data is None or an empty string, the function returns None. :param ts: string to convert :returns: a datetime object :raises IvalidDateError: when the given string cannot be converted into a valid date """ if not ts: return None # depends on [control=['if'], data=[]] try: return dateutil.parser.parse(ts).replace(tzinfo=None) # depends on [control=['try'], data=[]] except Exception: raise InvalidDateError(date=str(ts)) # depends on [control=['except'], data=[]]
def fetch_speeches(data_dir, range_start, range_end): """ :param data_dir: (str) directory in which the output file will be saved :param range_start: (str) date in the format dd/mm/yyyy :param range_end: (str) date in the format dd/mm/yyyy """ speeches = SpeechesDataset() df = speeches.fetch(range_start, range_end) save_to_csv(df, data_dir, "speeches") return df
def function[fetch_speeches, parameter[data_dir, range_start, range_end]]: constant[ :param data_dir: (str) directory in which the output file will be saved :param range_start: (str) date in the format dd/mm/yyyy :param range_end: (str) date in the format dd/mm/yyyy ] variable[speeches] assign[=] call[name[SpeechesDataset], parameter[]] variable[df] assign[=] call[name[speeches].fetch, parameter[name[range_start], name[range_end]]] call[name[save_to_csv], parameter[name[df], name[data_dir], constant[speeches]]] return[name[df]]
keyword[def] identifier[fetch_speeches] ( identifier[data_dir] , identifier[range_start] , identifier[range_end] ): literal[string] identifier[speeches] = identifier[SpeechesDataset] () identifier[df] = identifier[speeches] . identifier[fetch] ( identifier[range_start] , identifier[range_end] ) identifier[save_to_csv] ( identifier[df] , identifier[data_dir] , literal[string] ) keyword[return] identifier[df]
def fetch_speeches(data_dir, range_start, range_end): """ :param data_dir: (str) directory in which the output file will be saved :param range_start: (str) date in the format dd/mm/yyyy :param range_end: (str) date in the format dd/mm/yyyy """ speeches = SpeechesDataset() df = speeches.fetch(range_start, range_end) save_to_csv(df, data_dir, 'speeches') return df
def default_error_handler(exception): """ Default error handler Will display an error page with the corresponding error code from template directory, for example, a not found will load a 404.html etc. Will first look in userland app templates and if not found, fallback to boiler templates to display a default page. :param exception: Exception :return: string """ http_exception = isinstance(exception, exceptions.HTTPException) code = exception.code if http_exception else 500 # log exceptions only (app debug should be off) if code == 500: current_app.logger.error(exception) # jsonify error if json requested via accept header if has_app_context() and has_request_context(): headers = request.headers if 'Accept' in headers and headers['Accept'] == 'application/json': return json_error_handler(exception) # otherwise render template return template_error_handler(exception)
def function[default_error_handler, parameter[exception]]: constant[ Default error handler Will display an error page with the corresponding error code from template directory, for example, a not found will load a 404.html etc. Will first look in userland app templates and if not found, fallback to boiler templates to display a default page. :param exception: Exception :return: string ] variable[http_exception] assign[=] call[name[isinstance], parameter[name[exception], name[exceptions].HTTPException]] variable[code] assign[=] <ast.IfExp object at 0x7da20c6c7010> if compare[name[code] equal[==] constant[500]] begin[:] call[name[current_app].logger.error, parameter[name[exception]]] if <ast.BoolOp object at 0x7da2045650c0> begin[:] variable[headers] assign[=] name[request].headers if <ast.BoolOp object at 0x7da204565930> begin[:] return[call[name[json_error_handler], parameter[name[exception]]]] return[call[name[template_error_handler], parameter[name[exception]]]]
keyword[def] identifier[default_error_handler] ( identifier[exception] ): literal[string] identifier[http_exception] = identifier[isinstance] ( identifier[exception] , identifier[exceptions] . identifier[HTTPException] ) identifier[code] = identifier[exception] . identifier[code] keyword[if] identifier[http_exception] keyword[else] literal[int] keyword[if] identifier[code] == literal[int] : identifier[current_app] . identifier[logger] . identifier[error] ( identifier[exception] ) keyword[if] identifier[has_app_context] () keyword[and] identifier[has_request_context] (): identifier[headers] = identifier[request] . identifier[headers] keyword[if] literal[string] keyword[in] identifier[headers] keyword[and] identifier[headers] [ literal[string] ]== literal[string] : keyword[return] identifier[json_error_handler] ( identifier[exception] ) keyword[return] identifier[template_error_handler] ( identifier[exception] )
def default_error_handler(exception): """ Default error handler Will display an error page with the corresponding error code from template directory, for example, a not found will load a 404.html etc. Will first look in userland app templates and if not found, fallback to boiler templates to display a default page. :param exception: Exception :return: string """ http_exception = isinstance(exception, exceptions.HTTPException) code = exception.code if http_exception else 500 # log exceptions only (app debug should be off) if code == 500: current_app.logger.error(exception) # depends on [control=['if'], data=[]] # jsonify error if json requested via accept header if has_app_context() and has_request_context(): headers = request.headers if 'Accept' in headers and headers['Accept'] == 'application/json': return json_error_handler(exception) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # otherwise render template return template_error_handler(exception)
def _create_xref(self, current_class): """ Create the xref for `current_class` There are four steps involved in getting the xrefs: * Xrefs for classes * for method calls * for string usage * for field manipulation All these information are stored in the *Analysis Objects. Note that this might be quite slow, as all instructions are parsed. :param androguard.core.bytecodes.dvm.ClassDefItem current_class: The class to create xrefs for """ cur_cls_name = current_class.get_name() log.debug("Creating XREF/DREF for %s" % cur_cls_name) for current_method in current_class.get_methods(): log.debug("Creating XREF for %s" % current_method) off = 0 for instruction in current_method.get_instructions(): op_value = instruction.get_op_value() # 1) check for class calls: const-class (0x1c), new-instance (0x22) if op_value in [0x1c, 0x22]: idx_type = instruction.get_ref_kind() # type_info is the string like 'Ljava/lang/Object;' type_info = instruction.cm.vm.get_cm_type(idx_type) # Internal xref related to class manipulation # FIXME should the xref really only set if the class is in self.classes? If an external class is added later, it will be added too! # See https://github.com/androguard/androguard/blob/d720ebf2a9c8e2a28484f1c81fdddbc57e04c157/androguard/core/analysis/analysis.py#L806 # Before the check would go for internal classes only! # FIXME: effectively ignoring calls to itself - do we want that? if type_info != cur_cls_name: if type_info not in self.classes: # Create new external class self.classes[type_info] = ClassAnalysis(ExternalClass(type_info)) cur_cls = self.classes[cur_cls_name] oth_cls = self.classes[type_info] # FIXME: xref_to does not work here! current_method is wrong, as it is not the target! cur_cls.AddXrefTo(REF_TYPE(op_value), oth_cls, current_method, off) oth_cls.AddXrefFrom(REF_TYPE(op_value), cur_cls, current_method, off) # 2) check for method calls: invoke-* (0x6e ... 0x72), invoke-xxx/range (0x74 ... 
0x78) elif (0x6e <= op_value <= 0x72) or (0x74 <= op_value <= 0x78): idx_meth = instruction.get_ref_kind() method_info = instruction.cm.vm.get_cm_method(idx_meth) if method_info: class_info = method_info[0] method_item = None # TODO: should create get_method_descriptor inside Analysis for vm in self.vms: method_item = vm.get_method_descriptor(method_info[0], method_info[1], ''.join(method_info[2])) if method_item: break if not method_item: # Seems to be an external class, create it first # Beware: if not all DEX files are loaded at the time create_xref runs # you will run into problems! if method_info[0] not in self.classes: self.classes[method_info[0]] = ClassAnalysis(ExternalClass(method_info[0])) method_item = self.classes[method_info[0]].get_fake_method(method_info[1], method_info[2]) self.classes[cur_cls_name].AddMXrefTo(current_method, self.classes[class_info], method_item, off) self.classes[class_info].AddMXrefFrom(method_item, self.classes[cur_cls_name], current_method, off) # Internal xref related to class manipulation if class_info in self.classes and class_info != cur_cls_name: self.classes[cur_cls_name].AddXrefTo(REF_TYPE(op_value), self.classes[class_info], method_item, off) self.classes[class_info].AddXrefFrom(REF_TYPE(op_value), self.classes[cur_cls_name], current_method, off) # 3) check for string usage: const-string (0x1a), const-string/jumbo (0x1b) elif 0x1a <= op_value <= 0x1b: string_value = instruction.cm.vm.get_cm_string(instruction.get_ref_kind()) if string_value not in self.strings: self.strings[string_value] = StringAnalysis(string_value) # TODO: The bytecode offset is stored for classes but not here? self.strings[string_value].AddXrefFrom(self.classes[cur_cls_name], current_method) # TODO maybe we should add a step 3a) here and check for all const fields. You can then xref for integers etc! # But: This does not work, as const fields are usually optimized internally to const calls... # 4) check for field usage: i*op (0x52 ... 
0x5f), s*op (0x60 ... 0x6d) elif 0x52 <= op_value <= 0x6d: idx_field = instruction.get_ref_kind() field_info = instruction.cm.vm.get_cm_field(idx_field) field_item = instruction.cm.vm.get_field_descriptor(field_info[0], field_info[2], field_info[1]) # TODO: The bytecode offset is stored for classes but not here? if field_item: if (0x52 <= op_value <= 0x58) or (0x60 <= op_value <= 0x66): # read access to a field self.classes[cur_cls_name].AddFXrefRead(current_method, self.classes[cur_cls_name], field_item) else: # write access to a field self.classes[cur_cls_name].AddFXrefWrite(current_method, self.classes[cur_cls_name], field_item) off += instruction.get_length()
def function[_create_xref, parameter[self, current_class]]: constant[ Create the xref for `current_class` There are four steps involved in getting the xrefs: * Xrefs for classes * for method calls * for string usage * for field manipulation All these information are stored in the *Analysis Objects. Note that this might be quite slow, as all instructions are parsed. :param androguard.core.bytecodes.dvm.ClassDefItem current_class: The class to create xrefs for ] variable[cur_cls_name] assign[=] call[name[current_class].get_name, parameter[]] call[name[log].debug, parameter[binary_operation[constant[Creating XREF/DREF for %s] <ast.Mod object at 0x7da2590d6920> name[cur_cls_name]]]] for taget[name[current_method]] in starred[call[name[current_class].get_methods, parameter[]]] begin[:] call[name[log].debug, parameter[binary_operation[constant[Creating XREF for %s] <ast.Mod object at 0x7da2590d6920> name[current_method]]]] variable[off] assign[=] constant[0] for taget[name[instruction]] in starred[call[name[current_method].get_instructions, parameter[]]] begin[:] variable[op_value] assign[=] call[name[instruction].get_op_value, parameter[]] if compare[name[op_value] in list[[<ast.Constant object at 0x7da18f09d120>, <ast.Constant object at 0x7da18f09e020>]]] begin[:] variable[idx_type] assign[=] call[name[instruction].get_ref_kind, parameter[]] variable[type_info] assign[=] call[name[instruction].cm.vm.get_cm_type, parameter[name[idx_type]]] if compare[name[type_info] not_equal[!=] name[cur_cls_name]] begin[:] if compare[name[type_info] <ast.NotIn object at 0x7da2590d7190> name[self].classes] begin[:] call[name[self].classes][name[type_info]] assign[=] call[name[ClassAnalysis], parameter[call[name[ExternalClass], parameter[name[type_info]]]]] variable[cur_cls] assign[=] call[name[self].classes][name[cur_cls_name]] variable[oth_cls] assign[=] call[name[self].classes][name[type_info]] call[name[cur_cls].AddXrefTo, parameter[call[name[REF_TYPE], parameter[name[op_value]]], 
name[oth_cls], name[current_method], name[off]]] call[name[oth_cls].AddXrefFrom, parameter[call[name[REF_TYPE], parameter[name[op_value]]], name[cur_cls], name[current_method], name[off]]] <ast.AugAssign object at 0x7da18c4cf220>
keyword[def] identifier[_create_xref] ( identifier[self] , identifier[current_class] ): literal[string] identifier[cur_cls_name] = identifier[current_class] . identifier[get_name] () identifier[log] . identifier[debug] ( literal[string] % identifier[cur_cls_name] ) keyword[for] identifier[current_method] keyword[in] identifier[current_class] . identifier[get_methods] (): identifier[log] . identifier[debug] ( literal[string] % identifier[current_method] ) identifier[off] = literal[int] keyword[for] identifier[instruction] keyword[in] identifier[current_method] . identifier[get_instructions] (): identifier[op_value] = identifier[instruction] . identifier[get_op_value] () keyword[if] identifier[op_value] keyword[in] [ literal[int] , literal[int] ]: identifier[idx_type] = identifier[instruction] . identifier[get_ref_kind] () identifier[type_info] = identifier[instruction] . identifier[cm] . identifier[vm] . identifier[get_cm_type] ( identifier[idx_type] ) keyword[if] identifier[type_info] != identifier[cur_cls_name] : keyword[if] identifier[type_info] keyword[not] keyword[in] identifier[self] . identifier[classes] : identifier[self] . identifier[classes] [ identifier[type_info] ]= identifier[ClassAnalysis] ( identifier[ExternalClass] ( identifier[type_info] )) identifier[cur_cls] = identifier[self] . identifier[classes] [ identifier[cur_cls_name] ] identifier[oth_cls] = identifier[self] . identifier[classes] [ identifier[type_info] ] identifier[cur_cls] . identifier[AddXrefTo] ( identifier[REF_TYPE] ( identifier[op_value] ), identifier[oth_cls] , identifier[current_method] , identifier[off] ) identifier[oth_cls] . identifier[AddXrefFrom] ( identifier[REF_TYPE] ( identifier[op_value] ), identifier[cur_cls] , identifier[current_method] , identifier[off] ) keyword[elif] ( literal[int] <= identifier[op_value] <= literal[int] ) keyword[or] ( literal[int] <= identifier[op_value] <= literal[int] ): identifier[idx_meth] = identifier[instruction] . 
identifier[get_ref_kind] () identifier[method_info] = identifier[instruction] . identifier[cm] . identifier[vm] . identifier[get_cm_method] ( identifier[idx_meth] ) keyword[if] identifier[method_info] : identifier[class_info] = identifier[method_info] [ literal[int] ] identifier[method_item] = keyword[None] keyword[for] identifier[vm] keyword[in] identifier[self] . identifier[vms] : identifier[method_item] = identifier[vm] . identifier[get_method_descriptor] ( identifier[method_info] [ literal[int] ], identifier[method_info] [ literal[int] ], literal[string] . identifier[join] ( identifier[method_info] [ literal[int] ])) keyword[if] identifier[method_item] : keyword[break] keyword[if] keyword[not] identifier[method_item] : keyword[if] identifier[method_info] [ literal[int] ] keyword[not] keyword[in] identifier[self] . identifier[classes] : identifier[self] . identifier[classes] [ identifier[method_info] [ literal[int] ]]= identifier[ClassAnalysis] ( identifier[ExternalClass] ( identifier[method_info] [ literal[int] ])) identifier[method_item] = identifier[self] . identifier[classes] [ identifier[method_info] [ literal[int] ]]. identifier[get_fake_method] ( identifier[method_info] [ literal[int] ], identifier[method_info] [ literal[int] ]) identifier[self] . identifier[classes] [ identifier[cur_cls_name] ]. identifier[AddMXrefTo] ( identifier[current_method] , identifier[self] . identifier[classes] [ identifier[class_info] ], identifier[method_item] , identifier[off] ) identifier[self] . identifier[classes] [ identifier[class_info] ]. identifier[AddMXrefFrom] ( identifier[method_item] , identifier[self] . identifier[classes] [ identifier[cur_cls_name] ], identifier[current_method] , identifier[off] ) keyword[if] identifier[class_info] keyword[in] identifier[self] . identifier[classes] keyword[and] identifier[class_info] != identifier[cur_cls_name] : identifier[self] . identifier[classes] [ identifier[cur_cls_name] ]. 
identifier[AddXrefTo] ( identifier[REF_TYPE] ( identifier[op_value] ), identifier[self] . identifier[classes] [ identifier[class_info] ], identifier[method_item] , identifier[off] ) identifier[self] . identifier[classes] [ identifier[class_info] ]. identifier[AddXrefFrom] ( identifier[REF_TYPE] ( identifier[op_value] ), identifier[self] . identifier[classes] [ identifier[cur_cls_name] ], identifier[current_method] , identifier[off] ) keyword[elif] literal[int] <= identifier[op_value] <= literal[int] : identifier[string_value] = identifier[instruction] . identifier[cm] . identifier[vm] . identifier[get_cm_string] ( identifier[instruction] . identifier[get_ref_kind] ()) keyword[if] identifier[string_value] keyword[not] keyword[in] identifier[self] . identifier[strings] : identifier[self] . identifier[strings] [ identifier[string_value] ]= identifier[StringAnalysis] ( identifier[string_value] ) identifier[self] . identifier[strings] [ identifier[string_value] ]. identifier[AddXrefFrom] ( identifier[self] . identifier[classes] [ identifier[cur_cls_name] ], identifier[current_method] ) keyword[elif] literal[int] <= identifier[op_value] <= literal[int] : identifier[idx_field] = identifier[instruction] . identifier[get_ref_kind] () identifier[field_info] = identifier[instruction] . identifier[cm] . identifier[vm] . identifier[get_cm_field] ( identifier[idx_field] ) identifier[field_item] = identifier[instruction] . identifier[cm] . identifier[vm] . identifier[get_field_descriptor] ( identifier[field_info] [ literal[int] ], identifier[field_info] [ literal[int] ], identifier[field_info] [ literal[int] ]) keyword[if] identifier[field_item] : keyword[if] ( literal[int] <= identifier[op_value] <= literal[int] ) keyword[or] ( literal[int] <= identifier[op_value] <= literal[int] ): identifier[self] . identifier[classes] [ identifier[cur_cls_name] ]. identifier[AddFXrefRead] ( identifier[current_method] , identifier[self] . 
identifier[classes] [ identifier[cur_cls_name] ], identifier[field_item] ) keyword[else] : identifier[self] . identifier[classes] [ identifier[cur_cls_name] ]. identifier[AddFXrefWrite] ( identifier[current_method] , identifier[self] . identifier[classes] [ identifier[cur_cls_name] ], identifier[field_item] ) identifier[off] += identifier[instruction] . identifier[get_length] ()
def _create_xref(self, current_class): """ Create the xref for `current_class` There are four steps involved in getting the xrefs: * Xrefs for classes * for method calls * for string usage * for field manipulation All these information are stored in the *Analysis Objects. Note that this might be quite slow, as all instructions are parsed. :param androguard.core.bytecodes.dvm.ClassDefItem current_class: The class to create xrefs for """ cur_cls_name = current_class.get_name() log.debug('Creating XREF/DREF for %s' % cur_cls_name) for current_method in current_class.get_methods(): log.debug('Creating XREF for %s' % current_method) off = 0 for instruction in current_method.get_instructions(): op_value = instruction.get_op_value() # 1) check for class calls: const-class (0x1c), new-instance (0x22) if op_value in [28, 34]: idx_type = instruction.get_ref_kind() # type_info is the string like 'Ljava/lang/Object;' type_info = instruction.cm.vm.get_cm_type(idx_type) # Internal xref related to class manipulation # FIXME should the xref really only set if the class is in self.classes? If an external class is added later, it will be added too! # See https://github.com/androguard/androguard/blob/d720ebf2a9c8e2a28484f1c81fdddbc57e04c157/androguard/core/analysis/analysis.py#L806 # Before the check would go for internal classes only! # FIXME: effectively ignoring calls to itself - do we want that? if type_info != cur_cls_name: if type_info not in self.classes: # Create new external class self.classes[type_info] = ClassAnalysis(ExternalClass(type_info)) # depends on [control=['if'], data=['type_info']] cur_cls = self.classes[cur_cls_name] oth_cls = self.classes[type_info] # FIXME: xref_to does not work here! current_method is wrong, as it is not the target! 
cur_cls.AddXrefTo(REF_TYPE(op_value), oth_cls, current_method, off) oth_cls.AddXrefFrom(REF_TYPE(op_value), cur_cls, current_method, off) # depends on [control=['if'], data=['type_info', 'cur_cls_name']] # depends on [control=['if'], data=['op_value']] # 2) check for method calls: invoke-* (0x6e ... 0x72), invoke-xxx/range (0x74 ... 0x78) elif 110 <= op_value <= 114 or 116 <= op_value <= 120: idx_meth = instruction.get_ref_kind() method_info = instruction.cm.vm.get_cm_method(idx_meth) if method_info: class_info = method_info[0] method_item = None # TODO: should create get_method_descriptor inside Analysis for vm in self.vms: method_item = vm.get_method_descriptor(method_info[0], method_info[1], ''.join(method_info[2])) if method_item: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['vm']] if not method_item: # Seems to be an external class, create it first # Beware: if not all DEX files are loaded at the time create_xref runs # you will run into problems! 
if method_info[0] not in self.classes: self.classes[method_info[0]] = ClassAnalysis(ExternalClass(method_info[0])) # depends on [control=['if'], data=[]] method_item = self.classes[method_info[0]].get_fake_method(method_info[1], method_info[2]) # depends on [control=['if'], data=[]] self.classes[cur_cls_name].AddMXrefTo(current_method, self.classes[class_info], method_item, off) self.classes[class_info].AddMXrefFrom(method_item, self.classes[cur_cls_name], current_method, off) # Internal xref related to class manipulation if class_info in self.classes and class_info != cur_cls_name: self.classes[cur_cls_name].AddXrefTo(REF_TYPE(op_value), self.classes[class_info], method_item, off) self.classes[class_info].AddXrefFrom(REF_TYPE(op_value), self.classes[cur_cls_name], current_method, off) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # 3) check for string usage: const-string (0x1a), const-string/jumbo (0x1b) elif 26 <= op_value <= 27: string_value = instruction.cm.vm.get_cm_string(instruction.get_ref_kind()) if string_value not in self.strings: self.strings[string_value] = StringAnalysis(string_value) # depends on [control=['if'], data=['string_value']] # TODO: The bytecode offset is stored for classes but not here? self.strings[string_value].AddXrefFrom(self.classes[cur_cls_name], current_method) # depends on [control=['if'], data=[]] # TODO maybe we should add a step 3a) here and check for all const fields. You can then xref for integers etc! # But: This does not work, as const fields are usually optimized internally to const calls... # 4) check for field usage: i*op (0x52 ... 0x5f), s*op (0x60 ... 0x6d) elif 82 <= op_value <= 109: idx_field = instruction.get_ref_kind() field_info = instruction.cm.vm.get_cm_field(idx_field) field_item = instruction.cm.vm.get_field_descriptor(field_info[0], field_info[2], field_info[1]) # TODO: The bytecode offset is stored for classes but not here? 
if field_item: if 82 <= op_value <= 88 or 96 <= op_value <= 102: # read access to a field self.classes[cur_cls_name].AddFXrefRead(current_method, self.classes[cur_cls_name], field_item) # depends on [control=['if'], data=[]] else: # write access to a field self.classes[cur_cls_name].AddFXrefWrite(current_method, self.classes[cur_cls_name], field_item) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['op_value']] off += instruction.get_length() # depends on [control=['for'], data=['instruction']] # depends on [control=['for'], data=['current_method']]
def fetch_timeline_history_files(self, max_timeline):
    """Upload every timeline history file found on the server.

    The files are copied without checking whether we already have them:
    history files are tiny, so re-uploading duplicates is harmless.
    """
    # Walk the timelines from the newest down to 2 (timeline 1 has no
    # history file), mirroring the server-side numbering.
    for timeline in range(max_timeline, 1, -1):
        self.c.execute("TIMELINE_HISTORY {}".format(timeline))
        row = self.c.fetchone()
        filename = row[0]
        payload = row[1].tobytes()
        self.log.debug("Received timeline history: %s for timeline %r",
                       filename, timeline)
        # Hand the in-memory file to the compression pipeline; nothing is
        # written to disk here, hence compress_to_memory/no-delete flags.
        self.compression_queue.put({
            "type": "CLOSE_WRITE",
            "compress_to_memory": True,
            "delete_file_after_compression": False,
            "input_data": BytesIO(payload),
            "full_path": filename,
            "site": self.site,
        })
def function[fetch_timeline_history_files, parameter[self, max_timeline]]: constant[Copy all timeline history files found on the server without checking if we have them or not. The history files are very small so reuploading them should not matter.] while compare[name[max_timeline] greater[>] constant[1]] begin[:] call[name[self].c.execute, parameter[call[constant[TIMELINE_HISTORY {}].format, parameter[name[max_timeline]]]]] variable[timeline_history] assign[=] call[name[self].c.fetchone, parameter[]] variable[history_filename] assign[=] call[name[timeline_history]][constant[0]] variable[history_data] assign[=] call[call[name[timeline_history]][constant[1]].tobytes, parameter[]] call[name[self].log.debug, parameter[constant[Received timeline history: %s for timeline %r], name[history_filename], name[max_timeline]]] variable[compression_event] assign[=] dictionary[[<ast.Constant object at 0x7da1b188c8e0>, <ast.Constant object at 0x7da1b188d6c0>, <ast.Constant object at 0x7da1b188c490>, <ast.Constant object at 0x7da1b188d840>, <ast.Constant object at 0x7da1b188d3c0>, <ast.Constant object at 0x7da1b188d240>], [<ast.Constant object at 0x7da1b188cd30>, <ast.Constant object at 0x7da1b188cac0>, <ast.Constant object at 0x7da1b18dcfd0>, <ast.Call object at 0x7da1b18dc2e0>, <ast.Name object at 0x7da1b18dc370>, <ast.Attribute object at 0x7da1b1950c40>]] call[name[self].compression_queue.put, parameter[name[compression_event]]] <ast.AugAssign object at 0x7da1b18a3ca0>
keyword[def] identifier[fetch_timeline_history_files] ( identifier[self] , identifier[max_timeline] ): literal[string] keyword[while] identifier[max_timeline] > literal[int] : identifier[self] . identifier[c] . identifier[execute] ( literal[string] . identifier[format] ( identifier[max_timeline] )) identifier[timeline_history] = identifier[self] . identifier[c] . identifier[fetchone] () identifier[history_filename] = identifier[timeline_history] [ literal[int] ] identifier[history_data] = identifier[timeline_history] [ literal[int] ]. identifier[tobytes] () identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[history_filename] , identifier[max_timeline] ) identifier[compression_event] ={ literal[string] : literal[string] , literal[string] : keyword[True] , literal[string] : keyword[False] , literal[string] : identifier[BytesIO] ( identifier[history_data] ), literal[string] : identifier[history_filename] , literal[string] : identifier[self] . identifier[site] , } identifier[self] . identifier[compression_queue] . identifier[put] ( identifier[compression_event] ) identifier[max_timeline] -= literal[int]
def fetch_timeline_history_files(self, max_timeline): """Copy all timeline history files found on the server without checking if we have them or not. The history files are very small so reuploading them should not matter.""" while max_timeline > 1: self.c.execute('TIMELINE_HISTORY {}'.format(max_timeline)) timeline_history = self.c.fetchone() history_filename = timeline_history[0] history_data = timeline_history[1].tobytes() self.log.debug('Received timeline history: %s for timeline %r', history_filename, max_timeline) compression_event = {'type': 'CLOSE_WRITE', 'compress_to_memory': True, 'delete_file_after_compression': False, 'input_data': BytesIO(history_data), 'full_path': history_filename, 'site': self.site} self.compression_queue.put(compression_event) max_timeline -= 1 # depends on [control=['while'], data=['max_timeline']]
def to_contracts(instruments, prices, multipliers, desired_ccy=None,
                 instr_fx=None, fx_rates=None, rounder=None):
    """
    Convert notional holdings of tradeable instruments into integer numbers
    of contracts, rounding to the nearest whole contract.

    Parameters
    ----------
    instruments: pandas.Series
        Instrument holdings. Index is instrument name, values are the
        notional amount held in each instrument.
    prices: pandas.Series
        Instrument prices. Index is instrument name; must be a superset of
        instruments.index.
    multipliers: pandas.Series
        Contract multipliers. Index is instrument name; must be a superset
        of instruments.index.
    desired_ccy: str
        Three letter code of the currency to convert notionals into, e.g.
        'USD'. When None, currency conversion is skipped.
    instr_fx: pandas.Series
        Currency denomination of each instrument as three letter codes.
        Index should match prices.index.
    fx_rates: pandas.Series
        Exchange rates used for conversion to desired_ccy. Index is FX pair
        strings such as 'AUDUSD' or 'USDCAD'.
    rounder: function
        Callable used to round the pd.Series of contract counts; defaults
        to pd.Series.round.

    Returns
    -------
    pandas.Series of integer contract counts indexed by instrument name.
    """
    notionals = _instr_conv(instruments, prices, multipliers, False,
                            desired_ccy, instr_fx, fx_rates)
    # Fall back to plain nearest-integer rounding unless a custom rounder
    # (e.g. floor/ceil policy) was supplied.
    round_fn = pd.Series.round if rounder is None else rounder
    return round_fn(notionals).astype(int)
def function[to_contracts, parameter[instruments, prices, multipliers, desired_ccy, instr_fx, fx_rates, rounder]]: constant[ Convert notional amount of tradeable instruments to number of instrument contracts, rounding to nearest integer number of contracts. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are notional amount on instrument. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. rounder: function Function to round pd.Series contracts to integers, if None default pd.Series.round is used. 
Returns ------- pandas.Series of contract numbers of instruments with Index of instruments names ] variable[contracts] assign[=] call[name[_instr_conv], parameter[name[instruments], name[prices], name[multipliers], constant[False], name[desired_ccy], name[instr_fx], name[fx_rates]]] if compare[name[rounder] is constant[None]] begin[:] variable[rounder] assign[=] name[pd].Series.round variable[contracts] assign[=] call[name[rounder], parameter[name[contracts]]] variable[contracts] assign[=] call[name[contracts].astype, parameter[name[int]]] return[name[contracts]]
keyword[def] identifier[to_contracts] ( identifier[instruments] , identifier[prices] , identifier[multipliers] , identifier[desired_ccy] = keyword[None] , identifier[instr_fx] = keyword[None] , identifier[fx_rates] = keyword[None] , identifier[rounder] = keyword[None] ): literal[string] identifier[contracts] = identifier[_instr_conv] ( identifier[instruments] , identifier[prices] , identifier[multipliers] , keyword[False] , identifier[desired_ccy] , identifier[instr_fx] , identifier[fx_rates] ) keyword[if] identifier[rounder] keyword[is] keyword[None] : identifier[rounder] = identifier[pd] . identifier[Series] . identifier[round] identifier[contracts] = identifier[rounder] ( identifier[contracts] ) identifier[contracts] = identifier[contracts] . identifier[astype] ( identifier[int] ) keyword[return] identifier[contracts]
def to_contracts(instruments, prices, multipliers, desired_ccy=None, instr_fx=None, fx_rates=None, rounder=None): """ Convert notional amount of tradeable instruments to number of instrument contracts, rounding to nearest integer number of contracts. Parameters ---------- instruments: pandas.Series Series of instrument holdings. Index is instrument name and values are notional amount on instrument. prices: pandas.Series Series of instrument prices. Index is instrument name and values are instrument prices. prices.index should be a superset of instruments.index multipliers: pandas.Series Series of instrument multipliers. Index is instrument name and values are the multiplier associated with the contract. multipliers.index should be a superset of instruments.index desired_ccy: str Three letter string representing desired currency to convert notional values to, e.g. 'USD'. If None is given currency conversion is ignored. instr_fx: pandas.Series Series of instrument fx denominations. Index is instrument name and values are three letter strings representing the currency the instrument is denominated in. instr_fx.index should match prices.index fx_rates: pandas.Series Series of fx rates used for conversion to desired_ccy. Index is strings representing the FX pair, e.g. 'AUDUSD' or 'USDCAD'. Values are the corresponding exchange rates. rounder: function Function to round pd.Series contracts to integers, if None default pd.Series.round is used. Returns ------- pandas.Series of contract numbers of instruments with Index of instruments names """ contracts = _instr_conv(instruments, prices, multipliers, False, desired_ccy, instr_fx, fx_rates) if rounder is None: rounder = pd.Series.round # depends on [control=['if'], data=['rounder']] contracts = rounder(contracts) contracts = contracts.astype(int) return contracts
def add_feature(self, feature=None, organism=None, sequence=None):
    """
    Add a feature

    :type feature: dict
    :param feature: Feature information

    :type organism: str
    :param organism: Organism Common Name

    :type sequence: str
    :param sequence: Sequence Name

    :rtype: dict
    :return: A standard apollo feature dictionary ({"features": [{...}]})
    """
    # Use a None sentinel instead of a mutable `{}` default: a dict default
    # is created once and shared across every call, so any mutation (e.g.
    # inside _update_data) would leak between unrelated calls.
    data = {
        'features': feature if feature is not None else {},
    }
    data = self._update_data(data, organism, sequence)
    return self.post('addFeature', data)
def function[add_feature, parameter[self, feature, organism, sequence]]: constant[ Add a feature :type feature: dict :param feature: Feature information :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]}) ] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b254f220>], [<ast.Name object at 0x7da1b254c6d0>]] variable[data] assign[=] call[name[self]._update_data, parameter[name[data], name[organism], name[sequence]]] return[call[name[self].post, parameter[constant[addFeature], name[data]]]]
keyword[def] identifier[add_feature] ( identifier[self] , identifier[feature] ={}, identifier[organism] = keyword[None] , identifier[sequence] = keyword[None] ): literal[string] identifier[data] ={ literal[string] : identifier[feature] , } identifier[data] = identifier[self] . identifier[_update_data] ( identifier[data] , identifier[organism] , identifier[sequence] ) keyword[return] identifier[self] . identifier[post] ( literal[string] , identifier[data] )
def add_feature(self, feature={}, organism=None, sequence=None): """ Add a feature :type feature: dict :param feature: Feature information :type organism: str :param organism: Organism Common Name :type sequence: str :param sequence: Sequence Name :rtype: dict :return: A standard apollo feature dictionary ({"features": [{...}]}) """ data = {'features': feature} data = self._update_data(data, organism, sequence) return self.post('addFeature', data)
def get_suffixes():
    """Get a list of all the filename suffixes supported by libvips.

    Returns:
        [string]
    """
    suffixes = []
    # vips_foreign_get_suffixes() only exists from libvips 8.8 onwards.
    if not at_least_libvips(8, 8):
        return suffixes
    raw = vips_lib.vips_foreign_get_suffixes()
    i = 0
    # NULL-terminated C array of freshly allocated strings: copy each entry
    # (deduplicated), then free the element and finally the array itself.
    while raw[i] != ffi.NULL:
        suffix = _to_string(raw[i])
        if suffix not in suffixes:
            suffixes.append(suffix)
        glib_lib.g_free(raw[i])
        i += 1
    glib_lib.g_free(raw)
    return suffixes
def function[get_suffixes, parameter[]]: constant[Get a list of all the filename suffixes supported by libvips. Returns: [string] ] variable[names] assign[=] list[[]] if call[name[at_least_libvips], parameter[constant[8], constant[8]]] begin[:] variable[array] assign[=] call[name[vips_lib].vips_foreign_get_suffixes, parameter[]] variable[i] assign[=] constant[0] while compare[call[name[array]][name[i]] not_equal[!=] name[ffi].NULL] begin[:] variable[name] assign[=] call[name[_to_string], parameter[call[name[array]][name[i]]]] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[names]] begin[:] call[name[names].append, parameter[name[name]]] call[name[glib_lib].g_free, parameter[call[name[array]][name[i]]]] <ast.AugAssign object at 0x7da18eb55120> call[name[glib_lib].g_free, parameter[name[array]]] return[name[names]]
keyword[def] identifier[get_suffixes] (): literal[string] identifier[names] =[] keyword[if] identifier[at_least_libvips] ( literal[int] , literal[int] ): identifier[array] = identifier[vips_lib] . identifier[vips_foreign_get_suffixes] () identifier[i] = literal[int] keyword[while] identifier[array] [ identifier[i] ]!= identifier[ffi] . identifier[NULL] : identifier[name] = identifier[_to_string] ( identifier[array] [ identifier[i] ]) keyword[if] identifier[name] keyword[not] keyword[in] identifier[names] : identifier[names] . identifier[append] ( identifier[name] ) identifier[glib_lib] . identifier[g_free] ( identifier[array] [ identifier[i] ]) identifier[i] += literal[int] identifier[glib_lib] . identifier[g_free] ( identifier[array] ) keyword[return] identifier[names]
def get_suffixes(): """Get a list of all the filename suffixes supported by libvips. Returns: [string] """ names = [] if at_least_libvips(8, 8): array = vips_lib.vips_foreign_get_suffixes() i = 0 while array[i] != ffi.NULL: name = _to_string(array[i]) if name not in names: names.append(name) # depends on [control=['if'], data=['name', 'names']] glib_lib.g_free(array[i]) i += 1 # depends on [control=['while'], data=[]] glib_lib.g_free(array) # depends on [control=['if'], data=[]] return names
def _histplot_op(values, values2, rotated, ax, hist_kwargs): """Add a histogram for the data to the axes.""" if values2 is not None: raise NotImplementedError("Insert hexbin plot here") bins = hist_kwargs.pop("bins") if bins is None: bins = get_bins(values) ax.hist(values, bins=bins, **hist_kwargs) if rotated: ax.set_yticks(bins[:-1]) else: ax.set_xticks(bins[:-1]) if hist_kwargs["label"] is not None: ax.legend() return ax
def function[_histplot_op, parameter[values, values2, rotated, ax, hist_kwargs]]: constant[Add a histogram for the data to the axes.] if compare[name[values2] is_not constant[None]] begin[:] <ast.Raise object at 0x7da1b1bd0a90> variable[bins] assign[=] call[name[hist_kwargs].pop, parameter[constant[bins]]] if compare[name[bins] is constant[None]] begin[:] variable[bins] assign[=] call[name[get_bins], parameter[name[values]]] call[name[ax].hist, parameter[name[values]]] if name[rotated] begin[:] call[name[ax].set_yticks, parameter[call[name[bins]][<ast.Slice object at 0x7da1b1bd10c0>]]] if compare[call[name[hist_kwargs]][constant[label]] is_not constant[None]] begin[:] call[name[ax].legend, parameter[]] return[name[ax]]
keyword[def] identifier[_histplot_op] ( identifier[values] , identifier[values2] , identifier[rotated] , identifier[ax] , identifier[hist_kwargs] ): literal[string] keyword[if] identifier[values2] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) identifier[bins] = identifier[hist_kwargs] . identifier[pop] ( literal[string] ) keyword[if] identifier[bins] keyword[is] keyword[None] : identifier[bins] = identifier[get_bins] ( identifier[values] ) identifier[ax] . identifier[hist] ( identifier[values] , identifier[bins] = identifier[bins] ,** identifier[hist_kwargs] ) keyword[if] identifier[rotated] : identifier[ax] . identifier[set_yticks] ( identifier[bins] [:- literal[int] ]) keyword[else] : identifier[ax] . identifier[set_xticks] ( identifier[bins] [:- literal[int] ]) keyword[if] identifier[hist_kwargs] [ literal[string] ] keyword[is] keyword[not] keyword[None] : identifier[ax] . identifier[legend] () keyword[return] identifier[ax]
def _histplot_op(values, values2, rotated, ax, hist_kwargs): """Add a histogram for the data to the axes.""" if values2 is not None: raise NotImplementedError('Insert hexbin plot here') # depends on [control=['if'], data=[]] bins = hist_kwargs.pop('bins') if bins is None: bins = get_bins(values) # depends on [control=['if'], data=['bins']] ax.hist(values, bins=bins, **hist_kwargs) if rotated: ax.set_yticks(bins[:-1]) # depends on [control=['if'], data=[]] else: ax.set_xticks(bins[:-1]) if hist_kwargs['label'] is not None: ax.legend() # depends on [control=['if'], data=[]] return ax
def types(self, *args):
    """Debug helper: render every argument as ``typename(value)``.

    TYPES,ARG_1,...,ARG_N
    %{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)'
    """
    rendered = ('{0}({1})'.format(type(value).__name__, value) for value in args)
    return ', '.join(rendered)
def function[types, parameter[self]]: constant[Used for debugging, returns type of each arg. TYPES,ARG_1,...,ARG_N %{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)' ] return[call[constant[, ].join, parameter[<ast.ListComp object at 0x7da20c6a8490>]]]
keyword[def] identifier[types] ( identifier[self] ,* identifier[args] ): literal[string] keyword[return] literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[type] ( identifier[arg] ). identifier[__name__] , identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[args] ])
def types(self, *args): """Used for debugging, returns type of each arg. TYPES,ARG_1,...,ARG_N %{TYPES:A,...,10} -> 'str(A) str(B) ... int(10)' """ return ', '.join(['{0}({1})'.format(type(arg).__name__, arg) for arg in args])
def bundle_details(self, io_handler, bundle_id): """ Prints the details of the bundle with the given ID or name """ bundle = None try: # Convert the given ID into an integer bundle_id = int(bundle_id) except ValueError: # Not an integer, suppose it's a bundle name for bundle in self._context.get_bundles(): if bundle.get_symbolic_name() == bundle_id: break else: # Bundle not found bundle = None else: # Integer ID: direct access try: bundle = self._context.get_bundle(bundle_id) except constants.BundleException: pass if bundle is None: # No matching bundle io_handler.write_line("Unknown bundle ID: {0}", bundle_id) return False lines = [ "ID......: {0}".format(bundle.get_bundle_id()), "Name....: {0}".format(bundle.get_symbolic_name()), "Version.: {0}".format(bundle.get_version()), "State...: {0}".format( self._utils.bundlestate_to_str(bundle.get_state()) ), "Location: {0}".format(bundle.get_location()), "Published services:", ] try: services = bundle.get_registered_services() if services: for svc_ref in services: lines.append("\t{0}".format(svc_ref)) else: lines.append("\tn/a") except constants.BundleException as ex: # Bundle in a invalid state lines.append("\tError: {0}".format(ex)) lines.append("Services used by this bundle:") try: services = bundle.get_services_in_use() if services: for svc_ref in services: lines.append("\t{0}".format(svc_ref)) else: lines.append("\tn/a") except constants.BundleException as ex: # Bundle in a invalid state lines.append("\tError: {0}".format(ex)) lines.append("") io_handler.write("\n".join(lines)) return None
def function[bundle_details, parameter[self, io_handler, bundle_id]]: constant[ Prints the details of the bundle with the given ID or name ] variable[bundle] assign[=] constant[None] <ast.Try object at 0x7da1b0472290> if compare[name[bundle] is constant[None]] begin[:] call[name[io_handler].write_line, parameter[constant[Unknown bundle ID: {0}], name[bundle_id]]] return[constant[False]] variable[lines] assign[=] list[[<ast.Call object at 0x7da1b0472c20>, <ast.Call object at 0x7da1b04712a0>, <ast.Call object at 0x7da1b0470940>, <ast.Call object at 0x7da1b0473d90>, <ast.Call object at 0x7da1b04717b0>, <ast.Constant object at 0x7da1b04709a0>]] <ast.Try object at 0x7da1b0472ef0> call[name[lines].append, parameter[constant[Services used by this bundle:]]] <ast.Try object at 0x7da1b0472710> call[name[lines].append, parameter[constant[]]] call[name[io_handler].write, parameter[call[constant[ ].join, parameter[name[lines]]]]] return[constant[None]]
keyword[def] identifier[bundle_details] ( identifier[self] , identifier[io_handler] , identifier[bundle_id] ): literal[string] identifier[bundle] = keyword[None] keyword[try] : identifier[bundle_id] = identifier[int] ( identifier[bundle_id] ) keyword[except] identifier[ValueError] : keyword[for] identifier[bundle] keyword[in] identifier[self] . identifier[_context] . identifier[get_bundles] (): keyword[if] identifier[bundle] . identifier[get_symbolic_name] ()== identifier[bundle_id] : keyword[break] keyword[else] : identifier[bundle] = keyword[None] keyword[else] : keyword[try] : identifier[bundle] = identifier[self] . identifier[_context] . identifier[get_bundle] ( identifier[bundle_id] ) keyword[except] identifier[constants] . identifier[BundleException] : keyword[pass] keyword[if] identifier[bundle] keyword[is] keyword[None] : identifier[io_handler] . identifier[write_line] ( literal[string] , identifier[bundle_id] ) keyword[return] keyword[False] identifier[lines] =[ literal[string] . identifier[format] ( identifier[bundle] . identifier[get_bundle_id] ()), literal[string] . identifier[format] ( identifier[bundle] . identifier[get_symbolic_name] ()), literal[string] . identifier[format] ( identifier[bundle] . identifier[get_version] ()), literal[string] . identifier[format] ( identifier[self] . identifier[_utils] . identifier[bundlestate_to_str] ( identifier[bundle] . identifier[get_state] ()) ), literal[string] . identifier[format] ( identifier[bundle] . identifier[get_location] ()), literal[string] , ] keyword[try] : identifier[services] = identifier[bundle] . identifier[get_registered_services] () keyword[if] identifier[services] : keyword[for] identifier[svc_ref] keyword[in] identifier[services] : identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[svc_ref] )) keyword[else] : identifier[lines] . identifier[append] ( literal[string] ) keyword[except] identifier[constants] . 
identifier[BundleException] keyword[as] identifier[ex] : identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[ex] )) identifier[lines] . identifier[append] ( literal[string] ) keyword[try] : identifier[services] = identifier[bundle] . identifier[get_services_in_use] () keyword[if] identifier[services] : keyword[for] identifier[svc_ref] keyword[in] identifier[services] : identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[svc_ref] )) keyword[else] : identifier[lines] . identifier[append] ( literal[string] ) keyword[except] identifier[constants] . identifier[BundleException] keyword[as] identifier[ex] : identifier[lines] . identifier[append] ( literal[string] . identifier[format] ( identifier[ex] )) identifier[lines] . identifier[append] ( literal[string] ) identifier[io_handler] . identifier[write] ( literal[string] . identifier[join] ( identifier[lines] )) keyword[return] keyword[None]
def bundle_details(self, io_handler, bundle_id): """ Prints the details of the bundle with the given ID or name """ bundle = None try: # Convert the given ID into an integer bundle_id = int(bundle_id) # depends on [control=['try'], data=[]] except ValueError: # Not an integer, suppose it's a bundle name for bundle in self._context.get_bundles(): if bundle.get_symbolic_name() == bundle_id: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['bundle']] else: # Bundle not found bundle = None # depends on [control=['except'], data=[]] else: # Integer ID: direct access try: bundle = self._context.get_bundle(bundle_id) # depends on [control=['try'], data=[]] except constants.BundleException: pass # depends on [control=['except'], data=[]] if bundle is None: # No matching bundle io_handler.write_line('Unknown bundle ID: {0}', bundle_id) return False # depends on [control=['if'], data=[]] lines = ['ID......: {0}'.format(bundle.get_bundle_id()), 'Name....: {0}'.format(bundle.get_symbolic_name()), 'Version.: {0}'.format(bundle.get_version()), 'State...: {0}'.format(self._utils.bundlestate_to_str(bundle.get_state())), 'Location: {0}'.format(bundle.get_location()), 'Published services:'] try: services = bundle.get_registered_services() if services: for svc_ref in services: lines.append('\t{0}'.format(svc_ref)) # depends on [control=['for'], data=['svc_ref']] # depends on [control=['if'], data=[]] else: lines.append('\tn/a') # depends on [control=['try'], data=[]] except constants.BundleException as ex: # Bundle in a invalid state lines.append('\tError: {0}'.format(ex)) # depends on [control=['except'], data=['ex']] lines.append('Services used by this bundle:') try: services = bundle.get_services_in_use() if services: for svc_ref in services: lines.append('\t{0}'.format(svc_ref)) # depends on [control=['for'], data=['svc_ref']] # depends on [control=['if'], data=[]] else: lines.append('\tn/a') # depends on [control=['try'], data=[]] except 
constants.BundleException as ex: # Bundle in a invalid state lines.append('\tError: {0}'.format(ex)) # depends on [control=['except'], data=['ex']] lines.append('') io_handler.write('\n'.join(lines)) return None
def receiver(signal, **kwargs): """ A decorator for connecting receivers to signals. Used by passing in the signal (or list of signals) and keyword arguments to connect:: @receiver(post_save, sender=MyModel) def signal_receiver(sender, **kwargs): ... @receiver([post_save, post_delete], sender=MyModel) def signals_receiver(sender, **kwargs): ... """ def _decorator(func): if isinstance(signal, (list, tuple)): for s in signal: s.connect(func, **kwargs) else: signal.connect(func, **kwargs) return func return _decorator
def function[receiver, parameter[signal]]: constant[ A decorator for connecting receivers to signals. Used by passing in the signal (or list of signals) and keyword arguments to connect:: @receiver(post_save, sender=MyModel) def signal_receiver(sender, **kwargs): ... @receiver([post_save, post_delete], sender=MyModel) def signals_receiver(sender, **kwargs): ... ] def function[_decorator, parameter[func]]: if call[name[isinstance], parameter[name[signal], tuple[[<ast.Name object at 0x7da20e9560e0>, <ast.Name object at 0x7da20e954070>]]]] begin[:] for taget[name[s]] in starred[name[signal]] begin[:] call[name[s].connect, parameter[name[func]]] return[name[func]] return[name[_decorator]]
keyword[def] identifier[receiver] ( identifier[signal] ,** identifier[kwargs] ): literal[string] keyword[def] identifier[_decorator] ( identifier[func] ): keyword[if] identifier[isinstance] ( identifier[signal] ,( identifier[list] , identifier[tuple] )): keyword[for] identifier[s] keyword[in] identifier[signal] : identifier[s] . identifier[connect] ( identifier[func] ,** identifier[kwargs] ) keyword[else] : identifier[signal] . identifier[connect] ( identifier[func] ,** identifier[kwargs] ) keyword[return] identifier[func] keyword[return] identifier[_decorator]
def receiver(signal, **kwargs): """ A decorator for connecting receivers to signals. Used by passing in the signal (or list of signals) and keyword arguments to connect:: @receiver(post_save, sender=MyModel) def signal_receiver(sender, **kwargs): ... @receiver([post_save, post_delete], sender=MyModel) def signals_receiver(sender, **kwargs): ... """ def _decorator(func): if isinstance(signal, (list, tuple)): for s in signal: s.connect(func, **kwargs) # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]] else: signal.connect(func, **kwargs) return func return _decorator
def print_statistics(self): """Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. """ print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR)
def function[print_statistics, parameter[self]]: constant[Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. ] call[name[print], parameter[constant[Q1 =], name[self].Q1]] call[name[print], parameter[constant[Q2 =], name[self].Q2]] call[name[print], parameter[constant[cR =], name[self].cR]]
keyword[def] identifier[print_statistics] ( identifier[self] ): literal[string] identifier[print] ( literal[string] , identifier[self] . identifier[Q1] ) identifier[print] ( literal[string] , identifier[self] . identifier[Q2] ) identifier[print] ( literal[string] , identifier[self] . identifier[cR] )
def print_statistics(self): """Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. """ print('Q1 =', self.Q1) print('Q2 =', self.Q2) print('cR =', self.cR)
def remove_lib(lib_name): """remove library. :param lib_name: library name (e.g. 'PS2Keyboard') :rtype: None """ targ_dlib = libraries_dir() / lib_name log.debug('remove %s', targ_dlib) targ_dlib.rmtree()
def function[remove_lib, parameter[lib_name]]: constant[remove library. :param lib_name: library name (e.g. 'PS2Keyboard') :rtype: None ] variable[targ_dlib] assign[=] binary_operation[call[name[libraries_dir], parameter[]] / name[lib_name]] call[name[log].debug, parameter[constant[remove %s], name[targ_dlib]]] call[name[targ_dlib].rmtree, parameter[]]
keyword[def] identifier[remove_lib] ( identifier[lib_name] ): literal[string] identifier[targ_dlib] = identifier[libraries_dir] ()/ identifier[lib_name] identifier[log] . identifier[debug] ( literal[string] , identifier[targ_dlib] ) identifier[targ_dlib] . identifier[rmtree] ()
def remove_lib(lib_name): """remove library. :param lib_name: library name (e.g. 'PS2Keyboard') :rtype: None """ targ_dlib = libraries_dir() / lib_name log.debug('remove %s', targ_dlib) targ_dlib.rmtree()
def ellipse_from_second_moments_ijv(i,j, image, labels, indexes, wants_compactness = False): """Calculate measurements of ellipses equivalent to the second moments of labels i,j - coordinates of each point image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. 
eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis """ if len(indexes) == 0: return [np.zeros((0,2))] + [np.zeros((0,))] * (5 if wants_compactness else 4) if len(i) == 0: return ([np.zeros((len(indexes), 2)), np.ones(len(indexes))] + [np.zeros(len(indexes))] * (4 if wants_compactness else 3)) # # Normalize to center of object for stability # nlabels = np.max(indexes)+1 m = np.array([[None, 0, None], [0, None, None], [None, None, None]], object) if np.all(image == 1): image = 1 m[0,0] = intensity = np.bincount(labels) else: m[0,0] = intensity = np.bincount(labels, image) ic = np.bincount(labels, i * image) / intensity jc = np.bincount(labels, j * image) / intensity i = i - ic[labels] j = j - jc[labels] # # Start by calculating the moments m[p][q] of the image # sum(i**p j**q) # # m[1,0] = 0 via normalization # m[0,1] = 0 via normalization m[1,1] = np.bincount(labels, i*j*image) m[2,0] = np.bincount(labels, i*i*image) m[0,2] = np.bincount(labels, j*j*image) a = m[2,0] / m[0,0] b = 2*m[1,1]/m[0,0] c = m[0,2] / m[0,0] theta = np.arctan2(b,c-a) / 2 temp = np.sqrt(b**2+(a-c)**2) # # If you do a linear regression of the circles from 1 to 50 radius # in Matlab, the resultant values fit a line with slope=.9975 and # intercept .095. I'm adjusting the lengths accordingly. # mystery_constant = 0.095 mystery_multiplier = 0.9975 major_axis_len = (np.sqrt(8*(a+c+temp)) * mystery_multiplier + mystery_constant) minor_axis_len = (np.sqrt(8*(a+c-temp)) * mystery_multiplier + mystery_constant) eccentricity = np.sqrt(1-(minor_axis_len / major_axis_len)**2) compactness = 2 * np.pi * (a + c) / m[0,0] return ([np.column_stack((ic[indexes], jc[indexes])), eccentricity[indexes], major_axis_len[indexes], minor_axis_len[indexes], theta[indexes]] + ([compactness[indexes]] if wants_compactness else []))
def function[ellipse_from_second_moments_ijv, parameter[i, j, image, labels, indexes, wants_compactness]]: constant[Calculate measurements of ellipses equivalent to the second moments of labels i,j - coordinates of each point image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. 
eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis ] if compare[call[name[len], parameter[name[indexes]]] equal[==] constant[0]] begin[:] return[binary_operation[list[[<ast.Call object at 0x7da20c76fd00>]] + binary_operation[list[[<ast.Call object at 0x7da20c76fbe0>]] * <ast.IfExp object at 0x7da20c76e0e0>]]] if compare[call[name[len], parameter[name[i]]] equal[==] constant[0]] begin[:] return[binary_operation[list[[<ast.Call object at 0x7da20c76dba0>, <ast.Call object at 0x7da20c76d300>]] + binary_operation[list[[<ast.Call object at 0x7da20c76c0a0>]] * <ast.IfExp object at 0x7da20c76d120>]]] variable[nlabels] assign[=] binary_operation[call[name[np].max, parameter[name[indexes]]] + constant[1]] variable[m] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da20c76dd80>, <ast.List object at 0x7da20c76c760>, <ast.List object at 0x7da20c76cfd0>]], name[object]]] if call[name[np].all, parameter[compare[name[image] equal[==] constant[1]]]] begin[:] variable[image] assign[=] constant[1] call[name[m]][tuple[[<ast.Constant object at 0x7da20c76ee60>, <ast.Constant object at 0x7da20c76f9d0>]]] assign[=] call[name[np].bincount, parameter[name[labels]]] variable[ic] assign[=] binary_operation[call[name[np].bincount, parameter[name[labels], binary_operation[name[i] * name[image]]]] / name[intensity]] variable[jc] assign[=] binary_operation[call[name[np].bincount, parameter[name[labels], binary_operation[name[j] * name[image]]]] / name[intensity]] variable[i] assign[=] binary_operation[name[i] - call[name[ic]][name[labels]]] variable[j] assign[=] binary_operation[name[j] - call[name[jc]][name[labels]]] call[name[m]][tuple[[<ast.Constant object at 0x7da20c76cd00>, <ast.Constant object at 0x7da20c76d8d0>]]] assign[=] call[name[np].bincount, parameter[name[labels], binary_operation[binary_operation[name[i] * name[j]] * name[image]]]] call[name[m]][tuple[[<ast.Constant 
object at 0x7da20c76f2e0>, <ast.Constant object at 0x7da20c76f130>]]] assign[=] call[name[np].bincount, parameter[name[labels], binary_operation[binary_operation[name[i] * name[i]] * name[image]]]] call[name[m]][tuple[[<ast.Constant object at 0x7da20c76cdf0>, <ast.Constant object at 0x7da20c76eef0>]]] assign[=] call[name[np].bincount, parameter[name[labels], binary_operation[binary_operation[name[j] * name[j]] * name[image]]]] variable[a] assign[=] binary_operation[call[name[m]][tuple[[<ast.Constant object at 0x7da20c76d450>, <ast.Constant object at 0x7da20c76dc60>]]] / call[name[m]][tuple[[<ast.Constant object at 0x7da20c76cd90>, <ast.Constant object at 0x7da20c76d990>]]]] variable[b] assign[=] binary_operation[binary_operation[constant[2] * call[name[m]][tuple[[<ast.Constant object at 0x7da20c76efb0>, <ast.Constant object at 0x7da20c76c670>]]]] / call[name[m]][tuple[[<ast.Constant object at 0x7da20c76dab0>, <ast.Constant object at 0x7da20c76f640>]]]] variable[c] assign[=] binary_operation[call[name[m]][tuple[[<ast.Constant object at 0x7da20c76c2b0>, <ast.Constant object at 0x7da20c76e380>]]] / call[name[m]][tuple[[<ast.Constant object at 0x7da20cabe980>, <ast.Constant object at 0x7da20cabfb80>]]]] variable[theta] assign[=] binary_operation[call[name[np].arctan2, parameter[name[b], binary_operation[name[c] - name[a]]]] / constant[2]] variable[temp] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[b] ** constant[2]] + binary_operation[binary_operation[name[a] - name[c]] ** constant[2]]]]] variable[mystery_constant] assign[=] constant[0.095] variable[mystery_multiplier] assign[=] constant[0.9975] variable[major_axis_len] assign[=] binary_operation[binary_operation[call[name[np].sqrt, parameter[binary_operation[constant[8] * binary_operation[binary_operation[name[a] + name[c]] + name[temp]]]]] * name[mystery_multiplier]] + name[mystery_constant]] variable[minor_axis_len] assign[=] binary_operation[binary_operation[call[name[np].sqrt, 
parameter[binary_operation[constant[8] * binary_operation[binary_operation[name[a] + name[c]] - name[temp]]]]] * name[mystery_multiplier]] + name[mystery_constant]] variable[eccentricity] assign[=] call[name[np].sqrt, parameter[binary_operation[constant[1] - binary_operation[binary_operation[name[minor_axis_len] / name[major_axis_len]] ** constant[2]]]]] variable[compactness] assign[=] binary_operation[binary_operation[binary_operation[constant[2] * name[np].pi] * binary_operation[name[a] + name[c]]] / call[name[m]][tuple[[<ast.Constant object at 0x7da20c76edd0>, <ast.Constant object at 0x7da20c76c400>]]]] return[binary_operation[list[[<ast.Call object at 0x7da20c76fb80>, <ast.Subscript object at 0x7da20c76ca90>, <ast.Subscript object at 0x7da20c6c7e80>, <ast.Subscript object at 0x7da20c6c7d60>, <ast.Subscript object at 0x7da20c6c5e10>]] + <ast.IfExp object at 0x7da20c6c6c80>]]
keyword[def] identifier[ellipse_from_second_moments_ijv] ( identifier[i] , identifier[j] , identifier[image] , identifier[labels] , identifier[indexes] , identifier[wants_compactness] = keyword[False] ): literal[string] keyword[if] identifier[len] ( identifier[indexes] )== literal[int] : keyword[return] [ identifier[np] . identifier[zeros] (( literal[int] , literal[int] ))]+[ identifier[np] . identifier[zeros] (( literal[int] ,))]*( literal[int] keyword[if] identifier[wants_compactness] keyword[else] literal[int] ) keyword[if] identifier[len] ( identifier[i] )== literal[int] : keyword[return] ([ identifier[np] . identifier[zeros] (( identifier[len] ( identifier[indexes] ), literal[int] )), identifier[np] . identifier[ones] ( identifier[len] ( identifier[indexes] ))]+ [ identifier[np] . identifier[zeros] ( identifier[len] ( identifier[indexes] ))]*( literal[int] keyword[if] identifier[wants_compactness] keyword[else] literal[int] )) identifier[nlabels] = identifier[np] . identifier[max] ( identifier[indexes] )+ literal[int] identifier[m] = identifier[np] . identifier[array] ([[ keyword[None] , literal[int] , keyword[None] ], [ literal[int] , keyword[None] , keyword[None] ], [ keyword[None] , keyword[None] , keyword[None] ]], identifier[object] ) keyword[if] identifier[np] . identifier[all] ( identifier[image] == literal[int] ): identifier[image] = literal[int] identifier[m] [ literal[int] , literal[int] ]= identifier[intensity] = identifier[np] . identifier[bincount] ( identifier[labels] ) keyword[else] : identifier[m] [ literal[int] , literal[int] ]= identifier[intensity] = identifier[np] . identifier[bincount] ( identifier[labels] , identifier[image] ) identifier[ic] = identifier[np] . identifier[bincount] ( identifier[labels] , identifier[i] * identifier[image] )/ identifier[intensity] identifier[jc] = identifier[np] . 
identifier[bincount] ( identifier[labels] , identifier[j] * identifier[image] )/ identifier[intensity] identifier[i] = identifier[i] - identifier[ic] [ identifier[labels] ] identifier[j] = identifier[j] - identifier[jc] [ identifier[labels] ] identifier[m] [ literal[int] , literal[int] ]= identifier[np] . identifier[bincount] ( identifier[labels] , identifier[i] * identifier[j] * identifier[image] ) identifier[m] [ literal[int] , literal[int] ]= identifier[np] . identifier[bincount] ( identifier[labels] , identifier[i] * identifier[i] * identifier[image] ) identifier[m] [ literal[int] , literal[int] ]= identifier[np] . identifier[bincount] ( identifier[labels] , identifier[j] * identifier[j] * identifier[image] ) identifier[a] = identifier[m] [ literal[int] , literal[int] ]/ identifier[m] [ literal[int] , literal[int] ] identifier[b] = literal[int] * identifier[m] [ literal[int] , literal[int] ]/ identifier[m] [ literal[int] , literal[int] ] identifier[c] = identifier[m] [ literal[int] , literal[int] ]/ identifier[m] [ literal[int] , literal[int] ] identifier[theta] = identifier[np] . identifier[arctan2] ( identifier[b] , identifier[c] - identifier[a] )/ literal[int] identifier[temp] = identifier[np] . identifier[sqrt] ( identifier[b] ** literal[int] +( identifier[a] - identifier[c] )** literal[int] ) identifier[mystery_constant] = literal[int] identifier[mystery_multiplier] = literal[int] identifier[major_axis_len] =( identifier[np] . identifier[sqrt] ( literal[int] *( identifier[a] + identifier[c] + identifier[temp] ))* identifier[mystery_multiplier] + identifier[mystery_constant] ) identifier[minor_axis_len] =( identifier[np] . identifier[sqrt] ( literal[int] *( identifier[a] + identifier[c] - identifier[temp] ))* identifier[mystery_multiplier] + identifier[mystery_constant] ) identifier[eccentricity] = identifier[np] . 
identifier[sqrt] ( literal[int] -( identifier[minor_axis_len] / identifier[major_axis_len] )** literal[int] ) identifier[compactness] = literal[int] * identifier[np] . identifier[pi] *( identifier[a] + identifier[c] )/ identifier[m] [ literal[int] , literal[int] ] keyword[return] ([ identifier[np] . identifier[column_stack] (( identifier[ic] [ identifier[indexes] ], identifier[jc] [ identifier[indexes] ])), identifier[eccentricity] [ identifier[indexes] ], identifier[major_axis_len] [ identifier[indexes] ], identifier[minor_axis_len] [ identifier[indexes] ], identifier[theta] [ identifier[indexes] ]]+ ([ identifier[compactness] [ identifier[indexes] ]] keyword[if] identifier[wants_compactness] keyword[else] []))
def ellipse_from_second_moments_ijv(i, j, image, labels, indexes, wants_compactness=False): """Calculate measurements of ellipses equivalent to the second moments of labels i,j - coordinates of each point image - the intensity at each point labels - for each labeled object, derive an ellipse indexes - sequence of indexes to process returns the following arrays: coordinates of the center of the ellipse eccentricity major axis length minor axis length orientation some definitions taken from "Image Moments-Based Structuring and Tracking of Objects", LOURENA ROCHA, LUIZ VELHO, PAULO CEZAR P. CARVALHO, http://sibgrapi.sid.inpe.br/col/sid.inpe.br/banon/2002/10.23.11.34/doc/35.pdf particularly equation 5 (which has some errors in it). These yield the rectangle with equivalent second moments. I translate to the ellipse by multiplying by 1.154701 which is Matlab's calculation of the major and minor axis length for a square of length X divided by the actual length of the side of a square of that length. 
eccentricity is the distance between foci divided by the major axis length orientation is the angle of the major axis with respect to the X axis """ if len(indexes) == 0: return [np.zeros((0, 2))] + [np.zeros((0,))] * (5 if wants_compactness else 4) # depends on [control=['if'], data=[]] if len(i) == 0: return [np.zeros((len(indexes), 2)), np.ones(len(indexes))] + [np.zeros(len(indexes))] * (4 if wants_compactness else 3) # depends on [control=['if'], data=[]] # # Normalize to center of object for stability # nlabels = np.max(indexes) + 1 m = np.array([[None, 0, None], [0, None, None], [None, None, None]], object) if np.all(image == 1): image = 1 m[0, 0] = intensity = np.bincount(labels) # depends on [control=['if'], data=[]] else: m[0, 0] = intensity = np.bincount(labels, image) ic = np.bincount(labels, i * image) / intensity jc = np.bincount(labels, j * image) / intensity i = i - ic[labels] j = j - jc[labels] # # Start by calculating the moments m[p][q] of the image # sum(i**p j**q) # # m[1,0] = 0 via normalization # m[0,1] = 0 via normalization m[1, 1] = np.bincount(labels, i * j * image) m[2, 0] = np.bincount(labels, i * i * image) m[0, 2] = np.bincount(labels, j * j * image) a = m[2, 0] / m[0, 0] b = 2 * m[1, 1] / m[0, 0] c = m[0, 2] / m[0, 0] theta = np.arctan2(b, c - a) / 2 temp = np.sqrt(b ** 2 + (a - c) ** 2) # # If you do a linear regression of the circles from 1 to 50 radius # in Matlab, the resultant values fit a line with slope=.9975 and # intercept .095. I'm adjusting the lengths accordingly. 
# mystery_constant = 0.095 mystery_multiplier = 0.9975 major_axis_len = np.sqrt(8 * (a + c + temp)) * mystery_multiplier + mystery_constant minor_axis_len = np.sqrt(8 * (a + c - temp)) * mystery_multiplier + mystery_constant eccentricity = np.sqrt(1 - (minor_axis_len / major_axis_len) ** 2) compactness = 2 * np.pi * (a + c) / m[0, 0] return [np.column_stack((ic[indexes], jc[indexes])), eccentricity[indexes], major_axis_len[indexes], minor_axis_len[indexes], theta[indexes]] + ([compactness[indexes]] if wants_compactness else [])
def daisy_chains(self, kih, max_path_length=None): """ Generator for daisy chains (complementary kihs) associated with a knob. Notes ----- Daisy chain graph is the directed graph with edges from knob residue to each hole residue for each KnobIntoHole in self. Given a KnobIntoHole, the daisy chains are non-trivial paths in this graph (walks along the directed edges) that begin and end at the knob. These paths must be of length <= max_path_length Parameters ---------- kih : KnobIntoHole interaction. max_path_length : int or None Maximum length of a daisy chain. Defaults to number of chains in self.ampal_parent. This is the maximum sensible value. Larger values than this will cause slow running of this function. """ if max_path_length is None: max_path_length = len(self.ampal_parent) g = self.daisy_chain_graph paths = networkx.all_simple_paths(g, source=kih.knob, target=kih.knob, cutoff=max_path_length) return paths
def function[daisy_chains, parameter[self, kih, max_path_length]]: constant[ Generator for daisy chains (complementary kihs) associated with a knob. Notes ----- Daisy chain graph is the directed graph with edges from knob residue to each hole residue for each KnobIntoHole in self. Given a KnobIntoHole, the daisy chains are non-trivial paths in this graph (walks along the directed edges) that begin and end at the knob. These paths must be of length <= max_path_length Parameters ---------- kih : KnobIntoHole interaction. max_path_length : int or None Maximum length of a daisy chain. Defaults to number of chains in self.ampal_parent. This is the maximum sensible value. Larger values than this will cause slow running of this function. ] if compare[name[max_path_length] is constant[None]] begin[:] variable[max_path_length] assign[=] call[name[len], parameter[name[self].ampal_parent]] variable[g] assign[=] name[self].daisy_chain_graph variable[paths] assign[=] call[name[networkx].all_simple_paths, parameter[name[g]]] return[name[paths]]
keyword[def] identifier[daisy_chains] ( identifier[self] , identifier[kih] , identifier[max_path_length] = keyword[None] ): literal[string] keyword[if] identifier[max_path_length] keyword[is] keyword[None] : identifier[max_path_length] = identifier[len] ( identifier[self] . identifier[ampal_parent] ) identifier[g] = identifier[self] . identifier[daisy_chain_graph] identifier[paths] = identifier[networkx] . identifier[all_simple_paths] ( identifier[g] , identifier[source] = identifier[kih] . identifier[knob] , identifier[target] = identifier[kih] . identifier[knob] , identifier[cutoff] = identifier[max_path_length] ) keyword[return] identifier[paths]
def daisy_chains(self, kih, max_path_length=None): """ Generator for daisy chains (complementary kihs) associated with a knob. Notes ----- Daisy chain graph is the directed graph with edges from knob residue to each hole residue for each KnobIntoHole in self. Given a KnobIntoHole, the daisy chains are non-trivial paths in this graph (walks along the directed edges) that begin and end at the knob. These paths must be of length <= max_path_length Parameters ---------- kih : KnobIntoHole interaction. max_path_length : int or None Maximum length of a daisy chain. Defaults to number of chains in self.ampal_parent. This is the maximum sensible value. Larger values than this will cause slow running of this function. """ if max_path_length is None: max_path_length = len(self.ampal_parent) # depends on [control=['if'], data=['max_path_length']] g = self.daisy_chain_graph paths = networkx.all_simple_paths(g, source=kih.knob, target=kih.knob, cutoff=max_path_length) return paths
def update_repository(self, repository_form): """Updates an existing repository. arg: repository_form (osid.repository.RepositoryForm): the form containing the elements to be updated raise: IllegalState - ``repository_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``repository_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``repository_form`` did not originate from ``get_repository_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.update_bin_template if self._catalog_session is not None: return self._catalog_session.update_catalog(catalog_form=repository_form) collection = JSONClientValidated('repository', collection='Repository', runtime=self._runtime) if not isinstance(repository_form, ABCRepositoryForm): raise errors.InvalidArgument('argument type is not an RepositoryForm') if not repository_form.is_for_update(): raise errors.InvalidArgument('the RepositoryForm is for update only, not create') try: if self._forms[repository_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('repository_form already used in an update transaction') except KeyError: raise errors.Unsupported('repository_form did not originate from this session') if not repository_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') collection.save(repository_form._my_map) # save is deprecated - change to replace_one self._forms[repository_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned return objects.Repository(osid_object_map=repository_form._my_map, runtime=self._runtime, proxy=self._proxy)
def function[update_repository, parameter[self, repository_form]]: constant[Updates an existing repository. arg: repository_form (osid.repository.RepositoryForm): the form containing the elements to be updated raise: IllegalState - ``repository_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``repository_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``repository_form`` did not originate from ``get_repository_form_for_update()`` *compliance: mandatory -- This method must be implemented.* ] if compare[name[self]._catalog_session is_not constant[None]] begin[:] return[call[name[self]._catalog_session.update_catalog, parameter[]]] variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[repository]]] if <ast.UnaryOp object at 0x7da1b0a732b0> begin[:] <ast.Raise object at 0x7da1b0a71a20> if <ast.UnaryOp object at 0x7da1b0a70af0> begin[:] <ast.Raise object at 0x7da1b0a73d00> <ast.Try object at 0x7da1b0a703d0> if <ast.UnaryOp object at 0x7da1b0a71d80> begin[:] <ast.Raise object at 0x7da1b0a72680> call[name[collection].save, parameter[name[repository_form]._my_map]] call[name[self]._forms][call[call[name[repository_form].get_id, parameter[]].get_identifier, parameter[]]] assign[=] name[UPDATED] return[call[name[objects].Repository, parameter[]]]
keyword[def] identifier[update_repository] ( identifier[self] , identifier[repository_form] ): literal[string] keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[_catalog_session] . identifier[update_catalog] ( identifier[catalog_form] = identifier[repository_form] ) identifier[collection] = identifier[JSONClientValidated] ( literal[string] , identifier[collection] = literal[string] , identifier[runtime] = identifier[self] . identifier[_runtime] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[repository_form] , identifier[ABCRepositoryForm] ): keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] ) keyword[if] keyword[not] identifier[repository_form] . identifier[is_for_update] (): keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] ) keyword[try] : keyword[if] identifier[self] . identifier[_forms] [ identifier[repository_form] . identifier[get_id] (). identifier[get_identifier] ()]== identifier[UPDATED] : keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] ) keyword[except] identifier[KeyError] : keyword[raise] identifier[errors] . identifier[Unsupported] ( literal[string] ) keyword[if] keyword[not] identifier[repository_form] . identifier[is_valid] (): keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] ) identifier[collection] . identifier[save] ( identifier[repository_form] . identifier[_my_map] ) identifier[self] . identifier[_forms] [ identifier[repository_form] . identifier[get_id] (). identifier[get_identifier] ()]= identifier[UPDATED] keyword[return] identifier[objects] . identifier[Repository] ( identifier[osid_object_map] = identifier[repository_form] . identifier[_my_map] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] )
def update_repository(self, repository_form): """Updates an existing repository. arg: repository_form (osid.repository.RepositoryForm): the form containing the elements to be updated raise: IllegalState - ``repository_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``repository_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``repository_form`` did not originate from ``get_repository_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.update_bin_template if self._catalog_session is not None: return self._catalog_session.update_catalog(catalog_form=repository_form) # depends on [control=['if'], data=[]] collection = JSONClientValidated('repository', collection='Repository', runtime=self._runtime) if not isinstance(repository_form, ABCRepositoryForm): raise errors.InvalidArgument('argument type is not an RepositoryForm') # depends on [control=['if'], data=[]] if not repository_form.is_for_update(): raise errors.InvalidArgument('the RepositoryForm is for update only, not create') # depends on [control=['if'], data=[]] try: if self._forms[repository_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('repository_form already used in an update transaction') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except KeyError: raise errors.Unsupported('repository_form did not originate from this session') # depends on [control=['except'], data=[]] if not repository_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') # depends on [control=['if'], data=[]] collection.save(repository_form._my_map) # save is deprecated - change to replace_one self._forms[repository_form.get_id().get_identifier()] = UPDATED # Note: this is out of 
spec. The OSIDs don't require an object to be returned return objects.Repository(osid_object_map=repository_form._my_map, runtime=self._runtime, proxy=self._proxy)
def plot_net(fignum): """ Draws circle and tick marks for equal area projection. """ # make the perimeter plt.figure(num=fignum,) plt.clf() plt.axis("off") Dcirc = np.arange(0, 361.) Icirc = np.zeros(361, 'f') Xcirc, Ycirc = [], [] for k in range(361): XY = pmag.dimap(Dcirc[k], Icirc[k]) Xcirc.append(XY[0]) Ycirc.append(XY[1]) plt.plot(Xcirc, Ycirc, 'k') # put on the tick marks Xsym, Ysym = [], [] for I in range(10, 100, 10): XY = pmag.dimap(0., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') Xsym, Ysym = [], [] for I in range(10, 90, 10): XY = pmag.dimap(90., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') Xsym, Ysym = [], [] for I in range(10, 90, 10): XY = pmag.dimap(180., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') Xsym, Ysym = [], [] for I in range(10, 90, 10): XY = pmag.dimap(270., I) Xsym.append(XY[0]) Ysym.append(XY[1]) plt.plot(Xsym, Ysym, 'k+') for D in range(0, 360, 10): Xtick, Ytick = [], [] for I in range(4): XY = pmag.dimap(D, I) Xtick.append(XY[0]) Ytick.append(XY[1]) plt.plot(Xtick, Ytick, 'k') plt.axis("equal") plt.axis((-1.05, 1.05, -1.05, 1.05))
def function[plot_net, parameter[fignum]]: constant[ Draws circle and tick marks for equal area projection. ] call[name[plt].figure, parameter[]] call[name[plt].clf, parameter[]] call[name[plt].axis, parameter[constant[off]]] variable[Dcirc] assign[=] call[name[np].arange, parameter[constant[0], constant[361.0]]] variable[Icirc] assign[=] call[name[np].zeros, parameter[constant[361], constant[f]]] <ast.Tuple object at 0x7da1b036e410> assign[=] tuple[[<ast.List object at 0x7da1b036e860>, <ast.List object at 0x7da1b036e530>]] for taget[name[k]] in starred[call[name[range], parameter[constant[361]]]] begin[:] variable[XY] assign[=] call[name[pmag].dimap, parameter[call[name[Dcirc]][name[k]], call[name[Icirc]][name[k]]]] call[name[Xcirc].append, parameter[call[name[XY]][constant[0]]]] call[name[Ycirc].append, parameter[call[name[XY]][constant[1]]]] call[name[plt].plot, parameter[name[Xcirc], name[Ycirc], constant[k]]] <ast.Tuple object at 0x7da1b02df580> assign[=] tuple[[<ast.List object at 0x7da1b02dfc40>, <ast.List object at 0x7da1b02dde40>]] for taget[name[I]] in starred[call[name[range], parameter[constant[10], constant[100], constant[10]]]] begin[:] variable[XY] assign[=] call[name[pmag].dimap, parameter[constant[0.0], name[I]]] call[name[Xsym].append, parameter[call[name[XY]][constant[0]]]] call[name[Ysym].append, parameter[call[name[XY]][constant[1]]]] call[name[plt].plot, parameter[name[Xsym], name[Ysym], constant[k+]]] <ast.Tuple object at 0x7da1b02de440> assign[=] tuple[[<ast.List object at 0x7da1b02de650>, <ast.List object at 0x7da1b02de710>]] for taget[name[I]] in starred[call[name[range], parameter[constant[10], constant[90], constant[10]]]] begin[:] variable[XY] assign[=] call[name[pmag].dimap, parameter[constant[90.0], name[I]]] call[name[Xsym].append, parameter[call[name[XY]][constant[0]]]] call[name[Ysym].append, parameter[call[name[XY]][constant[1]]]] call[name[plt].plot, parameter[name[Xsym], name[Ysym], constant[k+]]] <ast.Tuple object at 
0x7da1b02df0a0> assign[=] tuple[[<ast.List object at 0x7da1b02deef0>, <ast.List object at 0x7da1b02def80>]] for taget[name[I]] in starred[call[name[range], parameter[constant[10], constant[90], constant[10]]]] begin[:] variable[XY] assign[=] call[name[pmag].dimap, parameter[constant[180.0], name[I]]] call[name[Xsym].append, parameter[call[name[XY]][constant[0]]]] call[name[Ysym].append, parameter[call[name[XY]][constant[1]]]] call[name[plt].plot, parameter[name[Xsym], name[Ysym], constant[k+]]] <ast.Tuple object at 0x7da1b02dded0> assign[=] tuple[[<ast.List object at 0x7da1b02ddff0>, <ast.List object at 0x7da1b02de2f0>]] for taget[name[I]] in starred[call[name[range], parameter[constant[10], constant[90], constant[10]]]] begin[:] variable[XY] assign[=] call[name[pmag].dimap, parameter[constant[270.0], name[I]]] call[name[Xsym].append, parameter[call[name[XY]][constant[0]]]] call[name[Ysym].append, parameter[call[name[XY]][constant[1]]]] call[name[plt].plot, parameter[name[Xsym], name[Ysym], constant[k+]]] for taget[name[D]] in starred[call[name[range], parameter[constant[0], constant[360], constant[10]]]] begin[:] <ast.Tuple object at 0x7da1b02d0fa0> assign[=] tuple[[<ast.List object at 0x7da1b02d0460>, <ast.List object at 0x7da1b02d00d0>]] for taget[name[I]] in starred[call[name[range], parameter[constant[4]]]] begin[:] variable[XY] assign[=] call[name[pmag].dimap, parameter[name[D], name[I]]] call[name[Xtick].append, parameter[call[name[XY]][constant[0]]]] call[name[Ytick].append, parameter[call[name[XY]][constant[1]]]] call[name[plt].plot, parameter[name[Xtick], name[Ytick], constant[k]]] call[name[plt].axis, parameter[constant[equal]]] call[name[plt].axis, parameter[tuple[[<ast.UnaryOp object at 0x7da1b02d1870>, <ast.Constant object at 0x7da1b02d1570>, <ast.UnaryOp object at 0x7da1b02d0820>, <ast.Constant object at 0x7da1b02d12a0>]]]]
keyword[def] identifier[plot_net] ( identifier[fignum] ): literal[string] identifier[plt] . identifier[figure] ( identifier[num] = identifier[fignum] ,) identifier[plt] . identifier[clf] () identifier[plt] . identifier[axis] ( literal[string] ) identifier[Dcirc] = identifier[np] . identifier[arange] ( literal[int] , literal[int] ) identifier[Icirc] = identifier[np] . identifier[zeros] ( literal[int] , literal[string] ) identifier[Xcirc] , identifier[Ycirc] =[],[] keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] ): identifier[XY] = identifier[pmag] . identifier[dimap] ( identifier[Dcirc] [ identifier[k] ], identifier[Icirc] [ identifier[k] ]) identifier[Xcirc] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[Ycirc] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[plt] . identifier[plot] ( identifier[Xcirc] , identifier[Ycirc] , literal[string] ) identifier[Xsym] , identifier[Ysym] =[],[] keyword[for] identifier[I] keyword[in] identifier[range] ( literal[int] , literal[int] , literal[int] ): identifier[XY] = identifier[pmag] . identifier[dimap] ( literal[int] , identifier[I] ) identifier[Xsym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[Ysym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[plt] . identifier[plot] ( identifier[Xsym] , identifier[Ysym] , literal[string] ) identifier[Xsym] , identifier[Ysym] =[],[] keyword[for] identifier[I] keyword[in] identifier[range] ( literal[int] , literal[int] , literal[int] ): identifier[XY] = identifier[pmag] . identifier[dimap] ( literal[int] , identifier[I] ) identifier[Xsym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[Ysym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[plt] . 
identifier[plot] ( identifier[Xsym] , identifier[Ysym] , literal[string] ) identifier[Xsym] , identifier[Ysym] =[],[] keyword[for] identifier[I] keyword[in] identifier[range] ( literal[int] , literal[int] , literal[int] ): identifier[XY] = identifier[pmag] . identifier[dimap] ( literal[int] , identifier[I] ) identifier[Xsym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[Ysym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[plt] . identifier[plot] ( identifier[Xsym] , identifier[Ysym] , literal[string] ) identifier[Xsym] , identifier[Ysym] =[],[] keyword[for] identifier[I] keyword[in] identifier[range] ( literal[int] , literal[int] , literal[int] ): identifier[XY] = identifier[pmag] . identifier[dimap] ( literal[int] , identifier[I] ) identifier[Xsym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[Ysym] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[plt] . identifier[plot] ( identifier[Xsym] , identifier[Ysym] , literal[string] ) keyword[for] identifier[D] keyword[in] identifier[range] ( literal[int] , literal[int] , literal[int] ): identifier[Xtick] , identifier[Ytick] =[],[] keyword[for] identifier[I] keyword[in] identifier[range] ( literal[int] ): identifier[XY] = identifier[pmag] . identifier[dimap] ( identifier[D] , identifier[I] ) identifier[Xtick] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[Ytick] . identifier[append] ( identifier[XY] [ literal[int] ]) identifier[plt] . identifier[plot] ( identifier[Xtick] , identifier[Ytick] , literal[string] ) identifier[plt] . identifier[axis] ( literal[string] ) identifier[plt] . identifier[axis] ((- literal[int] , literal[int] ,- literal[int] , literal[int] ))
def plot_net(fignum): """ Draws circle and tick marks for equal area projection. """ # make the perimeter plt.figure(num=fignum) plt.clf() plt.axis('off') Dcirc = np.arange(0, 361.0) Icirc = np.zeros(361, 'f') (Xcirc, Ycirc) = ([], []) for k in range(361): XY = pmag.dimap(Dcirc[k], Icirc[k]) Xcirc.append(XY[0]) Ycirc.append(XY[1]) # depends on [control=['for'], data=['k']] plt.plot(Xcirc, Ycirc, 'k') # put on the tick marks (Xsym, Ysym) = ([], []) for I in range(10, 100, 10): XY = pmag.dimap(0.0, I) Xsym.append(XY[0]) Ysym.append(XY[1]) # depends on [control=['for'], data=['I']] plt.plot(Xsym, Ysym, 'k+') (Xsym, Ysym) = ([], []) for I in range(10, 90, 10): XY = pmag.dimap(90.0, I) Xsym.append(XY[0]) Ysym.append(XY[1]) # depends on [control=['for'], data=['I']] plt.plot(Xsym, Ysym, 'k+') (Xsym, Ysym) = ([], []) for I in range(10, 90, 10): XY = pmag.dimap(180.0, I) Xsym.append(XY[0]) Ysym.append(XY[1]) # depends on [control=['for'], data=['I']] plt.plot(Xsym, Ysym, 'k+') (Xsym, Ysym) = ([], []) for I in range(10, 90, 10): XY = pmag.dimap(270.0, I) Xsym.append(XY[0]) Ysym.append(XY[1]) # depends on [control=['for'], data=['I']] plt.plot(Xsym, Ysym, 'k+') for D in range(0, 360, 10): (Xtick, Ytick) = ([], []) for I in range(4): XY = pmag.dimap(D, I) Xtick.append(XY[0]) Ytick.append(XY[1]) # depends on [control=['for'], data=['I']] plt.plot(Xtick, Ytick, 'k') # depends on [control=['for'], data=['D']] plt.axis('equal') plt.axis((-1.05, 1.05, -1.05, 1.05))
def nodeSatisfiesValues(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _c: DebugContext) -> bool: """ `5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_ For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v. """ if nc.values is None: return True else: if any(_nodeSatisfiesValue(cntxt, n, vsv) for vsv in nc.values): return True else: cntxt.fail_reason = f"Node: {cntxt.n3_mapper.n3(n)} not in value set:\n\t " \ f"{as_json(cntxt.type_last(nc), indent=None)[:60]}..." return False
def function[nodeSatisfiesValues, parameter[cntxt, n, nc, _c]]: constant[ `5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_ For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v. ] if compare[name[nc].values is constant[None]] begin[:] return[constant[True]]
keyword[def] identifier[nodeSatisfiesValues] ( identifier[cntxt] : identifier[Context] , identifier[n] : identifier[Node] , identifier[nc] : identifier[ShExJ] . identifier[NodeConstraint] , identifier[_c] : identifier[DebugContext] )-> identifier[bool] : literal[string] keyword[if] identifier[nc] . identifier[values] keyword[is] keyword[None] : keyword[return] keyword[True] keyword[else] : keyword[if] identifier[any] ( identifier[_nodeSatisfiesValue] ( identifier[cntxt] , identifier[n] , identifier[vsv] ) keyword[for] identifier[vsv] keyword[in] identifier[nc] . identifier[values] ): keyword[return] keyword[True] keyword[else] : identifier[cntxt] . identifier[fail_reason] = literal[string] literal[string] keyword[return] keyword[False]
def nodeSatisfiesValues(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint, _c: DebugContext) -> bool: """ `5.4.5 Values Constraint <http://shex.io/shex-semantics/#values>`_ For a node n and constraint value v, nodeSatisfies(n, v) if n matches some valueSetValue vsv in v. """ if nc.values is None: return True # depends on [control=['if'], data=[]] elif any((_nodeSatisfiesValue(cntxt, n, vsv) for vsv in nc.values)): return True # depends on [control=['if'], data=[]] else: cntxt.fail_reason = f'Node: {cntxt.n3_mapper.n3(n)} not in value set:\n\t {as_json(cntxt.type_last(nc), indent=None)[:60]}...' return False
def filter_dict(d, keys): """ Creates a new dict from an existing dict that only has the given keys """ return {k: v for k, v in d.items() if k in keys}
def function[filter_dict, parameter[d, keys]]: constant[ Creates a new dict from an existing dict that only has the given keys ] return[<ast.DictComp object at 0x7da204565870>]
keyword[def] identifier[filter_dict] ( identifier[d] , identifier[keys] ): literal[string] keyword[return] { identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[keys] }
def filter_dict(d, keys): """ Creates a new dict from an existing dict that only has the given keys """ return {k: v for (k, v) in d.items() if k in keys}
def _significand(self): """Return the significand of self, as a BigFloat. If self is a nonzero finite number, return a BigFloat m with the same precision as self, such that 0.5 <= m < 1. and self = +/-m * 2**e for some exponent e. If self is zero, infinity or nan, return a copy of self with the sign set to 0. """ m = self.copy() if self and is_finite(self): mpfr.mpfr_set_exp(m, 0) mpfr.mpfr_setsign(m, m, False, ROUND_TIES_TO_EVEN) return m
def function[_significand, parameter[self]]: constant[Return the significand of self, as a BigFloat. If self is a nonzero finite number, return a BigFloat m with the same precision as self, such that 0.5 <= m < 1. and self = +/-m * 2**e for some exponent e. If self is zero, infinity or nan, return a copy of self with the sign set to 0. ] variable[m] assign[=] call[name[self].copy, parameter[]] if <ast.BoolOp object at 0x7da207f99db0> begin[:] call[name[mpfr].mpfr_set_exp, parameter[name[m], constant[0]]] call[name[mpfr].mpfr_setsign, parameter[name[m], name[m], constant[False], name[ROUND_TIES_TO_EVEN]]] return[name[m]]
keyword[def] identifier[_significand] ( identifier[self] ): literal[string] identifier[m] = identifier[self] . identifier[copy] () keyword[if] identifier[self] keyword[and] identifier[is_finite] ( identifier[self] ): identifier[mpfr] . identifier[mpfr_set_exp] ( identifier[m] , literal[int] ) identifier[mpfr] . identifier[mpfr_setsign] ( identifier[m] , identifier[m] , keyword[False] , identifier[ROUND_TIES_TO_EVEN] ) keyword[return] identifier[m]
def _significand(self): """Return the significand of self, as a BigFloat. If self is a nonzero finite number, return a BigFloat m with the same precision as self, such that 0.5 <= m < 1. and self = +/-m * 2**e for some exponent e. If self is zero, infinity or nan, return a copy of self with the sign set to 0. """ m = self.copy() if self and is_finite(self): mpfr.mpfr_set_exp(m, 0) # depends on [control=['if'], data=[]] mpfr.mpfr_setsign(m, m, False, ROUND_TIES_TO_EVEN) return m
async def page_view(self, url: str, title: str, user_id: str, user_lang: str='') -> None: """ Log a page view. :param url: URL of the "page" :param title: Title of the "page" :param user_id: ID of the user seeing the page. :param user_lang: Current language of the UI. """ ga_url = 'https://www.google-analytics.com/collect' args = { 'v': '1', 'ds': 'web', 'de': 'UTF-8', 'tid': self.ga_id, 'cid': self.hash_user_id(user_id), 't': 'pageview', 'dh': self.ga_domain, 'dp': url, 'dt': title, } if user_lang: args['ul'] = user_lang logger.debug('GA settings = %s', urlencode(args)) async with self.session.post(ga_url, data=args) as r: if r.status == 200: logger.debug(f'Sent to GA {url} ({title}) for user {user_id}') else: logger.warning(f'Could not contact GA')
<ast.AsyncFunctionDef object at 0x7da18f721360>
keyword[async] keyword[def] identifier[page_view] ( identifier[self] , identifier[url] : identifier[str] , identifier[title] : identifier[str] , identifier[user_id] : identifier[str] , identifier[user_lang] : identifier[str] = literal[string] )-> keyword[None] : literal[string] identifier[ga_url] = literal[string] identifier[args] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[self] . identifier[ga_id] , literal[string] : identifier[self] . identifier[hash_user_id] ( identifier[user_id] ), literal[string] : literal[string] , literal[string] : identifier[self] . identifier[ga_domain] , literal[string] : identifier[url] , literal[string] : identifier[title] , } keyword[if] identifier[user_lang] : identifier[args] [ literal[string] ]= identifier[user_lang] identifier[logger] . identifier[debug] ( literal[string] , identifier[urlencode] ( identifier[args] )) keyword[async] keyword[with] identifier[self] . identifier[session] . identifier[post] ( identifier[ga_url] , identifier[data] = identifier[args] ) keyword[as] identifier[r] : keyword[if] identifier[r] . identifier[status] == literal[int] : identifier[logger] . identifier[debug] ( literal[string] ) keyword[else] : identifier[logger] . identifier[warning] ( literal[string] )
async def page_view(self, url: str, title: str, user_id: str, user_lang: str='') -> None: """ Log a page view. :param url: URL of the "page" :param title: Title of the "page" :param user_id: ID of the user seeing the page. :param user_lang: Current language of the UI. """ ga_url = 'https://www.google-analytics.com/collect' args = {'v': '1', 'ds': 'web', 'de': 'UTF-8', 'tid': self.ga_id, 'cid': self.hash_user_id(user_id), 't': 'pageview', 'dh': self.ga_domain, 'dp': url, 'dt': title} if user_lang: args['ul'] = user_lang # depends on [control=['if'], data=[]] logger.debug('GA settings = %s', urlencode(args)) async with self.session.post(ga_url, data=args) as r: if r.status == 200: logger.debug(f'Sent to GA {url} ({title}) for user {user_id}') # depends on [control=['if'], data=[]] else: logger.warning(f'Could not contact GA')
def intersect(self,range2): """Return the chunk they overlap as a range. options is passed to result from this object :param range2: :type range2: GenomicRange :return: Range with the intersecting segement, or None if not overlapping :rtype: GenomicRange """ if not self.overlaps(range2): return None return type(self)(self.chr,max(self.start,range2.start)+self._start_offset,min(self.end,range2.end),self.payload,self.dir)
def function[intersect, parameter[self, range2]]: constant[Return the chunk they overlap as a range. options is passed to result from this object :param range2: :type range2: GenomicRange :return: Range with the intersecting segement, or None if not overlapping :rtype: GenomicRange ] if <ast.UnaryOp object at 0x7da20c7c85b0> begin[:] return[constant[None]] return[call[call[name[type], parameter[name[self]]], parameter[name[self].chr, binary_operation[call[name[max], parameter[name[self].start, name[range2].start]] + name[self]._start_offset], call[name[min], parameter[name[self].end, name[range2].end]], name[self].payload, name[self].dir]]]
keyword[def] identifier[intersect] ( identifier[self] , identifier[range2] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[overlaps] ( identifier[range2] ): keyword[return] keyword[None] keyword[return] identifier[type] ( identifier[self] )( identifier[self] . identifier[chr] , identifier[max] ( identifier[self] . identifier[start] , identifier[range2] . identifier[start] )+ identifier[self] . identifier[_start_offset] , identifier[min] ( identifier[self] . identifier[end] , identifier[range2] . identifier[end] ), identifier[self] . identifier[payload] , identifier[self] . identifier[dir] )
def intersect(self, range2): """Return the chunk they overlap as a range. options is passed to result from this object :param range2: :type range2: GenomicRange :return: Range with the intersecting segement, or None if not overlapping :rtype: GenomicRange """ if not self.overlaps(range2): return None # depends on [control=['if'], data=[]] return type(self)(self.chr, max(self.start, range2.start) + self._start_offset, min(self.end, range2.end), self.payload, self.dir)
def download(self, source, dest): """ Download an archive file. :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. """ # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): auth, barehost = splituser(netloc) if auth is not None: source = urlunparse((proto, barehost, path, params, query, fragment)) username, password = splitpasswd(auth) passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) authhandler = HTTPBasicAuthHandler(passman) opener = build_opener(authhandler) install_opener(opener) response = urlopen(source) try: with open(dest, 'wb') as dest_file: dest_file.write(response.read()) except Exception as e: if os.path.isfile(dest): os.unlink(dest) raise e
def function[download, parameter[self, source, dest]]: constant[ Download an archive file. :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. ] <ast.Tuple object at 0x7da18dc9a920> assign[=] call[name[urlparse], parameter[name[source]]] if compare[name[proto] in tuple[[<ast.Constant object at 0x7da18dc9b610>, <ast.Constant object at 0x7da18dc98190>]]] begin[:] <ast.Tuple object at 0x7da18dc9ab30> assign[=] call[name[splituser], parameter[name[netloc]]] if compare[name[auth] is_not constant[None]] begin[:] variable[source] assign[=] call[name[urlunparse], parameter[tuple[[<ast.Name object at 0x7da18dc9b760>, <ast.Name object at 0x7da18dc98220>, <ast.Name object at 0x7da18dc99000>, <ast.Name object at 0x7da18dc988e0>, <ast.Name object at 0x7da18dc9af80>, <ast.Name object at 0x7da18dc9a080>]]]] <ast.Tuple object at 0x7da18dc9bb80> assign[=] call[name[splitpasswd], parameter[name[auth]]] variable[passman] assign[=] call[name[HTTPPasswordMgrWithDefaultRealm], parameter[]] call[name[passman].add_password, parameter[constant[None], name[source], name[username], name[password]]] variable[authhandler] assign[=] call[name[HTTPBasicAuthHandler], parameter[name[passman]]] variable[opener] assign[=] call[name[build_opener], parameter[name[authhandler]]] call[name[install_opener], parameter[name[opener]]] variable[response] assign[=] call[name[urlopen], parameter[name[source]]] <ast.Try object at 0x7da18dc98c70>
keyword[def] identifier[download] ( identifier[self] , identifier[source] , identifier[dest] ): literal[string] identifier[proto] , identifier[netloc] , identifier[path] , identifier[params] , identifier[query] , identifier[fragment] = identifier[urlparse] ( identifier[source] ) keyword[if] identifier[proto] keyword[in] ( literal[string] , literal[string] ): identifier[auth] , identifier[barehost] = identifier[splituser] ( identifier[netloc] ) keyword[if] identifier[auth] keyword[is] keyword[not] keyword[None] : identifier[source] = identifier[urlunparse] (( identifier[proto] , identifier[barehost] , identifier[path] , identifier[params] , identifier[query] , identifier[fragment] )) identifier[username] , identifier[password] = identifier[splitpasswd] ( identifier[auth] ) identifier[passman] = identifier[HTTPPasswordMgrWithDefaultRealm] () identifier[passman] . identifier[add_password] ( keyword[None] , identifier[source] , identifier[username] , identifier[password] ) identifier[authhandler] = identifier[HTTPBasicAuthHandler] ( identifier[passman] ) identifier[opener] = identifier[build_opener] ( identifier[authhandler] ) identifier[install_opener] ( identifier[opener] ) identifier[response] = identifier[urlopen] ( identifier[source] ) keyword[try] : keyword[with] identifier[open] ( identifier[dest] , literal[string] ) keyword[as] identifier[dest_file] : identifier[dest_file] . identifier[write] ( identifier[response] . identifier[read] ()) keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[dest] ): identifier[os] . identifier[unlink] ( identifier[dest] ) keyword[raise] identifier[e]
def download(self, source, dest): """ Download an archive file. :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. """ # propagate all exceptions # URLError, OSError, etc (proto, netloc, path, params, query, fragment) = urlparse(source) if proto in ('http', 'https'): (auth, barehost) = splituser(netloc) if auth is not None: source = urlunparse((proto, barehost, path, params, query, fragment)) (username, password) = splitpasswd(auth) passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) authhandler = HTTPBasicAuthHandler(passman) opener = build_opener(authhandler) install_opener(opener) # depends on [control=['if'], data=['auth']] # depends on [control=['if'], data=['proto']] response = urlopen(source) try: with open(dest, 'wb') as dest_file: dest_file.write(response.read()) # depends on [control=['with'], data=['dest_file']] # depends on [control=['try'], data=[]] except Exception as e: if os.path.isfile(dest): os.unlink(dest) # depends on [control=['if'], data=[]] raise e # depends on [control=['except'], data=['e']]
def image_to_osd(image, lang='osd', config='', nice=0, output_type=Output.STRING): ''' Returns string containing the orientation and script detection (OSD) ''' config = '{}-psm 0 {}'.format( '' if get_tesseract_version() < '3.05' else '-', config.strip() ).strip() args = [image, 'osd', lang, config, nice] return { Output.BYTES: lambda: run_and_get_output(*(args + [True])), Output.DICT: lambda: osd_to_dict(run_and_get_output(*args)), Output.STRING: lambda: run_and_get_output(*args), }[output_type]()
def function[image_to_osd, parameter[image, lang, config, nice, output_type]]: constant[ Returns string containing the orientation and script detection (OSD) ] variable[config] assign[=] call[call[constant[{}-psm 0 {}].format, parameter[<ast.IfExp object at 0x7da1b1cb6bc0>, call[name[config].strip, parameter[]]]].strip, parameter[]] variable[args] assign[=] list[[<ast.Name object at 0x7da1b1cb7340>, <ast.Constant object at 0x7da1b1cb7310>, <ast.Name object at 0x7da1b1cb73a0>, <ast.Name object at 0x7da1b1cb7370>, <ast.Name object at 0x7da1b1cb7700>]] return[call[call[dictionary[[<ast.Attribute object at 0x7da1b1cb7640>, <ast.Attribute object at 0x7da1b1cb7610>, <ast.Attribute object at 0x7da1b1cb74c0>], [<ast.Lambda object at 0x7da1b1cb7460>, <ast.Lambda object at 0x7da1b1acdc90>, <ast.Lambda object at 0x7da1b1acd5a0>]]][name[output_type]], parameter[]]]
keyword[def] identifier[image_to_osd] ( identifier[image] , identifier[lang] = literal[string] , identifier[config] = literal[string] , identifier[nice] = literal[int] , identifier[output_type] = identifier[Output] . identifier[STRING] ): literal[string] identifier[config] = literal[string] . identifier[format] ( literal[string] keyword[if] identifier[get_tesseract_version] ()< literal[string] keyword[else] literal[string] , identifier[config] . identifier[strip] () ). identifier[strip] () identifier[args] =[ identifier[image] , literal[string] , identifier[lang] , identifier[config] , identifier[nice] ] keyword[return] { identifier[Output] . identifier[BYTES] : keyword[lambda] : identifier[run_and_get_output] (*( identifier[args] +[ keyword[True] ])), identifier[Output] . identifier[DICT] : keyword[lambda] : identifier[osd_to_dict] ( identifier[run_and_get_output] (* identifier[args] )), identifier[Output] . identifier[STRING] : keyword[lambda] : identifier[run_and_get_output] (* identifier[args] ), }[ identifier[output_type] ]()
def image_to_osd(image, lang='osd', config='', nice=0, output_type=Output.STRING): """ Returns string containing the orientation and script detection (OSD) """ config = '{}-psm 0 {}'.format('' if get_tesseract_version() < '3.05' else '-', config.strip()).strip() args = [image, 'osd', lang, config, nice] return {Output.BYTES: lambda : run_and_get_output(*args + [True]), Output.DICT: lambda : osd_to_dict(run_and_get_output(*args)), Output.STRING: lambda : run_and_get_output(*args)}[output_type]()
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs): """Scrape Matplotlib images. Parameters ---------- block : tuple A tuple containing the (label, content, line_number) of the block. block_vars : dict Dict of block variables. gallery_conf : dict Contains the configuration of Sphinx-Gallery **kwargs : dict Additional keyword arguments to pass to :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``. The ``format`` kwarg in particular is used to set the file extension of the output file (currently only 'png' and 'svg' are supported). Returns ------- rst : str The ReSTructuredText that will be rendered to HTML containing the images. This is often produced by :func:`figure_rst`. """ matplotlib, plt = _import_matplotlib() image_path_iterator = block_vars['image_path_iterator'] image_paths = list() for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator): if 'format' in kwargs: image_path = '%s.%s' % (os.path.splitext(image_path)[0], kwargs['format']) # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. fig = plt.figure(fig_num) to_rgba = matplotlib.colors.colorConverter.to_rgba for attr in ['facecolor', 'edgecolor']: fig_attr = getattr(fig, 'get_' + attr)() default_attr = matplotlib.rcParams['figure.' + attr] if to_rgba(fig_attr) != to_rgba(default_attr) and \ attr not in kwargs: kwargs[attr] = fig_attr fig.savefig(image_path, **kwargs) image_paths.append(image_path) plt.close('all') return figure_rst(image_paths, gallery_conf['src_dir'])
def function[matplotlib_scraper, parameter[block, block_vars, gallery_conf]]: constant[Scrape Matplotlib images. Parameters ---------- block : tuple A tuple containing the (label, content, line_number) of the block. block_vars : dict Dict of block variables. gallery_conf : dict Contains the configuration of Sphinx-Gallery **kwargs : dict Additional keyword arguments to pass to :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``. The ``format`` kwarg in particular is used to set the file extension of the output file (currently only 'png' and 'svg' are supported). Returns ------- rst : str The ReSTructuredText that will be rendered to HTML containing the images. This is often produced by :func:`figure_rst`. ] <ast.Tuple object at 0x7da1b26ad210> assign[=] call[name[_import_matplotlib], parameter[]] variable[image_path_iterator] assign[=] call[name[block_vars]][constant[image_path_iterator]] variable[image_paths] assign[=] call[name[list], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b26ae710>, <ast.Name object at 0x7da1b26ae500>]]] in starred[call[name[zip], parameter[call[name[plt].get_fignums, parameter[]], name[image_path_iterator]]]] begin[:] if compare[constant[format] in name[kwargs]] begin[:] variable[image_path] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b26ae7d0>, <ast.Subscript object at 0x7da1b26adfc0>]]] variable[fig] assign[=] call[name[plt].figure, parameter[name[fig_num]]] variable[to_rgba] assign[=] name[matplotlib].colors.colorConverter.to_rgba for taget[name[attr]] in starred[list[[<ast.Constant object at 0x7da1b26ac670>, <ast.Constant object at 0x7da1b26ad4b0>]]] begin[:] variable[fig_attr] assign[=] call[call[name[getattr], parameter[name[fig], binary_operation[constant[get_] + name[attr]]]], parameter[]] variable[default_attr] assign[=] call[name[matplotlib].rcParams][binary_operation[constant[figure.] 
+ name[attr]]] if <ast.BoolOp object at 0x7da18f00d6c0> begin[:] call[name[kwargs]][name[attr]] assign[=] name[fig_attr] call[name[fig].savefig, parameter[name[image_path]]] call[name[image_paths].append, parameter[name[image_path]]] call[name[plt].close, parameter[constant[all]]] return[call[name[figure_rst], parameter[name[image_paths], call[name[gallery_conf]][constant[src_dir]]]]]
keyword[def] identifier[matplotlib_scraper] ( identifier[block] , identifier[block_vars] , identifier[gallery_conf] ,** identifier[kwargs] ): literal[string] identifier[matplotlib] , identifier[plt] = identifier[_import_matplotlib] () identifier[image_path_iterator] = identifier[block_vars] [ literal[string] ] identifier[image_paths] = identifier[list] () keyword[for] identifier[fig_num] , identifier[image_path] keyword[in] identifier[zip] ( identifier[plt] . identifier[get_fignums] (), identifier[image_path_iterator] ): keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[image_path] = literal[string] %( identifier[os] . identifier[path] . identifier[splitext] ( identifier[image_path] )[ literal[int] ], identifier[kwargs] [ literal[string] ]) identifier[fig] = identifier[plt] . identifier[figure] ( identifier[fig_num] ) identifier[to_rgba] = identifier[matplotlib] . identifier[colors] . identifier[colorConverter] . identifier[to_rgba] keyword[for] identifier[attr] keyword[in] [ literal[string] , literal[string] ]: identifier[fig_attr] = identifier[getattr] ( identifier[fig] , literal[string] + identifier[attr] )() identifier[default_attr] = identifier[matplotlib] . identifier[rcParams] [ literal[string] + identifier[attr] ] keyword[if] identifier[to_rgba] ( identifier[fig_attr] )!= identifier[to_rgba] ( identifier[default_attr] ) keyword[and] identifier[attr] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ identifier[attr] ]= identifier[fig_attr] identifier[fig] . identifier[savefig] ( identifier[image_path] ,** identifier[kwargs] ) identifier[image_paths] . identifier[append] ( identifier[image_path] ) identifier[plt] . identifier[close] ( literal[string] ) keyword[return] identifier[figure_rst] ( identifier[image_paths] , identifier[gallery_conf] [ literal[string] ])
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs): """Scrape Matplotlib images. Parameters ---------- block : tuple A tuple containing the (label, content, line_number) of the block. block_vars : dict Dict of block variables. gallery_conf : dict Contains the configuration of Sphinx-Gallery **kwargs : dict Additional keyword arguments to pass to :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``. The ``format`` kwarg in particular is used to set the file extension of the output file (currently only 'png' and 'svg' are supported). Returns ------- rst : str The ReSTructuredText that will be rendered to HTML containing the images. This is often produced by :func:`figure_rst`. """ (matplotlib, plt) = _import_matplotlib() image_path_iterator = block_vars['image_path_iterator'] image_paths = list() for (fig_num, image_path) in zip(plt.get_fignums(), image_path_iterator): if 'format' in kwargs: image_path = '%s.%s' % (os.path.splitext(image_path)[0], kwargs['format']) # depends on [control=['if'], data=['kwargs']] # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. fig = plt.figure(fig_num) to_rgba = matplotlib.colors.colorConverter.to_rgba for attr in ['facecolor', 'edgecolor']: fig_attr = getattr(fig, 'get_' + attr)() default_attr = matplotlib.rcParams['figure.' + attr] if to_rgba(fig_attr) != to_rgba(default_attr) and attr not in kwargs: kwargs[attr] = fig_attr # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']] fig.savefig(image_path, **kwargs) image_paths.append(image_path) # depends on [control=['for'], data=[]] plt.close('all') return figure_rst(image_paths, gallery_conf['src_dir'])
def ensure_backrefs(obj, fields=None): """Ensure that all forward references on the provided object have the appropriate backreferences. :param StoredObject obj: Database record :param list fields: Optional list of field names to check """ for ref in _collect_refs(obj, fields): updated = ref['value']._update_backref( ref['field_instance']._backref_field_name, obj, ref['field_name'], ) if updated: logging.debug('Updated reference {}:{}:{}:{}:{}'.format( obj._name, obj._primary_key, ref['field_name'], ref['value']._name, ref['value']._primary_key, ))
def function[ensure_backrefs, parameter[obj, fields]]: constant[Ensure that all forward references on the provided object have the appropriate backreferences. :param StoredObject obj: Database record :param list fields: Optional list of field names to check ] for taget[name[ref]] in starred[call[name[_collect_refs], parameter[name[obj], name[fields]]]] begin[:] variable[updated] assign[=] call[call[name[ref]][constant[value]]._update_backref, parameter[call[name[ref]][constant[field_instance]]._backref_field_name, name[obj], call[name[ref]][constant[field_name]]]] if name[updated] begin[:] call[name[logging].debug, parameter[call[constant[Updated reference {}:{}:{}:{}:{}].format, parameter[name[obj]._name, name[obj]._primary_key, call[name[ref]][constant[field_name]], call[name[ref]][constant[value]]._name, call[name[ref]][constant[value]]._primary_key]]]]
keyword[def] identifier[ensure_backrefs] ( identifier[obj] , identifier[fields] = keyword[None] ): literal[string] keyword[for] identifier[ref] keyword[in] identifier[_collect_refs] ( identifier[obj] , identifier[fields] ): identifier[updated] = identifier[ref] [ literal[string] ]. identifier[_update_backref] ( identifier[ref] [ literal[string] ]. identifier[_backref_field_name] , identifier[obj] , identifier[ref] [ literal[string] ], ) keyword[if] identifier[updated] : identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[obj] . identifier[_name] , identifier[obj] . identifier[_primary_key] , identifier[ref] [ literal[string] ], identifier[ref] [ literal[string] ]. identifier[_name] , identifier[ref] [ literal[string] ]. identifier[_primary_key] , ))
def ensure_backrefs(obj, fields=None): """Ensure that all forward references on the provided object have the appropriate backreferences. :param StoredObject obj: Database record :param list fields: Optional list of field names to check """ for ref in _collect_refs(obj, fields): updated = ref['value']._update_backref(ref['field_instance']._backref_field_name, obj, ref['field_name']) if updated: logging.debug('Updated reference {}:{}:{}:{}:{}'.format(obj._name, obj._primary_key, ref['field_name'], ref['value']._name, ref['value']._primary_key)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ref']]
def remove_callback(obj, handle): """Remove a callback from an object.""" callbacks = obj._callbacks if callbacks is handle: obj._callbacks = None elif isinstance(callbacks, dllist): callbacks.remove(handle) if not callbacks: obj._callbacks = None
def function[remove_callback, parameter[obj, handle]]: constant[Remove a callback from an object.] variable[callbacks] assign[=] name[obj]._callbacks if compare[name[callbacks] is name[handle]] begin[:] name[obj]._callbacks assign[=] constant[None]
keyword[def] identifier[remove_callback] ( identifier[obj] , identifier[handle] ): literal[string] identifier[callbacks] = identifier[obj] . identifier[_callbacks] keyword[if] identifier[callbacks] keyword[is] identifier[handle] : identifier[obj] . identifier[_callbacks] = keyword[None] keyword[elif] identifier[isinstance] ( identifier[callbacks] , identifier[dllist] ): identifier[callbacks] . identifier[remove] ( identifier[handle] ) keyword[if] keyword[not] identifier[callbacks] : identifier[obj] . identifier[_callbacks] = keyword[None]
def remove_callback(obj, handle): """Remove a callback from an object.""" callbacks = obj._callbacks if callbacks is handle: obj._callbacks = None # depends on [control=['if'], data=[]] elif isinstance(callbacks, dllist): callbacks.remove(handle) if not callbacks: obj._callbacks = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def _convert_to_dict(stdout): """Wrapper function for parsing hpssacli/ssacli command. This function gets the output from hpssacli/ssacli command and calls the recursive function _get_dict to return the complete dictionary containing the RAID information. """ lines = stdout.split("\n") lines = list(filter(None, lines)) info_dict, j = _get_dict(lines, 0, 0, 0) return info_dict
def function[_convert_to_dict, parameter[stdout]]: constant[Wrapper function for parsing hpssacli/ssacli command. This function gets the output from hpssacli/ssacli command and calls the recursive function _get_dict to return the complete dictionary containing the RAID information. ] variable[lines] assign[=] call[name[stdout].split, parameter[constant[ ]]] variable[lines] assign[=] call[name[list], parameter[call[name[filter], parameter[constant[None], name[lines]]]]] <ast.Tuple object at 0x7da1b19cb460> assign[=] call[name[_get_dict], parameter[name[lines], constant[0], constant[0], constant[0]]] return[name[info_dict]]
keyword[def] identifier[_convert_to_dict] ( identifier[stdout] ): literal[string] identifier[lines] = identifier[stdout] . identifier[split] ( literal[string] ) identifier[lines] = identifier[list] ( identifier[filter] ( keyword[None] , identifier[lines] )) identifier[info_dict] , identifier[j] = identifier[_get_dict] ( identifier[lines] , literal[int] , literal[int] , literal[int] ) keyword[return] identifier[info_dict]
def _convert_to_dict(stdout): """Wrapper function for parsing hpssacli/ssacli command. This function gets the output from hpssacli/ssacli command and calls the recursive function _get_dict to return the complete dictionary containing the RAID information. """ lines = stdout.split('\n') lines = list(filter(None, lines)) (info_dict, j) = _get_dict(lines, 0, 0, 0) return info_dict
def connect(self): """Connect to the unix domain socket, which is passed to us as self.host This is in host because the format we use for the unix domain socket is: http+unix://%2Fpath%2Fto%2Fsocket.sock """ try: self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) if has_timeout(self.timeout): self.sock.settimeout(self.timeout) self.sock.connect(unquote(self.host)) except socket.error as msg: if self.sock: self.sock.close() self.sock = None raise socket.error(msg)
def function[connect, parameter[self]]: constant[Connect to the unix domain socket, which is passed to us as self.host This is in host because the format we use for the unix domain socket is: http+unix://%2Fpath%2Fto%2Fsocket.sock ] <ast.Try object at 0x7da204566e60>
keyword[def] identifier[connect] ( identifier[self] ): literal[string] keyword[try] : identifier[self] . identifier[sock] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_UNIX] , identifier[socket] . identifier[SOCK_STREAM] ) keyword[if] identifier[has_timeout] ( identifier[self] . identifier[timeout] ): identifier[self] . identifier[sock] . identifier[settimeout] ( identifier[self] . identifier[timeout] ) identifier[self] . identifier[sock] . identifier[connect] ( identifier[unquote] ( identifier[self] . identifier[host] )) keyword[except] identifier[socket] . identifier[error] keyword[as] identifier[msg] : keyword[if] identifier[self] . identifier[sock] : identifier[self] . identifier[sock] . identifier[close] () identifier[self] . identifier[sock] = keyword[None] keyword[raise] identifier[socket] . identifier[error] ( identifier[msg] )
def connect(self): """Connect to the unix domain socket, which is passed to us as self.host This is in host because the format we use for the unix domain socket is: http+unix://%2Fpath%2Fto%2Fsocket.sock """ try: self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) if has_timeout(self.timeout): self.sock.settimeout(self.timeout) # depends on [control=['if'], data=[]] self.sock.connect(unquote(self.host)) # depends on [control=['try'], data=[]] except socket.error as msg: if self.sock: self.sock.close() # depends on [control=['if'], data=[]] self.sock = None raise socket.error(msg) # depends on [control=['except'], data=['msg']]
def search_regexp(self, regexp, flags = 0, minAddr = None, maxAddr = None, bufferPages = -1): """ Search for the given regular expression within the process memory. @type regexp: str @param regexp: Regular expression string. @type flags: int @param flags: Regular expression flags. @type minAddr: int @param minAddr: (Optional) Start the search at this memory address. @type maxAddr: int @param maxAddr: (Optional) Stop the search at this memory address. @type bufferPages: int @param bufferPages: (Optional) Number of memory pages to buffer when performing the search. Valid values are: - C{0} or C{None}: Automatically determine the required buffer size. May not give complete results for regular expressions that match variable sized strings. - C{> 0}: Set the buffer size, in memory pages. - C{< 0}: Disable buffering entirely. This may give you a little speed gain at the cost of an increased memory usage. If the target process has very large contiguous memory regions it may actually be slower or even fail. It's also the only way to guarantee complete results for regular expressions that match variable sized strings. @rtype: iterator of tuple( int, int, str ) @return: An iterator of tuples. Each tuple contains the following: - The memory address where the pattern was found. - The size of the data that matches the pattern. - The data that matches the pattern. @raise WindowsError: An error occurred when querying or reading the process memory. """ pattern = RegExpPattern(regexp, flags) return Search.search_process(self, pattern, minAddr, maxAddr, bufferPages)
def function[search_regexp, parameter[self, regexp, flags, minAddr, maxAddr, bufferPages]]: constant[ Search for the given regular expression within the process memory. @type regexp: str @param regexp: Regular expression string. @type flags: int @param flags: Regular expression flags. @type minAddr: int @param minAddr: (Optional) Start the search at this memory address. @type maxAddr: int @param maxAddr: (Optional) Stop the search at this memory address. @type bufferPages: int @param bufferPages: (Optional) Number of memory pages to buffer when performing the search. Valid values are: - C{0} or C{None}: Automatically determine the required buffer size. May not give complete results for regular expressions that match variable sized strings. - C{> 0}: Set the buffer size, in memory pages. - C{< 0}: Disable buffering entirely. This may give you a little speed gain at the cost of an increased memory usage. If the target process has very large contiguous memory regions it may actually be slower or even fail. It's also the only way to guarantee complete results for regular expressions that match variable sized strings. @rtype: iterator of tuple( int, int, str ) @return: An iterator of tuples. Each tuple contains the following: - The memory address where the pattern was found. - The size of the data that matches the pattern. - The data that matches the pattern. @raise WindowsError: An error occurred when querying or reading the process memory. ] variable[pattern] assign[=] call[name[RegExpPattern], parameter[name[regexp], name[flags]]] return[call[name[Search].search_process, parameter[name[self], name[pattern], name[minAddr], name[maxAddr], name[bufferPages]]]]
keyword[def] identifier[search_regexp] ( identifier[self] , identifier[regexp] , identifier[flags] = literal[int] , identifier[minAddr] = keyword[None] , identifier[maxAddr] = keyword[None] , identifier[bufferPages] =- literal[int] ): literal[string] identifier[pattern] = identifier[RegExpPattern] ( identifier[regexp] , identifier[flags] ) keyword[return] identifier[Search] . identifier[search_process] ( identifier[self] , identifier[pattern] , identifier[minAddr] , identifier[maxAddr] , identifier[bufferPages] )
def search_regexp(self, regexp, flags=0, minAddr=None, maxAddr=None, bufferPages=-1): """ Search for the given regular expression within the process memory. @type regexp: str @param regexp: Regular expression string. @type flags: int @param flags: Regular expression flags. @type minAddr: int @param minAddr: (Optional) Start the search at this memory address. @type maxAddr: int @param maxAddr: (Optional) Stop the search at this memory address. @type bufferPages: int @param bufferPages: (Optional) Number of memory pages to buffer when performing the search. Valid values are: - C{0} or C{None}: Automatically determine the required buffer size. May not give complete results for regular expressions that match variable sized strings. - C{> 0}: Set the buffer size, in memory pages. - C{< 0}: Disable buffering entirely. This may give you a little speed gain at the cost of an increased memory usage. If the target process has very large contiguous memory regions it may actually be slower or even fail. It's also the only way to guarantee complete results for regular expressions that match variable sized strings. @rtype: iterator of tuple( int, int, str ) @return: An iterator of tuples. Each tuple contains the following: - The memory address where the pattern was found. - The size of the data that matches the pattern. - The data that matches the pattern. @raise WindowsError: An error occurred when querying or reading the process memory. """ pattern = RegExpPattern(regexp, flags) return Search.search_process(self, pattern, minAddr, maxAddr, bufferPages)
def extended(self) -> ListP: """The body structure attributes with extension data.""" parts = [part.extended for part in self.parts] return ListP([_Concatenated(parts), String.build(self.subtype), _ParamsList(self.content_type_params), String.build(self.content_disposition), String.build(self.content_language), String.build(self.content_location)])
def function[extended, parameter[self]]: constant[The body structure attributes with extension data.] variable[parts] assign[=] <ast.ListComp object at 0x7da20e74ad40> return[call[name[ListP], parameter[list[[<ast.Call object at 0x7da20e74b370>, <ast.Call object at 0x7da20e74af80>, <ast.Call object at 0x7da20e74bd90>, <ast.Call object at 0x7da20e74be50>, <ast.Call object at 0x7da20e748730>, <ast.Call object at 0x7da20e74a770>]]]]]
keyword[def] identifier[extended] ( identifier[self] )-> identifier[ListP] : literal[string] identifier[parts] =[ identifier[part] . identifier[extended] keyword[for] identifier[part] keyword[in] identifier[self] . identifier[parts] ] keyword[return] identifier[ListP] ([ identifier[_Concatenated] ( identifier[parts] ), identifier[String] . identifier[build] ( identifier[self] . identifier[subtype] ), identifier[_ParamsList] ( identifier[self] . identifier[content_type_params] ), identifier[String] . identifier[build] ( identifier[self] . identifier[content_disposition] ), identifier[String] . identifier[build] ( identifier[self] . identifier[content_language] ), identifier[String] . identifier[build] ( identifier[self] . identifier[content_location] )])
def extended(self) -> ListP: """The body structure attributes with extension data.""" parts = [part.extended for part in self.parts] return ListP([_Concatenated(parts), String.build(self.subtype), _ParamsList(self.content_type_params), String.build(self.content_disposition), String.build(self.content_language), String.build(self.content_location)])
def _create_justification_button(self): """Creates horizontal justification button""" iconnames = ["JustifyLeft", "JustifyCenter", "JustifyRight"] bmplist = [icons[iconname] for iconname in iconnames] self.justify_tb = _widgets.BitmapToggleButton(self, bmplist) self.justify_tb.SetToolTipString(_(u"Justification")) self.Bind(wx.EVT_BUTTON, self.OnJustification, self.justify_tb) self.AddControl(self.justify_tb)
def function[_create_justification_button, parameter[self]]: constant[Creates horizontal justification button] variable[iconnames] assign[=] list[[<ast.Constant object at 0x7da1b1722cb0>, <ast.Constant object at 0x7da1b1721e40>, <ast.Constant object at 0x7da1b1720eb0>]] variable[bmplist] assign[=] <ast.ListComp object at 0x7da1b17f87f0> name[self].justify_tb assign[=] call[name[_widgets].BitmapToggleButton, parameter[name[self], name[bmplist]]] call[name[self].justify_tb.SetToolTipString, parameter[call[name[_], parameter[constant[Justification]]]]] call[name[self].Bind, parameter[name[wx].EVT_BUTTON, name[self].OnJustification, name[self].justify_tb]] call[name[self].AddControl, parameter[name[self].justify_tb]]
keyword[def] identifier[_create_justification_button] ( identifier[self] ): literal[string] identifier[iconnames] =[ literal[string] , literal[string] , literal[string] ] identifier[bmplist] =[ identifier[icons] [ identifier[iconname] ] keyword[for] identifier[iconname] keyword[in] identifier[iconnames] ] identifier[self] . identifier[justify_tb] = identifier[_widgets] . identifier[BitmapToggleButton] ( identifier[self] , identifier[bmplist] ) identifier[self] . identifier[justify_tb] . identifier[SetToolTipString] ( identifier[_] ( literal[string] )) identifier[self] . identifier[Bind] ( identifier[wx] . identifier[EVT_BUTTON] , identifier[self] . identifier[OnJustification] , identifier[self] . identifier[justify_tb] ) identifier[self] . identifier[AddControl] ( identifier[self] . identifier[justify_tb] )
def _create_justification_button(self): """Creates horizontal justification button""" iconnames = ['JustifyLeft', 'JustifyCenter', 'JustifyRight'] bmplist = [icons[iconname] for iconname in iconnames] self.justify_tb = _widgets.BitmapToggleButton(self, bmplist) self.justify_tb.SetToolTipString(_(u'Justification')) self.Bind(wx.EVT_BUTTON, self.OnJustification, self.justify_tb) self.AddControl(self.justify_tb)
def get_config_input_source_config_source_candidate_candidate(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_config = ET.Element("get_config") config = get_config input = ET.SubElement(get_config, "input") source = ET.SubElement(input, "source") config_source = ET.SubElement(source, "config-source") candidate = ET.SubElement(config_source, "candidate") candidate = ET.SubElement(candidate, "candidate") callback = kwargs.pop('callback', self._callback) return callback(config)
def function[get_config_input_source_config_source_candidate_candidate, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[get_config] assign[=] call[name[ET].Element, parameter[constant[get_config]]] variable[config] assign[=] name[get_config] variable[input] assign[=] call[name[ET].SubElement, parameter[name[get_config], constant[input]]] variable[source] assign[=] call[name[ET].SubElement, parameter[name[input], constant[source]]] variable[config_source] assign[=] call[name[ET].SubElement, parameter[name[source], constant[config-source]]] variable[candidate] assign[=] call[name[ET].SubElement, parameter[name[config_source], constant[candidate]]] variable[candidate] assign[=] call[name[ET].SubElement, parameter[name[candidate], constant[candidate]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[get_config_input_source_config_source_candidate_candidate] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[get_config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[get_config] identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[get_config] , literal[string] ) identifier[source] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] ) identifier[config_source] = identifier[ET] . identifier[SubElement] ( identifier[source] , literal[string] ) identifier[candidate] = identifier[ET] . identifier[SubElement] ( identifier[config_source] , literal[string] ) identifier[candidate] = identifier[ET] . identifier[SubElement] ( identifier[candidate] , literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def get_config_input_source_config_source_candidate_candidate(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') get_config = ET.Element('get_config') config = get_config input = ET.SubElement(get_config, 'input') source = ET.SubElement(input, 'source') config_source = ET.SubElement(source, 'config-source') candidate = ET.SubElement(config_source, 'candidate') candidate = ET.SubElement(candidate, 'candidate') callback = kwargs.pop('callback', self._callback) return callback(config)
def get_posts(self, offset=0, limit=1000, order=None, filters=None): """ This method returns list of Posts for this Data Source starting at a given offset and not more than limit It will call content-specific methods: _format() to format output from the DataStore """ order = self._get_order(order) cache_key = self.get_cache_key(offset, limit, order, filters) content = cache.get(cache_key) # if content: # return content try: if self.up_to_date(): pass # not time to update yet else: self.update() except: raise pass # query the database for now and update later query = self._get_query(order=order, filters=filters) posts = query[int(offset):int(offset)+int(limit)] posts = self._format(posts) cache_duration = conf.GOSCALE_CACHE_DURATION if posts else 1 cache.set(cache_key, posts, cache_duration) return posts
def function[get_posts, parameter[self, offset, limit, order, filters]]: constant[ This method returns list of Posts for this Data Source starting at a given offset and not more than limit It will call content-specific methods: _format() to format output from the DataStore ] variable[order] assign[=] call[name[self]._get_order, parameter[name[order]]] variable[cache_key] assign[=] call[name[self].get_cache_key, parameter[name[offset], name[limit], name[order], name[filters]]] variable[content] assign[=] call[name[cache].get, parameter[name[cache_key]]] <ast.Try object at 0x7da18f7220b0> variable[query] assign[=] call[name[self]._get_query, parameter[]] variable[posts] assign[=] call[name[query]][<ast.Slice object at 0x7da18f720940>] variable[posts] assign[=] call[name[self]._format, parameter[name[posts]]] variable[cache_duration] assign[=] <ast.IfExp object at 0x7da18f7200d0> call[name[cache].set, parameter[name[cache_key], name[posts], name[cache_duration]]] return[name[posts]]
keyword[def] identifier[get_posts] ( identifier[self] , identifier[offset] = literal[int] , identifier[limit] = literal[int] , identifier[order] = keyword[None] , identifier[filters] = keyword[None] ): literal[string] identifier[order] = identifier[self] . identifier[_get_order] ( identifier[order] ) identifier[cache_key] = identifier[self] . identifier[get_cache_key] ( identifier[offset] , identifier[limit] , identifier[order] , identifier[filters] ) identifier[content] = identifier[cache] . identifier[get] ( identifier[cache_key] ) keyword[try] : keyword[if] identifier[self] . identifier[up_to_date] (): keyword[pass] keyword[else] : identifier[self] . identifier[update] () keyword[except] : keyword[raise] keyword[pass] identifier[query] = identifier[self] . identifier[_get_query] ( identifier[order] = identifier[order] , identifier[filters] = identifier[filters] ) identifier[posts] = identifier[query] [ identifier[int] ( identifier[offset] ): identifier[int] ( identifier[offset] )+ identifier[int] ( identifier[limit] )] identifier[posts] = identifier[self] . identifier[_format] ( identifier[posts] ) identifier[cache_duration] = identifier[conf] . identifier[GOSCALE_CACHE_DURATION] keyword[if] identifier[posts] keyword[else] literal[int] identifier[cache] . identifier[set] ( identifier[cache_key] , identifier[posts] , identifier[cache_duration] ) keyword[return] identifier[posts]
def get_posts(self, offset=0, limit=1000, order=None, filters=None): """ This method returns list of Posts for this Data Source starting at a given offset and not more than limit It will call content-specific methods: _format() to format output from the DataStore """ order = self._get_order(order) cache_key = self.get_cache_key(offset, limit, order, filters) content = cache.get(cache_key) # if content: # return content try: if self.up_to_date(): pass # not time to update yet # depends on [control=['if'], data=[]] else: self.update() # depends on [control=['try'], data=[]] except: raise pass # query the database for now and update later # depends on [control=['except'], data=[]] query = self._get_query(order=order, filters=filters) posts = query[int(offset):int(offset) + int(limit)] posts = self._format(posts) cache_duration = conf.GOSCALE_CACHE_DURATION if posts else 1 cache.set(cache_key, posts, cache_duration) return posts
def accessed(filename): ''' Retrieve how long ago a file has been accessed. :param filename: name of the file >>> print accessed(__file__) # doctest: +SKIP just now ''' if isinstance(filename, file): filename = filename.name return duration(os.stat(filename)[stat.ST_ATIME])
def function[accessed, parameter[filename]]: constant[ Retrieve how long ago a file has been accessed. :param filename: name of the file >>> print accessed(__file__) # doctest: +SKIP just now ] if call[name[isinstance], parameter[name[filename], name[file]]] begin[:] variable[filename] assign[=] name[filename].name return[call[name[duration], parameter[call[call[name[os].stat, parameter[name[filename]]]][name[stat].ST_ATIME]]]]
keyword[def] identifier[accessed] ( identifier[filename] ): literal[string] keyword[if] identifier[isinstance] ( identifier[filename] , identifier[file] ): identifier[filename] = identifier[filename] . identifier[name] keyword[return] identifier[duration] ( identifier[os] . identifier[stat] ( identifier[filename] )[ identifier[stat] . identifier[ST_ATIME] ])
def accessed(filename): """ Retrieve how long ago a file has been accessed. :param filename: name of the file >>> print accessed(__file__) # doctest: +SKIP just now """ if isinstance(filename, file): filename = filename.name # depends on [control=['if'], data=[]] return duration(os.stat(filename)[stat.ST_ATIME])
def maximum(self, node): """ find the max node when node regard as a root node :param node: :return: max node """ temp_node = node while temp_node.right is not None: temp_node = temp_node.right return temp_node
def function[maximum, parameter[self, node]]: constant[ find the max node when node regard as a root node :param node: :return: max node ] variable[temp_node] assign[=] name[node] while compare[name[temp_node].right is_not constant[None]] begin[:] variable[temp_node] assign[=] name[temp_node].right return[name[temp_node]]
keyword[def] identifier[maximum] ( identifier[self] , identifier[node] ): literal[string] identifier[temp_node] = identifier[node] keyword[while] identifier[temp_node] . identifier[right] keyword[is] keyword[not] keyword[None] : identifier[temp_node] = identifier[temp_node] . identifier[right] keyword[return] identifier[temp_node]
def maximum(self, node): """ find the max node when node regard as a root node :param node: :return: max node """ temp_node = node while temp_node.right is not None: temp_node = temp_node.right # depends on [control=['while'], data=[]] return temp_node
def _get_stddevs(self, C, stddev_types, num_sites, mag_conversion_sigma): """ Return total standard deviation. """ assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) sigma = np.zeros(num_sites) + C['sigma'] * np.log(10) sigma = np.sqrt(sigma ** 2 + (C['a'] ** 2) * (mag_conversion_sigma ** 2)) stddevs = [sigma for _ in stddev_types] return stddevs
def function[_get_stddevs, parameter[self, C, stddev_types, num_sites, mag_conversion_sigma]]: constant[ Return total standard deviation. ] assert[call[name[all], parameter[<ast.GeneratorExp object at 0x7da18f58c1f0>]]] variable[sigma] assign[=] binary_operation[call[name[np].zeros, parameter[name[num_sites]]] + binary_operation[call[name[C]][constant[sigma]] * call[name[np].log, parameter[constant[10]]]]] variable[sigma] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[sigma] ** constant[2]] + binary_operation[binary_operation[call[name[C]][constant[a]] ** constant[2]] * binary_operation[name[mag_conversion_sigma] ** constant[2]]]]]] variable[stddevs] assign[=] <ast.ListComp object at 0x7da18ede7b80> return[name[stddevs]]
keyword[def] identifier[_get_stddevs] ( identifier[self] , identifier[C] , identifier[stddev_types] , identifier[num_sites] , identifier[mag_conversion_sigma] ): literal[string] keyword[assert] identifier[all] ( identifier[stddev_type] keyword[in] identifier[self] . identifier[DEFINED_FOR_STANDARD_DEVIATION_TYPES] keyword[for] identifier[stddev_type] keyword[in] identifier[stddev_types] ) identifier[sigma] = identifier[np] . identifier[zeros] ( identifier[num_sites] )+ identifier[C] [ literal[string] ]* identifier[np] . identifier[log] ( literal[int] ) identifier[sigma] = identifier[np] . identifier[sqrt] ( identifier[sigma] ** literal[int] +( identifier[C] [ literal[string] ]** literal[int] )*( identifier[mag_conversion_sigma] ** literal[int] )) identifier[stddevs] =[ identifier[sigma] keyword[for] identifier[_] keyword[in] identifier[stddev_types] ] keyword[return] identifier[stddevs]
def _get_stddevs(self, C, stddev_types, num_sites, mag_conversion_sigma): """ Return total standard deviation. """ assert all((stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types)) sigma = np.zeros(num_sites) + C['sigma'] * np.log(10) sigma = np.sqrt(sigma ** 2 + C['a'] ** 2 * mag_conversion_sigma ** 2) stddevs = [sigma for _ in stddev_types] return stddevs
def get_um(method_name, response=False): """Get protobuf for given method name :param method_name: full method name (e.g. ``Player.GetGameBadgeLevels#1``) :type method_name: :class:`str` :param response: whether to return proto for response or request :type response: :class:`bool` :return: protobuf message """ key = (method_name, response) if key not in method_lookup: match = re.findall(r'^([a-z]+)\.([a-z]+)#(\d)?$', method_name, re.I) if not match: return None interface, method, version = match[0] if interface not in service_lookup: return None package = import_module(service_lookup[interface]) service = getattr(package, interface, None) if service is None: return None for method_desc in service.GetDescriptor().methods: name = "%s.%s#%d" % (interface, method_desc.name, 1) method_lookup[(name, False)] = getattr(package, method_desc.input_type.full_name, None) method_lookup[(name, True)] = getattr(package, method_desc.output_type.full_name, None) return method_lookup[key]
def function[get_um, parameter[method_name, response]]: constant[Get protobuf for given method name :param method_name: full method name (e.g. ``Player.GetGameBadgeLevels#1``) :type method_name: :class:`str` :param response: whether to return proto for response or request :type response: :class:`bool` :return: protobuf message ] variable[key] assign[=] tuple[[<ast.Name object at 0x7da1b1d4b790>, <ast.Name object at 0x7da1b1d482b0>]] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[method_lookup]] begin[:] variable[match] assign[=] call[name[re].findall, parameter[constant[^([a-z]+)\.([a-z]+)#(\d)?$], name[method_name], name[re].I]] if <ast.UnaryOp object at 0x7da1b1d4a530> begin[:] return[constant[None]] <ast.Tuple object at 0x7da1b1d494e0> assign[=] call[name[match]][constant[0]] if compare[name[interface] <ast.NotIn object at 0x7da2590d7190> name[service_lookup]] begin[:] return[constant[None]] variable[package] assign[=] call[name[import_module], parameter[call[name[service_lookup]][name[interface]]]] variable[service] assign[=] call[name[getattr], parameter[name[package], name[interface], constant[None]]] if compare[name[service] is constant[None]] begin[:] return[constant[None]] for taget[name[method_desc]] in starred[call[name[service].GetDescriptor, parameter[]].methods] begin[:] variable[name] assign[=] binary_operation[constant[%s.%s#%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1d65060>, <ast.Attribute object at 0x7da1b1d650f0>, <ast.Constant object at 0x7da1b1d641f0>]]] call[name[method_lookup]][tuple[[<ast.Name object at 0x7da1b1d65d50>, <ast.Constant object at 0x7da1b1d64250>]]] assign[=] call[name[getattr], parameter[name[package], name[method_desc].input_type.full_name, constant[None]]] call[name[method_lookup]][tuple[[<ast.Name object at 0x7da1b1d66f20>, <ast.Constant object at 0x7da1b1d676a0>]]] assign[=] call[name[getattr], parameter[name[package], name[method_desc].output_type.full_name, constant[None]]] 
return[call[name[method_lookup]][name[key]]]
keyword[def] identifier[get_um] ( identifier[method_name] , identifier[response] = keyword[False] ): literal[string] identifier[key] =( identifier[method_name] , identifier[response] ) keyword[if] identifier[key] keyword[not] keyword[in] identifier[method_lookup] : identifier[match] = identifier[re] . identifier[findall] ( literal[string] , identifier[method_name] , identifier[re] . identifier[I] ) keyword[if] keyword[not] identifier[match] : keyword[return] keyword[None] identifier[interface] , identifier[method] , identifier[version] = identifier[match] [ literal[int] ] keyword[if] identifier[interface] keyword[not] keyword[in] identifier[service_lookup] : keyword[return] keyword[None] identifier[package] = identifier[import_module] ( identifier[service_lookup] [ identifier[interface] ]) identifier[service] = identifier[getattr] ( identifier[package] , identifier[interface] , keyword[None] ) keyword[if] identifier[service] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[for] identifier[method_desc] keyword[in] identifier[service] . identifier[GetDescriptor] (). identifier[methods] : identifier[name] = literal[string] %( identifier[interface] , identifier[method_desc] . identifier[name] , literal[int] ) identifier[method_lookup] [( identifier[name] , keyword[False] )]= identifier[getattr] ( identifier[package] , identifier[method_desc] . identifier[input_type] . identifier[full_name] , keyword[None] ) identifier[method_lookup] [( identifier[name] , keyword[True] )]= identifier[getattr] ( identifier[package] , identifier[method_desc] . identifier[output_type] . identifier[full_name] , keyword[None] ) keyword[return] identifier[method_lookup] [ identifier[key] ]
def get_um(method_name, response=False): """Get protobuf for given method name :param method_name: full method name (e.g. ``Player.GetGameBadgeLevels#1``) :type method_name: :class:`str` :param response: whether to return proto for response or request :type response: :class:`bool` :return: protobuf message """ key = (method_name, response) if key not in method_lookup: match = re.findall('^([a-z]+)\\.([a-z]+)#(\\d)?$', method_name, re.I) if not match: return None # depends on [control=['if'], data=[]] (interface, method, version) = match[0] if interface not in service_lookup: return None # depends on [control=['if'], data=[]] package = import_module(service_lookup[interface]) service = getattr(package, interface, None) if service is None: return None # depends on [control=['if'], data=[]] for method_desc in service.GetDescriptor().methods: name = '%s.%s#%d' % (interface, method_desc.name, 1) method_lookup[name, False] = getattr(package, method_desc.input_type.full_name, None) method_lookup[name, True] = getattr(package, method_desc.output_type.full_name, None) # depends on [control=['for'], data=['method_desc']] # depends on [control=['if'], data=['method_lookup']] return method_lookup[key]
def polyline2pathd(polyline_d, is_polygon=False): """converts the string from a polyline points-attribute to a string for a Path object d-attribute""" points = COORD_PAIR_TMPLT.findall(polyline_d) closed = (float(points[0][0]) == float(points[-1][0]) and float(points[0][1]) == float(points[-1][1])) # The `parse_path` call ignores redundant 'z' (closure) commands # e.g. `parse_path('M0 0L100 100Z') == parse_path('M0 0L100 100L0 0Z')` # This check ensures that an n-point polygon is converted to an n-Line path. if is_polygon and closed: points.append(points[0]) d = 'M' + 'L'.join('{0} {1}'.format(x,y) for x,y in points) if is_polygon or closed: d += 'z' return d
def function[polyline2pathd, parameter[polyline_d, is_polygon]]: constant[converts the string from a polyline points-attribute to a string for a Path object d-attribute] variable[points] assign[=] call[name[COORD_PAIR_TMPLT].findall, parameter[name[polyline_d]]] variable[closed] assign[=] <ast.BoolOp object at 0x7da20cabfe50> if <ast.BoolOp object at 0x7da2054a7df0> begin[:] call[name[points].append, parameter[call[name[points]][constant[0]]]] variable[d] assign[=] binary_operation[constant[M] + call[constant[L].join, parameter[<ast.GeneratorExp object at 0x7da2054a51b0>]]] if <ast.BoolOp object at 0x7da2054a7130> begin[:] <ast.AugAssign object at 0x7da2054a6050> return[name[d]]
keyword[def] identifier[polyline2pathd] ( identifier[polyline_d] , identifier[is_polygon] = keyword[False] ): literal[string] identifier[points] = identifier[COORD_PAIR_TMPLT] . identifier[findall] ( identifier[polyline_d] ) identifier[closed] =( identifier[float] ( identifier[points] [ literal[int] ][ literal[int] ])== identifier[float] ( identifier[points] [- literal[int] ][ literal[int] ]) keyword[and] identifier[float] ( identifier[points] [ literal[int] ][ literal[int] ])== identifier[float] ( identifier[points] [- literal[int] ][ literal[int] ])) keyword[if] identifier[is_polygon] keyword[and] identifier[closed] : identifier[points] . identifier[append] ( identifier[points] [ literal[int] ]) identifier[d] = literal[string] + literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[x] , identifier[y] ) keyword[for] identifier[x] , identifier[y] keyword[in] identifier[points] ) keyword[if] identifier[is_polygon] keyword[or] identifier[closed] : identifier[d] += literal[string] keyword[return] identifier[d]
def polyline2pathd(polyline_d, is_polygon=False): """converts the string from a polyline points-attribute to a string for a Path object d-attribute""" points = COORD_PAIR_TMPLT.findall(polyline_d) closed = float(points[0][0]) == float(points[-1][0]) and float(points[0][1]) == float(points[-1][1]) # The `parse_path` call ignores redundant 'z' (closure) commands # e.g. `parse_path('M0 0L100 100Z') == parse_path('M0 0L100 100L0 0Z')` # This check ensures that an n-point polygon is converted to an n-Line path. if is_polygon and closed: points.append(points[0]) # depends on [control=['if'], data=[]] d = 'M' + 'L'.join(('{0} {1}'.format(x, y) for (x, y) in points)) if is_polygon or closed: d += 'z' # depends on [control=['if'], data=[]] return d
def output_detailed(paragraphs, fp=sys.stdout): """ Same as output_default, but only <p> tags are used and the following attributes are added: class, cfclass and heading. """ for paragraph in paragraphs: output = '<p class="%s" cfclass="%s" heading="%i" xpath="%s"> %s' % ( paragraph.class_type, paragraph.cf_class, int(paragraph.heading), paragraph.xpath, cgi.escape(paragraph.text) ) print(output, file=fp)
def function[output_detailed, parameter[paragraphs, fp]]: constant[ Same as output_default, but only <p> tags are used and the following attributes are added: class, cfclass and heading. ] for taget[name[paragraph]] in starred[name[paragraphs]] begin[:] variable[output] assign[=] binary_operation[constant[<p class="%s" cfclass="%s" heading="%i" xpath="%s"> %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede72b0>, <ast.Attribute object at 0x7da18ede49a0>, <ast.Call object at 0x7da18ede6410>, <ast.Attribute object at 0x7da18ede5c30>, <ast.Call object at 0x7da18ede5360>]]] call[name[print], parameter[name[output]]]
keyword[def] identifier[output_detailed] ( identifier[paragraphs] , identifier[fp] = identifier[sys] . identifier[stdout] ): literal[string] keyword[for] identifier[paragraph] keyword[in] identifier[paragraphs] : identifier[output] = literal[string] %( identifier[paragraph] . identifier[class_type] , identifier[paragraph] . identifier[cf_class] , identifier[int] ( identifier[paragraph] . identifier[heading] ), identifier[paragraph] . identifier[xpath] , identifier[cgi] . identifier[escape] ( identifier[paragraph] . identifier[text] ) ) identifier[print] ( identifier[output] , identifier[file] = identifier[fp] )
def output_detailed(paragraphs, fp=sys.stdout): """ Same as output_default, but only <p> tags are used and the following attributes are added: class, cfclass and heading. """ for paragraph in paragraphs: output = '<p class="%s" cfclass="%s" heading="%i" xpath="%s"> %s' % (paragraph.class_type, paragraph.cf_class, int(paragraph.heading), paragraph.xpath, cgi.escape(paragraph.text)) print(output, file=fp) # depends on [control=['for'], data=['paragraph']]
def as_tryte_string(self): # type: () -> TryteString """ Returns a TryteString representation of the transaction. """ if not self.bundle_hash: raise with_context( exc=RuntimeError( 'Cannot get TryteString representation of {cls} instance ' 'without a bundle hash; call ``bundle.finalize()`` first ' '(``exc.context`` has more info).'.format( cls=type(self).__name__, ), ), context={ 'transaction': self, }, ) return super(ProposedTransaction, self).as_tryte_string()
def function[as_tryte_string, parameter[self]]: constant[ Returns a TryteString representation of the transaction. ] if <ast.UnaryOp object at 0x7da1b26af040> begin[:] <ast.Raise object at 0x7da1b26aecb0> return[call[call[name[super], parameter[name[ProposedTransaction], name[self]]].as_tryte_string, parameter[]]]
keyword[def] identifier[as_tryte_string] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[bundle_hash] : keyword[raise] identifier[with_context] ( identifier[exc] = identifier[RuntimeError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[cls] = identifier[type] ( identifier[self] ). identifier[__name__] , ), ), identifier[context] ={ literal[string] : identifier[self] , }, ) keyword[return] identifier[super] ( identifier[ProposedTransaction] , identifier[self] ). identifier[as_tryte_string] ()
def as_tryte_string(self): # type: () -> TryteString '\n Returns a TryteString representation of the transaction.\n ' if not self.bundle_hash: raise with_context(exc=RuntimeError('Cannot get TryteString representation of {cls} instance without a bundle hash; call ``bundle.finalize()`` first (``exc.context`` has more info).'.format(cls=type(self).__name__)), context={'transaction': self}) # depends on [control=['if'], data=[]] return super(ProposedTransaction, self).as_tryte_string()
def subscribe(self, peer_jid): """ Request presence subscription with the given `peer_jid`. This is deliberately not a coroutine; we don’t know whether the peer is online (usually) and they may defer the confirmation very long, if they confirm at all. Use :meth:`on_subscribed` to get notified when a peer accepted a subscription request. """ self.client.enqueue( stanza.Presence(type_=structs.PresenceType.SUBSCRIBE, to=peer_jid) )
def function[subscribe, parameter[self, peer_jid]]: constant[ Request presence subscription with the given `peer_jid`. This is deliberately not a coroutine; we don’t know whether the peer is online (usually) and they may defer the confirmation very long, if they confirm at all. Use :meth:`on_subscribed` to get notified when a peer accepted a subscription request. ] call[name[self].client.enqueue, parameter[call[name[stanza].Presence, parameter[]]]]
keyword[def] identifier[subscribe] ( identifier[self] , identifier[peer_jid] ): literal[string] identifier[self] . identifier[client] . identifier[enqueue] ( identifier[stanza] . identifier[Presence] ( identifier[type_] = identifier[structs] . identifier[PresenceType] . identifier[SUBSCRIBE] , identifier[to] = identifier[peer_jid] ) )
def subscribe(self, peer_jid): """ Request presence subscription with the given `peer_jid`. This is deliberately not a coroutine; we don’t know whether the peer is online (usually) and they may defer the confirmation very long, if they confirm at all. Use :meth:`on_subscribed` to get notified when a peer accepted a subscription request. """ self.client.enqueue(stanza.Presence(type_=structs.PresenceType.SUBSCRIBE, to=peer_jid))
def _send_outgoing_route(self, outgoing_route): """Constructs `Update` message from given `outgoing_route` and sends it to peer. Also, checks if any policies prevent sending this message. Populates Adj-RIB-out with corresponding `SentRoute`. """ path = outgoing_route.path block, blocked_cause = self._apply_out_filter(path) nlri_str = outgoing_route.path.nlri.formatted_nlri_str sent_route = SentRoute(outgoing_route.path, self, block) self._adj_rib_out[nlri_str] = sent_route self._signal_bus.adj_rib_out_changed(self, sent_route) # TODO(PH): optimized by sending several prefixes per update. # Construct and send update message. if not block: update_msg = self._construct_update(outgoing_route) self._protocol.send(update_msg) # Collect update statistics. self.state.incr(PeerCounterNames.SENT_UPDATES) else: LOG.debug('prefix : %s is not sent by filter : %s', path.nlri, blocked_cause) # We have to create sent_route for every OutgoingRoute which is # not a withdraw or was for route-refresh msg. if (not outgoing_route.path.is_withdraw and not outgoing_route.for_route_refresh): # Update the destination with new sent route. tm = self._core_service.table_manager tm.remember_sent_route(sent_route)
def function[_send_outgoing_route, parameter[self, outgoing_route]]: constant[Constructs `Update` message from given `outgoing_route` and sends it to peer. Also, checks if any policies prevent sending this message. Populates Adj-RIB-out with corresponding `SentRoute`. ] variable[path] assign[=] name[outgoing_route].path <ast.Tuple object at 0x7da1b1a65f00> assign[=] call[name[self]._apply_out_filter, parameter[name[path]]] variable[nlri_str] assign[=] name[outgoing_route].path.nlri.formatted_nlri_str variable[sent_route] assign[=] call[name[SentRoute], parameter[name[outgoing_route].path, name[self], name[block]]] call[name[self]._adj_rib_out][name[nlri_str]] assign[=] name[sent_route] call[name[self]._signal_bus.adj_rib_out_changed, parameter[name[self], name[sent_route]]] if <ast.UnaryOp object at 0x7da1b1a67fa0> begin[:] variable[update_msg] assign[=] call[name[self]._construct_update, parameter[name[outgoing_route]]] call[name[self]._protocol.send, parameter[name[update_msg]]] call[name[self].state.incr, parameter[name[PeerCounterNames].SENT_UPDATES]] if <ast.BoolOp object at 0x7da1b1a65cc0> begin[:] variable[tm] assign[=] name[self]._core_service.table_manager call[name[tm].remember_sent_route, parameter[name[sent_route]]]
keyword[def] identifier[_send_outgoing_route] ( identifier[self] , identifier[outgoing_route] ): literal[string] identifier[path] = identifier[outgoing_route] . identifier[path] identifier[block] , identifier[blocked_cause] = identifier[self] . identifier[_apply_out_filter] ( identifier[path] ) identifier[nlri_str] = identifier[outgoing_route] . identifier[path] . identifier[nlri] . identifier[formatted_nlri_str] identifier[sent_route] = identifier[SentRoute] ( identifier[outgoing_route] . identifier[path] , identifier[self] , identifier[block] ) identifier[self] . identifier[_adj_rib_out] [ identifier[nlri_str] ]= identifier[sent_route] identifier[self] . identifier[_signal_bus] . identifier[adj_rib_out_changed] ( identifier[self] , identifier[sent_route] ) keyword[if] keyword[not] identifier[block] : identifier[update_msg] = identifier[self] . identifier[_construct_update] ( identifier[outgoing_route] ) identifier[self] . identifier[_protocol] . identifier[send] ( identifier[update_msg] ) identifier[self] . identifier[state] . identifier[incr] ( identifier[PeerCounterNames] . identifier[SENT_UPDATES] ) keyword[else] : identifier[LOG] . identifier[debug] ( literal[string] , identifier[path] . identifier[nlri] , identifier[blocked_cause] ) keyword[if] ( keyword[not] identifier[outgoing_route] . identifier[path] . identifier[is_withdraw] keyword[and] keyword[not] identifier[outgoing_route] . identifier[for_route_refresh] ): identifier[tm] = identifier[self] . identifier[_core_service] . identifier[table_manager] identifier[tm] . identifier[remember_sent_route] ( identifier[sent_route] )
def _send_outgoing_route(self, outgoing_route): """Constructs `Update` message from given `outgoing_route` and sends it to peer. Also, checks if any policies prevent sending this message. Populates Adj-RIB-out with corresponding `SentRoute`. """ path = outgoing_route.path (block, blocked_cause) = self._apply_out_filter(path) nlri_str = outgoing_route.path.nlri.formatted_nlri_str sent_route = SentRoute(outgoing_route.path, self, block) self._adj_rib_out[nlri_str] = sent_route self._signal_bus.adj_rib_out_changed(self, sent_route) # TODO(PH): optimized by sending several prefixes per update. # Construct and send update message. if not block: update_msg = self._construct_update(outgoing_route) self._protocol.send(update_msg) # Collect update statistics. self.state.incr(PeerCounterNames.SENT_UPDATES) # depends on [control=['if'], data=[]] else: LOG.debug('prefix : %s is not sent by filter : %s', path.nlri, blocked_cause) # We have to create sent_route for every OutgoingRoute which is # not a withdraw or was for route-refresh msg. if not outgoing_route.path.is_withdraw and (not outgoing_route.for_route_refresh): # Update the destination with new sent route. tm = self._core_service.table_manager tm.remember_sent_route(sent_route) # depends on [control=['if'], data=[]]
def set(self, key, samples, sampling_rate): """ Set the samples and sampling-rate for the given key. Existing data will be overwritten. The samples have to have ``np.float32`` datatype and values in the range of -1.0 and 1.0. Args: key (str): A key to store the data for. samples (numpy.ndarray): 1-D array of audio samples (np.float32). sampling_rate (int): The sampling-rate of the audio samples. Note: The container has to be opened in advance. """ if not np.issubdtype(samples.dtype, np.floating): raise ValueError('Samples are required as np.float32!') if len(samples.shape) > 1: raise ValueError('Only single channel supported!') self.raise_error_if_not_open() if key in self._file: del self._file[key] samples = (samples * MAX_INT16_VALUE).astype(np.int16) dset = self._file.create_dataset(key, data=samples) dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate
def function[set, parameter[self, key, samples, sampling_rate]]: constant[ Set the samples and sampling-rate for the given key. Existing data will be overwritten. The samples have to have ``np.float32`` datatype and values in the range of -1.0 and 1.0. Args: key (str): A key to store the data for. samples (numpy.ndarray): 1-D array of audio samples (np.float32). sampling_rate (int): The sampling-rate of the audio samples. Note: The container has to be opened in advance. ] if <ast.UnaryOp object at 0x7da1b0b1d630> begin[:] <ast.Raise object at 0x7da1b0b1fe20> if compare[call[name[len], parameter[name[samples].shape]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b0b1cbb0> call[name[self].raise_error_if_not_open, parameter[]] if compare[name[key] in name[self]._file] begin[:] <ast.Delete object at 0x7da1b0b1f3d0> variable[samples] assign[=] call[binary_operation[name[samples] * name[MAX_INT16_VALUE]].astype, parameter[name[np].int16]] variable[dset] assign[=] call[name[self]._file.create_dataset, parameter[name[key]]] call[name[dset].attrs][name[SAMPLING_RATE_ATTR]] assign[=] name[sampling_rate]
keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[samples] , identifier[sampling_rate] ): literal[string] keyword[if] keyword[not] identifier[np] . identifier[issubdtype] ( identifier[samples] . identifier[dtype] , identifier[np] . identifier[floating] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[samples] . identifier[shape] )> literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[raise_error_if_not_open] () keyword[if] identifier[key] keyword[in] identifier[self] . identifier[_file] : keyword[del] identifier[self] . identifier[_file] [ identifier[key] ] identifier[samples] =( identifier[samples] * identifier[MAX_INT16_VALUE] ). identifier[astype] ( identifier[np] . identifier[int16] ) identifier[dset] = identifier[self] . identifier[_file] . identifier[create_dataset] ( identifier[key] , identifier[data] = identifier[samples] ) identifier[dset] . identifier[attrs] [ identifier[SAMPLING_RATE_ATTR] ]= identifier[sampling_rate]
def set(self, key, samples, sampling_rate): """ Set the samples and sampling-rate for the given key. Existing data will be overwritten. The samples have to have ``np.float32`` datatype and values in the range of -1.0 and 1.0. Args: key (str): A key to store the data for. samples (numpy.ndarray): 1-D array of audio samples (np.float32). sampling_rate (int): The sampling-rate of the audio samples. Note: The container has to be opened in advance. """ if not np.issubdtype(samples.dtype, np.floating): raise ValueError('Samples are required as np.float32!') # depends on [control=['if'], data=[]] if len(samples.shape) > 1: raise ValueError('Only single channel supported!') # depends on [control=['if'], data=[]] self.raise_error_if_not_open() if key in self._file: del self._file[key] # depends on [control=['if'], data=['key']] samples = (samples * MAX_INT16_VALUE).astype(np.int16) dset = self._file.create_dataset(key, data=samples) dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate
def urlencode2(query, doseq=0, safe="", querydelimiter="&"): """Encode a sequence of two-element tuples or dictionary into a URL query string. If any values in the query arg are sequences and doseq is true, each sequence element is converted to a separate parameter. If the query arg is a sequence of two-element tuples, the order of the parameters in the output will match the order of parameters in the input. """ if hasattr(query,"items"): # mapping objects query = query.items() else: # it's a bother at times that strings and string-like objects are # sequences... try: # non-sequence items should not work with len() # non-empty strings will fail this if len(query) and not isinstance(query[0], tuple): raise TypeError # zero-length sequences of all types will get here and succeed, # but that's a minor nit - since the original implementation # allowed empty dicts that type of behavior probably should be # preserved for consistency except TypeError: ty,va,tb = sys.exc_info() raise TypeError("not a valid non-string sequence or mapping object " + tb) l = [] if not doseq: # preserve old behavior for k, v in query: k = quote_plus(str(k), safe=safe) v = quote_plus(str(v), safe=safe) l.append(k + '=' + v) else: for k, v in query: k = quote_plus(str(k), safe=safe) if isinstance(v, str): v = quote_plus(v, safe=safe) l.append(k + '=' + v) elif _is_unicode(v): # is there a reasonable way to convert to ASCII? # encode generates a string, but "replace" or "ignore" # lose information and "strict" can raise UnicodeError v = quote_plus(v.encode("ASCII","replace")) l.append(k + '=' + v) else: try: # is this a sufficient test for sequence-ness? len(v) except TypeError: # not a sequence v = quote_plus(str(v), safe=safe) l.append(k + '=' + v) else: # loop over the sequence for elt in v: l.append(k + '=' + quote_plus(str(elt))) return querydelimiter.join(l)
def function[urlencode2, parameter[query, doseq, safe, querydelimiter]]: constant[Encode a sequence of two-element tuples or dictionary into a URL query string. If any values in the query arg are sequences and doseq is true, each sequence element is converted to a separate parameter. If the query arg is a sequence of two-element tuples, the order of the parameters in the output will match the order of parameters in the input. ] if call[name[hasattr], parameter[name[query], constant[items]]] begin[:] variable[query] assign[=] call[name[query].items, parameter[]] variable[l] assign[=] list[[]] if <ast.UnaryOp object at 0x7da1b16683a0> begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1668d00>, <ast.Name object at 0x7da1b1668a00>]]] in starred[name[query]] begin[:] variable[k] assign[=] call[name[quote_plus], parameter[call[name[str], parameter[name[k]]]]] variable[v] assign[=] call[name[quote_plus], parameter[call[name[str], parameter[name[v]]]]] call[name[l].append, parameter[binary_operation[binary_operation[name[k] + constant[=]] + name[v]]]] return[call[name[querydelimiter].join, parameter[name[l]]]]
keyword[def] identifier[urlencode2] ( identifier[query] , identifier[doseq] = literal[int] , identifier[safe] = literal[string] , identifier[querydelimiter] = literal[string] ): literal[string] keyword[if] identifier[hasattr] ( identifier[query] , literal[string] ): identifier[query] = identifier[query] . identifier[items] () keyword[else] : keyword[try] : keyword[if] identifier[len] ( identifier[query] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[query] [ literal[int] ], identifier[tuple] ): keyword[raise] identifier[TypeError] keyword[except] identifier[TypeError] : identifier[ty] , identifier[va] , identifier[tb] = identifier[sys] . identifier[exc_info] () keyword[raise] identifier[TypeError] ( literal[string] + identifier[tb] ) identifier[l] =[] keyword[if] keyword[not] identifier[doseq] : keyword[for] identifier[k] , identifier[v] keyword[in] identifier[query] : identifier[k] = identifier[quote_plus] ( identifier[str] ( identifier[k] ), identifier[safe] = identifier[safe] ) identifier[v] = identifier[quote_plus] ( identifier[str] ( identifier[v] ), identifier[safe] = identifier[safe] ) identifier[l] . identifier[append] ( identifier[k] + literal[string] + identifier[v] ) keyword[else] : keyword[for] identifier[k] , identifier[v] keyword[in] identifier[query] : identifier[k] = identifier[quote_plus] ( identifier[str] ( identifier[k] ), identifier[safe] = identifier[safe] ) keyword[if] identifier[isinstance] ( identifier[v] , identifier[str] ): identifier[v] = identifier[quote_plus] ( identifier[v] , identifier[safe] = identifier[safe] ) identifier[l] . identifier[append] ( identifier[k] + literal[string] + identifier[v] ) keyword[elif] identifier[_is_unicode] ( identifier[v] ): identifier[v] = identifier[quote_plus] ( identifier[v] . identifier[encode] ( literal[string] , literal[string] )) identifier[l] . 
identifier[append] ( identifier[k] + literal[string] + identifier[v] ) keyword[else] : keyword[try] : identifier[len] ( identifier[v] ) keyword[except] identifier[TypeError] : identifier[v] = identifier[quote_plus] ( identifier[str] ( identifier[v] ), identifier[safe] = identifier[safe] ) identifier[l] . identifier[append] ( identifier[k] + literal[string] + identifier[v] ) keyword[else] : keyword[for] identifier[elt] keyword[in] identifier[v] : identifier[l] . identifier[append] ( identifier[k] + literal[string] + identifier[quote_plus] ( identifier[str] ( identifier[elt] ))) keyword[return] identifier[querydelimiter] . identifier[join] ( identifier[l] )
def urlencode2(query, doseq=0, safe='', querydelimiter='&'): """Encode a sequence of two-element tuples or dictionary into a URL query string. If any values in the query arg are sequences and doseq is true, each sequence element is converted to a separate parameter. If the query arg is a sequence of two-element tuples, the order of the parameters in the output will match the order of parameters in the input. """ if hasattr(query, 'items'): # mapping objects query = query.items() # depends on [control=['if'], data=[]] else: # it's a bother at times that strings and string-like objects are # sequences... try: # non-sequence items should not work with len() # non-empty strings will fail this if len(query) and (not isinstance(query[0], tuple)): raise TypeError # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] # zero-length sequences of all types will get here and succeed, # but that's a minor nit - since the original implementation # allowed empty dicts that type of behavior probably should be # preserved for consistency except TypeError: (ty, va, tb) = sys.exc_info() raise TypeError('not a valid non-string sequence or mapping object ' + tb) # depends on [control=['except'], data=[]] l = [] if not doseq: # preserve old behavior for (k, v) in query: k = quote_plus(str(k), safe=safe) v = quote_plus(str(v), safe=safe) l.append(k + '=' + v) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: for (k, v) in query: k = quote_plus(str(k), safe=safe) if isinstance(v, str): v = quote_plus(v, safe=safe) l.append(k + '=' + v) # depends on [control=['if'], data=[]] elif _is_unicode(v): # is there a reasonable way to convert to ASCII? # encode generates a string, but "replace" or "ignore" # lose information and "strict" can raise UnicodeError v = quote_plus(v.encode('ASCII', 'replace')) l.append(k + '=' + v) # depends on [control=['if'], data=[]] else: try: # is this a sufficient test for sequence-ness? 
len(v) # depends on [control=['try'], data=[]] except TypeError: # not a sequence v = quote_plus(str(v), safe=safe) l.append(k + '=' + v) # depends on [control=['except'], data=[]] else: # loop over the sequence for elt in v: l.append(k + '=' + quote_plus(str(elt))) # depends on [control=['for'], data=['elt']] # depends on [control=['for'], data=[]] return querydelimiter.join(l)
def get_t_tag_content( t, parent, remove_bold, remove_italics, meta_data): """ Generate the string data that for this particular t tag. """ if t is None or t.text is None: return '' # Need to escape the text so that we do not accidentally put in text # that is not valid XML. # cgi will replace things like & < > with &amp; &lt; &gt; text = cgi.escape(t.text) # Wrap the text with any modifiers it might have (bold, italics or # underline) el_is_bold = not remove_bold and ( is_bold(parent) or is_underlined(parent) ) el_is_italics = not remove_italics and is_italics(parent) if el_is_bold: text = '<strong>%s</strong>' % text if el_is_italics: text = '<em>%s</em>' % text return text
def function[get_t_tag_content, parameter[t, parent, remove_bold, remove_italics, meta_data]]: constant[ Generate the string data that for this particular t tag. ] if <ast.BoolOp object at 0x7da1b0348970> begin[:] return[constant[]] variable[text] assign[=] call[name[cgi].escape, parameter[name[t].text]] variable[el_is_bold] assign[=] <ast.BoolOp object at 0x7da1b023d5a0> variable[el_is_italics] assign[=] <ast.BoolOp object at 0x7da1b023c7f0> if name[el_is_bold] begin[:] variable[text] assign[=] binary_operation[constant[<strong>%s</strong>] <ast.Mod object at 0x7da2590d6920> name[text]] if name[el_is_italics] begin[:] variable[text] assign[=] binary_operation[constant[<em>%s</em>] <ast.Mod object at 0x7da2590d6920> name[text]] return[name[text]]
keyword[def] identifier[get_t_tag_content] ( identifier[t] , identifier[parent] , identifier[remove_bold] , identifier[remove_italics] , identifier[meta_data] ): literal[string] keyword[if] identifier[t] keyword[is] keyword[None] keyword[or] identifier[t] . identifier[text] keyword[is] keyword[None] : keyword[return] literal[string] identifier[text] = identifier[cgi] . identifier[escape] ( identifier[t] . identifier[text] ) identifier[el_is_bold] = keyword[not] identifier[remove_bold] keyword[and] ( identifier[is_bold] ( identifier[parent] ) keyword[or] identifier[is_underlined] ( identifier[parent] ) ) identifier[el_is_italics] = keyword[not] identifier[remove_italics] keyword[and] identifier[is_italics] ( identifier[parent] ) keyword[if] identifier[el_is_bold] : identifier[text] = literal[string] % identifier[text] keyword[if] identifier[el_is_italics] : identifier[text] = literal[string] % identifier[text] keyword[return] identifier[text]
def get_t_tag_content(t, parent, remove_bold, remove_italics, meta_data): """ Generate the string data that for this particular t tag. """ if t is None or t.text is None: return '' # depends on [control=['if'], data=[]] # Need to escape the text so that we do not accidentally put in text # that is not valid XML. # cgi will replace things like & < > with &amp; &lt; &gt; text = cgi.escape(t.text) # Wrap the text with any modifiers it might have (bold, italics or # underline) el_is_bold = not remove_bold and (is_bold(parent) or is_underlined(parent)) el_is_italics = not remove_italics and is_italics(parent) if el_is_bold: text = '<strong>%s</strong>' % text # depends on [control=['if'], data=[]] if el_is_italics: text = '<em>%s</em>' % text # depends on [control=['if'], data=[]] return text
def setup(self): """Check if the bluetooth controller is available.""" try: subprocess.check_output(["hcitool", "clock"], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: raise BackendError("'hcitool clock' returned error. Make sure " "your bluetooth device is powered up with " "'hciconfig hciX up'.") except OSError: raise BackendError("'hcitool' could not be found, make sure you " "have bluez-utils installed.")
def function[setup, parameter[self]]: constant[Check if the bluetooth controller is available.] <ast.Try object at 0x7da1b0948460>
keyword[def] identifier[setup] ( identifier[self] ): literal[string] keyword[try] : identifier[subprocess] . identifier[check_output] ([ literal[string] , literal[string] ], identifier[stderr] = identifier[subprocess] . identifier[STDOUT] ) keyword[except] identifier[subprocess] . identifier[CalledProcessError] : keyword[raise] identifier[BackendError] ( literal[string] literal[string] literal[string] ) keyword[except] identifier[OSError] : keyword[raise] identifier[BackendError] ( literal[string] literal[string] )
def setup(self): """Check if the bluetooth controller is available.""" try: subprocess.check_output(['hcitool', 'clock'], stderr=subprocess.STDOUT) # depends on [control=['try'], data=[]] except subprocess.CalledProcessError: raise BackendError("'hcitool clock' returned error. Make sure your bluetooth device is powered up with 'hciconfig hciX up'.") # depends on [control=['except'], data=[]] except OSError: raise BackendError("'hcitool' could not be found, make sure you have bluez-utils installed.") # depends on [control=['except'], data=[]]
def extend(self, obj): r""" This function is used to add objects to the project. Arguments can be single OpenPNM objects, an OpenPNM project list, or a plain list of OpenPNM objects. """ if type(obj) is not list: obj = [obj] for item in obj: if hasattr(item, '_mro'): if 'GenericNetwork' in item._mro(): if self.network: raise Exception('Project already has a network') # Must use append since extend breaks the dicts up into # separate objects, while append keeps it as a single object. super().append(item) else: raise Exception('Only OpenPNM objects can be added')
def function[extend, parameter[self, obj]]: constant[ This function is used to add objects to the project. Arguments can be single OpenPNM objects, an OpenPNM project list, or a plain list of OpenPNM objects. ] if compare[call[name[type], parameter[name[obj]]] is_not name[list]] begin[:] variable[obj] assign[=] list[[<ast.Name object at 0x7da204567160>]] for taget[name[item]] in starred[name[obj]] begin[:] if call[name[hasattr], parameter[name[item], constant[_mro]]] begin[:] if compare[constant[GenericNetwork] in call[name[item]._mro, parameter[]]] begin[:] if name[self].network begin[:] <ast.Raise object at 0x7da2045645e0> call[call[name[super], parameter[]].append, parameter[name[item]]]
keyword[def] identifier[extend] ( identifier[self] , identifier[obj] ): literal[string] keyword[if] identifier[type] ( identifier[obj] ) keyword[is] keyword[not] identifier[list] : identifier[obj] =[ identifier[obj] ] keyword[for] identifier[item] keyword[in] identifier[obj] : keyword[if] identifier[hasattr] ( identifier[item] , literal[string] ): keyword[if] literal[string] keyword[in] identifier[item] . identifier[_mro] (): keyword[if] identifier[self] . identifier[network] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[super] (). identifier[append] ( identifier[item] ) keyword[else] : keyword[raise] identifier[Exception] ( literal[string] )
def extend(self, obj): """ This function is used to add objects to the project. Arguments can be single OpenPNM objects, an OpenPNM project list, or a plain list of OpenPNM objects. """ if type(obj) is not list: obj = [obj] # depends on [control=['if'], data=[]] for item in obj: if hasattr(item, '_mro'): if 'GenericNetwork' in item._mro(): if self.network: raise Exception('Project already has a network') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # Must use append since extend breaks the dicts up into # separate objects, while append keeps it as a single object. super().append(item) # depends on [control=['if'], data=[]] else: raise Exception('Only OpenPNM objects can be added') # depends on [control=['for'], data=['item']]
def get_description_lines(docstring): """Extract the description from the given docstring. This grabs everything up to the first occurrence of something that looks like a parameter description. The docstring will be dedented and cleaned up using the standard Sphinx methods. :param str docstring: The source docstring. :returns: list """ if prepare_docstring is None: raise ImportError('sphinx must be installed to use this function.') if not isinstance(docstring, str): return [] lines = [] for line in prepare_docstring(docstring): if DESCRIPTION_END_RE.match(line): break lines.append(line) if lines and lines[-1] != '': lines.append('') return lines
def function[get_description_lines, parameter[docstring]]: constant[Extract the description from the given docstring. This grabs everything up to the first occurrence of something that looks like a parameter description. The docstring will be dedented and cleaned up using the standard Sphinx methods. :param str docstring: The source docstring. :returns: list ] if compare[name[prepare_docstring] is constant[None]] begin[:] <ast.Raise object at 0x7da18f58fdc0> if <ast.UnaryOp object at 0x7da18f58fc70> begin[:] return[list[[]]] variable[lines] assign[=] list[[]] for taget[name[line]] in starred[call[name[prepare_docstring], parameter[name[docstring]]]] begin[:] if call[name[DESCRIPTION_END_RE].match, parameter[name[line]]] begin[:] break call[name[lines].append, parameter[name[line]]] if <ast.BoolOp object at 0x7da18f58f5b0> begin[:] call[name[lines].append, parameter[constant[]]] return[name[lines]]
keyword[def] identifier[get_description_lines] ( identifier[docstring] ): literal[string] keyword[if] identifier[prepare_docstring] keyword[is] keyword[None] : keyword[raise] identifier[ImportError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[docstring] , identifier[str] ): keyword[return] [] identifier[lines] =[] keyword[for] identifier[line] keyword[in] identifier[prepare_docstring] ( identifier[docstring] ): keyword[if] identifier[DESCRIPTION_END_RE] . identifier[match] ( identifier[line] ): keyword[break] identifier[lines] . identifier[append] ( identifier[line] ) keyword[if] identifier[lines] keyword[and] identifier[lines] [- literal[int] ]!= literal[string] : identifier[lines] . identifier[append] ( literal[string] ) keyword[return] identifier[lines]
def get_description_lines(docstring): """Extract the description from the given docstring. This grabs everything up to the first occurrence of something that looks like a parameter description. The docstring will be dedented and cleaned up using the standard Sphinx methods. :param str docstring: The source docstring. :returns: list """ if prepare_docstring is None: raise ImportError('sphinx must be installed to use this function.') # depends on [control=['if'], data=[]] if not isinstance(docstring, str): return [] # depends on [control=['if'], data=[]] lines = [] for line in prepare_docstring(docstring): if DESCRIPTION_END_RE.match(line): break # depends on [control=['if'], data=[]] lines.append(line) # depends on [control=['for'], data=['line']] if lines and lines[-1] != '': lines.append('') # depends on [control=['if'], data=[]] return lines
def read(args): """Reading the configure file and adds non-existing attributes to 'args'""" if args.config_file is None or not isfile(args.config_file): return logging.info("Reading configure file: %s"%args.config_file) config = cparser.ConfigParser() config.read(args.config_file) if not config.has_section('lrcloud'): raise RuntimeError("Configure file has no [lrcloud] section!") for (name, value) in config.items('lrcloud'): if value == "True": value = True elif value == "False": value = False if getattr(args, name) is None: setattr(args, name, value)
def function[read, parameter[args]]: constant[Reading the configure file and adds non-existing attributes to 'args'] if <ast.BoolOp object at 0x7da20c6a8ca0> begin[:] return[None] call[name[logging].info, parameter[binary_operation[constant[Reading configure file: %s] <ast.Mod object at 0x7da2590d6920> name[args].config_file]]] variable[config] assign[=] call[name[cparser].ConfigParser, parameter[]] call[name[config].read, parameter[name[args].config_file]] if <ast.UnaryOp object at 0x7da20c6abf40> begin[:] <ast.Raise object at 0x7da20c6a8250> for taget[tuple[[<ast.Name object at 0x7da20c6ab9d0>, <ast.Name object at 0x7da20c6aa950>]]] in starred[call[name[config].items, parameter[constant[lrcloud]]]] begin[:] if compare[name[value] equal[==] constant[True]] begin[:] variable[value] assign[=] constant[True] if compare[call[name[getattr], parameter[name[args], name[name]]] is constant[None]] begin[:] call[name[setattr], parameter[name[args], name[name], name[value]]]
keyword[def] identifier[read] ( identifier[args] ): literal[string] keyword[if] identifier[args] . identifier[config_file] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isfile] ( identifier[args] . identifier[config_file] ): keyword[return] identifier[logging] . identifier[info] ( literal[string] % identifier[args] . identifier[config_file] ) identifier[config] = identifier[cparser] . identifier[ConfigParser] () identifier[config] . identifier[read] ( identifier[args] . identifier[config_file] ) keyword[if] keyword[not] identifier[config] . identifier[has_section] ( literal[string] ): keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[for] ( identifier[name] , identifier[value] ) keyword[in] identifier[config] . identifier[items] ( literal[string] ): keyword[if] identifier[value] == literal[string] : identifier[value] = keyword[True] keyword[elif] identifier[value] == literal[string] : identifier[value] = keyword[False] keyword[if] identifier[getattr] ( identifier[args] , identifier[name] ) keyword[is] keyword[None] : identifier[setattr] ( identifier[args] , identifier[name] , identifier[value] )
def read(args): """Reading the configure file and adds non-existing attributes to 'args'""" if args.config_file is None or not isfile(args.config_file): return # depends on [control=['if'], data=[]] logging.info('Reading configure file: %s' % args.config_file) config = cparser.ConfigParser() config.read(args.config_file) if not config.has_section('lrcloud'): raise RuntimeError('Configure file has no [lrcloud] section!') # depends on [control=['if'], data=[]] for (name, value) in config.items('lrcloud'): if value == 'True': value = True # depends on [control=['if'], data=['value']] elif value == 'False': value = False # depends on [control=['if'], data=['value']] if getattr(args, name) is None: setattr(args, name, value) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def get_widget(self): """ Create the widget for the URL type. """ form_field = self.get_form_field() widget = form_field.widget if isinstance(widget, type): widget = widget() # Widget instantiation needs to happen manually. # Auto skip if choices is not an existing attribute. form_field_choices = getattr(form_field, 'choices', None) if form_field_choices is not None: if hasattr(widget, 'choices'): widget.choices = form_field_choices return widget
def function[get_widget, parameter[self]]: constant[ Create the widget for the URL type. ] variable[form_field] assign[=] call[name[self].get_form_field, parameter[]] variable[widget] assign[=] name[form_field].widget if call[name[isinstance], parameter[name[widget], name[type]]] begin[:] variable[widget] assign[=] call[name[widget], parameter[]] variable[form_field_choices] assign[=] call[name[getattr], parameter[name[form_field], constant[choices], constant[None]]] if compare[name[form_field_choices] is_not constant[None]] begin[:] if call[name[hasattr], parameter[name[widget], constant[choices]]] begin[:] name[widget].choices assign[=] name[form_field_choices] return[name[widget]]
keyword[def] identifier[get_widget] ( identifier[self] ): literal[string] identifier[form_field] = identifier[self] . identifier[get_form_field] () identifier[widget] = identifier[form_field] . identifier[widget] keyword[if] identifier[isinstance] ( identifier[widget] , identifier[type] ): identifier[widget] = identifier[widget] () identifier[form_field_choices] = identifier[getattr] ( identifier[form_field] , literal[string] , keyword[None] ) keyword[if] identifier[form_field_choices] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[hasattr] ( identifier[widget] , literal[string] ): identifier[widget] . identifier[choices] = identifier[form_field_choices] keyword[return] identifier[widget]
def get_widget(self): """ Create the widget for the URL type. """ form_field = self.get_form_field() widget = form_field.widget if isinstance(widget, type): widget = widget() # depends on [control=['if'], data=[]] # Widget instantiation needs to happen manually. # Auto skip if choices is not an existing attribute. form_field_choices = getattr(form_field, 'choices', None) if form_field_choices is not None: if hasattr(widget, 'choices'): widget.choices = form_field_choices # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['form_field_choices']] return widget
def format_hep(citation_elements): """Format hep-th report numbers with a dash e.g. replaces hep-th-9711200 with hep-th/9711200 """ prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-', 'math-ph-') for el in citation_elements: if el['type'] == 'REPORTNUMBER': for p in prefixes: if el['report_num'].startswith(p): el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \ el['report_num'][len(p):] return citation_elements
def function[format_hep, parameter[citation_elements]]: constant[Format hep-th report numbers with a dash e.g. replaces hep-th-9711200 with hep-th/9711200 ] variable[prefixes] assign[=] tuple[[<ast.Constant object at 0x7da1b13b5630>, <ast.Constant object at 0x7da1b13b40d0>, <ast.Constant object at 0x7da1b13b50c0>, <ast.Constant object at 0x7da1b13b4c40>, <ast.Constant object at 0x7da1b13b7e20>, <ast.Constant object at 0x7da1b13b53c0>]] for taget[name[el]] in starred[name[citation_elements]] begin[:] if compare[call[name[el]][constant[type]] equal[==] constant[REPORTNUMBER]] begin[:] for taget[name[p]] in starred[name[prefixes]] begin[:] if call[call[name[el]][constant[report_num]].startswith, parameter[name[p]]] begin[:] call[name[el]][constant[report_num]] assign[=] binary_operation[binary_operation[call[call[name[el]][constant[report_num]]][<ast.Slice object at 0x7da1b1378250>] + constant[/]] + call[call[name[el]][constant[report_num]]][<ast.Slice object at 0x7da1b1379b70>]] return[name[citation_elements]]
keyword[def] identifier[format_hep] ( identifier[citation_elements] ): literal[string] identifier[prefixes] =( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ) keyword[for] identifier[el] keyword[in] identifier[citation_elements] : keyword[if] identifier[el] [ literal[string] ]== literal[string] : keyword[for] identifier[p] keyword[in] identifier[prefixes] : keyword[if] identifier[el] [ literal[string] ]. identifier[startswith] ( identifier[p] ): identifier[el] [ literal[string] ]= identifier[el] [ literal[string] ][: identifier[len] ( identifier[p] )- literal[int] ]+ literal[string] + identifier[el] [ literal[string] ][ identifier[len] ( identifier[p] ):] keyword[return] identifier[citation_elements]
def format_hep(citation_elements): """Format hep-th report numbers with a dash e.g. replaces hep-th-9711200 with hep-th/9711200 """ prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-', 'math-ph-') for el in citation_elements: if el['type'] == 'REPORTNUMBER': for p in prefixes: if el['report_num'].startswith(p): el['report_num'] = el['report_num'][:len(p) - 1] + '/' + el['report_num'][len(p):] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['el']] return citation_elements
def flags(self, flags): """ It might be nice to make flags not a list of strings. This is made harder by the control-spec: `...controllers MUST tolerate unrecognized flags and lines...` There is some current work in Twisted for open-ended constants (enums) support however, it seems. """ if isinstance(flags, (six.text_type, bytes)): flags = flags.split() self._flags = [x.lower() for x in flags] self.name_is_unique = 'named' in self._flags
def function[flags, parameter[self, flags]]: constant[ It might be nice to make flags not a list of strings. This is made harder by the control-spec: `...controllers MUST tolerate unrecognized flags and lines...` There is some current work in Twisted for open-ended constants (enums) support however, it seems. ] if call[name[isinstance], parameter[name[flags], tuple[[<ast.Attribute object at 0x7da1b07cd120>, <ast.Name object at 0x7da1b07cf1f0>]]]] begin[:] variable[flags] assign[=] call[name[flags].split, parameter[]] name[self]._flags assign[=] <ast.ListComp object at 0x7da1b07cd0c0> name[self].name_is_unique assign[=] compare[constant[named] in name[self]._flags]
keyword[def] identifier[flags] ( identifier[self] , identifier[flags] ): literal[string] keyword[if] identifier[isinstance] ( identifier[flags] ,( identifier[six] . identifier[text_type] , identifier[bytes] )): identifier[flags] = identifier[flags] . identifier[split] () identifier[self] . identifier[_flags] =[ identifier[x] . identifier[lower] () keyword[for] identifier[x] keyword[in] identifier[flags] ] identifier[self] . identifier[name_is_unique] = literal[string] keyword[in] identifier[self] . identifier[_flags]
def flags(self, flags): """ It might be nice to make flags not a list of strings. This is made harder by the control-spec: `...controllers MUST tolerate unrecognized flags and lines...` There is some current work in Twisted for open-ended constants (enums) support however, it seems. """ if isinstance(flags, (six.text_type, bytes)): flags = flags.split() # depends on [control=['if'], data=[]] self._flags = [x.lower() for x in flags] self.name_is_unique = 'named' in self._flags
def map_(cache: Mapping[Domain, Range]) -> Operator[Map[Domain, Range]]: """ Returns decorator that calls wrapped function if nothing was found in cache for its argument. Wrapped function arguments should be hashable. """ def wrapper(function: Map[Domain, Range]) -> Map[Domain, Range]: @wraps(function) def wrapped(argument: Domain) -> Range: try: return cache[argument] except KeyError: return function(argument) return wrapped return wrapper
def function[map_, parameter[cache]]: constant[ Returns decorator that calls wrapped function if nothing was found in cache for its argument. Wrapped function arguments should be hashable. ] def function[wrapper, parameter[function]]: def function[wrapped, parameter[argument]]: <ast.Try object at 0x7da18dc06da0> return[name[wrapped]] return[name[wrapper]]
keyword[def] identifier[map_] ( identifier[cache] : identifier[Mapping] [ identifier[Domain] , identifier[Range] ])-> identifier[Operator] [ identifier[Map] [ identifier[Domain] , identifier[Range] ]]: literal[string] keyword[def] identifier[wrapper] ( identifier[function] : identifier[Map] [ identifier[Domain] , identifier[Range] ])-> identifier[Map] [ identifier[Domain] , identifier[Range] ]: @ identifier[wraps] ( identifier[function] ) keyword[def] identifier[wrapped] ( identifier[argument] : identifier[Domain] )-> identifier[Range] : keyword[try] : keyword[return] identifier[cache] [ identifier[argument] ] keyword[except] identifier[KeyError] : keyword[return] identifier[function] ( identifier[argument] ) keyword[return] identifier[wrapped] keyword[return] identifier[wrapper]
def map_(cache: Mapping[Domain, Range]) -> Operator[Map[Domain, Range]]: """ Returns decorator that calls wrapped function if nothing was found in cache for its argument. Wrapped function arguments should be hashable. """ def wrapper(function: Map[Domain, Range]) -> Map[Domain, Range]: @wraps(function) def wrapped(argument: Domain) -> Range: try: return cache[argument] # depends on [control=['try'], data=[]] except KeyError: return function(argument) # depends on [control=['except'], data=[]] return wrapped return wrapper
def _parse_gene_anatomy(self, fh, limit): """ Process anat_entity files with columns: Ensembl gene ID,gene name, anatomical entity ID, anatomical entity name, rank score, XRefs to BTO :param fh: filehandle :param limit: int, limit per group :return: None """ dataframe = pd.read_csv(fh, sep='\t') col = self.files['anat_entity']['columns'] if list(dataframe) != col: LOG.warning( '\nExpected headers: %s\nRecived headers: %s', col, list(dataframe)) gene_groups = dataframe.sort_values( 'rank score', ascending=False).groupby('Ensembl gene ID') if limit is None: limit = 20 gene_groups = gene_groups.head(limit).groupby('Ensembl gene ID') for gene, group in gene_groups: for index, row in group.iterrows(): self._add_gene_anatomy_association( row['Ensembl gene ID'].strip(), row['anatomical entity ID'].strip(), row['rank score'] ) # uberon <==> bto equivelance? return
def function[_parse_gene_anatomy, parameter[self, fh, limit]]: constant[ Process anat_entity files with columns: Ensembl gene ID,gene name, anatomical entity ID, anatomical entity name, rank score, XRefs to BTO :param fh: filehandle :param limit: int, limit per group :return: None ] variable[dataframe] assign[=] call[name[pd].read_csv, parameter[name[fh]]] variable[col] assign[=] call[call[name[self].files][constant[anat_entity]]][constant[columns]] if compare[call[name[list], parameter[name[dataframe]]] not_equal[!=] name[col]] begin[:] call[name[LOG].warning, parameter[constant[ Expected headers: %s Recived headers: %s], name[col], call[name[list], parameter[name[dataframe]]]]] variable[gene_groups] assign[=] call[call[name[dataframe].sort_values, parameter[constant[rank score]]].groupby, parameter[constant[Ensembl gene ID]]] if compare[name[limit] is constant[None]] begin[:] variable[limit] assign[=] constant[20] variable[gene_groups] assign[=] call[call[name[gene_groups].head, parameter[name[limit]]].groupby, parameter[constant[Ensembl gene ID]]] for taget[tuple[[<ast.Name object at 0x7da20e9555a0>, <ast.Name object at 0x7da20e957340>]]] in starred[name[gene_groups]] begin[:] for taget[tuple[[<ast.Name object at 0x7da20e957a90>, <ast.Name object at 0x7da20e957f70>]]] in starred[call[name[group].iterrows, parameter[]]] begin[:] call[name[self]._add_gene_anatomy_association, parameter[call[call[name[row]][constant[Ensembl gene ID]].strip, parameter[]], call[call[name[row]][constant[anatomical entity ID]].strip, parameter[]], call[name[row]][constant[rank score]]]] return[None]
keyword[def] identifier[_parse_gene_anatomy] ( identifier[self] , identifier[fh] , identifier[limit] ): literal[string] identifier[dataframe] = identifier[pd] . identifier[read_csv] ( identifier[fh] , identifier[sep] = literal[string] ) identifier[col] = identifier[self] . identifier[files] [ literal[string] ][ literal[string] ] keyword[if] identifier[list] ( identifier[dataframe] )!= identifier[col] : identifier[LOG] . identifier[warning] ( literal[string] , identifier[col] , identifier[list] ( identifier[dataframe] )) identifier[gene_groups] = identifier[dataframe] . identifier[sort_values] ( literal[string] , identifier[ascending] = keyword[False] ). identifier[groupby] ( literal[string] ) keyword[if] identifier[limit] keyword[is] keyword[None] : identifier[limit] = literal[int] identifier[gene_groups] = identifier[gene_groups] . identifier[head] ( identifier[limit] ). identifier[groupby] ( literal[string] ) keyword[for] identifier[gene] , identifier[group] keyword[in] identifier[gene_groups] : keyword[for] identifier[index] , identifier[row] keyword[in] identifier[group] . identifier[iterrows] (): identifier[self] . identifier[_add_gene_anatomy_association] ( identifier[row] [ literal[string] ]. identifier[strip] (), identifier[row] [ literal[string] ]. identifier[strip] (), identifier[row] [ literal[string] ] ) keyword[return]
def _parse_gene_anatomy(self, fh, limit): """ Process anat_entity files with columns: Ensembl gene ID,gene name, anatomical entity ID, anatomical entity name, rank score, XRefs to BTO :param fh: filehandle :param limit: int, limit per group :return: None """ dataframe = pd.read_csv(fh, sep='\t') col = self.files['anat_entity']['columns'] if list(dataframe) != col: LOG.warning('\nExpected headers: %s\nRecived headers: %s', col, list(dataframe)) # depends on [control=['if'], data=['col']] gene_groups = dataframe.sort_values('rank score', ascending=False).groupby('Ensembl gene ID') if limit is None: limit = 20 # depends on [control=['if'], data=['limit']] gene_groups = gene_groups.head(limit).groupby('Ensembl gene ID') for (gene, group) in gene_groups: for (index, row) in group.iterrows(): self._add_gene_anatomy_association(row['Ensembl gene ID'].strip(), row['anatomical entity ID'].strip(), row['rank score']) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # uberon <==> bto equivelance? return
def event_group_update(
    self, event_group_id, names=None, sport_id="0.0.0", account=None, **kwargs
):
    """ Update an event group. This needs to be **proposed**.

        :param str event_group_id: Id of the event group to update
        :param list names: Internationalized names, e.g. ``[['de', 'Foo'],
            ['en', 'bar']]`` (defaults to an empty list)
        :param str sport_id: Sport ID to create the event group for
            (defaults to *relative* id ``0.0.0``)
        :param str account: (optional) the account to allow access
            to (defaults to ``default_account``)
    """
    # Avoid the shared mutable default argument pitfall: ``None`` stands
    # in for "no names supplied" and becomes a fresh list per call.
    if names is None:
        names = []
    assert isinstance(names, list)

    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")

    if sport_id[0] == "1":
        # Absolute id: test if the object exists on chain
        Sport(sport_id)
    else:
        # Relative id: test if the object is proposed in the buffer
        test_proposal_in_buffer(
            kwargs.get("append_to", self.propbuffer), "sport_create", sport_id
        )

    account = Account(account)
    event_group = EventGroup(event_group_id)
    op = operations.Event_group_update(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "event_group_id": event_group["id"],
            "new_name": names,
            "new_sport_id": sport_id,
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active", **kwargs)
def function[event_group_update, parameter[self, event_group_id, names, sport_id, account]]: constant[ Update an event group. This needs to be **proposed**. :param str event_id: Id of the event group to update :param list names: Internationalized names, e.g. ``[['de', 'Foo'], ['en', 'bar']]`` :param str sport_id: Sport ID to create the event group for (defaults to *relative* id ``0.0.0``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) ] assert[call[name[isinstance], parameter[name[names], name[list]]]] if <ast.UnaryOp object at 0x7da1b103a7d0> begin[:] if compare[constant[default_account] in name[self].config] begin[:] variable[account] assign[=] call[name[self].config][constant[default_account]] if <ast.UnaryOp object at 0x7da1b103b400> begin[:] <ast.Raise object at 0x7da1b103ba60> if compare[call[name[sport_id]][constant[0]] equal[==] constant[1]] begin[:] call[name[Sport], parameter[name[sport_id]]] variable[account] assign[=] call[name[Account], parameter[name[account]]] variable[event_group] assign[=] call[name[EventGroup], parameter[name[event_group_id]]] variable[op] assign[=] call[name[operations].Event_group_update, parameter[]] return[call[name[self].finalizeOp, parameter[name[op], call[name[account]][constant[name]], constant[active]]]]
keyword[def] identifier[event_group_update] ( identifier[self] , identifier[event_group_id] , identifier[names] =[], identifier[sport_id] = literal[string] , identifier[account] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[names] , identifier[list] ) keyword[if] keyword[not] identifier[account] : keyword[if] literal[string] keyword[in] identifier[self] . identifier[config] : identifier[account] = identifier[self] . identifier[config] [ literal[string] ] keyword[if] keyword[not] identifier[account] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[sport_id] [ literal[int] ]== literal[string] : identifier[Sport] ( identifier[sport_id] ) keyword[else] : identifier[test_proposal_in_buffer] ( identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[propbuffer] ), literal[string] , identifier[sport_id] ) identifier[account] = identifier[Account] ( identifier[account] ) identifier[event_group] = identifier[EventGroup] ( identifier[event_group_id] ) identifier[op] = identifier[operations] . identifier[Event_group_update] ( **{ literal[string] :{ literal[string] : literal[int] , literal[string] : literal[string] }, literal[string] : identifier[event_group] [ literal[string] ], literal[string] : identifier[names] , literal[string] : identifier[sport_id] , literal[string] : identifier[self] . identifier[prefix] , } ) keyword[return] identifier[self] . identifier[finalizeOp] ( identifier[op] , identifier[account] [ literal[string] ], literal[string] ,** identifier[kwargs] )
def event_group_update(self, event_group_id, names=[], sport_id='0.0.0', account=None, **kwargs): """ Update an event group. This needs to be **proposed**. :param str event_id: Id of the event group to update :param list names: Internationalized names, e.g. ``[['de', 'Foo'], ['en', 'bar']]`` :param str sport_id: Sport ID to create the event group for (defaults to *relative* id ``0.0.0``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) """ assert isinstance(names, list) if not account: if 'default_account' in self.config: account = self.config['default_account'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not account: raise ValueError('You need to provide an account') # depends on [control=['if'], data=[]] if sport_id[0] == '1': # Test if object exists Sport(sport_id) # depends on [control=['if'], data=[]] else: # Test if object is proposed test_proposal_in_buffer(kwargs.get('append_to', self.propbuffer), 'sport_create', sport_id) account = Account(account) event_group = EventGroup(event_group_id) op = operations.Event_group_update(**{'fee': {'amount': 0, 'asset_id': '1.3.0'}, 'event_group_id': event_group['id'], 'new_name': names, 'new_sport_id': sport_id, 'prefix': self.prefix}) return self.finalizeOp(op, account['name'], 'active', **kwargs)
def enable():
    """
    Enable all benchmarking.
    """
    # Flip the class-level switch on every benchmark type at once.
    for benchmark_cls in (Benchmark, ComparisonBenchmark,
                          BenchmarkedFunction, BenchmarkedClass):
        benchmark_cls.enable = True
def function[enable, parameter[]]: constant[ Enable all benchmarking. ] name[Benchmark].enable assign[=] constant[True] name[ComparisonBenchmark].enable assign[=] constant[True] name[BenchmarkedFunction].enable assign[=] constant[True] name[BenchmarkedClass].enable assign[=] constant[True]
keyword[def] identifier[enable] (): literal[string] identifier[Benchmark] . identifier[enable] = keyword[True] identifier[ComparisonBenchmark] . identifier[enable] = keyword[True] identifier[BenchmarkedFunction] . identifier[enable] = keyword[True] identifier[BenchmarkedClass] . identifier[enable] = keyword[True]
def enable(): """ Enable all benchmarking. """ Benchmark.enable = True ComparisonBenchmark.enable = True BenchmarkedFunction.enable = True BenchmarkedClass.enable = True
def create_parser(subparsers):
    """Register the ``version`` sub-command on *subparsers*.

    :param subparsers: argparse sub-parsers collection to extend
    :return: the newly created sub-parser
    """
    version_parser = subparsers.add_parser(
        'version',
        help='Print version of heron-cli',
        usage="%(prog)s [options] [cluster]",
        add_help=True)
    add_version_titles(version_parser)
    # Optional positional: which cluster to report the version for.
    version_parser.add_argument(
        'cluster',
        nargs='?',
        type=str,
        default="",
        help='Name of the cluster')
    cli_args.add_service_url(version_parser)
    version_parser.set_defaults(subcommand='version')
    return version_parser
def function[create_parser, parameter[subparsers]]: constant[ :param subparsers: :return: ] variable[parser] assign[=] call[name[subparsers].add_parser, parameter[constant[version]]] call[name[add_version_titles], parameter[name[parser]]] call[name[parser].add_argument, parameter[constant[cluster]]] call[name[cli_args].add_service_url, parameter[name[parser]]] call[name[parser].set_defaults, parameter[]] return[name[parser]]
keyword[def] identifier[create_parser] ( identifier[subparsers] ): literal[string] identifier[parser] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] , identifier[usage] = literal[string] , identifier[add_help] = keyword[True] ) identifier[add_version_titles] ( identifier[parser] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[type] = identifier[str] , identifier[default] = literal[string] , identifier[help] = literal[string] ) identifier[cli_args] . identifier[add_service_url] ( identifier[parser] ) identifier[parser] . identifier[set_defaults] ( identifier[subcommand] = literal[string] ) keyword[return] identifier[parser]
def create_parser(subparsers): """ :param subparsers: :return: """ parser = subparsers.add_parser('version', help='Print version of heron-cli', usage='%(prog)s [options] [cluster]', add_help=True) add_version_titles(parser) parser.add_argument('cluster', nargs='?', type=str, default='', help='Name of the cluster') cli_args.add_service_url(parser) parser.set_defaults(subcommand='version') return parser
def _vcf_info(start, end, mate_id, info=None): """Return breakend information line with mate and imprecise location. """ out = "SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}".format( mate=mate_id, size=end-start) if info is not None: extra_info = ";".join("{0}={1}".format(k, v) for k, v in info.iteritems()) out = "{0};{1}".format(out, extra_info) return out
def function[_vcf_info, parameter[start, end, mate_id, info]]: constant[Return breakend information line with mate and imprecise location. ] variable[out] assign[=] call[constant[SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}].format, parameter[]] if compare[name[info] is_not constant[None]] begin[:] variable[extra_info] assign[=] call[constant[;].join, parameter[<ast.GeneratorExp object at 0x7da1b19bb040>]] variable[out] assign[=] call[constant[{0};{1}].format, parameter[name[out], name[extra_info]]] return[name[out]]
keyword[def] identifier[_vcf_info] ( identifier[start] , identifier[end] , identifier[mate_id] , identifier[info] = keyword[None] ): literal[string] identifier[out] = literal[string] . identifier[format] ( identifier[mate] = identifier[mate_id] , identifier[size] = identifier[end] - identifier[start] ) keyword[if] identifier[info] keyword[is] keyword[not] keyword[None] : identifier[extra_info] = literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[info] . identifier[iteritems] ()) identifier[out] = literal[string] . identifier[format] ( identifier[out] , identifier[extra_info] ) keyword[return] identifier[out]
def _vcf_info(start, end, mate_id, info=None): """Return breakend information line with mate and imprecise location. """ out = 'SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}'.format(mate=mate_id, size=end - start) if info is not None: extra_info = ';'.join(('{0}={1}'.format(k, v) for (k, v) in info.iteritems())) out = '{0};{1}'.format(out, extra_info) # depends on [control=['if'], data=['info']] return out
def set_dir(self, dir_):
    """Sets directory, auto-loads, updates all GUI contents.

    :param dir_: directory to switch to

    The five private calls below form a fixed pipeline; their order
    matters (the directory must be set before loading, and data must be
    loaded before the table/info/title are refreshed).
    """
    self.__lock_set_dir(dir_)
    self.__lock_auto_load()
    self.__lock_update_table()
    self.__update_info()
    self.__update_window_title()
def function[set_dir, parameter[self, dir_]]: constant[Sets directory, auto-loads, updates all GUI contents.] call[name[self].__lock_set_dir, parameter[name[dir_]]] call[name[self].__lock_auto_load, parameter[]] call[name[self].__lock_update_table, parameter[]] call[name[self].__update_info, parameter[]] call[name[self].__update_window_title, parameter[]]
keyword[def] identifier[set_dir] ( identifier[self] , identifier[dir_] ): literal[string] identifier[self] . identifier[__lock_set_dir] ( identifier[dir_] ) identifier[self] . identifier[__lock_auto_load] () identifier[self] . identifier[__lock_update_table] () identifier[self] . identifier[__update_info] () identifier[self] . identifier[__update_window_title] ()
def set_dir(self, dir_): """Sets directory, auto-loads, updates all GUI contents.""" self.__lock_set_dir(dir_) self.__lock_auto_load() self.__lock_update_table() self.__update_info() self.__update_window_title()
def request_timeout(self, timeout):
    """
    Context manager implements opportunity to change request timeout
    in current context.

    The timeout is normalized via ``_prepare_timeout`` and installed in
    the ``_ctx_timeout`` context variable for the duration of the
    ``with`` block, then restored even if the body raises.

    NOTE(review): the generator form implies this is wrapped with
    ``@contextlib.contextmanager`` at the (unseen) decoration site —
    confirm in the full class definition.

    :param timeout: timeout value to apply within the context
    :return: context manager (yields nothing)
    """
    timeout = self._prepare_timeout(timeout)
    # set() returns a token that lets us restore the previous value.
    token = self._ctx_timeout.set(timeout)
    try:
        yield
    finally:
        # Always undo the override, even on exceptions inside the block.
        self._ctx_timeout.reset(token)
def function[request_timeout, parameter[self, timeout]]: constant[ Context manager implements opportunity to change request timeout in current context :param timeout: :return: ] variable[timeout] assign[=] call[name[self]._prepare_timeout, parameter[name[timeout]]] variable[token] assign[=] call[name[self]._ctx_timeout.set, parameter[name[timeout]]] <ast.Try object at 0x7da1b1734160>
keyword[def] identifier[request_timeout] ( identifier[self] , identifier[timeout] ): literal[string] identifier[timeout] = identifier[self] . identifier[_prepare_timeout] ( identifier[timeout] ) identifier[token] = identifier[self] . identifier[_ctx_timeout] . identifier[set] ( identifier[timeout] ) keyword[try] : keyword[yield] keyword[finally] : identifier[self] . identifier[_ctx_timeout] . identifier[reset] ( identifier[token] )
def request_timeout(self, timeout): """ Context manager implements opportunity to change request timeout in current context :param timeout: :return: """ timeout = self._prepare_timeout(timeout) token = self._ctx_timeout.set(timeout) try: yield # depends on [control=['try'], data=[]] finally: self._ctx_timeout.reset(token)
async def ws_handler(self, protocol, path):
    """
    This is the main handler function for the 'websockets' implementation
    to call into.

    We just wait for close then return, and instead allow 'send' and
    'receive' events to drive the flow.

    :param protocol: per-connection websockets protocol object (unused here)
    :param path: requested URL path (unused here)
    """
    # Signal that the handshake finished so other coroutines may proceed.
    self.handshake_completed_event.set()
    # Keep this handler (and thus the connection) alive until closed.
    await self.closed_event.wait()
<ast.AsyncFunctionDef object at 0x7da1b26aee90>
keyword[async] keyword[def] identifier[ws_handler] ( identifier[self] , identifier[protocol] , identifier[path] ): literal[string] identifier[self] . identifier[handshake_completed_event] . identifier[set] () keyword[await] identifier[self] . identifier[closed_event] . identifier[wait] ()
async def ws_handler(self, protocol, path): """ This is the main handler function for the 'websockets' implementation to call into. We just wait for close then return, and instead allow 'send' and 'receive' events to drive the flow. """ self.handshake_completed_event.set() await self.closed_event.wait()
def main(args=sys.argv[1:]):
    """
    Run Grole static file server
    """
    opts = parse_args(args)
    # Translate the mutually exclusive verbosity flags into a log level.
    if opts.verbose:
        level = logging.DEBUG
    elif opts.quiet:
        level = logging.ERROR
    else:
        level = logging.INFO
    logging.basicConfig(level=level)
    server = Grole()
    serve_static(server, '', opts.directory, not opts.noindex)
    server.run(opts.address, opts.port)
def function[main, parameter[args]]: constant[ Run Grole static file server ] variable[args] assign[=] call[name[parse_args], parameter[name[args]]] if name[args].verbose begin[:] call[name[logging].basicConfig, parameter[]] variable[app] assign[=] call[name[Grole], parameter[]] call[name[serve_static], parameter[name[app], constant[], name[args].directory, <ast.UnaryOp object at 0x7da1b1c648e0>]] call[name[app].run, parameter[name[args].address, name[args].port]]
keyword[def] identifier[main] ( identifier[args] = identifier[sys] . identifier[argv] [ literal[int] :]): literal[string] identifier[args] = identifier[parse_args] ( identifier[args] ) keyword[if] identifier[args] . identifier[verbose] : identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[DEBUG] ) keyword[elif] identifier[args] . identifier[quiet] : identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[ERROR] ) keyword[else] : identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[logging] . identifier[INFO] ) identifier[app] = identifier[Grole] () identifier[serve_static] ( identifier[app] , literal[string] , identifier[args] . identifier[directory] , keyword[not] identifier[args] . identifier[noindex] ) identifier[app] . identifier[run] ( identifier[args] . identifier[address] , identifier[args] . identifier[port] )
def main(args=sys.argv[1:]): """ Run Grole static file server """ args = parse_args(args) if args.verbose: logging.basicConfig(level=logging.DEBUG) # depends on [control=['if'], data=[]] elif args.quiet: logging.basicConfig(level=logging.ERROR) # depends on [control=['if'], data=[]] else: logging.basicConfig(level=logging.INFO) app = Grole() serve_static(app, '', args.directory, not args.noindex) app.run(args.address, args.port)
def iter_pipe(pipe, processors):
    """Allow for iterators to return either an item or an iterator of items."""
    # A bare string is promoted to a one-element pipeline input.
    stream = [pipe] if isinstance(pipe, str) else pipe
    # Thread the stream through each processor stage in order.
    for stage in processors:
        stream = stage(stream)
    yield from stream
def function[iter_pipe, parameter[pipe, processors]]: constant[Allow for iterators to return either an item or an iterator of items.] if call[name[isinstance], parameter[name[pipe], name[str]]] begin[:] variable[pipe] assign[=] list[[<ast.Name object at 0x7da1b0380ee0>]] for taget[name[it]] in starred[name[processors]] begin[:] variable[pipe] assign[=] call[name[it], parameter[name[pipe]]] <ast.YieldFrom object at 0x7da1b0351e10>
keyword[def] identifier[iter_pipe] ( identifier[pipe] , identifier[processors] ): literal[string] keyword[if] identifier[isinstance] ( identifier[pipe] , identifier[str] ): identifier[pipe] =[ identifier[pipe] ] keyword[for] identifier[it] keyword[in] identifier[processors] : identifier[pipe] = identifier[it] ( identifier[pipe] ) keyword[yield] keyword[from] identifier[pipe]
def iter_pipe(pipe, processors): """Allow for iterators to return either an item or an iterator of items.""" if isinstance(pipe, str): pipe = [pipe] # depends on [control=['if'], data=[]] for it in processors: pipe = it(pipe) # depends on [control=['for'], data=['it']] yield from pipe
def act(self):
    """
    Carries out the action associated with the Save button.

    Validates the instrument and run parameters, serializes the current
    application state to JSON and writes it to disk.

    :return: True on a successful save, False if validation or the disk
        write failed (failures are reported through the command log).
    """
    g = get_root(self).globals
    g.clog.info('\nSaving current application to disk')

    # check instrument parameters
    if not g.ipars.check():
        g.clog.warn('Invalid instrument parameters; save failed.')
        return False

    # check run parameters
    rok, msg = g.rpars.check()
    if not rok:
        g.clog.warn('Invalid run parameters; save failed.')
        g.clog.warn(msg)
        return False

    # Get data to save (full=False: only the user-editable application state)
    data = createJSON(g, full=False)

    # Save to disk
    if saveJSON(g, data):
        # modify buttons: a saved app can now be loaded, and the settings
        # panels no longer need to stay frozen
        g.observe.load.enable()
        g.observe.unfreeze.disable()

        # unfreeze the instrument and run params
        g.ipars.unfreeze()
        g.rpars.unfreeze()
        return True
    else:
        return False
def function[act, parameter[self]]: constant[ Carries out the action associated with the Save button ] variable[g] assign[=] call[name[get_root], parameter[name[self]]].globals call[name[g].clog.info, parameter[constant[ Saving current application to disk]]] if <ast.UnaryOp object at 0x7da20cabdc30> begin[:] call[name[g].clog.warn, parameter[constant[Invalid instrument parameters; save failed.]]] return[constant[False]] <ast.Tuple object at 0x7da20c6a83d0> assign[=] call[name[g].rpars.check, parameter[]] if <ast.UnaryOp object at 0x7da20c6a80a0> begin[:] call[name[g].clog.warn, parameter[constant[Invalid run parameters; save failed.]]] call[name[g].clog.warn, parameter[name[msg]]] return[constant[False]] variable[data] assign[=] call[name[createJSON], parameter[name[g]]] if call[name[saveJSON], parameter[name[g], name[data]]] begin[:] call[name[g].observe.load.enable, parameter[]] call[name[g].observe.unfreeze.disable, parameter[]] call[name[g].ipars.unfreeze, parameter[]] call[name[g].rpars.unfreeze, parameter[]] return[constant[True]]
keyword[def] identifier[act] ( identifier[self] ): literal[string] identifier[g] = identifier[get_root] ( identifier[self] ). identifier[globals] identifier[g] . identifier[clog] . identifier[info] ( literal[string] ) keyword[if] keyword[not] identifier[g] . identifier[ipars] . identifier[check] (): identifier[g] . identifier[clog] . identifier[warn] ( literal[string] ) keyword[return] keyword[False] identifier[rok] , identifier[msg] = identifier[g] . identifier[rpars] . identifier[check] () keyword[if] keyword[not] identifier[rok] : identifier[g] . identifier[clog] . identifier[warn] ( literal[string] ) identifier[g] . identifier[clog] . identifier[warn] ( identifier[msg] ) keyword[return] keyword[False] identifier[data] = identifier[createJSON] ( identifier[g] , identifier[full] = keyword[False] ) keyword[if] identifier[saveJSON] ( identifier[g] , identifier[data] ): identifier[g] . identifier[observe] . identifier[load] . identifier[enable] () identifier[g] . identifier[observe] . identifier[unfreeze] . identifier[disable] () identifier[g] . identifier[ipars] . identifier[unfreeze] () identifier[g] . identifier[rpars] . identifier[unfreeze] () keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False]
def act(self): """ Carries out the action associated with the Save button """ g = get_root(self).globals g.clog.info('\nSaving current application to disk') # check instrument parameters if not g.ipars.check(): g.clog.warn('Invalid instrument parameters; save failed.') return False # depends on [control=['if'], data=[]] # check run parameters (rok, msg) = g.rpars.check() if not rok: g.clog.warn('Invalid run parameters; save failed.') g.clog.warn(msg) return False # depends on [control=['if'], data=[]] # Get data to save data = createJSON(g, full=False) # Save to disk if saveJSON(g, data): # modify buttons g.observe.load.enable() g.observe.unfreeze.disable() # unfreeze the instrument and run params g.ipars.unfreeze() g.rpars.unfreeze() return True # depends on [control=['if'], data=[]] else: return False
def _align(x, y, local = False):
    """
    Pairwise-align sequences *x* and *y* with Biopython's ``pairwise2``
    and return the [min, max] positions of non-gap characters in the
    aligned form of *x*.

    https://medium.com/towards-data-science/pairwise-sequence-alignment-using-biopython-d1a9d0ba861f

    :param x: first sequence (its aligned form is inspected)
    :param y: second sequence
    :param local: if True use local (Smith-Waterman style) ``localxx``
        scoring, otherwise global ``globalms`` with match=1, mismatch=-1,
        gap open=-1, gap extend=-0.5
    :return: ``[first, last]`` indices of non-gap positions, or
        implicitly ``None`` when no alignment was produced

    NOTE(review): alignments are sorted by score *ascending* and element
    ``[0]`` is taken, i.e. the LOWEST-scoring alignment is used — this
    looks like it may have intended ``reverse=True`` (best alignment);
    confirm against callers before changing.
    """
    if local:
        aligned_x = pairwise2.align.localxx(x, y)
    else:
        aligned_x = pairwise2.align.globalms(x, y, 1, -1, -1, -0.5)
    if aligned_x:
        sorted_alignments = sorted(aligned_x, key=operator.itemgetter(2))
        # Index positions of residues (non-'-') in the aligned x sequence.
        e = enumerate(sorted_alignments[0][0])
        nts = [i for i,c in e if c != "-"]
        return [min(nts), max(nts)]
def function[_align, parameter[x, y, local]]: constant[ https://medium.com/towards-data-science/pairwise-sequence-alignment-using-biopython-d1a9d0ba861f ] if name[local] begin[:] variable[aligned_x] assign[=] call[name[pairwise2].align.localxx, parameter[name[x], name[y]]] if name[aligned_x] begin[:] variable[sorted_alignments] assign[=] call[name[sorted], parameter[name[aligned_x]]] variable[e] assign[=] call[name[enumerate], parameter[call[call[name[sorted_alignments]][constant[0]]][constant[0]]]] variable[nts] assign[=] <ast.ListComp object at 0x7da1b0338520> return[list[[<ast.Call object at 0x7da1b0338f40>, <ast.Call object at 0x7da1b0338a60>]]]
keyword[def] identifier[_align] ( identifier[x] , identifier[y] , identifier[local] = keyword[False] ): literal[string] keyword[if] identifier[local] : identifier[aligned_x] = identifier[pairwise2] . identifier[align] . identifier[localxx] ( identifier[x] , identifier[y] ) keyword[else] : identifier[aligned_x] = identifier[pairwise2] . identifier[align] . identifier[globalms] ( identifier[x] , identifier[y] , literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ) keyword[if] identifier[aligned_x] : identifier[sorted_alignments] = identifier[sorted] ( identifier[aligned_x] , identifier[key] = identifier[operator] . identifier[itemgetter] ( literal[int] )) identifier[e] = identifier[enumerate] ( identifier[sorted_alignments] [ literal[int] ][ literal[int] ]) identifier[nts] =[ identifier[i] keyword[for] identifier[i] , identifier[c] keyword[in] identifier[e] keyword[if] identifier[c] != literal[string] ] keyword[return] [ identifier[min] ( identifier[nts] ), identifier[max] ( identifier[nts] )]
def _align(x, y, local=False): """ https://medium.com/towards-data-science/pairwise-sequence-alignment-using-biopython-d1a9d0ba861f """ if local: aligned_x = pairwise2.align.localxx(x, y) # depends on [control=['if'], data=[]] else: aligned_x = pairwise2.align.globalms(x, y, 1, -1, -1, -0.5) if aligned_x: sorted_alignments = sorted(aligned_x, key=operator.itemgetter(2)) e = enumerate(sorted_alignments[0][0]) nts = [i for (i, c) in e if c != '-'] return [min(nts), max(nts)] # depends on [control=['if'], data=[]]
def cl_mutect(self, params, tmp_dir):
    """Define parameters to run the mutect paired algorithm.

    Builds the full java command line: JVM options, temp-dir defaults,
    the muTect jar, then the caller-supplied parameters stringified.
    """
    gatk_jar = self._get_jar("muTect", ["mutect"])
    # Decrease memory slightly from configuration to avoid memory allocation errors
    jvm_opts = config_utils.adjust_opts(
        self._jvm_opts,
        {"algorithm": {"memory_adjust": {"magnitude": 1.1,
                                         "direction": "decrease"}}})
    cmd = ["java"]
    cmd.extend(jvm_opts)
    cmd.extend(get_default_jvm_opts(tmp_dir))
    cmd.extend(["-jar", gatk_jar])
    cmd.extend(str(x) for x in params)
    return cmd
def function[cl_mutect, parameter[self, params, tmp_dir]]: constant[Define parameters to run the mutect paired algorithm. ] variable[gatk_jar] assign[=] call[name[self]._get_jar, parameter[constant[muTect], list[[<ast.Constant object at 0x7da1b18dacb0>]]]] variable[jvm_opts] assign[=] call[name[config_utils].adjust_opts, parameter[name[self]._jvm_opts, dictionary[[<ast.Constant object at 0x7da1b18d8b80>], [<ast.Dict object at 0x7da1b18d8b20>]]]] return[binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da1b18d8f40>]] + name[jvm_opts]] + call[name[get_default_jvm_opts], parameter[name[tmp_dir]]]] + list[[<ast.Constant object at 0x7da1b18d8e80>, <ast.Name object at 0x7da1b18d8a90>]]] + <ast.ListComp object at 0x7da1b18d8670>]]
keyword[def] identifier[cl_mutect] ( identifier[self] , identifier[params] , identifier[tmp_dir] ): literal[string] identifier[gatk_jar] = identifier[self] . identifier[_get_jar] ( literal[string] ,[ literal[string] ]) identifier[jvm_opts] = identifier[config_utils] . identifier[adjust_opts] ( identifier[self] . identifier[_jvm_opts] , { literal[string] :{ literal[string] : { literal[string] : literal[int] , literal[string] : literal[string] }}}) keyword[return] [ literal[string] ]+ identifier[jvm_opts] + identifier[get_default_jvm_opts] ( identifier[tmp_dir] )+[ literal[string] , identifier[gatk_jar] ]+[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[params] ]
def cl_mutect(self, params, tmp_dir): """Define parameters to run the mutect paired algorithm. """ gatk_jar = self._get_jar('muTect', ['mutect']) # Decrease memory slightly from configuration to avoid memory allocation errors jvm_opts = config_utils.adjust_opts(self._jvm_opts, {'algorithm': {'memory_adjust': {'magnitude': 1.1, 'direction': 'decrease'}}}) return ['java'] + jvm_opts + get_default_jvm_opts(tmp_dir) + ['-jar', gatk_jar] + [str(x) for x in params]
def read_nb(fname):
    "Read a notebook in `fname` and return its corresponding json"
    with open(fname, 'r') as notebook_file:
        raw = notebook_file.read()
    # Parse after the file is closed; normalize to nbformat version 4.
    return nbformat.reads(raw, as_version=4)
def function[read_nb, parameter[fname]]: constant[Read a notebook in `fname` and return its corresponding json] with call[name[open], parameter[name[fname], constant[r]]] begin[:] return[call[name[nbformat].reads, parameter[call[name[f].read, parameter[]]]]]
keyword[def] identifier[read_nb] ( identifier[fname] ): literal[string] keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[f] : keyword[return] identifier[nbformat] . identifier[reads] ( identifier[f] . identifier[read] (), identifier[as_version] = literal[int] )
def read_nb(fname): """Read a notebook in `fname` and return its corresponding json""" with open(fname, 'r') as f: return nbformat.reads(f.read(), as_version=4) # depends on [control=['with'], data=['f']]
def nt_counts(bam, positions, stranded=False, vcf=False, bed=False): """ Find the number of nucleotides covered at all positions in a bed or vcf file. Parameters ---------- bam : str or pysam.calignmentfile.AlignmentFile Bam file opened with pysam or path to bam file (must be sorted and indexed). positions : str or pybedtools.BedTool Path to bed or vcf file or pybedtools.BedTool object. The extension is used to determine whether the file is a bed or vcf (.bed vs .vcf). stranded : boolean Boolean indicating whether read data is stranded and stranded nucleotide counts should be returned. Assumes R1 read on reverse strand implies + strand coverage etc. vcf : boolean Set to True if you are providing a vcf file that doesn't have a .vcf suffix. bed : boolean Set to True if you are providing a bed file that doesn't have a .bed suffix. Returns ------- counts : pandas.DataFrame Data frame with the counts for each base in the region. The index of this data frame is one-based for compatibility with VCF files. """ if not bed and not vcf: if type(positions) == pbt.bedtool.BedTool: df = positions.to_dataframe() elif positions[-4:] == '.bed': bed = True elif positions[-4:] == '.vcf': vcf = True else: sys.stderr.write('Positions must be BedTool, bed file, or vcf ' 'file.\n') if bed: df = pbt.BedTool(positions).to_dataframe() elif vcf: from variants import vcf_as_df tdf = vcf_as_df(positions) df = pd.DataFrame(index=tdf.index) df['chrom'] = tdf.CHROM df['start'] = tdf.POS - 1 df['end'] = tdf.POS res = [] for i in df.index: region = [df.ix[i, 'chrom'], df.ix[i, 'start'], df.ix[i, 'end']] res.append(get_region_nt_counts(region, bam, stranded)) res = pd.concat(res) return res
def function[nt_counts, parameter[bam, positions, stranded, vcf, bed]]: constant[ Find the number of nucleotides covered at all positions in a bed or vcf file. Parameters ---------- bam : str or pysam.calignmentfile.AlignmentFile Bam file opened with pysam or path to bam file (must be sorted and indexed). positions : str or pybedtools.BedTool Path to bed or vcf file or pybedtools.BedTool object. The extension is used to determine whether the file is a bed or vcf (.bed vs .vcf). stranded : boolean Boolean indicating whether read data is stranded and stranded nucleotide counts should be returned. Assumes R1 read on reverse strand implies + strand coverage etc. vcf : boolean Set to True if you are providing a vcf file that doesn't have a .vcf suffix. bed : boolean Set to True if you are providing a bed file that doesn't have a .bed suffix. Returns ------- counts : pandas.DataFrame Data frame with the counts for each base in the region. The index of this data frame is one-based for compatibility with VCF files. ] if <ast.BoolOp object at 0x7da2045655d0> begin[:] if compare[call[name[type], parameter[name[positions]]] equal[==] name[pbt].bedtool.BedTool] begin[:] variable[df] assign[=] call[name[positions].to_dataframe, parameter[]] if name[bed] begin[:] variable[df] assign[=] call[call[name[pbt].BedTool, parameter[name[positions]]].to_dataframe, parameter[]] variable[res] assign[=] list[[]] for taget[name[i]] in starred[name[df].index] begin[:] variable[region] assign[=] list[[<ast.Subscript object at 0x7da204565120>, <ast.Subscript object at 0x7da204564070>, <ast.Subscript object at 0x7da204565c30>]] call[name[res].append, parameter[call[name[get_region_nt_counts], parameter[name[region], name[bam], name[stranded]]]]] variable[res] assign[=] call[name[pd].concat, parameter[name[res]]] return[name[res]]
keyword[def] identifier[nt_counts] ( identifier[bam] , identifier[positions] , identifier[stranded] = keyword[False] , identifier[vcf] = keyword[False] , identifier[bed] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[bed] keyword[and] keyword[not] identifier[vcf] : keyword[if] identifier[type] ( identifier[positions] )== identifier[pbt] . identifier[bedtool] . identifier[BedTool] : identifier[df] = identifier[positions] . identifier[to_dataframe] () keyword[elif] identifier[positions] [- literal[int] :]== literal[string] : identifier[bed] = keyword[True] keyword[elif] identifier[positions] [- literal[int] :]== literal[string] : identifier[vcf] = keyword[True] keyword[else] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] literal[string] ) keyword[if] identifier[bed] : identifier[df] = identifier[pbt] . identifier[BedTool] ( identifier[positions] ). identifier[to_dataframe] () keyword[elif] identifier[vcf] : keyword[from] identifier[variants] keyword[import] identifier[vcf_as_df] identifier[tdf] = identifier[vcf_as_df] ( identifier[positions] ) identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[tdf] . identifier[index] ) identifier[df] [ literal[string] ]= identifier[tdf] . identifier[CHROM] identifier[df] [ literal[string] ]= identifier[tdf] . identifier[POS] - literal[int] identifier[df] [ literal[string] ]= identifier[tdf] . identifier[POS] identifier[res] =[] keyword[for] identifier[i] keyword[in] identifier[df] . identifier[index] : identifier[region] =[ identifier[df] . identifier[ix] [ identifier[i] , literal[string] ], identifier[df] . identifier[ix] [ identifier[i] , literal[string] ], identifier[df] . identifier[ix] [ identifier[i] , literal[string] ]] identifier[res] . identifier[append] ( identifier[get_region_nt_counts] ( identifier[region] , identifier[bam] , identifier[stranded] )) identifier[res] = identifier[pd] . 
identifier[concat] ( identifier[res] ) keyword[return] identifier[res]
def nt_counts(bam, positions, stranded=False, vcf=False, bed=False): """ Find the number of nucleotides covered at all positions in a bed or vcf file. Parameters ---------- bam : str or pysam.calignmentfile.AlignmentFile Bam file opened with pysam or path to bam file (must be sorted and indexed). positions : str or pybedtools.BedTool Path to bed or vcf file or pybedtools.BedTool object. The extension is used to determine whether the file is a bed or vcf (.bed vs .vcf). stranded : boolean Boolean indicating whether read data is stranded and stranded nucleotide counts should be returned. Assumes R1 read on reverse strand implies + strand coverage etc. vcf : boolean Set to True if you are providing a vcf file that doesn't have a .vcf suffix. bed : boolean Set to True if you are providing a bed file that doesn't have a .bed suffix. Returns ------- counts : pandas.DataFrame Data frame with the counts for each base in the region. The index of this data frame is one-based for compatibility with VCF files. """ if not bed and (not vcf): if type(positions) == pbt.bedtool.BedTool: df = positions.to_dataframe() # depends on [control=['if'], data=[]] elif positions[-4:] == '.bed': bed = True # depends on [control=['if'], data=[]] elif positions[-4:] == '.vcf': vcf = True # depends on [control=['if'], data=[]] else: sys.stderr.write('Positions must be BedTool, bed file, or vcf file.\n') # depends on [control=['if'], data=[]] if bed: df = pbt.BedTool(positions).to_dataframe() # depends on [control=['if'], data=[]] elif vcf: from variants import vcf_as_df tdf = vcf_as_df(positions) df = pd.DataFrame(index=tdf.index) df['chrom'] = tdf.CHROM df['start'] = tdf.POS - 1 df['end'] = tdf.POS # depends on [control=['if'], data=[]] res = [] for i in df.index: region = [df.ix[i, 'chrom'], df.ix[i, 'start'], df.ix[i, 'end']] res.append(get_region_nt_counts(region, bam, stranded)) # depends on [control=['for'], data=['i']] res = pd.concat(res) return res
def _core_qft(qubits: List[int], coeff: int) -> Program: """ Generates the core program to perform the quantum Fourier transform :param qubits: A list of qubit indexes. :param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT) :return: A Quil program to compute the core (inverse) QFT of the qubits. """ q = qubits[0] qs = qubits[1:] if 1 == len(qubits): return [H(q)] else: n = 1 + len(qs) cR = [] for idx, i in enumerate(range(n - 1, 0, -1)): q_idx = qs[idx] angle = math.pi / 2 ** (n - i) cR.append(CPHASE(coeff * angle, q, q_idx)) return _core_qft(qs, coeff) + list(reversed(cR)) + [H(q)]
def function[_core_qft, parameter[qubits, coeff]]: constant[ Generates the core program to perform the quantum Fourier transform :param qubits: A list of qubit indexes. :param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT) :return: A Quil program to compute the core (inverse) QFT of the qubits. ] variable[q] assign[=] call[name[qubits]][constant[0]] variable[qs] assign[=] call[name[qubits]][<ast.Slice object at 0x7da2041da530>] if compare[constant[1] equal[==] call[name[len], parameter[name[qubits]]]] begin[:] return[list[[<ast.Call object at 0x7da2041d8f70>]]]
keyword[def] identifier[_core_qft] ( identifier[qubits] : identifier[List] [ identifier[int] ], identifier[coeff] : identifier[int] )-> identifier[Program] : literal[string] identifier[q] = identifier[qubits] [ literal[int] ] identifier[qs] = identifier[qubits] [ literal[int] :] keyword[if] literal[int] == identifier[len] ( identifier[qubits] ): keyword[return] [ identifier[H] ( identifier[q] )] keyword[else] : identifier[n] = literal[int] + identifier[len] ( identifier[qs] ) identifier[cR] =[] keyword[for] identifier[idx] , identifier[i] keyword[in] identifier[enumerate] ( identifier[range] ( identifier[n] - literal[int] , literal[int] ,- literal[int] )): identifier[q_idx] = identifier[qs] [ identifier[idx] ] identifier[angle] = identifier[math] . identifier[pi] / literal[int] **( identifier[n] - identifier[i] ) identifier[cR] . identifier[append] ( identifier[CPHASE] ( identifier[coeff] * identifier[angle] , identifier[q] , identifier[q_idx] )) keyword[return] identifier[_core_qft] ( identifier[qs] , identifier[coeff] )+ identifier[list] ( identifier[reversed] ( identifier[cR] ))+[ identifier[H] ( identifier[q] )]
def _core_qft(qubits: List[int], coeff: int) -> Program: """ Generates the core program to perform the quantum Fourier transform :param qubits: A list of qubit indexes. :param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT) :return: A Quil program to compute the core (inverse) QFT of the qubits. """ q = qubits[0] qs = qubits[1:] if 1 == len(qubits): return [H(q)] # depends on [control=['if'], data=[]] else: n = 1 + len(qs) cR = [] for (idx, i) in enumerate(range(n - 1, 0, -1)): q_idx = qs[idx] angle = math.pi / 2 ** (n - i) cR.append(CPHASE(coeff * angle, q, q_idx)) # depends on [control=['for'], data=[]] return _core_qft(qs, coeff) + list(reversed(cR)) + [H(q)]
def maximum_impact_estimation(membership_matrix, max_iters=1000): """An expectation maximization technique that produces pathway definitions devoid of crosstalk. That is, each gene is mapped to the pathway in which it has the greatest predicted impact; this removes any overlap between pathway definitions. Parameters ----------- membership_matrix : numpy.array(float), shape = [n, k] The observed gene-to-pathway membership matrix, where n is the number of genes and k is the number of pathways we are interested in. max_iters : int (default=1000) The maximum number of expectation-maximization steps to take. Returns ----------- dict(int -> set(int)), a dictionary mapping a pathway to a set of genes. These are the pathway definitions after the maximum impact estimation procedure has been applied to remove crosstalk. - The keys are ints corresponding to the pathway column indices in the membership matrix. - The values are sets of ints corresponding to gene row indices in the membership matrix. """ # Initialize the probability vector as the sum of each column in the # membership matrix normalized by the sum of the entire membership matrix. # The probability at some index j in the vector represents the likelihood # that a pathway (column) j is defined by the current set of genes (rows) # in the membership matrix. pr_0 = np.sum(membership_matrix, axis=0) / np.sum(membership_matrix) pr_1 = _update_probabilities(pr_0, membership_matrix) epsilon = np.linalg.norm(pr_1 - pr_0)/100. 
pr_old = pr_1 check_for_convergence = epsilon count = 0 while epsilon > NEAR_ZERO and check_for_convergence >= epsilon: count += 1 if count > max_iters: print("Reached the maximum number of iterations {0}".format( max_iters)) break pr_new = _update_probabilities(pr_old, membership_matrix) check_for_convergence = np.linalg.norm(pr_new - pr_old) pr_old = pr_new pr_final = pr_old # renaming for readability corrected_pathway_definitions = {} n, k = membership_matrix.shape for gene_index in range(n): gene_membership = membership_matrix[gene_index] denominator = np.dot(gene_membership, pr_final) # Approximation is used to prevent divide by zero warning. # Since we are only looking for the _most_ probable pathway in which a # gene contributes its maximum impact, precision is not as important # as maintaining the relative differences between each # pathway's probability. if denominator < NEAR_ZERO: denominator = NEAR_ZERO # This is equivalent to one row in what Donato et al. (2013) refer # to as the underlying (latent) Z matrix. conditional_pathway_pr = (np.multiply(gene_membership, pr_final) / denominator) all_pathways_at_max = np.where( conditional_pathway_pr == conditional_pathway_pr.max())[0] gene_in_pathways = np.where(gene_membership == 1)[0] all_pathways_at_max = np.intersect1d( all_pathways_at_max, gene_in_pathways) pathway_index = np.random.choice(all_pathways_at_max) if pathway_index not in corrected_pathway_definitions: corrected_pathway_definitions[pathway_index] = set() corrected_pathway_definitions[pathway_index].add(gene_index) return corrected_pathway_definitions
def function[maximum_impact_estimation, parameter[membership_matrix, max_iters]]: constant[An expectation maximization technique that produces pathway definitions devoid of crosstalk. That is, each gene is mapped to the pathway in which it has the greatest predicted impact; this removes any overlap between pathway definitions. Parameters ----------- membership_matrix : numpy.array(float), shape = [n, k] The observed gene-to-pathway membership matrix, where n is the number of genes and k is the number of pathways we are interested in. max_iters : int (default=1000) The maximum number of expectation-maximization steps to take. Returns ----------- dict(int -> set(int)), a dictionary mapping a pathway to a set of genes. These are the pathway definitions after the maximum impact estimation procedure has been applied to remove crosstalk. - The keys are ints corresponding to the pathway column indices in the membership matrix. - The values are sets of ints corresponding to gene row indices in the membership matrix. 
] variable[pr_0] assign[=] binary_operation[call[name[np].sum, parameter[name[membership_matrix]]] / call[name[np].sum, parameter[name[membership_matrix]]]] variable[pr_1] assign[=] call[name[_update_probabilities], parameter[name[pr_0], name[membership_matrix]]] variable[epsilon] assign[=] binary_operation[call[name[np].linalg.norm, parameter[binary_operation[name[pr_1] - name[pr_0]]]] / constant[100.0]] variable[pr_old] assign[=] name[pr_1] variable[check_for_convergence] assign[=] name[epsilon] variable[count] assign[=] constant[0] while <ast.BoolOp object at 0x7da20c6c7790> begin[:] <ast.AugAssign object at 0x7da20c6c7e80> if compare[name[count] greater[>] name[max_iters]] begin[:] call[name[print], parameter[call[constant[Reached the maximum number of iterations {0}].format, parameter[name[max_iters]]]]] break variable[pr_new] assign[=] call[name[_update_probabilities], parameter[name[pr_old], name[membership_matrix]]] variable[check_for_convergence] assign[=] call[name[np].linalg.norm, parameter[binary_operation[name[pr_new] - name[pr_old]]]] variable[pr_old] assign[=] name[pr_new] variable[pr_final] assign[=] name[pr_old] variable[corrected_pathway_definitions] assign[=] dictionary[[], []] <ast.Tuple object at 0x7da20c6c68c0> assign[=] name[membership_matrix].shape for taget[name[gene_index]] in starred[call[name[range], parameter[name[n]]]] begin[:] variable[gene_membership] assign[=] call[name[membership_matrix]][name[gene_index]] variable[denominator] assign[=] call[name[np].dot, parameter[name[gene_membership], name[pr_final]]] if compare[name[denominator] less[<] name[NEAR_ZERO]] begin[:] variable[denominator] assign[=] name[NEAR_ZERO] variable[conditional_pathway_pr] assign[=] binary_operation[call[name[np].multiply, parameter[name[gene_membership], name[pr_final]]] / name[denominator]] variable[all_pathways_at_max] assign[=] call[call[name[np].where, parameter[compare[name[conditional_pathway_pr] equal[==] call[name[conditional_pathway_pr].max, 
parameter[]]]]]][constant[0]] variable[gene_in_pathways] assign[=] call[call[name[np].where, parameter[compare[name[gene_membership] equal[==] constant[1]]]]][constant[0]] variable[all_pathways_at_max] assign[=] call[name[np].intersect1d, parameter[name[all_pathways_at_max], name[gene_in_pathways]]] variable[pathway_index] assign[=] call[name[np].random.choice, parameter[name[all_pathways_at_max]]] if compare[name[pathway_index] <ast.NotIn object at 0x7da2590d7190> name[corrected_pathway_definitions]] begin[:] call[name[corrected_pathway_definitions]][name[pathway_index]] assign[=] call[name[set], parameter[]] call[call[name[corrected_pathway_definitions]][name[pathway_index]].add, parameter[name[gene_index]]] return[name[corrected_pathway_definitions]]
keyword[def] identifier[maximum_impact_estimation] ( identifier[membership_matrix] , identifier[max_iters] = literal[int] ): literal[string] identifier[pr_0] = identifier[np] . identifier[sum] ( identifier[membership_matrix] , identifier[axis] = literal[int] )/ identifier[np] . identifier[sum] ( identifier[membership_matrix] ) identifier[pr_1] = identifier[_update_probabilities] ( identifier[pr_0] , identifier[membership_matrix] ) identifier[epsilon] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[pr_1] - identifier[pr_0] )/ literal[int] identifier[pr_old] = identifier[pr_1] identifier[check_for_convergence] = identifier[epsilon] identifier[count] = literal[int] keyword[while] identifier[epsilon] > identifier[NEAR_ZERO] keyword[and] identifier[check_for_convergence] >= identifier[epsilon] : identifier[count] += literal[int] keyword[if] identifier[count] > identifier[max_iters] : identifier[print] ( literal[string] . identifier[format] ( identifier[max_iters] )) keyword[break] identifier[pr_new] = identifier[_update_probabilities] ( identifier[pr_old] , identifier[membership_matrix] ) identifier[check_for_convergence] = identifier[np] . identifier[linalg] . identifier[norm] ( identifier[pr_new] - identifier[pr_old] ) identifier[pr_old] = identifier[pr_new] identifier[pr_final] = identifier[pr_old] identifier[corrected_pathway_definitions] ={} identifier[n] , identifier[k] = identifier[membership_matrix] . identifier[shape] keyword[for] identifier[gene_index] keyword[in] identifier[range] ( identifier[n] ): identifier[gene_membership] = identifier[membership_matrix] [ identifier[gene_index] ] identifier[denominator] = identifier[np] . identifier[dot] ( identifier[gene_membership] , identifier[pr_final] ) keyword[if] identifier[denominator] < identifier[NEAR_ZERO] : identifier[denominator] = identifier[NEAR_ZERO] identifier[conditional_pathway_pr] =( identifier[np] . 
identifier[multiply] ( identifier[gene_membership] , identifier[pr_final] )/ identifier[denominator] ) identifier[all_pathways_at_max] = identifier[np] . identifier[where] ( identifier[conditional_pathway_pr] == identifier[conditional_pathway_pr] . identifier[max] ())[ literal[int] ] identifier[gene_in_pathways] = identifier[np] . identifier[where] ( identifier[gene_membership] == literal[int] )[ literal[int] ] identifier[all_pathways_at_max] = identifier[np] . identifier[intersect1d] ( identifier[all_pathways_at_max] , identifier[gene_in_pathways] ) identifier[pathway_index] = identifier[np] . identifier[random] . identifier[choice] ( identifier[all_pathways_at_max] ) keyword[if] identifier[pathway_index] keyword[not] keyword[in] identifier[corrected_pathway_definitions] : identifier[corrected_pathway_definitions] [ identifier[pathway_index] ]= identifier[set] () identifier[corrected_pathway_definitions] [ identifier[pathway_index] ]. identifier[add] ( identifier[gene_index] ) keyword[return] identifier[corrected_pathway_definitions]
def maximum_impact_estimation(membership_matrix, max_iters=1000): """An expectation maximization technique that produces pathway definitions devoid of crosstalk. That is, each gene is mapped to the pathway in which it has the greatest predicted impact; this removes any overlap between pathway definitions. Parameters ----------- membership_matrix : numpy.array(float), shape = [n, k] The observed gene-to-pathway membership matrix, where n is the number of genes and k is the number of pathways we are interested in. max_iters : int (default=1000) The maximum number of expectation-maximization steps to take. Returns ----------- dict(int -> set(int)), a dictionary mapping a pathway to a set of genes. These are the pathway definitions after the maximum impact estimation procedure has been applied to remove crosstalk. - The keys are ints corresponding to the pathway column indices in the membership matrix. - The values are sets of ints corresponding to gene row indices in the membership matrix. """ # Initialize the probability vector as the sum of each column in the # membership matrix normalized by the sum of the entire membership matrix. # The probability at some index j in the vector represents the likelihood # that a pathway (column) j is defined by the current set of genes (rows) # in the membership matrix. 
pr_0 = np.sum(membership_matrix, axis=0) / np.sum(membership_matrix) pr_1 = _update_probabilities(pr_0, membership_matrix) epsilon = np.linalg.norm(pr_1 - pr_0) / 100.0 pr_old = pr_1 check_for_convergence = epsilon count = 0 while epsilon > NEAR_ZERO and check_for_convergence >= epsilon: count += 1 if count > max_iters: print('Reached the maximum number of iterations {0}'.format(max_iters)) break # depends on [control=['if'], data=['max_iters']] pr_new = _update_probabilities(pr_old, membership_matrix) check_for_convergence = np.linalg.norm(pr_new - pr_old) pr_old = pr_new # depends on [control=['while'], data=[]] pr_final = pr_old # renaming for readability corrected_pathway_definitions = {} (n, k) = membership_matrix.shape for gene_index in range(n): gene_membership = membership_matrix[gene_index] denominator = np.dot(gene_membership, pr_final) # Approximation is used to prevent divide by zero warning. # Since we are only looking for the _most_ probable pathway in which a # gene contributes its maximum impact, precision is not as important # as maintaining the relative differences between each # pathway's probability. if denominator < NEAR_ZERO: denominator = NEAR_ZERO # depends on [control=['if'], data=['denominator', 'NEAR_ZERO']] # This is equivalent to one row in what Donato et al. (2013) refer # to as the underlying (latent) Z matrix. 
conditional_pathway_pr = np.multiply(gene_membership, pr_final) / denominator all_pathways_at_max = np.where(conditional_pathway_pr == conditional_pathway_pr.max())[0] gene_in_pathways = np.where(gene_membership == 1)[0] all_pathways_at_max = np.intersect1d(all_pathways_at_max, gene_in_pathways) pathway_index = np.random.choice(all_pathways_at_max) if pathway_index not in corrected_pathway_definitions: corrected_pathway_definitions[pathway_index] = set() # depends on [control=['if'], data=['pathway_index', 'corrected_pathway_definitions']] corrected_pathway_definitions[pathway_index].add(gene_index) # depends on [control=['for'], data=['gene_index']] return corrected_pathway_definitions
def to_dotfile(G: nx.DiGraph, filename: str): """ Output a networkx graph to a DOT file. """ A = to_agraph(G) A.write(filename)
def function[to_dotfile, parameter[G, filename]]: constant[ Output a networkx graph to a DOT file. ] variable[A] assign[=] call[name[to_agraph], parameter[name[G]]] call[name[A].write, parameter[name[filename]]]
keyword[def] identifier[to_dotfile] ( identifier[G] : identifier[nx] . identifier[DiGraph] , identifier[filename] : identifier[str] ): literal[string] identifier[A] = identifier[to_agraph] ( identifier[G] ) identifier[A] . identifier[write] ( identifier[filename] )
def to_dotfile(G: nx.DiGraph, filename: str): """ Output a networkx graph to a DOT file. """ A = to_agraph(G) A.write(filename)
def main(): """This program prints doubled values!""" import numpy X=arange(.1,10.1,.2) #make a list of numbers Y=myfunc(X) # calls myfunc with argument X for i in range(len(X)): print(X[i],Y[i])
def function[main, parameter[]]: constant[This program prints doubled values!] import module[numpy] variable[X] assign[=] call[name[arange], parameter[constant[0.1], constant[10.1], constant[0.2]]] variable[Y] assign[=] call[name[myfunc], parameter[name[X]]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[X]]]]]] begin[:] call[name[print], parameter[call[name[X]][name[i]], call[name[Y]][name[i]]]]
keyword[def] identifier[main] (): literal[string] keyword[import] identifier[numpy] identifier[X] = identifier[arange] ( literal[int] , literal[int] , literal[int] ) identifier[Y] = identifier[myfunc] ( identifier[X] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[X] )): identifier[print] ( identifier[X] [ identifier[i] ], identifier[Y] [ identifier[i] ])
def main(): """This program prints doubled values!""" import numpy X = arange(0.1, 10.1, 0.2) #make a list of numbers Y = myfunc(X) # calls myfunc with argument X for i in range(len(X)): print(X[i], Y[i]) # depends on [control=['for'], data=['i']]
def draw(self, name, dname, draw_branches=True): """ Writes the current graph as a PNG file :param str name: filename (without .png) :param str dname: directory of the output png :param draw_branches: :return: """ from pydot import Dot, Edge import os g = Dot() g.set_node_defaults(color='lightgray', style='filled', shape='box', fontname='Courier', fontsize='10') for node in sorted(self.nodes, key=lambda x: x.num): if draw_branches and node.type.is_cond: g.add_edge(Edge(str(node), str(node.true), color='green')) g.add_edge(Edge(str(node), str(node.false), color='red')) else: for suc in self.sucs(node): g.add_edge(Edge(str(node), str(suc), color='blue')) for except_node in self.catch_edges.get(node, []): g.add_edge(Edge(str(node), str(except_node), color='black', style='dashed')) g.write(os.path.join(dname, '%s.png' % name), format='png')
def function[draw, parameter[self, name, dname, draw_branches]]: constant[ Writes the current graph as a PNG file :param str name: filename (without .png) :param str dname: directory of the output png :param draw_branches: :return: ] from relative_module[pydot] import module[Dot], module[Edge] import module[os] variable[g] assign[=] call[name[Dot], parameter[]] call[name[g].set_node_defaults, parameter[]] for taget[name[node]] in starred[call[name[sorted], parameter[name[self].nodes]]] begin[:] if <ast.BoolOp object at 0x7da20c6c7790> begin[:] call[name[g].add_edge, parameter[call[name[Edge], parameter[call[name[str], parameter[name[node]]], call[name[str], parameter[name[node].true]]]]]] call[name[g].add_edge, parameter[call[name[Edge], parameter[call[name[str], parameter[name[node]]], call[name[str], parameter[name[node].false]]]]]] for taget[name[except_node]] in starred[call[name[self].catch_edges.get, parameter[name[node], list[[]]]]] begin[:] call[name[g].add_edge, parameter[call[name[Edge], parameter[call[name[str], parameter[name[node]]], call[name[str], parameter[name[except_node]]]]]]] call[name[g].write, parameter[call[name[os].path.join, parameter[name[dname], binary_operation[constant[%s.png] <ast.Mod object at 0x7da2590d6920> name[name]]]]]]
keyword[def] identifier[draw] ( identifier[self] , identifier[name] , identifier[dname] , identifier[draw_branches] = keyword[True] ): literal[string] keyword[from] identifier[pydot] keyword[import] identifier[Dot] , identifier[Edge] keyword[import] identifier[os] identifier[g] = identifier[Dot] () identifier[g] . identifier[set_node_defaults] ( identifier[color] = literal[string] , identifier[style] = literal[string] , identifier[shape] = literal[string] , identifier[fontname] = literal[string] , identifier[fontsize] = literal[string] ) keyword[for] identifier[node] keyword[in] identifier[sorted] ( identifier[self] . identifier[nodes] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[num] ): keyword[if] identifier[draw_branches] keyword[and] identifier[node] . identifier[type] . identifier[is_cond] : identifier[g] . identifier[add_edge] ( identifier[Edge] ( identifier[str] ( identifier[node] ), identifier[str] ( identifier[node] . identifier[true] ), identifier[color] = literal[string] )) identifier[g] . identifier[add_edge] ( identifier[Edge] ( identifier[str] ( identifier[node] ), identifier[str] ( identifier[node] . identifier[false] ), identifier[color] = literal[string] )) keyword[else] : keyword[for] identifier[suc] keyword[in] identifier[self] . identifier[sucs] ( identifier[node] ): identifier[g] . identifier[add_edge] ( identifier[Edge] ( identifier[str] ( identifier[node] ), identifier[str] ( identifier[suc] ), identifier[color] = literal[string] )) keyword[for] identifier[except_node] keyword[in] identifier[self] . identifier[catch_edges] . identifier[get] ( identifier[node] ,[]): identifier[g] . identifier[add_edge] ( identifier[Edge] ( identifier[str] ( identifier[node] ), identifier[str] ( identifier[except_node] ), identifier[color] = literal[string] , identifier[style] = literal[string] )) identifier[g] . identifier[write] ( identifier[os] . identifier[path] . 
identifier[join] ( identifier[dname] , literal[string] % identifier[name] ), identifier[format] = literal[string] )
def draw(self, name, dname, draw_branches=True): """ Writes the current graph as a PNG file :param str name: filename (without .png) :param str dname: directory of the output png :param draw_branches: :return: """ from pydot import Dot, Edge import os g = Dot() g.set_node_defaults(color='lightgray', style='filled', shape='box', fontname='Courier', fontsize='10') for node in sorted(self.nodes, key=lambda x: x.num): if draw_branches and node.type.is_cond: g.add_edge(Edge(str(node), str(node.true), color='green')) g.add_edge(Edge(str(node), str(node.false), color='red')) # depends on [control=['if'], data=[]] else: for suc in self.sucs(node): g.add_edge(Edge(str(node), str(suc), color='blue')) # depends on [control=['for'], data=['suc']] for except_node in self.catch_edges.get(node, []): g.add_edge(Edge(str(node), str(except_node), color='black', style='dashed')) # depends on [control=['for'], data=['except_node']] # depends on [control=['for'], data=['node']] g.write(os.path.join(dname, '%s.png' % name), format='png')
def get_server(self, name): """Like :meth:`.get`, but only mechanisms inheriting :class:`ServerMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None`` """ mech = self.get(name) return mech if isinstance(mech, ServerMechanism) else None
def function[get_server, parameter[self, name]]: constant[Like :meth:`.get`, but only mechanisms inheriting :class:`ServerMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None`` ] variable[mech] assign[=] call[name[self].get, parameter[name[name]]] return[<ast.IfExp object at 0x7da1b1b7edd0>]
keyword[def] identifier[get_server] ( identifier[self] , identifier[name] ): literal[string] identifier[mech] = identifier[self] . identifier[get] ( identifier[name] ) keyword[return] identifier[mech] keyword[if] identifier[isinstance] ( identifier[mech] , identifier[ServerMechanism] ) keyword[else] keyword[None]
def get_server(self, name): """Like :meth:`.get`, but only mechanisms inheriting :class:`ServerMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None`` """ mech = self.get(name) return mech if isinstance(mech, ServerMechanism) else None
def obfn_f(self, Xf=None): r"""Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`. This is used for backtracking. Since the backtracking is computed in the DFT, it is important to preserve the DFT scaling. """ if Xf is None: Xf = self.Xf Rf = self.eval_Rf(Xf) R = sl.irfftn(Rf, self.cri.Nv, self.cri.axisN) WRf = sl.rfftn(self.W * R, self.cri.Nv, self.cri.axisN) return 0.5 * np.linalg.norm(WRf.flatten(), 2)**2
def function[obfn_f, parameter[self, Xf]]: constant[Compute data fidelity term :math:`(1/2) \sum_k \| W (\sum_m \mathbf{d}_m * \mathbf{x}_{k,m} - \mathbf{s}_k) \|_2^2`. This is used for backtracking. Since the backtracking is computed in the DFT, it is important to preserve the DFT scaling. ] if compare[name[Xf] is constant[None]] begin[:] variable[Xf] assign[=] name[self].Xf variable[Rf] assign[=] call[name[self].eval_Rf, parameter[name[Xf]]] variable[R] assign[=] call[name[sl].irfftn, parameter[name[Rf], name[self].cri.Nv, name[self].cri.axisN]] variable[WRf] assign[=] call[name[sl].rfftn, parameter[binary_operation[name[self].W * name[R]], name[self].cri.Nv, name[self].cri.axisN]] return[binary_operation[constant[0.5] * binary_operation[call[name[np].linalg.norm, parameter[call[name[WRf].flatten, parameter[]], constant[2]]] ** constant[2]]]]
keyword[def] identifier[obfn_f] ( identifier[self] , identifier[Xf] = keyword[None] ): literal[string] keyword[if] identifier[Xf] keyword[is] keyword[None] : identifier[Xf] = identifier[self] . identifier[Xf] identifier[Rf] = identifier[self] . identifier[eval_Rf] ( identifier[Xf] ) identifier[R] = identifier[sl] . identifier[irfftn] ( identifier[Rf] , identifier[self] . identifier[cri] . identifier[Nv] , identifier[self] . identifier[cri] . identifier[axisN] ) identifier[WRf] = identifier[sl] . identifier[rfftn] ( identifier[self] . identifier[W] * identifier[R] , identifier[self] . identifier[cri] . identifier[Nv] , identifier[self] . identifier[cri] . identifier[axisN] ) keyword[return] literal[int] * identifier[np] . identifier[linalg] . identifier[norm] ( identifier[WRf] . identifier[flatten] (), literal[int] )** literal[int]
def obfn_f(self, Xf=None): """Compute data fidelity term :math:`(1/2) \\sum_k \\| W (\\sum_m \\mathbf{d}_m * \\mathbf{x}_{k,m} - \\mathbf{s}_k) \\|_2^2`. This is used for backtracking. Since the backtracking is computed in the DFT, it is important to preserve the DFT scaling. """ if Xf is None: Xf = self.Xf # depends on [control=['if'], data=['Xf']] Rf = self.eval_Rf(Xf) R = sl.irfftn(Rf, self.cri.Nv, self.cri.axisN) WRf = sl.rfftn(self.W * R, self.cri.Nv, self.cri.axisN) return 0.5 * np.linalg.norm(WRf.flatten(), 2) ** 2
def zGetUpdate(self): """Update the lens""" status,ret = -998, None ret = self._sendDDEcommand("GetUpdate") if ret != None: status = int(ret) #Note: Zemax returns -1 if GetUpdate fails. return status
def function[zGetUpdate, parameter[self]]: constant[Update the lens] <ast.Tuple object at 0x7da20c9937f0> assign[=] tuple[[<ast.UnaryOp object at 0x7da20c990550>, <ast.Constant object at 0x7da20c9909a0>]] variable[ret] assign[=] call[name[self]._sendDDEcommand, parameter[constant[GetUpdate]]] if compare[name[ret] not_equal[!=] constant[None]] begin[:] variable[status] assign[=] call[name[int], parameter[name[ret]]] return[name[status]]
keyword[def] identifier[zGetUpdate] ( identifier[self] ): literal[string] identifier[status] , identifier[ret] =- literal[int] , keyword[None] identifier[ret] = identifier[self] . identifier[_sendDDEcommand] ( literal[string] ) keyword[if] identifier[ret] != keyword[None] : identifier[status] = identifier[int] ( identifier[ret] ) keyword[return] identifier[status]
def zGetUpdate(self): """Update the lens""" (status, ret) = (-998, None) ret = self._sendDDEcommand('GetUpdate') if ret != None: status = int(ret) #Note: Zemax returns -1 if GetUpdate fails. # depends on [control=['if'], data=['ret']] return status
def sampling_rate(self): """ Return the sampling rate. """ with self.container.open_if_needed(mode='r') as cnt: return cnt.get(self.key)[1]
def function[sampling_rate, parameter[self]]: constant[ Return the sampling rate. ] with call[name[self].container.open_if_needed, parameter[]] begin[:] return[call[call[name[cnt].get, parameter[name[self].key]]][constant[1]]]
keyword[def] identifier[sampling_rate] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[container] . identifier[open_if_needed] ( identifier[mode] = literal[string] ) keyword[as] identifier[cnt] : keyword[return] identifier[cnt] . identifier[get] ( identifier[self] . identifier[key] )[ literal[int] ]
def sampling_rate(self): """ Return the sampling rate. """ with self.container.open_if_needed(mode='r') as cnt: return cnt.get(self.key)[1] # depends on [control=['with'], data=['cnt']]
def custom_module_classes(): """ MultiQC Custom Content class. This module does a lot of different things depending on the input and is as flexible as possible. NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES """ # Dict to hold parsed data. Each key should contain a custom data type # eg. output from a particular script. Note that this script may pick # up many different types of data from many different sources. # Second level keys should be 'config' and 'data'. Data key should then # contain sample names, and finally data. cust_mods = defaultdict(lambda: defaultdict(lambda: OrderedDict())) # Dictionary to hold search patterns - start with those defined in the config search_patterns = ['custom_content'] # First - find files using patterns described in the config config_data = getattr(config, 'custom_data', {}) for k,f in config_data.items(): # Check that we have a dictionary if type(f) != dict: log.debug("config.custom_data row was not a dictionary: {}".format(k)) continue c_id = f.get('id', k) # Data supplied in with config (eg. 
from a multiqc_config.yaml file in working directory) if 'data' in f: cust_mods[c_id]['data'].update( f['data'] ) cust_mods[c_id]['config'].update( { k:v for k, v in f.items() if k is not 'data' } ) cust_mods[c_id]['config']['id'] = cust_mods[c_id]['config'].get('id', c_id) continue # Custom Content ID has search patterns in the config if c_id in report.files: cust_mods[c_id]['config'] = f cust_mods[c_id]['config']['id'] = cust_mods[c_id]['config'].get('id', c_id) search_patterns.append(c_id) continue # We should have had something by now log.warn("Found section '{}' in config for under custom_data, but no data or search patterns.".format(c_id)) # Now go through each of the file search patterns bm = BaseMultiqcModule() for k in search_patterns: num_sp_found_files = 0 for f in bm.find_log_files(k): num_sp_found_files += 1 # Handle any exception without messing up for remaining custom content files try: f_extension = os.path.splitext(f['fn'])[1] # YAML and JSON files are the easiest parsed_data = None if f_extension == '.yaml' or f_extension == '.yml': try: parsed_data = yaml_ordered_load(f['f']) except Exception as e: log.warning("Error parsing YAML file '{}' (probably invalid YAML)".format(f['fn'])) log.warning("YAML error: {}".format(e)) break elif f_extension == '.json': try: # Use OrderedDict for objects so that column order is honoured parsed_data = json.loads(f['f'], object_pairs_hook=OrderedDict) except Exception as e: log.warning("Error parsing JSON file '{}' (probably invalid JSON)".format(f['fn'])) log.warning("JSON error: {}".format(e)) break elif f_extension == '.png' or f_extension == '.jpeg' or f_extension == '.jpg': image_string = base64.b64encode(f['f'].read()).decode('utf-8') image_format = 'png' if f_extension == '.png' else 'jpg' img_html = '<div class="mqc-custom-content-image"><img src="data:image/{};base64,{}" /></div>'.format(image_format, image_string) parsed_data = { 'id': f['s_name'], 'plot_type': 'image', 'section_name': 
f['s_name'].replace('_', ' ').replace('-', ' ').replace('.', ' '), 'description': 'Embedded image <code>{}</code>'.format(f['fn']), 'data': img_html } if parsed_data is not None: c_id = parsed_data.get('id', k) if len(parsed_data.get('data', {})) > 0: if type(parsed_data['data']) == str: cust_mods[c_id]['data'] = parsed_data['data'] else: cust_mods[c_id]['data'].update( parsed_data['data'] ) cust_mods[c_id]['config'].update ( { j:k for j,k in parsed_data.items() if j != 'data' } ) else: log.warning("No data found in {}".format(f['fn'])) # txt, csv, tsv etc else: # Look for configuration details in the header m_config = _find_file_header( f ) s_name = None if m_config is not None: c_id = m_config.get('id', k) # Update the base config with anything parsed from the file b_config = cust_mods.get(c_id, {}).get('config', {}) b_config.update( m_config ) # Now set the module config to the merged dict m_config = dict(b_config) s_name = m_config.get('sample_name') else: c_id = k m_config = cust_mods.get(c_id, {}).get('config', {}) # Guess sample name if not given if s_name is None: s_name = bm.clean_s_name(f['s_name'], f['root']) # Guess c_id if no information known if k == 'custom_content': c_id = s_name # Add information about the file to the config dict if 'files' not in m_config: m_config['files'] = dict() m_config['files'].update( { s_name : { 'fn': f['fn'], 'root': f['root'] } } ) # Guess file format if not given if m_config.get('file_format') is None: m_config['file_format'] = _guess_file_format( f ) # Parse data try: parsed_data, conf = _parse_txt( f, m_config ) if parsed_data is None or len(parsed_data) == 0: log.warning("Not able to parse custom data in {}".format(f['fn'])) else: # Did we get a new section id from the file? 
if conf.get('id') is not None: c_id = conf.get('id') # heatmap - special data type if type(parsed_data) == list: cust_mods[c_id]['data'] = parsed_data elif conf.get('plot_type') == 'html': cust_mods[c_id]['data'] = parsed_data else: cust_mods[c_id]['data'].update(parsed_data) cust_mods[c_id]['config'].update(conf) except (IndexError, AttributeError, TypeError): log.error("Unexpected parsing error for {}".format(f['fn']), exc_info=True) raise # testing except Exception as e: log.error("Uncaught exception raised for file '{}'".format(f['fn'])) log.exception(e) # Give log message if no files found for search pattern if num_sp_found_files == 0 and k != 'custom_content': log.debug("No samples found: custom content ({})".format(k)) # Filter to strip out ignored sample names for k in cust_mods: cust_mods[k]['data'] = bm.ignore_samples(cust_mods[k]['data']) # Remove any configs that have no data remove_cids = [ k for k in cust_mods if len(cust_mods[k]['data']) == 0 ] for k in remove_cids: del cust_mods[k] if len(cust_mods) == 0: raise UserWarning # Go through each data type parsed_modules = list() for module_id, mod in cust_mods.items(): # General Stats if mod['config'].get('plot_type') == 'generalstats': gsheaders = mod['config'].get('pconfig') if gsheaders is None: headers = set() for d in mod['data'].values(): headers.update(d.keys()) headers = list(headers) headers.sort() gsheaders = OrderedDict() for h in headers: gsheaders[h] = dict() # Headers is a list of dicts if type(gsheaders) == list: gsheaders_dict = OrderedDict() for gsheader in gsheaders: for col_id, col_data in gsheader.items(): gsheaders_dict[col_id] = col_data gsheaders = gsheaders_dict # Add namespace and description if not specified for m_id in gsheaders: if 'namespace' not in gsheaders[m_id]: gsheaders[m_id]['namespace'] = mod['config'].get('namespace', module_id) bm.general_stats_addcols(mod['data'], gsheaders) # Initialise this new module class and append to list else: parsed_modules.append( 
MultiqcModule(module_id, mod) ) if mod['config'].get('plot_type') == 'html': log.info("{}: Found 1 sample (html)".format(module_id)) if mod['config'].get('plot_type') == 'image': log.info("{}: Found 1 sample (image)".format(module_id)) else: log.info("{}: Found {} samples ({})".format(module_id, len(mod['data']), mod['config'].get('plot_type'))) # Sort sections if we have a config option for order mod_order = getattr(config, 'custom_content', {}).get('order', []) sorted_modules = [parsed_mod for parsed_mod in parsed_modules if parsed_mod.anchor not in mod_order ] sorted_modules.extend([parsed_mod for mod_id in mod_order for parsed_mod in parsed_modules if parsed_mod.anchor == mod_id ]) # If we only have General Stats columns then there are no module outputs if len(sorted_modules) == 0: raise UserWarning return sorted_modules
def function[custom_module_classes, parameter[]]: constant[ MultiQC Custom Content class. This module does a lot of different things depending on the input and is as flexible as possible. NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES ] variable[cust_mods] assign[=] call[name[defaultdict], parameter[<ast.Lambda object at 0x7da2054a71f0>]] variable[search_patterns] assign[=] list[[<ast.Constant object at 0x7da2054a4c40>]] variable[config_data] assign[=] call[name[getattr], parameter[name[config], constant[custom_data], dictionary[[], []]]] for taget[tuple[[<ast.Name object at 0x7da2054a6440>, <ast.Name object at 0x7da2054a6da0>]]] in starred[call[name[config_data].items, parameter[]]] begin[:] if compare[call[name[type], parameter[name[f]]] not_equal[!=] name[dict]] begin[:] call[name[log].debug, parameter[call[constant[config.custom_data row was not a dictionary: {}].format, parameter[name[k]]]]] continue variable[c_id] assign[=] call[name[f].get, parameter[constant[id], name[k]]] if compare[constant[data] in name[f]] begin[:] call[call[call[name[cust_mods]][name[c_id]]][constant[data]].update, parameter[call[name[f]][constant[data]]]] call[call[call[name[cust_mods]][name[c_id]]][constant[config]].update, parameter[<ast.DictComp object at 0x7da2054a5840>]] call[call[call[name[cust_mods]][name[c_id]]][constant[config]]][constant[id]] assign[=] call[call[call[name[cust_mods]][name[c_id]]][constant[config]].get, parameter[constant[id], name[c_id]]] continue if compare[name[c_id] in name[report].files] begin[:] call[call[name[cust_mods]][name[c_id]]][constant[config]] assign[=] name[f] call[call[call[name[cust_mods]][name[c_id]]][constant[config]]][constant[id]] assign[=] call[call[call[name[cust_mods]][name[c_id]]][constant[config]].get, parameter[constant[id], name[c_id]]] call[name[search_patterns].append, parameter[name[c_id]]] continue call[name[log].warn, parameter[call[constant[Found section '{}' in config for under custom_data, but no data or search 
patterns.].format, parameter[name[c_id]]]]] variable[bm] assign[=] call[name[BaseMultiqcModule], parameter[]] for taget[name[k]] in starred[name[search_patterns]] begin[:] variable[num_sp_found_files] assign[=] constant[0] for taget[name[f]] in starred[call[name[bm].find_log_files, parameter[name[k]]]] begin[:] <ast.AugAssign object at 0x7da1b2347df0> <ast.Try object at 0x7da1b2347c40> if <ast.BoolOp object at 0x7da204564f40> begin[:] call[name[log].debug, parameter[call[constant[No samples found: custom content ({})].format, parameter[name[k]]]]] for taget[name[k]] in starred[name[cust_mods]] begin[:] call[call[name[cust_mods]][name[k]]][constant[data]] assign[=] call[name[bm].ignore_samples, parameter[call[call[name[cust_mods]][name[k]]][constant[data]]]] variable[remove_cids] assign[=] <ast.ListComp object at 0x7da204565f90> for taget[name[k]] in starred[name[remove_cids]] begin[:] <ast.Delete object at 0x7da204567640> if compare[call[name[len], parameter[name[cust_mods]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da2045641f0> variable[parsed_modules] assign[=] call[name[list], parameter[]] for taget[tuple[[<ast.Name object at 0x7da204567bb0>, <ast.Name object at 0x7da204564fd0>]]] in starred[call[name[cust_mods].items, parameter[]]] begin[:] if compare[call[call[name[mod]][constant[config]].get, parameter[constant[plot_type]]] equal[==] constant[generalstats]] begin[:] variable[gsheaders] assign[=] call[call[name[mod]][constant[config]].get, parameter[constant[pconfig]]] if compare[name[gsheaders] is constant[None]] begin[:] variable[headers] assign[=] call[name[set], parameter[]] for taget[name[d]] in starred[call[call[name[mod]][constant[data]].values, parameter[]]] begin[:] call[name[headers].update, parameter[call[name[d].keys, parameter[]]]] variable[headers] assign[=] call[name[list], parameter[name[headers]]] call[name[headers].sort, parameter[]] variable[gsheaders] assign[=] call[name[OrderedDict], parameter[]] for taget[name[h]] in 
starred[name[headers]] begin[:] call[name[gsheaders]][name[h]] assign[=] call[name[dict], parameter[]] if compare[call[name[type], parameter[name[gsheaders]]] equal[==] name[list]] begin[:] variable[gsheaders_dict] assign[=] call[name[OrderedDict], parameter[]] for taget[name[gsheader]] in starred[name[gsheaders]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18bcc8310>, <ast.Name object at 0x7da18bccbfa0>]]] in starred[call[name[gsheader].items, parameter[]]] begin[:] call[name[gsheaders_dict]][name[col_id]] assign[=] name[col_data] variable[gsheaders] assign[=] name[gsheaders_dict] for taget[name[m_id]] in starred[name[gsheaders]] begin[:] if compare[constant[namespace] <ast.NotIn object at 0x7da2590d7190> call[name[gsheaders]][name[m_id]]] begin[:] call[call[name[gsheaders]][name[m_id]]][constant[namespace]] assign[=] call[call[name[mod]][constant[config]].get, parameter[constant[namespace], name[module_id]]] call[name[bm].general_stats_addcols, parameter[call[name[mod]][constant[data]], name[gsheaders]]] variable[mod_order] assign[=] call[call[name[getattr], parameter[name[config], constant[custom_content], dictionary[[], []]]].get, parameter[constant[order], list[[]]]] variable[sorted_modules] assign[=] <ast.ListComp object at 0x7da18bc73e80> call[name[sorted_modules].extend, parameter[<ast.ListComp object at 0x7da18bc725f0>]] if compare[call[name[len], parameter[name[sorted_modules]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da18bc720e0> return[name[sorted_modules]]
keyword[def] identifier[custom_module_classes] (): literal[string] identifier[cust_mods] = identifier[defaultdict] ( keyword[lambda] : identifier[defaultdict] ( keyword[lambda] : identifier[OrderedDict] ())) identifier[search_patterns] =[ literal[string] ] identifier[config_data] = identifier[getattr] ( identifier[config] , literal[string] ,{}) keyword[for] identifier[k] , identifier[f] keyword[in] identifier[config_data] . identifier[items] (): keyword[if] identifier[type] ( identifier[f] )!= identifier[dict] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[k] )) keyword[continue] identifier[c_id] = identifier[f] . identifier[get] ( literal[string] , identifier[k] ) keyword[if] literal[string] keyword[in] identifier[f] : identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[update] ( identifier[f] [ literal[string] ]) identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[update] ({ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[f] . identifier[items] () keyword[if] identifier[k] keyword[is] keyword[not] literal[string] }) identifier[cust_mods] [ identifier[c_id] ][ literal[string] ][ literal[string] ]= identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[get] ( literal[string] , identifier[c_id] ) keyword[continue] keyword[if] identifier[c_id] keyword[in] identifier[report] . identifier[files] : identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]= identifier[f] identifier[cust_mods] [ identifier[c_id] ][ literal[string] ][ literal[string] ]= identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[get] ( literal[string] , identifier[c_id] ) identifier[search_patterns] . identifier[append] ( identifier[c_id] ) keyword[continue] identifier[log] . identifier[warn] ( literal[string] . 
identifier[format] ( identifier[c_id] )) identifier[bm] = identifier[BaseMultiqcModule] () keyword[for] identifier[k] keyword[in] identifier[search_patterns] : identifier[num_sp_found_files] = literal[int] keyword[for] identifier[f] keyword[in] identifier[bm] . identifier[find_log_files] ( identifier[k] ): identifier[num_sp_found_files] += literal[int] keyword[try] : identifier[f_extension] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[f] [ literal[string] ])[ literal[int] ] identifier[parsed_data] = keyword[None] keyword[if] identifier[f_extension] == literal[string] keyword[or] identifier[f_extension] == literal[string] : keyword[try] : identifier[parsed_data] = identifier[yaml_ordered_load] ( identifier[f] [ literal[string] ]) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[f] [ literal[string] ])) identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[e] )) keyword[break] keyword[elif] identifier[f_extension] == literal[string] : keyword[try] : identifier[parsed_data] = identifier[json] . identifier[loads] ( identifier[f] [ literal[string] ], identifier[object_pairs_hook] = identifier[OrderedDict] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[f] [ literal[string] ])) identifier[log] . identifier[warning] ( literal[string] . identifier[format] ( identifier[e] )) keyword[break] keyword[elif] identifier[f_extension] == literal[string] keyword[or] identifier[f_extension] == literal[string] keyword[or] identifier[f_extension] == literal[string] : identifier[image_string] = identifier[base64] . identifier[b64encode] ( identifier[f] [ literal[string] ]. identifier[read] ()). 
identifier[decode] ( literal[string] ) identifier[image_format] = literal[string] keyword[if] identifier[f_extension] == literal[string] keyword[else] literal[string] identifier[img_html] = literal[string] . identifier[format] ( identifier[image_format] , identifier[image_string] ) identifier[parsed_data] ={ literal[string] : identifier[f] [ literal[string] ], literal[string] : literal[string] , literal[string] : identifier[f] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ), literal[string] : literal[string] . identifier[format] ( identifier[f] [ literal[string] ]), literal[string] : identifier[img_html] } keyword[if] identifier[parsed_data] keyword[is] keyword[not] keyword[None] : identifier[c_id] = identifier[parsed_data] . identifier[get] ( literal[string] , identifier[k] ) keyword[if] identifier[len] ( identifier[parsed_data] . identifier[get] ( literal[string] ,{}))> literal[int] : keyword[if] identifier[type] ( identifier[parsed_data] [ literal[string] ])== identifier[str] : identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]= identifier[parsed_data] [ literal[string] ] keyword[else] : identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[update] ( identifier[parsed_data] [ literal[string] ]) identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[update] ({ identifier[j] : identifier[k] keyword[for] identifier[j] , identifier[k] keyword[in] identifier[parsed_data] . identifier[items] () keyword[if] identifier[j] != literal[string] }) keyword[else] : identifier[log] . identifier[warning] ( literal[string] . 
identifier[format] ( identifier[f] [ literal[string] ])) keyword[else] : identifier[m_config] = identifier[_find_file_header] ( identifier[f] ) identifier[s_name] = keyword[None] keyword[if] identifier[m_config] keyword[is] keyword[not] keyword[None] : identifier[c_id] = identifier[m_config] . identifier[get] ( literal[string] , identifier[k] ) identifier[b_config] = identifier[cust_mods] . identifier[get] ( identifier[c_id] ,{}). identifier[get] ( literal[string] ,{}) identifier[b_config] . identifier[update] ( identifier[m_config] ) identifier[m_config] = identifier[dict] ( identifier[b_config] ) identifier[s_name] = identifier[m_config] . identifier[get] ( literal[string] ) keyword[else] : identifier[c_id] = identifier[k] identifier[m_config] = identifier[cust_mods] . identifier[get] ( identifier[c_id] ,{}). identifier[get] ( literal[string] ,{}) keyword[if] identifier[s_name] keyword[is] keyword[None] : identifier[s_name] = identifier[bm] . identifier[clean_s_name] ( identifier[f] [ literal[string] ], identifier[f] [ literal[string] ]) keyword[if] identifier[k] == literal[string] : identifier[c_id] = identifier[s_name] keyword[if] literal[string] keyword[not] keyword[in] identifier[m_config] : identifier[m_config] [ literal[string] ]= identifier[dict] () identifier[m_config] [ literal[string] ]. identifier[update] ({ identifier[s_name] :{ literal[string] : identifier[f] [ literal[string] ], literal[string] : identifier[f] [ literal[string] ]}}) keyword[if] identifier[m_config] . identifier[get] ( literal[string] ) keyword[is] keyword[None] : identifier[m_config] [ literal[string] ]= identifier[_guess_file_format] ( identifier[f] ) keyword[try] : identifier[parsed_data] , identifier[conf] = identifier[_parse_txt] ( identifier[f] , identifier[m_config] ) keyword[if] identifier[parsed_data] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[parsed_data] )== literal[int] : identifier[log] . identifier[warning] ( literal[string] . 
identifier[format] ( identifier[f] [ literal[string] ])) keyword[else] : keyword[if] identifier[conf] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] : identifier[c_id] = identifier[conf] . identifier[get] ( literal[string] ) keyword[if] identifier[type] ( identifier[parsed_data] )== identifier[list] : identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]= identifier[parsed_data] keyword[elif] identifier[conf] . identifier[get] ( literal[string] )== literal[string] : identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]= identifier[parsed_data] keyword[else] : identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[update] ( identifier[parsed_data] ) identifier[cust_mods] [ identifier[c_id] ][ literal[string] ]. identifier[update] ( identifier[conf] ) keyword[except] ( identifier[IndexError] , identifier[AttributeError] , identifier[TypeError] ): identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[f] [ literal[string] ]), identifier[exc_info] = keyword[True] ) keyword[raise] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[f] [ literal[string] ])) identifier[log] . identifier[exception] ( identifier[e] ) keyword[if] identifier[num_sp_found_files] == literal[int] keyword[and] identifier[k] != literal[string] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[k] )) keyword[for] identifier[k] keyword[in] identifier[cust_mods] : identifier[cust_mods] [ identifier[k] ][ literal[string] ]= identifier[bm] . 
identifier[ignore_samples] ( identifier[cust_mods] [ identifier[k] ][ literal[string] ]) identifier[remove_cids] =[ identifier[k] keyword[for] identifier[k] keyword[in] identifier[cust_mods] keyword[if] identifier[len] ( identifier[cust_mods] [ identifier[k] ][ literal[string] ])== literal[int] ] keyword[for] identifier[k] keyword[in] identifier[remove_cids] : keyword[del] identifier[cust_mods] [ identifier[k] ] keyword[if] identifier[len] ( identifier[cust_mods] )== literal[int] : keyword[raise] identifier[UserWarning] identifier[parsed_modules] = identifier[list] () keyword[for] identifier[module_id] , identifier[mod] keyword[in] identifier[cust_mods] . identifier[items] (): keyword[if] identifier[mod] [ literal[string] ]. identifier[get] ( literal[string] )== literal[string] : identifier[gsheaders] = identifier[mod] [ literal[string] ]. identifier[get] ( literal[string] ) keyword[if] identifier[gsheaders] keyword[is] keyword[None] : identifier[headers] = identifier[set] () keyword[for] identifier[d] keyword[in] identifier[mod] [ literal[string] ]. identifier[values] (): identifier[headers] . identifier[update] ( identifier[d] . identifier[keys] ()) identifier[headers] = identifier[list] ( identifier[headers] ) identifier[headers] . identifier[sort] () identifier[gsheaders] = identifier[OrderedDict] () keyword[for] identifier[h] keyword[in] identifier[headers] : identifier[gsheaders] [ identifier[h] ]= identifier[dict] () keyword[if] identifier[type] ( identifier[gsheaders] )== identifier[list] : identifier[gsheaders_dict] = identifier[OrderedDict] () keyword[for] identifier[gsheader] keyword[in] identifier[gsheaders] : keyword[for] identifier[col_id] , identifier[col_data] keyword[in] identifier[gsheader] . 
identifier[items] (): identifier[gsheaders_dict] [ identifier[col_id] ]= identifier[col_data] identifier[gsheaders] = identifier[gsheaders_dict] keyword[for] identifier[m_id] keyword[in] identifier[gsheaders] : keyword[if] literal[string] keyword[not] keyword[in] identifier[gsheaders] [ identifier[m_id] ]: identifier[gsheaders] [ identifier[m_id] ][ literal[string] ]= identifier[mod] [ literal[string] ]. identifier[get] ( literal[string] , identifier[module_id] ) identifier[bm] . identifier[general_stats_addcols] ( identifier[mod] [ literal[string] ], identifier[gsheaders] ) keyword[else] : identifier[parsed_modules] . identifier[append] ( identifier[MultiqcModule] ( identifier[module_id] , identifier[mod] )) keyword[if] identifier[mod] [ literal[string] ]. identifier[get] ( literal[string] )== literal[string] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[module_id] )) keyword[if] identifier[mod] [ literal[string] ]. identifier[get] ( literal[string] )== literal[string] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[module_id] )) keyword[else] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[module_id] , identifier[len] ( identifier[mod] [ literal[string] ]), identifier[mod] [ literal[string] ]. identifier[get] ( literal[string] ))) identifier[mod_order] = identifier[getattr] ( identifier[config] , literal[string] ,{}). identifier[get] ( literal[string] ,[]) identifier[sorted_modules] =[ identifier[parsed_mod] keyword[for] identifier[parsed_mod] keyword[in] identifier[parsed_modules] keyword[if] identifier[parsed_mod] . identifier[anchor] keyword[not] keyword[in] identifier[mod_order] ] identifier[sorted_modules] . identifier[extend] ([ identifier[parsed_mod] keyword[for] identifier[mod_id] keyword[in] identifier[mod_order] keyword[for] identifier[parsed_mod] keyword[in] identifier[parsed_modules] keyword[if] identifier[parsed_mod] . 
identifier[anchor] == identifier[mod_id] ]) keyword[if] identifier[len] ( identifier[sorted_modules] )== literal[int] : keyword[raise] identifier[UserWarning] keyword[return] identifier[sorted_modules]
def custom_module_classes(): """ MultiQC Custom Content class. This module does a lot of different things depending on the input and is as flexible as possible. NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES """ # Dict to hold parsed data. Each key should contain a custom data type # eg. output from a particular script. Note that this script may pick # up many different types of data from many different sources. # Second level keys should be 'config' and 'data'. Data key should then # contain sample names, and finally data. cust_mods = defaultdict(lambda : defaultdict(lambda : OrderedDict())) # Dictionary to hold search patterns - start with those defined in the config search_patterns = ['custom_content'] # First - find files using patterns described in the config config_data = getattr(config, 'custom_data', {}) for (k, f) in config_data.items(): # Check that we have a dictionary if type(f) != dict: log.debug('config.custom_data row was not a dictionary: {}'.format(k)) continue # depends on [control=['if'], data=[]] c_id = f.get('id', k) # Data supplied in with config (eg. 
from a multiqc_config.yaml file in working directory) if 'data' in f: cust_mods[c_id]['data'].update(f['data']) cust_mods[c_id]['config'].update({k: v for (k, v) in f.items() if k is not 'data'}) cust_mods[c_id]['config']['id'] = cust_mods[c_id]['config'].get('id', c_id) continue # depends on [control=['if'], data=['f']] # Custom Content ID has search patterns in the config if c_id in report.files: cust_mods[c_id]['config'] = f cust_mods[c_id]['config']['id'] = cust_mods[c_id]['config'].get('id', c_id) search_patterns.append(c_id) continue # depends on [control=['if'], data=['c_id']] # We should have had something by now log.warn("Found section '{}' in config for under custom_data, but no data or search patterns.".format(c_id)) # depends on [control=['for'], data=[]] # Now go through each of the file search patterns bm = BaseMultiqcModule() for k in search_patterns: num_sp_found_files = 0 for f in bm.find_log_files(k): num_sp_found_files += 1 # Handle any exception without messing up for remaining custom content files try: f_extension = os.path.splitext(f['fn'])[1] # YAML and JSON files are the easiest parsed_data = None if f_extension == '.yaml' or f_extension == '.yml': try: parsed_data = yaml_ordered_load(f['f']) # depends on [control=['try'], data=[]] except Exception as e: log.warning("Error parsing YAML file '{}' (probably invalid YAML)".format(f['fn'])) log.warning('YAML error: {}'.format(e)) break # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] elif f_extension == '.json': try: # Use OrderedDict for objects so that column order is honoured parsed_data = json.loads(f['f'], object_pairs_hook=OrderedDict) # depends on [control=['try'], data=[]] except Exception as e: log.warning("Error parsing JSON file '{}' (probably invalid JSON)".format(f['fn'])) log.warning('JSON error: {}'.format(e)) break # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] elif f_extension == '.png' or f_extension == 
'.jpeg' or f_extension == '.jpg': image_string = base64.b64encode(f['f'].read()).decode('utf-8') image_format = 'png' if f_extension == '.png' else 'jpg' img_html = '<div class="mqc-custom-content-image"><img src="data:image/{};base64,{}" /></div>'.format(image_format, image_string) parsed_data = {'id': f['s_name'], 'plot_type': 'image', 'section_name': f['s_name'].replace('_', ' ').replace('-', ' ').replace('.', ' '), 'description': 'Embedded image <code>{}</code>'.format(f['fn']), 'data': img_html} # depends on [control=['if'], data=[]] if parsed_data is not None: c_id = parsed_data.get('id', k) if len(parsed_data.get('data', {})) > 0: if type(parsed_data['data']) == str: cust_mods[c_id]['data'] = parsed_data['data'] # depends on [control=['if'], data=[]] else: cust_mods[c_id]['data'].update(parsed_data['data']) cust_mods[c_id]['config'].update({j: k for (j, k) in parsed_data.items() if j != 'data'}) # depends on [control=['if'], data=[]] else: log.warning('No data found in {}'.format(f['fn'])) # depends on [control=['if'], data=['parsed_data']] else: # txt, csv, tsv etc # Look for configuration details in the header m_config = _find_file_header(f) s_name = None if m_config is not None: c_id = m_config.get('id', k) # Update the base config with anything parsed from the file b_config = cust_mods.get(c_id, {}).get('config', {}) b_config.update(m_config) # Now set the module config to the merged dict m_config = dict(b_config) s_name = m_config.get('sample_name') # depends on [control=['if'], data=['m_config']] else: c_id = k m_config = cust_mods.get(c_id, {}).get('config', {}) # Guess sample name if not given if s_name is None: s_name = bm.clean_s_name(f['s_name'], f['root']) # depends on [control=['if'], data=['s_name']] # Guess c_id if no information known if k == 'custom_content': c_id = s_name # depends on [control=['if'], data=[]] # Add information about the file to the config dict if 'files' not in m_config: m_config['files'] = dict() # depends on 
[control=['if'], data=['m_config']] m_config['files'].update({s_name: {'fn': f['fn'], 'root': f['root']}}) # Guess file format if not given if m_config.get('file_format') is None: m_config['file_format'] = _guess_file_format(f) # depends on [control=['if'], data=[]] # Parse data try: (parsed_data, conf) = _parse_txt(f, m_config) if parsed_data is None or len(parsed_data) == 0: log.warning('Not able to parse custom data in {}'.format(f['fn'])) # depends on [control=['if'], data=[]] else: # Did we get a new section id from the file? if conf.get('id') is not None: c_id = conf.get('id') # depends on [control=['if'], data=[]] # heatmap - special data type if type(parsed_data) == list: cust_mods[c_id]['data'] = parsed_data # depends on [control=['if'], data=[]] elif conf.get('plot_type') == 'html': cust_mods[c_id]['data'] = parsed_data # depends on [control=['if'], data=[]] else: cust_mods[c_id]['data'].update(parsed_data) cust_mods[c_id]['config'].update(conf) # depends on [control=['try'], data=[]] except (IndexError, AttributeError, TypeError): log.error('Unexpected parsing error for {}'.format(f['fn']), exc_info=True) raise # testing # depends on [control=['except'], data=[]] # depends on [control=['try'], data=[]] except Exception as e: log.error("Uncaught exception raised for file '{}'".format(f['fn'])) log.exception(e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['f']] # Give log message if no files found for search pattern if num_sp_found_files == 0 and k != 'custom_content': log.debug('No samples found: custom content ({})'.format(k)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] # Filter to strip out ignored sample names for k in cust_mods: cust_mods[k]['data'] = bm.ignore_samples(cust_mods[k]['data']) # depends on [control=['for'], data=['k']] # Remove any configs that have no data remove_cids = [k for k in cust_mods if len(cust_mods[k]['data']) == 0] for k in remove_cids: del 
cust_mods[k] # depends on [control=['for'], data=['k']] if len(cust_mods) == 0: raise UserWarning # depends on [control=['if'], data=[]] # Go through each data type parsed_modules = list() for (module_id, mod) in cust_mods.items(): # General Stats if mod['config'].get('plot_type') == 'generalstats': gsheaders = mod['config'].get('pconfig') if gsheaders is None: headers = set() for d in mod['data'].values(): headers.update(d.keys()) # depends on [control=['for'], data=['d']] headers = list(headers) headers.sort() gsheaders = OrderedDict() for h in headers: gsheaders[h] = dict() # depends on [control=['for'], data=['h']] # depends on [control=['if'], data=['gsheaders']] # Headers is a list of dicts if type(gsheaders) == list: gsheaders_dict = OrderedDict() for gsheader in gsheaders: for (col_id, col_data) in gsheader.items(): gsheaders_dict[col_id] = col_data # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['gsheader']] gsheaders = gsheaders_dict # depends on [control=['if'], data=[]] # Add namespace and description if not specified for m_id in gsheaders: if 'namespace' not in gsheaders[m_id]: gsheaders[m_id]['namespace'] = mod['config'].get('namespace', module_id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m_id']] bm.general_stats_addcols(mod['data'], gsheaders) # depends on [control=['if'], data=[]] else: # Initialise this new module class and append to list parsed_modules.append(MultiqcModule(module_id, mod)) if mod['config'].get('plot_type') == 'html': log.info('{}: Found 1 sample (html)'.format(module_id)) # depends on [control=['if'], data=[]] if mod['config'].get('plot_type') == 'image': log.info('{}: Found 1 sample (image)'.format(module_id)) # depends on [control=['if'], data=[]] else: log.info('{}: Found {} samples ({})'.format(module_id, len(mod['data']), mod['config'].get('plot_type'))) # depends on [control=['for'], data=[]] # Sort sections if we have a config option for order mod_order = 
getattr(config, 'custom_content', {}).get('order', []) sorted_modules = [parsed_mod for parsed_mod in parsed_modules if parsed_mod.anchor not in mod_order] sorted_modules.extend([parsed_mod for mod_id in mod_order for parsed_mod in parsed_modules if parsed_mod.anchor == mod_id]) # If we only have General Stats columns then there are no module outputs if len(sorted_modules) == 0: raise UserWarning # depends on [control=['if'], data=[]] return sorted_modules
def _get_taxids(self, taxids=None): """Return user-specified taxids or taxids in self.taxid2asscs""" taxid_keys = set(self.taxid2asscs.keys()) return taxid_keys if taxids is None else set(taxids).intersection(taxid_keys)
def function[_get_taxids, parameter[self, taxids]]: constant[Return user-specified taxids or taxids in self.taxid2asscs] variable[taxid_keys] assign[=] call[name[set], parameter[call[name[self].taxid2asscs.keys, parameter[]]]] return[<ast.IfExp object at 0x7da1b23450c0>]
keyword[def] identifier[_get_taxids] ( identifier[self] , identifier[taxids] = keyword[None] ): literal[string] identifier[taxid_keys] = identifier[set] ( identifier[self] . identifier[taxid2asscs] . identifier[keys] ()) keyword[return] identifier[taxid_keys] keyword[if] identifier[taxids] keyword[is] keyword[None] keyword[else] identifier[set] ( identifier[taxids] ). identifier[intersection] ( identifier[taxid_keys] )
def _get_taxids(self, taxids=None): """Return user-specified taxids or taxids in self.taxid2asscs""" taxid_keys = set(self.taxid2asscs.keys()) return taxid_keys if taxids is None else set(taxids).intersection(taxid_keys)
def groups_create(self, name, **kwargs): """Creates a new private group, optionally including users, only if you’re part of the group.""" return self.__call_api_post('groups.create', name=name, kwargs=kwargs)
def function[groups_create, parameter[self, name]]: constant[Creates a new private group, optionally including users, only if you’re part of the group.] return[call[name[self].__call_api_post, parameter[constant[groups.create]]]]
keyword[def] identifier[groups_create] ( identifier[self] , identifier[name] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[__call_api_post] ( literal[string] , identifier[name] = identifier[name] , identifier[kwargs] = identifier[kwargs] )
def groups_create(self, name, **kwargs): """Creates a new private group, optionally including users, only if you’re part of the group.""" return self.__call_api_post('groups.create', name=name, kwargs=kwargs)
def use(self, key, value): """ Temporarily set a parameter value using the with statement. Aliasing allowed. """ old_value = self[key] try: self[key] = value yield self finally: self[key] = old_value
def function[use, parameter[self, key, value]]: constant[ Temporarily set a parameter value using the with statement. Aliasing allowed. ] variable[old_value] assign[=] call[name[self]][name[key]] <ast.Try object at 0x7da18dc9bfa0>
keyword[def] identifier[use] ( identifier[self] , identifier[key] , identifier[value] ): literal[string] identifier[old_value] = identifier[self] [ identifier[key] ] keyword[try] : identifier[self] [ identifier[key] ]= identifier[value] keyword[yield] identifier[self] keyword[finally] : identifier[self] [ identifier[key] ]= identifier[old_value]
def use(self, key, value): """ Temporarily set a parameter value using the with statement. Aliasing allowed. """ old_value = self[key] try: self[key] = value yield self # depends on [control=['try'], data=[]] finally: self[key] = old_value
def SVD_moments(U, s, V, stachans, event_list, n_SVs=4): """Depreciated.""" print('Depreciated, use svd_moments instead') return svd_moments(u=U, s=s, v=V, stachans=stachans, event_list=event_list, n_svs=n_SVs)
def function[SVD_moments, parameter[U, s, V, stachans, event_list, n_SVs]]: constant[Depreciated.] call[name[print], parameter[constant[Depreciated, use svd_moments instead]]] return[call[name[svd_moments], parameter[]]]
keyword[def] identifier[SVD_moments] ( identifier[U] , identifier[s] , identifier[V] , identifier[stachans] , identifier[event_list] , identifier[n_SVs] = literal[int] ): literal[string] identifier[print] ( literal[string] ) keyword[return] identifier[svd_moments] ( identifier[u] = identifier[U] , identifier[s] = identifier[s] , identifier[v] = identifier[V] , identifier[stachans] = identifier[stachans] , identifier[event_list] = identifier[event_list] , identifier[n_svs] = identifier[n_SVs] )
def SVD_moments(U, s, V, stachans, event_list, n_SVs=4): """Depreciated.""" print('Depreciated, use svd_moments instead') return svd_moments(u=U, s=s, v=V, stachans=stachans, event_list=event_list, n_svs=n_SVs)
def __initialize(self, sample): """! @brief Initializes internal states and resets clustering results in line with input sample. """ self.__processed = [False] * len(sample) self.__optics_objects = [optics_descriptor(i) for i in range(len(sample))] # List of OPTICS objects that corresponds to objects from input sample. self.__ordered_database = [] # List of OPTICS objects in traverse order. self.__clusters = None # Result of clustering (list of clusters where each cluster contains indexes of objects from input data). self.__noise = None
def function[__initialize, parameter[self, sample]]: constant[! @brief Initializes internal states and resets clustering results in line with input sample. ] name[self].__processed assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0190610>]] * call[name[len], parameter[name[sample]]]] name[self].__optics_objects assign[=] <ast.ListComp object at 0x7da1b01913f0> name[self].__ordered_database assign[=] list[[]] name[self].__clusters assign[=] constant[None] name[self].__noise assign[=] constant[None]
keyword[def] identifier[__initialize] ( identifier[self] , identifier[sample] ): literal[string] identifier[self] . identifier[__processed] =[ keyword[False] ]* identifier[len] ( identifier[sample] ) identifier[self] . identifier[__optics_objects] =[ identifier[optics_descriptor] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sample] ))] identifier[self] . identifier[__ordered_database] =[] identifier[self] . identifier[__clusters] = keyword[None] identifier[self] . identifier[__noise] = keyword[None]
def __initialize(self, sample): """! @brief Initializes internal states and resets clustering results in line with input sample. """ self.__processed = [False] * len(sample) self.__optics_objects = [optics_descriptor(i) for i in range(len(sample))] # List of OPTICS objects that corresponds to objects from input sample. self.__ordered_database = [] # List of OPTICS objects in traverse order. self.__clusters = None # Result of clustering (list of clusters where each cluster contains indexes of objects from input data). self.__noise = None
def receive(self, action=None, method=None, **kwargs): """ Create a <Receive> element :param action: Receive action URL :param method: Receive action URL method :param kwargs: additional attributes :returns: <Receive> element """ return self.nest(Receive(action=action, method=method, **kwargs))
def function[receive, parameter[self, action, method]]: constant[ Create a <Receive> element :param action: Receive action URL :param method: Receive action URL method :param kwargs: additional attributes :returns: <Receive> element ] return[call[name[self].nest, parameter[call[name[Receive], parameter[]]]]]
keyword[def] identifier[receive] ( identifier[self] , identifier[action] = keyword[None] , identifier[method] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[nest] ( identifier[Receive] ( identifier[action] = identifier[action] , identifier[method] = identifier[method] ,** identifier[kwargs] ))
def receive(self, action=None, method=None, **kwargs): """ Create a <Receive> element :param action: Receive action URL :param method: Receive action URL method :param kwargs: additional attributes :returns: <Receive> element """ return self.nest(Receive(action=action, method=method, **kwargs))
def generic_stitch(cube, arrays): """ Creates descriptors associated with array name and then sets the array as a member variable """ for name, ary in arrays.iteritems(): if name not in type(cube).__dict__: setattr(type(cube), name, ArrayDescriptor(name)) setattr(cube, name, ary)
def function[generic_stitch, parameter[cube, arrays]]: constant[ Creates descriptors associated with array name and then sets the array as a member variable ] for taget[tuple[[<ast.Name object at 0x7da207f98880>, <ast.Name object at 0x7da207f9b010>]]] in starred[call[name[arrays].iteritems, parameter[]]] begin[:] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> call[name[type], parameter[name[cube]]].__dict__] begin[:] call[name[setattr], parameter[call[name[type], parameter[name[cube]]], name[name], call[name[ArrayDescriptor], parameter[name[name]]]]] call[name[setattr], parameter[name[cube], name[name], name[ary]]]
keyword[def] identifier[generic_stitch] ( identifier[cube] , identifier[arrays] ): literal[string] keyword[for] identifier[name] , identifier[ary] keyword[in] identifier[arrays] . identifier[iteritems] (): keyword[if] identifier[name] keyword[not] keyword[in] identifier[type] ( identifier[cube] ). identifier[__dict__] : identifier[setattr] ( identifier[type] ( identifier[cube] ), identifier[name] , identifier[ArrayDescriptor] ( identifier[name] )) identifier[setattr] ( identifier[cube] , identifier[name] , identifier[ary] )
def generic_stitch(cube, arrays): """ Creates descriptors associated with array name and then sets the array as a member variable """ for (name, ary) in arrays.iteritems(): if name not in type(cube).__dict__: setattr(type(cube), name, ArrayDescriptor(name)) # depends on [control=['if'], data=['name']] setattr(cube, name, ary) # depends on [control=['for'], data=[]]
def list_sets(family='ipv4'): ''' .. versionadded:: 2014.7.0 List all ipset sets. CLI Example: .. code-block:: bash salt '*' ipset.list_sets ''' cmd = '{0} list -t'.format(_ipset_cmd()) out = __salt__['cmd.run'](cmd, python_shell=False) _tmp = out.split('\n') count = 0 sets = [] sets.append({}) for item in _tmp: if not item: count = count + 1 sets.append({}) continue key, value = item.split(':', 1) sets[count][key] = value[1:] return sets
def function[list_sets, parameter[family]]: constant[ .. versionadded:: 2014.7.0 List all ipset sets. CLI Example: .. code-block:: bash salt '*' ipset.list_sets ] variable[cmd] assign[=] call[constant[{0} list -t].format, parameter[call[name[_ipset_cmd], parameter[]]]] variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]] variable[_tmp] assign[=] call[name[out].split, parameter[constant[ ]]] variable[count] assign[=] constant[0] variable[sets] assign[=] list[[]] call[name[sets].append, parameter[dictionary[[], []]]] for taget[name[item]] in starred[name[_tmp]] begin[:] if <ast.UnaryOp object at 0x7da1b20089d0> begin[:] variable[count] assign[=] binary_operation[name[count] + constant[1]] call[name[sets].append, parameter[dictionary[[], []]]] continue <ast.Tuple object at 0x7da1b20084c0> assign[=] call[name[item].split, parameter[constant[:], constant[1]]] call[call[name[sets]][name[count]]][name[key]] assign[=] call[name[value]][<ast.Slice object at 0x7da1b2136e60>] return[name[sets]]
keyword[def] identifier[list_sets] ( identifier[family] = literal[string] ): literal[string] identifier[cmd] = literal[string] . identifier[format] ( identifier[_ipset_cmd] ()) identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] ) identifier[_tmp] = identifier[out] . identifier[split] ( literal[string] ) identifier[count] = literal[int] identifier[sets] =[] identifier[sets] . identifier[append] ({}) keyword[for] identifier[item] keyword[in] identifier[_tmp] : keyword[if] keyword[not] identifier[item] : identifier[count] = identifier[count] + literal[int] identifier[sets] . identifier[append] ({}) keyword[continue] identifier[key] , identifier[value] = identifier[item] . identifier[split] ( literal[string] , literal[int] ) identifier[sets] [ identifier[count] ][ identifier[key] ]= identifier[value] [ literal[int] :] keyword[return] identifier[sets]
def list_sets(family='ipv4'): """ .. versionadded:: 2014.7.0 List all ipset sets. CLI Example: .. code-block:: bash salt '*' ipset.list_sets """ cmd = '{0} list -t'.format(_ipset_cmd()) out = __salt__['cmd.run'](cmd, python_shell=False) _tmp = out.split('\n') count = 0 sets = [] sets.append({}) for item in _tmp: if not item: count = count + 1 sets.append({}) continue # depends on [control=['if'], data=[]] (key, value) = item.split(':', 1) sets[count][key] = value[1:] # depends on [control=['for'], data=['item']] return sets
def _is_image_sequenced(image): """Determine if the image is a sequenced image.""" try: image.seek(1) image.seek(0) result = True except EOFError: result = False return result
def function[_is_image_sequenced, parameter[image]]: constant[Determine if the image is a sequenced image.] <ast.Try object at 0x7da207f02d40> return[name[result]]
keyword[def] identifier[_is_image_sequenced] ( identifier[image] ): literal[string] keyword[try] : identifier[image] . identifier[seek] ( literal[int] ) identifier[image] . identifier[seek] ( literal[int] ) identifier[result] = keyword[True] keyword[except] identifier[EOFError] : identifier[result] = keyword[False] keyword[return] identifier[result]
def _is_image_sequenced(image): """Determine if the image is a sequenced image.""" try: image.seek(1) image.seek(0) result = True # depends on [control=['try'], data=[]] except EOFError: result = False # depends on [control=['except'], data=[]] return result
def step(self): """Perform a single step of the morphological Chan-Vese evolution.""" # Assign attributes to local variables for convenience. u = self._u if u is None: raise ValueError("the levelset function is not set " "(use set_levelset)") data = self.data # Determine c0 and c1. inside = (u > 0) outside = (u <= 0) c0 = data[outside].sum() / float(outside.sum()) c1 = data[inside].sum() / float(inside.sum()) # Image attachment. dres = np.array(np.gradient(u)) abs_dres = np.abs(dres).sum(0) #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data) aux = abs_dres * (self.lambda1*(data - c1) ** 2 - self.lambda2*(data - c0) ** 2) res = np.copy(u) res[aux < 0] = 1 res[aux > 0] = 0 # Smoothing. for i in range(self.smoothing): res = curvop(res) self._u = res
def function[step, parameter[self]]: constant[Perform a single step of the morphological Chan-Vese evolution.] variable[u] assign[=] name[self]._u if compare[name[u] is constant[None]] begin[:] <ast.Raise object at 0x7da20c76e920> variable[data] assign[=] name[self].data variable[inside] assign[=] compare[name[u] greater[>] constant[0]] variable[outside] assign[=] compare[name[u] less_or_equal[<=] constant[0]] variable[c0] assign[=] binary_operation[call[call[name[data]][name[outside]].sum, parameter[]] / call[name[float], parameter[call[name[outside].sum, parameter[]]]]] variable[c1] assign[=] binary_operation[call[call[name[data]][name[inside]].sum, parameter[]] / call[name[float], parameter[call[name[inside].sum, parameter[]]]]] variable[dres] assign[=] call[name[np].array, parameter[call[name[np].gradient, parameter[name[u]]]]] variable[abs_dres] assign[=] call[call[name[np].abs, parameter[name[dres]]].sum, parameter[constant[0]]] variable[aux] assign[=] binary_operation[name[abs_dres] * binary_operation[binary_operation[name[self].lambda1 * binary_operation[binary_operation[name[data] - name[c1]] ** constant[2]]] - binary_operation[name[self].lambda2 * binary_operation[binary_operation[name[data] - name[c0]] ** constant[2]]]]] variable[res] assign[=] call[name[np].copy, parameter[name[u]]] call[name[res]][compare[name[aux] less[<] constant[0]]] assign[=] constant[1] call[name[res]][compare[name[aux] greater[>] constant[0]]] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[name[self].smoothing]]] begin[:] variable[res] assign[=] call[name[curvop], parameter[name[res]]] name[self]._u assign[=] name[res]
keyword[def] identifier[step] ( identifier[self] ): literal[string] identifier[u] = identifier[self] . identifier[_u] keyword[if] identifier[u] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[data] = identifier[self] . identifier[data] identifier[inside] =( identifier[u] > literal[int] ) identifier[outside] =( identifier[u] <= literal[int] ) identifier[c0] = identifier[data] [ identifier[outside] ]. identifier[sum] ()/ identifier[float] ( identifier[outside] . identifier[sum] ()) identifier[c1] = identifier[data] [ identifier[inside] ]. identifier[sum] ()/ identifier[float] ( identifier[inside] . identifier[sum] ()) identifier[dres] = identifier[np] . identifier[array] ( identifier[np] . identifier[gradient] ( identifier[u] )) identifier[abs_dres] = identifier[np] . identifier[abs] ( identifier[dres] ). identifier[sum] ( literal[int] ) identifier[aux] = identifier[abs_dres] *( identifier[self] . identifier[lambda1] *( identifier[data] - identifier[c1] )** literal[int] - identifier[self] . identifier[lambda2] *( identifier[data] - identifier[c0] )** literal[int] ) identifier[res] = identifier[np] . identifier[copy] ( identifier[u] ) identifier[res] [ identifier[aux] < literal[int] ]= literal[int] identifier[res] [ identifier[aux] > literal[int] ]= literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[smoothing] ): identifier[res] = identifier[curvop] ( identifier[res] ) identifier[self] . identifier[_u] = identifier[res]
def step(self): """Perform a single step of the morphological Chan-Vese evolution.""" # Assign attributes to local variables for convenience. u = self._u if u is None: raise ValueError('the levelset function is not set (use set_levelset)') # depends on [control=['if'], data=[]] data = self.data # Determine c0 and c1. inside = u > 0 outside = u <= 0 c0 = data[outside].sum() / float(outside.sum()) c1 = data[inside].sum() / float(inside.sum()) # Image attachment. dres = np.array(np.gradient(u)) abs_dres = np.abs(dres).sum(0) #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data) aux = abs_dres * (self.lambda1 * (data - c1) ** 2 - self.lambda2 * (data - c0) ** 2) res = np.copy(u) res[aux < 0] = 1 res[aux > 0] = 0 # Smoothing. for i in range(self.smoothing): res = curvop(res) # depends on [control=['for'], data=[]] self._u = res
def _get_codes_for_values(values, categories): """ utility routine to turn values into codes given the specified categories """ from pandas.core.algorithms import _get_data_algo, _hashtables dtype_equal = is_dtype_equal(values.dtype, categories.dtype) if dtype_equal: # To prevent erroneous dtype coercion in _get_data_algo, retrieve # the underlying numpy array. gh-22702 values = getattr(values, '_ndarray_values', values) categories = getattr(categories, '_ndarray_values', categories) elif (is_extension_array_dtype(categories.dtype) and is_object_dtype(values)): # Support inferring the correct extension dtype from an array of # scalar objects. e.g. # Categorical(array[Period, Period], categories=PeriodIndex(...)) try: values = ( categories.dtype.construct_array_type()._from_sequence(values) ) except Exception: # but that may fail for any reason, so fall back to object values = ensure_object(values) categories = ensure_object(categories) else: values = ensure_object(values) categories = ensure_object(categories) (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) (_, _), cats = _get_data_algo(categories, _hashtables) t = hash_klass(len(cats)) t.map_locations(cats) return coerce_indexer_dtype(t.lookup(vals), cats)
def function[_get_codes_for_values, parameter[values, categories]]: constant[ utility routine to turn values into codes given the specified categories ] from relative_module[pandas.core.algorithms] import module[_get_data_algo], module[_hashtables] variable[dtype_equal] assign[=] call[name[is_dtype_equal], parameter[name[values].dtype, name[categories].dtype]] if name[dtype_equal] begin[:] variable[values] assign[=] call[name[getattr], parameter[name[values], constant[_ndarray_values], name[values]]] variable[categories] assign[=] call[name[getattr], parameter[name[categories], constant[_ndarray_values], name[categories]]] <ast.Tuple object at 0x7da18f00dc30> assign[=] call[name[_get_data_algo], parameter[name[values], name[_hashtables]]] <ast.Tuple object at 0x7da18f00dea0> assign[=] call[name[_get_data_algo], parameter[name[categories], name[_hashtables]]] variable[t] assign[=] call[name[hash_klass], parameter[call[name[len], parameter[name[cats]]]]] call[name[t].map_locations, parameter[name[cats]]] return[call[name[coerce_indexer_dtype], parameter[call[name[t].lookup, parameter[name[vals]]], name[cats]]]]
keyword[def] identifier[_get_codes_for_values] ( identifier[values] , identifier[categories] ): literal[string] keyword[from] identifier[pandas] . identifier[core] . identifier[algorithms] keyword[import] identifier[_get_data_algo] , identifier[_hashtables] identifier[dtype_equal] = identifier[is_dtype_equal] ( identifier[values] . identifier[dtype] , identifier[categories] . identifier[dtype] ) keyword[if] identifier[dtype_equal] : identifier[values] = identifier[getattr] ( identifier[values] , literal[string] , identifier[values] ) identifier[categories] = identifier[getattr] ( identifier[categories] , literal[string] , identifier[categories] ) keyword[elif] ( identifier[is_extension_array_dtype] ( identifier[categories] . identifier[dtype] ) keyword[and] identifier[is_object_dtype] ( identifier[values] )): keyword[try] : identifier[values] =( identifier[categories] . identifier[dtype] . identifier[construct_array_type] (). identifier[_from_sequence] ( identifier[values] ) ) keyword[except] identifier[Exception] : identifier[values] = identifier[ensure_object] ( identifier[values] ) identifier[categories] = identifier[ensure_object] ( identifier[categories] ) keyword[else] : identifier[values] = identifier[ensure_object] ( identifier[values] ) identifier[categories] = identifier[ensure_object] ( identifier[categories] ) ( identifier[hash_klass] , identifier[vec_klass] ), identifier[vals] = identifier[_get_data_algo] ( identifier[values] , identifier[_hashtables] ) ( identifier[_] , identifier[_] ), identifier[cats] = identifier[_get_data_algo] ( identifier[categories] , identifier[_hashtables] ) identifier[t] = identifier[hash_klass] ( identifier[len] ( identifier[cats] )) identifier[t] . identifier[map_locations] ( identifier[cats] ) keyword[return] identifier[coerce_indexer_dtype] ( identifier[t] . identifier[lookup] ( identifier[vals] ), identifier[cats] )
def _get_codes_for_values(values, categories): """ utility routine to turn values into codes given the specified categories """ from pandas.core.algorithms import _get_data_algo, _hashtables dtype_equal = is_dtype_equal(values.dtype, categories.dtype) if dtype_equal: # To prevent erroneous dtype coercion in _get_data_algo, retrieve # the underlying numpy array. gh-22702 values = getattr(values, '_ndarray_values', values) categories = getattr(categories, '_ndarray_values', categories) # depends on [control=['if'], data=[]] elif is_extension_array_dtype(categories.dtype) and is_object_dtype(values): # Support inferring the correct extension dtype from an array of # scalar objects. e.g. # Categorical(array[Period, Period], categories=PeriodIndex(...)) try: values = categories.dtype.construct_array_type()._from_sequence(values) # depends on [control=['try'], data=[]] except Exception: # but that may fail for any reason, so fall back to object values = ensure_object(values) categories = ensure_object(categories) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: values = ensure_object(values) categories = ensure_object(categories) ((hash_klass, vec_klass), vals) = _get_data_algo(values, _hashtables) ((_, _), cats) = _get_data_algo(categories, _hashtables) t = hash_klass(len(cats)) t.map_locations(cats) return coerce_indexer_dtype(t.lookup(vals), cats)
def clicks(times, fs, click=None, length=None): """Returns a signal with the signal 'click' placed at each specified time Parameters ---------- times : np.ndarray times to place clicks, in seconds fs : int desired sampling rate of the output signal click : np.ndarray click signal, defaults to a 1 kHz blip length : int desired number of samples in the output signal, defaults to ``times.max()*fs + click.shape[0] + 1`` Returns ------- click_signal : np.ndarray Synthesized click signal """ # Create default click signal if click is None: # 1 kHz tone, 100ms click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs)) # Exponential decay click *= np.exp(-np.arange(fs*.1)/(fs*.01)) # Set default length if length is None: length = int(times.max()*fs + click.shape[0] + 1) # Pre-allocate click signal click_signal = np.zeros(length) # Place clicks for time in times: # Compute the boundaries of the click start = int(time*fs) end = start + click.shape[0] # Make sure we don't try to output past the end of the signal if start >= length: break if end >= length: click_signal[start:] = click[:length - start] break # Normally, just add a click here click_signal[start:end] = click return click_signal
def function[clicks, parameter[times, fs, click, length]]: constant[Returns a signal with the signal 'click' placed at each specified time Parameters ---------- times : np.ndarray times to place clicks, in seconds fs : int desired sampling rate of the output signal click : np.ndarray click signal, defaults to a 1 kHz blip length : int desired number of samples in the output signal, defaults to ``times.max()*fs + click.shape[0] + 1`` Returns ------- click_signal : np.ndarray Synthesized click signal ] if compare[name[click] is constant[None]] begin[:] variable[click] assign[=] call[name[np].sin, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[2] * name[np].pi] * call[name[np].arange, parameter[binary_operation[name[fs] * constant[0.1]]]]] * constant[1000]] / binary_operation[constant[1.0] * name[fs]]]]] <ast.AugAssign object at 0x7da20e9579a0> if compare[name[length] is constant[None]] begin[:] variable[length] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[call[name[times].max, parameter[]] * name[fs]] + call[name[click].shape][constant[0]]] + constant[1]]]] variable[click_signal] assign[=] call[name[np].zeros, parameter[name[length]]] for taget[name[time]] in starred[name[times]] begin[:] variable[start] assign[=] call[name[int], parameter[binary_operation[name[time] * name[fs]]]] variable[end] assign[=] binary_operation[name[start] + call[name[click].shape][constant[0]]] if compare[name[start] greater_or_equal[>=] name[length]] begin[:] break if compare[name[end] greater_or_equal[>=] name[length]] begin[:] call[name[click_signal]][<ast.Slice object at 0x7da20e9b3ee0>] assign[=] call[name[click]][<ast.Slice object at 0x7da20e9b3ca0>] break call[name[click_signal]][<ast.Slice object at 0x7da20e9b0760>] assign[=] name[click] return[name[click_signal]]
keyword[def] identifier[clicks] ( identifier[times] , identifier[fs] , identifier[click] = keyword[None] , identifier[length] = keyword[None] ): literal[string] keyword[if] identifier[click] keyword[is] keyword[None] : identifier[click] = identifier[np] . identifier[sin] ( literal[int] * identifier[np] . identifier[pi] * identifier[np] . identifier[arange] ( identifier[fs] * literal[int] )* literal[int] /( literal[int] * identifier[fs] )) identifier[click] *= identifier[np] . identifier[exp] (- identifier[np] . identifier[arange] ( identifier[fs] * literal[int] )/( identifier[fs] * literal[int] )) keyword[if] identifier[length] keyword[is] keyword[None] : identifier[length] = identifier[int] ( identifier[times] . identifier[max] ()* identifier[fs] + identifier[click] . identifier[shape] [ literal[int] ]+ literal[int] ) identifier[click_signal] = identifier[np] . identifier[zeros] ( identifier[length] ) keyword[for] identifier[time] keyword[in] identifier[times] : identifier[start] = identifier[int] ( identifier[time] * identifier[fs] ) identifier[end] = identifier[start] + identifier[click] . identifier[shape] [ literal[int] ] keyword[if] identifier[start] >= identifier[length] : keyword[break] keyword[if] identifier[end] >= identifier[length] : identifier[click_signal] [ identifier[start] :]= identifier[click] [: identifier[length] - identifier[start] ] keyword[break] identifier[click_signal] [ identifier[start] : identifier[end] ]= identifier[click] keyword[return] identifier[click_signal]
def clicks(times, fs, click=None, length=None): """Returns a signal with the signal 'click' placed at each specified time Parameters ---------- times : np.ndarray times to place clicks, in seconds fs : int desired sampling rate of the output signal click : np.ndarray click signal, defaults to a 1 kHz blip length : int desired number of samples in the output signal, defaults to ``times.max()*fs + click.shape[0] + 1`` Returns ------- click_signal : np.ndarray Synthesized click signal """ # Create default click signal if click is None: # 1 kHz tone, 100ms click = np.sin(2 * np.pi * np.arange(fs * 0.1) * 1000 / (1.0 * fs)) # Exponential decay click *= np.exp(-np.arange(fs * 0.1) / (fs * 0.01)) # depends on [control=['if'], data=['click']] # Set default length if length is None: length = int(times.max() * fs + click.shape[0] + 1) # depends on [control=['if'], data=['length']] # Pre-allocate click signal click_signal = np.zeros(length) # Place clicks for time in times: # Compute the boundaries of the click start = int(time * fs) end = start + click.shape[0] # Make sure we don't try to output past the end of the signal if start >= length: break # depends on [control=['if'], data=[]] if end >= length: click_signal[start:] = click[:length - start] break # depends on [control=['if'], data=['length']] # Normally, just add a click here click_signal[start:end] = click # depends on [control=['for'], data=['time']] return click_signal
def send(self, to, subject, body, reply_to=None, **kwargs): """ Send email via AWS SES. :returns string: message id *** Composes an email message based on input data, and then immediately queues the message for sending. :type to: list of strings or string :param to: The To: field(s) of the message. :type subject: string :param subject: The subject of the message: A short summary of the content, which will appear in the recipient's inbox. :type body: string :param body: The message body. :sender: email address of the sender. String or typle(name, email) :reply_to: email to reply to **kwargs: :type cc_addresses: list of strings or string :param cc_addresses: The CC: field(s) of the message. :type bcc_addresses: list of strings or string :param bcc_addresses: The BCC: field(s) of the message. :type format: string :param format: The format of the message's body, must be either "text" or "html". :type return_path: string :param return_path: The email address to which bounce notifications are to be forwarded. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. :type text_body: string :param text_body: The text body to send with this email. :type html_body: string :param html_body: The html body to send with this email. """ if not self.sender: raise AttributeError("Sender email 'sender' or 'source' is not provided") kwargs["to_addresses"] = to kwargs["subject"] = subject kwargs["body"] = body kwargs["source"] = self._get_sender(self.sender)[0] kwargs["reply_addresses"] = self._get_sender(reply_to or self.reply_to)[2] response = self.ses.send_email(**kwargs) return response["SendEmailResponse"]["SendEmailResult"]["MessageId"]
def function[send, parameter[self, to, subject, body, reply_to]]: constant[ Send email via AWS SES. :returns string: message id *** Composes an email message based on input data, and then immediately queues the message for sending. :type to: list of strings or string :param to: The To: field(s) of the message. :type subject: string :param subject: The subject of the message: A short summary of the content, which will appear in the recipient's inbox. :type body: string :param body: The message body. :sender: email address of the sender. String or typle(name, email) :reply_to: email to reply to **kwargs: :type cc_addresses: list of strings or string :param cc_addresses: The CC: field(s) of the message. :type bcc_addresses: list of strings or string :param bcc_addresses: The BCC: field(s) of the message. :type format: string :param format: The format of the message's body, must be either "text" or "html". :type return_path: string :param return_path: The email address to which bounce notifications are to be forwarded. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. :type text_body: string :param text_body: The text body to send with this email. :type html_body: string :param html_body: The html body to send with this email. 
] if <ast.UnaryOp object at 0x7da1b0bda650> begin[:] <ast.Raise object at 0x7da1b0bd94e0> call[name[kwargs]][constant[to_addresses]] assign[=] name[to] call[name[kwargs]][constant[subject]] assign[=] name[subject] call[name[kwargs]][constant[body]] assign[=] name[body] call[name[kwargs]][constant[source]] assign[=] call[call[name[self]._get_sender, parameter[name[self].sender]]][constant[0]] call[name[kwargs]][constant[reply_addresses]] assign[=] call[call[name[self]._get_sender, parameter[<ast.BoolOp object at 0x7da1b0bd99c0>]]][constant[2]] variable[response] assign[=] call[name[self].ses.send_email, parameter[]] return[call[call[call[name[response]][constant[SendEmailResponse]]][constant[SendEmailResult]]][constant[MessageId]]]
keyword[def] identifier[send] ( identifier[self] , identifier[to] , identifier[subject] , identifier[body] , identifier[reply_to] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[sender] : keyword[raise] identifier[AttributeError] ( literal[string] ) identifier[kwargs] [ literal[string] ]= identifier[to] identifier[kwargs] [ literal[string] ]= identifier[subject] identifier[kwargs] [ literal[string] ]= identifier[body] identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_get_sender] ( identifier[self] . identifier[sender] )[ literal[int] ] identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_get_sender] ( identifier[reply_to] keyword[or] identifier[self] . identifier[reply_to] )[ literal[int] ] identifier[response] = identifier[self] . identifier[ses] . identifier[send_email] (** identifier[kwargs] ) keyword[return] identifier[response] [ literal[string] ][ literal[string] ][ literal[string] ]
def send(self, to, subject, body, reply_to=None, **kwargs): """ Send email via AWS SES. :returns string: message id *** Composes an email message based on input data, and then immediately queues the message for sending. :type to: list of strings or string :param to: The To: field(s) of the message. :type subject: string :param subject: The subject of the message: A short summary of the content, which will appear in the recipient's inbox. :type body: string :param body: The message body. :sender: email address of the sender. String or typle(name, email) :reply_to: email to reply to **kwargs: :type cc_addresses: list of strings or string :param cc_addresses: The CC: field(s) of the message. :type bcc_addresses: list of strings or string :param bcc_addresses: The BCC: field(s) of the message. :type format: string :param format: The format of the message's body, must be either "text" or "html". :type return_path: string :param return_path: The email address to which bounce notifications are to be forwarded. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. :type text_body: string :param text_body: The text body to send with this email. :type html_body: string :param html_body: The html body to send with this email. """ if not self.sender: raise AttributeError("Sender email 'sender' or 'source' is not provided") # depends on [control=['if'], data=[]] kwargs['to_addresses'] = to kwargs['subject'] = subject kwargs['body'] = body kwargs['source'] = self._get_sender(self.sender)[0] kwargs['reply_addresses'] = self._get_sender(reply_to or self.reply_to)[2] response = self.ses.send_email(**kwargs) return response['SendEmailResponse']['SendEmailResult']['MessageId']
def extract_credentials(self, url): """ Extracts user/password from a url. Returns a tuple: (url-without-auth, username, password) """ if isinstance(url, urllib2.Request): result = urlparse.urlsplit(url.get_full_url()) else: result = urlparse.urlsplit(url) scheme, netloc, path, query, frag = result username, password = self.parse_credentials(netloc) if username is None: return url, None, None elif password is None and self.prompting: # remove the auth credentials from the url part netloc = netloc.replace('%s@' % username, '', 1) # prompt for the password prompt = 'Password for %s@%s: ' % (username, netloc) password = urllib.quote(getpass.getpass(prompt)) else: # remove the auth credentials from the url part netloc = netloc.replace('%s:%s@' % (username, password), '', 1) target_url = urlparse.urlunsplit((scheme, netloc, path, query, frag)) return target_url, username, password
def function[extract_credentials, parameter[self, url]]: constant[ Extracts user/password from a url. Returns a tuple: (url-without-auth, username, password) ] if call[name[isinstance], parameter[name[url], name[urllib2].Request]] begin[:] variable[result] assign[=] call[name[urlparse].urlsplit, parameter[call[name[url].get_full_url, parameter[]]]] <ast.Tuple object at 0x7da204567be0> assign[=] name[result] <ast.Tuple object at 0x7da2045678e0> assign[=] call[name[self].parse_credentials, parameter[name[netloc]]] if compare[name[username] is constant[None]] begin[:] return[tuple[[<ast.Name object at 0x7da2045675b0>, <ast.Constant object at 0x7da2045676a0>, <ast.Constant object at 0x7da204567f10>]]] variable[target_url] assign[=] call[name[urlparse].urlunsplit, parameter[tuple[[<ast.Name object at 0x7da20c6c7d00>, <ast.Name object at 0x7da20c6c5810>, <ast.Name object at 0x7da20c6c51e0>, <ast.Name object at 0x7da20c6c5a20>, <ast.Name object at 0x7da20c6c7a00>]]]] return[tuple[[<ast.Name object at 0x7da20c6c5e40>, <ast.Name object at 0x7da20c6c7700>, <ast.Name object at 0x7da20c6c4580>]]]
keyword[def] identifier[extract_credentials] ( identifier[self] , identifier[url] ): literal[string] keyword[if] identifier[isinstance] ( identifier[url] , identifier[urllib2] . identifier[Request] ): identifier[result] = identifier[urlparse] . identifier[urlsplit] ( identifier[url] . identifier[get_full_url] ()) keyword[else] : identifier[result] = identifier[urlparse] . identifier[urlsplit] ( identifier[url] ) identifier[scheme] , identifier[netloc] , identifier[path] , identifier[query] , identifier[frag] = identifier[result] identifier[username] , identifier[password] = identifier[self] . identifier[parse_credentials] ( identifier[netloc] ) keyword[if] identifier[username] keyword[is] keyword[None] : keyword[return] identifier[url] , keyword[None] , keyword[None] keyword[elif] identifier[password] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[prompting] : identifier[netloc] = identifier[netloc] . identifier[replace] ( literal[string] % identifier[username] , literal[string] , literal[int] ) identifier[prompt] = literal[string] %( identifier[username] , identifier[netloc] ) identifier[password] = identifier[urllib] . identifier[quote] ( identifier[getpass] . identifier[getpass] ( identifier[prompt] )) keyword[else] : identifier[netloc] = identifier[netloc] . identifier[replace] ( literal[string] %( identifier[username] , identifier[password] ), literal[string] , literal[int] ) identifier[target_url] = identifier[urlparse] . identifier[urlunsplit] (( identifier[scheme] , identifier[netloc] , identifier[path] , identifier[query] , identifier[frag] )) keyword[return] identifier[target_url] , identifier[username] , identifier[password]
def extract_credentials(self, url): """ Extracts user/password from a url. Returns a tuple: (url-without-auth, username, password) """ if isinstance(url, urllib2.Request): result = urlparse.urlsplit(url.get_full_url()) # depends on [control=['if'], data=[]] else: result = urlparse.urlsplit(url) (scheme, netloc, path, query, frag) = result (username, password) = self.parse_credentials(netloc) if username is None: return (url, None, None) # depends on [control=['if'], data=[]] elif password is None and self.prompting: # remove the auth credentials from the url part netloc = netloc.replace('%s@' % username, '', 1) # prompt for the password prompt = 'Password for %s@%s: ' % (username, netloc) password = urllib.quote(getpass.getpass(prompt)) # depends on [control=['if'], data=[]] else: # remove the auth credentials from the url part netloc = netloc.replace('%s:%s@' % (username, password), '', 1) target_url = urlparse.urlunsplit((scheme, netloc, path, query, frag)) return (target_url, username, password)
def bulk_add_entities(self, entities_and_kinds): """ Add many entities and sub-entity groups to this EntityGroup. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to add to the group. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind. """ memberships = [EntityGroupMembership( entity_group=self, entity=entity, sub_entity_kind=sub_entity_kind, ) for entity, sub_entity_kind in entities_and_kinds] created = EntityGroupMembership.objects.bulk_create(memberships) return created
def function[bulk_add_entities, parameter[self, entities_and_kinds]]: constant[ Add many entities and sub-entity groups to this EntityGroup. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to add to the group. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind. ] variable[memberships] assign[=] <ast.ListComp object at 0x7da18eb55b10> variable[created] assign[=] call[name[EntityGroupMembership].objects.bulk_create, parameter[name[memberships]]] return[name[created]]
keyword[def] identifier[bulk_add_entities] ( identifier[self] , identifier[entities_and_kinds] ): literal[string] identifier[memberships] =[ identifier[EntityGroupMembership] ( identifier[entity_group] = identifier[self] , identifier[entity] = identifier[entity] , identifier[sub_entity_kind] = identifier[sub_entity_kind] , ) keyword[for] identifier[entity] , identifier[sub_entity_kind] keyword[in] identifier[entities_and_kinds] ] identifier[created] = identifier[EntityGroupMembership] . identifier[objects] . identifier[bulk_create] ( identifier[memberships] ) keyword[return] identifier[created]
def bulk_add_entities(self, entities_and_kinds): """ Add many entities and sub-entity groups to this EntityGroup. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to add to the group. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind. """ memberships = [EntityGroupMembership(entity_group=self, entity=entity, sub_entity_kind=sub_entity_kind) for (entity, sub_entity_kind) in entities_and_kinds] created = EntityGroupMembership.objects.bulk_create(memberships) return created