code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def chain_getattr(obj, attr, value=None):
    """Resolve a dotted attribute chain on *obj*.

    Returns ``value`` (default ``None``) when any attribute in the
    chain is missing.
    """
    try:
        # Both the chained lookup and the resolution may raise
        # AttributeError; either way we fall back to ``value``.
        target = safe_chain_getattr(obj, attr)
        return _resolve_value(target)
    except AttributeError:
        return value
def function[chain_getattr, parameter[obj, attr, value]]: constant[Get chain attribute for an object. ] <ast.Try object at 0x7da1b0793a00>
keyword[def] identifier[chain_getattr] ( identifier[obj] , identifier[attr] , identifier[value] = keyword[None] ): literal[string] keyword[try] : keyword[return] identifier[_resolve_value] ( identifier[safe_chain_getattr] ( identifier[obj] , identifier[attr] )) keyword[except] identifier[AttributeError] : keyword[return] identifier[value]
def chain_getattr(obj, attr, value=None): """Get chain attribute for an object. """ try: return _resolve_value(safe_chain_getattr(obj, attr)) # depends on [control=['try'], data=[]] except AttributeError: return value # depends on [control=['except'], data=[]]
async def helo(self, from_host=None):
    """
    Send the SMTP 'HELO' command, identifying the client and opening
    the session.

    When ``from_host`` is None, the client FQDN is used instead.
    For further details, please check out `RFC 5321 § 4.1.1.1`_.

    Args:
        from_host (str or None): Name to use to identify the client.

    Raises:
        ConnectionResetError: If the connection with the server is
            unexpectedely lost.
        SMTPCommandFailedError: If the server refuses our HELO greeting.

    Returns:
        (int, str): A (code, message) 2-tuple containing the server
            response.

    .. _`RFC 5321 § 4.1.1.1`: https://tools.ietf.org/html/rfc5321#section-4.1.1.1
    """
    # Default to the client's fully-qualified domain name.
    host = self.fqdn if from_host is None else from_host
    code, message = await self.do_cmd("HELO", host)
    # Remember the greeting reply for later capability checks.
    self.last_helo_response = (code, message)
    return code, message
<ast.AsyncFunctionDef object at 0x7da20c76dea0>
keyword[async] keyword[def] identifier[helo] ( identifier[self] , identifier[from_host] = keyword[None] ): literal[string] keyword[if] identifier[from_host] keyword[is] keyword[None] : identifier[from_host] = identifier[self] . identifier[fqdn] identifier[code] , identifier[message] = keyword[await] identifier[self] . identifier[do_cmd] ( literal[string] , identifier[from_host] ) identifier[self] . identifier[last_helo_response] =( identifier[code] , identifier[message] ) keyword[return] identifier[code] , identifier[message]
async def helo(self, from_host=None): """ Sends a SMTP 'HELO' command. - Identifies the client and starts the session. If given ``from_host`` is None, defaults to the client FQDN. For further details, please check out `RFC 5321 § 4.1.1.1`_. Args: from_host (str or None): Name to use to identify the client. Raises: ConnectionResetError: If the connection with the server is unexpectedely lost. SMTPCommandFailedError: If the server refuses our HELO greeting. Returns: (int, str): A (code, message) 2-tuple containing the server response. .. _`RFC 5321 § 4.1.1.1`: https://tools.ietf.org/html/rfc5321#section-4.1.1.1 """ if from_host is None: from_host = self.fqdn # depends on [control=['if'], data=['from_host']] (code, message) = await self.do_cmd('HELO', from_host) self.last_helo_response = (code, message) return (code, message)
def _setup_standard_deviations(self, fle):
    """
    Reads the standard deviation tables from hdf5 and stores them in
    memory

    :param fle:
        HDF5 Tables as instance of :class:`h5py.File`
    """
    # The total standard deviation table is always present.
    self.stddevs = {const.StdDev.TOTAL: hdf_arrays_to_dict(fle["Total"])}
    # Promote to a mutable set so optional types can be registered below.
    self.DEFINED_FOR_STANDARD_DEVIATION_TYPES = set(
        self.DEFINED_FOR_STANDARD_DEVIATION_TYPES)
    # Inter-/intra-event tables are optional; load them only if present.
    for sd_type in (const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT):
        if sd_type not in fle:
            continue
        self.stddevs[sd_type] = hdf_arrays_to_dict(fle[sd_type])
        self.DEFINED_FOR_STANDARD_DEVIATION_TYPES.add(sd_type)
def function[_setup_standard_deviations, parameter[self, fle]]: constant[ Reads the standard deviation tables from hdf5 and stores them in memory :param fle: HDF5 Tables as instance of :class:`h5py.File` ] name[self].stddevs assign[=] dictionary[[], []] call[name[self].stddevs][name[const].StdDev.TOTAL] assign[=] call[name[hdf_arrays_to_dict], parameter[call[name[fle]][constant[Total]]]] name[self].DEFINED_FOR_STANDARD_DEVIATION_TYPES assign[=] call[name[set], parameter[name[self].DEFINED_FOR_STANDARD_DEVIATION_TYPES]] for taget[name[stddev_type]] in starred[list[[<ast.Attribute object at 0x7da207f98f40>, <ast.Attribute object at 0x7da207f99c00>]]] begin[:] if compare[name[stddev_type] in name[fle]] begin[:] call[name[self].stddevs][name[stddev_type]] assign[=] call[name[hdf_arrays_to_dict], parameter[call[name[fle]][name[stddev_type]]]] call[name[self].DEFINED_FOR_STANDARD_DEVIATION_TYPES.add, parameter[name[stddev_type]]]
keyword[def] identifier[_setup_standard_deviations] ( identifier[self] , identifier[fle] ): literal[string] identifier[self] . identifier[stddevs] ={} identifier[self] . identifier[stddevs] [ identifier[const] . identifier[StdDev] . identifier[TOTAL] ]= identifier[hdf_arrays_to_dict] ( identifier[fle] [ literal[string] ]) identifier[self] . identifier[DEFINED_FOR_STANDARD_DEVIATION_TYPES] = identifier[set] ( identifier[self] . identifier[DEFINED_FOR_STANDARD_DEVIATION_TYPES] ) keyword[for] identifier[stddev_type] keyword[in] [ identifier[const] . identifier[StdDev] . identifier[INTER_EVENT] , identifier[const] . identifier[StdDev] . identifier[INTRA_EVENT] ]: keyword[if] identifier[stddev_type] keyword[in] identifier[fle] : identifier[self] . identifier[stddevs] [ identifier[stddev_type] ]= identifier[hdf_arrays_to_dict] ( identifier[fle] [ identifier[stddev_type] ]) identifier[self] . identifier[DEFINED_FOR_STANDARD_DEVIATION_TYPES] . identifier[add] ( identifier[stddev_type] )
def _setup_standard_deviations(self, fle): """ Reads the standard deviation tables from hdf5 and stores them in memory :param fle: HDF5 Tables as instance of :class:`h5py.File` """ # Load in total standard deviation self.stddevs = {} self.stddevs[const.StdDev.TOTAL] = hdf_arrays_to_dict(fle['Total']) # If other standard deviations self.DEFINED_FOR_STANDARD_DEVIATION_TYPES = set(self.DEFINED_FOR_STANDARD_DEVIATION_TYPES) for stddev_type in [const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT]: if stddev_type in fle: self.stddevs[stddev_type] = hdf_arrays_to_dict(fle[stddev_type]) self.DEFINED_FOR_STANDARD_DEVIATION_TYPES.add(stddev_type) # depends on [control=['if'], data=['stddev_type', 'fle']] # depends on [control=['for'], data=['stddev_type']]
def parse_legacy_argstring(argstring):
    '''
    Preparses CLI input:

    ``arg1,arg2`` => ``['arg1', 'arg2']``

    ``[item1, item2],arg2`` => ``[['item1', 'item2'], arg2]``
    '''
    # Pad the separators so shlex emits them as standalone tokens.
    for sep, padded in ((',', ' , '), ('[', ' [ '), (']', ' ] ')):
        argstring = argstring.replace(sep, padded)
    tokens = shlex.split(argstring)

    args = []
    current = []   # pieces of the scalar argument being assembled
    items = []     # elements of the list argument being assembled
    in_list = False
    for token in tokens:
        if token == '[' and not in_list:
            in_list = True
            continue
        if token == ']' and in_list:
            in_list = False
            args.append(items)
            items = []
            continue
        if token == ',':
            # A comma outside a list terminates the current argument.
            if not in_list and current:
                args.append(''.join(current))
                current = []
            continue
        # Undo the padding inside quoted tokens that survived splitting.
        for padded, sep in ((' , ', ','), (' [ ', '['), (' ] ', ']')):
            token = token.replace(padded, sep)
        if in_list:
            items.append(token)
        else:
            current.append(token)
    if current:
        args.append(' '.join(current))
    return args
def function[parse_legacy_argstring, parameter[argstring]]: constant[ Preparses CLI input: ``arg1,arg2`` => ``['arg1', 'arg2']`` ``[item1, item2],arg2`` => ``[['item1', 'item2'], arg2]`` ] variable[argstring] assign[=] call[name[argstring].replace, parameter[constant[,], constant[ , ]]] variable[argstring] assign[=] call[name[argstring].replace, parameter[constant[[], constant[ [ ]]] variable[argstring] assign[=] call[name[argstring].replace, parameter[constant[]], constant[ ] ]]] variable[argbits] assign[=] call[name[shlex].split, parameter[name[argstring]]] variable[args] assign[=] list[[]] variable[arg_buff] assign[=] list[[]] variable[list_buff] assign[=] list[[]] variable[in_list] assign[=] constant[False] for taget[name[bit]] in starred[name[argbits]] begin[:] if <ast.BoolOp object at 0x7da18dc9a5f0> begin[:] variable[in_list] assign[=] constant[True] continue variable[bit] assign[=] call[name[bit].replace, parameter[constant[ , ], constant[,]]] variable[bit] assign[=] call[name[bit].replace, parameter[constant[ [ ], constant[[]]] variable[bit] assign[=] call[name[bit].replace, parameter[constant[ ] ], constant[]]]] if name[in_list] begin[:] call[name[list_buff].append, parameter[name[bit]]] if name[arg_buff] begin[:] call[name[args].append, parameter[call[constant[ ].join, parameter[name[arg_buff]]]]] return[name[args]]
keyword[def] identifier[parse_legacy_argstring] ( identifier[argstring] ): literal[string] identifier[argstring] = identifier[argstring] . identifier[replace] ( literal[string] , literal[string] ) identifier[argstring] = identifier[argstring] . identifier[replace] ( literal[string] , literal[string] ) identifier[argstring] = identifier[argstring] . identifier[replace] ( literal[string] , literal[string] ) identifier[argbits] = identifier[shlex] . identifier[split] ( identifier[argstring] ) identifier[args] =[] identifier[arg_buff] =[] identifier[list_buff] =[] identifier[in_list] = keyword[False] keyword[for] identifier[bit] keyword[in] identifier[argbits] : keyword[if] identifier[bit] == literal[string] keyword[and] keyword[not] identifier[in_list] : identifier[in_list] = keyword[True] keyword[continue] keyword[elif] identifier[bit] == literal[string] keyword[and] identifier[in_list] : identifier[in_list] = keyword[False] identifier[args] . identifier[append] ( identifier[list_buff] ) identifier[list_buff] =[] keyword[continue] keyword[elif] identifier[bit] == literal[string] : keyword[if] keyword[not] identifier[in_list] keyword[and] identifier[arg_buff] : identifier[args] . identifier[append] ( literal[string] . identifier[join] ( identifier[arg_buff] )) identifier[arg_buff] =[] keyword[continue] identifier[bit] = identifier[bit] . identifier[replace] ( literal[string] , literal[string] ) identifier[bit] = identifier[bit] . identifier[replace] ( literal[string] , literal[string] ) identifier[bit] = identifier[bit] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[in_list] : identifier[list_buff] . identifier[append] ( identifier[bit] ) keyword[else] : identifier[arg_buff] . identifier[append] ( identifier[bit] ) keyword[if] identifier[arg_buff] : identifier[args] . identifier[append] ( literal[string] . identifier[join] ( identifier[arg_buff] )) keyword[return] identifier[args]
def parse_legacy_argstring(argstring): """ Preparses CLI input: ``arg1,arg2`` => ``['arg1', 'arg2']`` ``[item1, item2],arg2`` => ``[['item1', 'item2'], arg2]`` """ argstring = argstring.replace(',', ' , ') argstring = argstring.replace('[', ' [ ') argstring = argstring.replace(']', ' ] ') argbits = shlex.split(argstring) args = [] arg_buff = [] list_buff = [] in_list = False for bit in argbits: if bit == '[' and (not in_list): in_list = True continue # depends on [control=['if'], data=[]] elif bit == ']' and in_list: in_list = False args.append(list_buff) list_buff = [] continue # depends on [control=['if'], data=[]] elif bit == ',': if not in_list and arg_buff: args.append(''.join(arg_buff)) arg_buff = [] # depends on [control=['if'], data=[]] continue # depends on [control=['if'], data=[]] # Restore any broken up ,[]s bit = bit.replace(' , ', ',') bit = bit.replace(' [ ', '[') bit = bit.replace(' ] ', ']') if in_list: list_buff.append(bit) # depends on [control=['if'], data=[]] else: arg_buff.append(bit) # depends on [control=['for'], data=['bit']] if arg_buff: args.append(' '.join(arg_buff)) # depends on [control=['if'], data=[]] return args
def parse_args(self, args, scope):
    """Parse arguments to mixin. Add them to scope as variables. Sets upp
    special variable @arguments as well.

    args:
        args (list): arguments
        scope (Scope): current scope
    raises:
        SyntaxError
    """
    # Pre-compute the default @arguments value from the raw args.
    arguments = list(zip(args, [' '] * len(args))) if args and args[0] else None
    # zip_longest was named izip_longest on Python 2.
    zl = (itertools.zip_longest
          if sys.version_info[0] == 3 else itertools.izip_longest)
    if self.args:
        # NOTE(review): the original wrapped each element in
        # ``v if hasattr(v, 'parse') else v`` -- both branches are the
        # same expression, so a plain shallow copy is equivalent.
        declared = list(copy.copy(self.args))
        if not isinstance(args, list):
            args = [args]
        # Pair each supplied argument with its declared parameter;
        # ``zl`` pads the shorter side with None.
        parsed_vars = [
            self._parse_arg(var, arg, scope)
            for arg, var in zl(args, declared)
        ]
        for var in parsed_vars:
            if var:
                var.parse(scope)
            if not arguments:
                arguments = [v.value for v in parsed_vars if v]
    if not arguments:
        arguments = ''
    Variable(['@arguments', None, arguments]).parse(scope)
def function[parse_args, parameter[self, args, scope]]: constant[Parse arguments to mixin. Add them to scope as variables. Sets upp special variable @arguments as well. args: args (list): arguments scope (Scope): current scope raises: SyntaxError ] variable[arguments] assign[=] <ast.IfExp object at 0x7da1aff01690> variable[zl] assign[=] <ast.IfExp object at 0x7da1affc15d0> if name[self].args begin[:] variable[parsed] assign[=] <ast.ListComp object at 0x7da1aff015d0> variable[args] assign[=] <ast.IfExp object at 0x7da1aff01db0> variable[vars] assign[=] <ast.ListComp object at 0x7da1aff03130> for taget[name[var]] in starred[name[vars]] begin[:] if name[var] begin[:] call[name[var].parse, parameter[name[scope]]] if <ast.UnaryOp object at 0x7da1affc1870> begin[:] variable[arguments] assign[=] <ast.ListComp object at 0x7da1affc1cc0> if <ast.UnaryOp object at 0x7da1affc2860> begin[:] variable[arguments] assign[=] constant[] call[call[name[Variable], parameter[list[[<ast.Constant object at 0x7da1affc1180>, <ast.Constant object at 0x7da1affc2080>, <ast.Name object at 0x7da1affc0a90>]]]].parse, parameter[name[scope]]]
keyword[def] identifier[parse_args] ( identifier[self] , identifier[args] , identifier[scope] ): literal[string] identifier[arguments] = identifier[list] ( identifier[zip] ( identifier[args] , [ literal[string] ]* identifier[len] ( identifier[args] ))) keyword[if] identifier[args] keyword[and] identifier[args] [ literal[int] ] keyword[else] keyword[None] identifier[zl] = identifier[itertools] . identifier[zip_longest] keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] keyword[else] identifier[itertools] . identifier[izip_longest] keyword[if] identifier[self] . identifier[args] : identifier[parsed] =[ identifier[v] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ) keyword[else] identifier[v] keyword[for] identifier[v] keyword[in] identifier[copy] . identifier[copy] ( identifier[self] . identifier[args] ) ] identifier[args] = identifier[args] keyword[if] identifier[isinstance] ( identifier[args] , identifier[list] ) keyword[else] [ identifier[args] ] identifier[vars] =[ identifier[self] . identifier[_parse_arg] ( identifier[var] , identifier[arg] , identifier[scope] ) keyword[for] identifier[arg] , identifier[var] keyword[in] identifier[zl] ([ identifier[a] keyword[for] identifier[a] keyword[in] identifier[args] ], identifier[parsed] ) ] keyword[for] identifier[var] keyword[in] identifier[vars] : keyword[if] identifier[var] : identifier[var] . identifier[parse] ( identifier[scope] ) keyword[if] keyword[not] identifier[arguments] : identifier[arguments] =[ identifier[v] . identifier[value] keyword[for] identifier[v] keyword[in] identifier[vars] keyword[if] identifier[v] ] keyword[if] keyword[not] identifier[arguments] : identifier[arguments] = literal[string] identifier[Variable] ([ literal[string] , keyword[None] , identifier[arguments] ]). identifier[parse] ( identifier[scope] )
def parse_args(self, args, scope): """Parse arguments to mixin. Add them to scope as variables. Sets upp special variable @arguments as well. args: args (list): arguments scope (Scope): current scope raises: SyntaxError """ arguments = list(zip(args, [' '] * len(args))) if args and args[0] else None zl = itertools.zip_longest if sys.version_info[0] == 3 else itertools.izip_longest if self.args: parsed = [v if hasattr(v, 'parse') else v for v in copy.copy(self.args)] args = args if isinstance(args, list) else [args] vars = [self._parse_arg(var, arg, scope) for (arg, var) in zl([a for a in args], parsed)] for var in vars: if var: var.parse(scope) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['var']] if not arguments: arguments = [v.value for v in vars if v] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not arguments: arguments = '' # depends on [control=['if'], data=[]] Variable(['@arguments', None, arguments]).parse(scope)
def start(self, min_nodes=None):
    """
    Starts up all the instances in the cloud. To speed things up, all
    instances are started in a seperate thread. To make sure
    ElastiCluster is not stopped during creation of an instance, it will
    overwrite the sigint handler. As soon as the last started instance
    is returned and saved to the repository, sigint is executed as
    usual.

    A VM instance is considered 'up and running' as soon as an SSH
    connection can be established. If the startup timeout is reached
    before all instances are started, ElastiCluster stops the cluster
    and terminates all VM instances.

    This method is blocking and might take some time depending on the
    amount of instances to start.

    :param min_nodes: minimum number of nodes to start in case the
                      quota is reached before all instances are up
    :type min_nodes: dict [node_kind] = number
    """
    nodes = self.get_all_nodes()
    log.info("Starting cluster nodes ...")
    # When forking is disabled, start nodes one by one; otherwise use
    # the thread pool (capped at self.thread_pool_max_size workers).
    if log.DO_NOT_FORK:
        nodes = self._start_nodes_sequentially(nodes)
    else:
        nodes = self._start_nodes_parallel(nodes, self.thread_pool_max_size)
    # checkpoint cluster state
    self.repository.save_or_update(self)
    not_started_nodes = self._check_starting_nodes(nodes, self.startup_timeout)
    # now that all nodes are up, checkpoint cluster state again
    self.repository.save_or_update(self)
    # Try to connect to each node to gather IP addresses and SSH host keys
    log.info("Checking SSH connection to nodes ...")
    # NOTE(review): the set-difference below implies `nodes` and
    # `not_started_nodes` are sets -- confirm against the helpers.
    pending_nodes = nodes - not_started_nodes
    self._gather_node_ip_addresses(pending_nodes, self.startup_timeout)
    # It might be possible that the node.connect() call updated
    # the `preferred_ip` attribute, so, let's save the cluster
    # again.
    self.repository.save_or_update(self)
    # A lot of things could go wrong when starting the cluster. To
    # ensure a stable cluster fitting the needs of the user in terms of
    # cluster size, we check the minimum nodes within the node groups to
    # match the current setup.
    min_nodes = self._compute_min_nodes(min_nodes)
    self._check_cluster_size(min_nodes)
def function[start, parameter[self, min_nodes]]: constant[ Starts up all the instances in the cloud. To speed things up, all instances are started in a seperate thread. To make sure ElastiCluster is not stopped during creation of an instance, it will overwrite the sigint handler. As soon as the last started instance is returned and saved to the repository, sigint is executed as usual. A VM instance is considered 'up and running' as soon as an SSH connection can be established. If the startup timeout is reached before all instances are started, ElastiCluster stops the cluster and terminates all VM instances. This method is blocking and might take some time depending on the amount of instances to start. :param min_nodes: minimum number of nodes to start in case the quota is reached before all instances are up :type min_nodes: dict [node_kind] = number ] variable[nodes] assign[=] call[name[self].get_all_nodes, parameter[]] call[name[log].info, parameter[constant[Starting cluster nodes ...]]] if name[log].DO_NOT_FORK begin[:] variable[nodes] assign[=] call[name[self]._start_nodes_sequentially, parameter[name[nodes]]] call[name[self].repository.save_or_update, parameter[name[self]]] variable[not_started_nodes] assign[=] call[name[self]._check_starting_nodes, parameter[name[nodes], name[self].startup_timeout]] call[name[self].repository.save_or_update, parameter[name[self]]] call[name[log].info, parameter[constant[Checking SSH connection to nodes ...]]] variable[pending_nodes] assign[=] binary_operation[name[nodes] - name[not_started_nodes]] call[name[self]._gather_node_ip_addresses, parameter[name[pending_nodes], name[self].startup_timeout]] call[name[self].repository.save_or_update, parameter[name[self]]] variable[min_nodes] assign[=] call[name[self]._compute_min_nodes, parameter[name[min_nodes]]] call[name[self]._check_cluster_size, parameter[name[min_nodes]]]
keyword[def] identifier[start] ( identifier[self] , identifier[min_nodes] = keyword[None] ): literal[string] identifier[nodes] = identifier[self] . identifier[get_all_nodes] () identifier[log] . identifier[info] ( literal[string] ) keyword[if] identifier[log] . identifier[DO_NOT_FORK] : identifier[nodes] = identifier[self] . identifier[_start_nodes_sequentially] ( identifier[nodes] ) keyword[else] : identifier[nodes] = identifier[self] . identifier[_start_nodes_parallel] ( identifier[nodes] , identifier[self] . identifier[thread_pool_max_size] ) identifier[self] . identifier[repository] . identifier[save_or_update] ( identifier[self] ) identifier[not_started_nodes] = identifier[self] . identifier[_check_starting_nodes] ( identifier[nodes] , identifier[self] . identifier[startup_timeout] ) identifier[self] . identifier[repository] . identifier[save_or_update] ( identifier[self] ) identifier[log] . identifier[info] ( literal[string] ) identifier[pending_nodes] = identifier[nodes] - identifier[not_started_nodes] identifier[self] . identifier[_gather_node_ip_addresses] ( identifier[pending_nodes] , identifier[self] . identifier[startup_timeout] ) identifier[self] . identifier[repository] . identifier[save_or_update] ( identifier[self] ) identifier[min_nodes] = identifier[self] . identifier[_compute_min_nodes] ( identifier[min_nodes] ) identifier[self] . identifier[_check_cluster_size] ( identifier[min_nodes] )
def start(self, min_nodes=None): """ Starts up all the instances in the cloud. To speed things up, all instances are started in a seperate thread. To make sure ElastiCluster is not stopped during creation of an instance, it will overwrite the sigint handler. As soon as the last started instance is returned and saved to the repository, sigint is executed as usual. A VM instance is considered 'up and running' as soon as an SSH connection can be established. If the startup timeout is reached before all instances are started, ElastiCluster stops the cluster and terminates all VM instances. This method is blocking and might take some time depending on the amount of instances to start. :param min_nodes: minimum number of nodes to start in case the quota is reached before all instances are up :type min_nodes: dict [node_kind] = number """ nodes = self.get_all_nodes() log.info('Starting cluster nodes ...') if log.DO_NOT_FORK: nodes = self._start_nodes_sequentially(nodes) # depends on [control=['if'], data=[]] else: nodes = self._start_nodes_parallel(nodes, self.thread_pool_max_size) # checkpoint cluster state self.repository.save_or_update(self) not_started_nodes = self._check_starting_nodes(nodes, self.startup_timeout) # now that all nodes are up, checkpoint cluster state again self.repository.save_or_update(self) # Try to connect to each node to gather IP addresses and SSH host keys log.info('Checking SSH connection to nodes ...') pending_nodes = nodes - not_started_nodes self._gather_node_ip_addresses(pending_nodes, self.startup_timeout) # It might be possible that the node.connect() call updated # the `preferred_ip` attribute, so, let's save the cluster # again. self.repository.save_or_update(self) # A lot of things could go wrong when starting the cluster. To # ensure a stable cluster fitting the needs of the user in terms of # cluster size, we check the minimum nodes within the node groups to # match the current setup. 
min_nodes = self._compute_min_nodes(min_nodes) self._check_cluster_size(min_nodes)
def _csv_temp(self, cursor, fieldnames): """Writes the rows of `cursor` in CSV format to a temporary file and returns the path to that file. :param cursor: database cursor containing data to be output :type cursor: `sqlite3.Cursor` :param fieldnames: row headings :type fieldnames: `list` :rtype: `str` """ temp_fd, temp_path = tempfile.mkstemp(text=True) with open(temp_fd, 'w', encoding='utf-8', newline='') as results_fh: self._csv(cursor, fieldnames, results_fh) return temp_path
def function[_csv_temp, parameter[self, cursor, fieldnames]]: constant[Writes the rows of `cursor` in CSV format to a temporary file and returns the path to that file. :param cursor: database cursor containing data to be output :type cursor: `sqlite3.Cursor` :param fieldnames: row headings :type fieldnames: `list` :rtype: `str` ] <ast.Tuple object at 0x7da1b19c1d80> assign[=] call[name[tempfile].mkstemp, parameter[]] with call[name[open], parameter[name[temp_fd], constant[w]]] begin[:] call[name[self]._csv, parameter[name[cursor], name[fieldnames], name[results_fh]]] return[name[temp_path]]
keyword[def] identifier[_csv_temp] ( identifier[self] , identifier[cursor] , identifier[fieldnames] ): literal[string] identifier[temp_fd] , identifier[temp_path] = identifier[tempfile] . identifier[mkstemp] ( identifier[text] = keyword[True] ) keyword[with] identifier[open] ( identifier[temp_fd] , literal[string] , identifier[encoding] = literal[string] , identifier[newline] = literal[string] ) keyword[as] identifier[results_fh] : identifier[self] . identifier[_csv] ( identifier[cursor] , identifier[fieldnames] , identifier[results_fh] ) keyword[return] identifier[temp_path]
def _csv_temp(self, cursor, fieldnames): """Writes the rows of `cursor` in CSV format to a temporary file and returns the path to that file. :param cursor: database cursor containing data to be output :type cursor: `sqlite3.Cursor` :param fieldnames: row headings :type fieldnames: `list` :rtype: `str` """ (temp_fd, temp_path) = tempfile.mkstemp(text=True) with open(temp_fd, 'w', encoding='utf-8', newline='') as results_fh: self._csv(cursor, fieldnames, results_fh) # depends on [control=['with'], data=['results_fh']] return temp_path
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Read the data encoding the ObtainLease response payload and decode it
    into its constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if the data attribute is missing from the
            encoded payload.
    """
    # Parent read() decodes the outer structure header; presumably it
    # sets self.length to the payload byte count -- confirm in base class.
    super(ObtainLeaseResponsePayload, self).read(
        input_stream,
        kmip_version=kmip_version
    )
    # Restrict decoding to exactly this payload's bytes.
    local_stream = utils.BytearrayStream(input_stream.read(self.length))

    # Each field below is optional: is_tag_next peeks at the upcoming
    # tag, and the fields are consumed in encoding order.
    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
        self._unique_identifier = primitives.TextString(
            tag=enums.Tags.UNIQUE_IDENTIFIER
        )
        self._unique_identifier.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(enums.Tags.LEASE_TIME, local_stream):
        self._lease_time = primitives.Interval(
            tag=enums.Tags.LEASE_TIME
        )
        self._lease_time.read(local_stream, kmip_version=kmip_version)
    if self.is_tag_next(enums.Tags.LAST_CHANGE_DATE, local_stream):
        self._last_change_date = primitives.DateTime(
            tag=enums.Tags.LAST_CHANGE_DATE
        )
        self._last_change_date.read(
            local_stream,
            kmip_version=kmip_version
        )

    # Error out if unparsed bytes remain in the payload.
    self.is_oversized(local_stream)
def function[read, parameter[self, input_stream, kmip_version]]: constant[ Read the data encoding the ObtainLease response payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is missing from the encoded payload. ] call[call[name[super], parameter[name[ObtainLeaseResponsePayload], name[self]]].read, parameter[name[input_stream]]] variable[local_stream] assign[=] call[name[utils].BytearrayStream, parameter[call[name[input_stream].read, parameter[name[self].length]]]] if call[name[self].is_tag_next, parameter[name[enums].Tags.UNIQUE_IDENTIFIER, name[local_stream]]] begin[:] name[self]._unique_identifier assign[=] call[name[primitives].TextString, parameter[]] call[name[self]._unique_identifier.read, parameter[name[local_stream]]] if call[name[self].is_tag_next, parameter[name[enums].Tags.LEASE_TIME, name[local_stream]]] begin[:] name[self]._lease_time assign[=] call[name[primitives].Interval, parameter[]] call[name[self]._lease_time.read, parameter[name[local_stream]]] if call[name[self].is_tag_next, parameter[name[enums].Tags.LAST_CHANGE_DATE, name[local_stream]]] begin[:] name[self]._last_change_date assign[=] call[name[primitives].DateTime, parameter[]] call[name[self]._last_change_date.read, parameter[name[local_stream]]] call[name[self].is_oversized, parameter[name[local_stream]]]
keyword[def] identifier[read] ( identifier[self] , identifier[input_stream] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_0] ): literal[string] identifier[super] ( identifier[ObtainLeaseResponsePayload] , identifier[self] ). identifier[read] ( identifier[input_stream] , identifier[kmip_version] = identifier[kmip_version] ) identifier[local_stream] = identifier[utils] . identifier[BytearrayStream] ( identifier[input_stream] . identifier[read] ( identifier[self] . identifier[length] )) keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[UNIQUE_IDENTIFIER] , identifier[local_stream] ): identifier[self] . identifier[_unique_identifier] = identifier[primitives] . identifier[TextString] ( identifier[tag] = identifier[enums] . identifier[Tags] . identifier[UNIQUE_IDENTIFIER] ) identifier[self] . identifier[_unique_identifier] . identifier[read] ( identifier[local_stream] , identifier[kmip_version] = identifier[kmip_version] ) keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[LEASE_TIME] , identifier[local_stream] ): identifier[self] . identifier[_lease_time] = identifier[primitives] . identifier[Interval] ( identifier[tag] = identifier[enums] . identifier[Tags] . identifier[LEASE_TIME] ) identifier[self] . identifier[_lease_time] . identifier[read] ( identifier[local_stream] , identifier[kmip_version] = identifier[kmip_version] ) keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[LAST_CHANGE_DATE] , identifier[local_stream] ): identifier[self] . identifier[_last_change_date] = identifier[primitives] . identifier[DateTime] ( identifier[tag] = identifier[enums] . identifier[Tags] . identifier[LAST_CHANGE_DATE] ) identifier[self] . identifier[_last_change_date] . 
identifier[read] ( identifier[local_stream] , identifier[kmip_version] = identifier[kmip_version] ) identifier[self] . identifier[is_oversized] ( identifier[local_stream] )
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Read the data encoding the ObtainLease response payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is missing from the encoded payload. """ super(ObtainLeaseResponsePayload, self).read(input_stream, kmip_version=kmip_version) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream): self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER) self._unique_identifier.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]] if self.is_tag_next(enums.Tags.LEASE_TIME, local_stream): self._lease_time = primitives.Interval(tag=enums.Tags.LEASE_TIME) self._lease_time.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]] if self.is_tag_next(enums.Tags.LAST_CHANGE_DATE, local_stream): self._last_change_date = primitives.DateTime(tag=enums.Tags.LAST_CHANGE_DATE) self._last_change_date.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]] self.is_oversized(local_stream)
def list_metering_label_rules(self, retrieve_all=True, **_params):
    """Return all metering label rules known for a label.

    Delegates to the generic ``list`` helper, targeting the metering
    label rules collection path. Extra keyword arguments are forwarded
    as query parameters.
    """
    path = self.metering_label_rules_path
    return self.list('metering_label_rules', path, retrieve_all, **_params)
def function[list_metering_label_rules, parameter[self, retrieve_all]]: constant[Fetches a list of all metering label rules for a label.] return[call[name[self].list, parameter[constant[metering_label_rules], name[self].metering_label_rules_path, name[retrieve_all]]]]
keyword[def] identifier[list_metering_label_rules] ( identifier[self] , identifier[retrieve_all] = keyword[True] ,** identifier[_params] ): literal[string] keyword[return] identifier[self] . identifier[list] ( literal[string] , identifier[self] . identifier[metering_label_rules_path] , identifier[retrieve_all] , ** identifier[_params] )
def list_metering_label_rules(self, retrieve_all=True, **_params): """Fetches a list of all metering label rules for a label.""" return self.list('metering_label_rules', self.metering_label_rules_path, retrieve_all, **_params)
def create_action(self):
    """Create actions related to channel selection."""
    # (key, menu label, slot) — order fixed: load first, then save.
    specs = (
        ('load_channels', 'Load Montage...', self.load_channels),
        ('save_channels', 'Save Montage...', self.save_channels),
    )
    actions = {}
    for key, label, slot in specs:
        act = QAction(label, self)
        act.triggered.connect(slot)
        act.setEnabled(False)
        actions[key] = act
    self.action = actions
def function[create_action, parameter[self]]: constant[Create actions related to channel selection.] variable[actions] assign[=] dictionary[[], []] variable[act] assign[=] call[name[QAction], parameter[constant[Load Montage...], name[self]]] call[name[act].triggered.connect, parameter[name[self].load_channels]] call[name[act].setEnabled, parameter[constant[False]]] call[name[actions]][constant[load_channels]] assign[=] name[act] variable[act] assign[=] call[name[QAction], parameter[constant[Save Montage...], name[self]]] call[name[act].triggered.connect, parameter[name[self].save_channels]] call[name[act].setEnabled, parameter[constant[False]]] call[name[actions]][constant[save_channels]] assign[=] name[act] name[self].action assign[=] name[actions]
keyword[def] identifier[create_action] ( identifier[self] ): literal[string] identifier[actions] ={} identifier[act] = identifier[QAction] ( literal[string] , identifier[self] ) identifier[act] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[load_channels] ) identifier[act] . identifier[setEnabled] ( keyword[False] ) identifier[actions] [ literal[string] ]= identifier[act] identifier[act] = identifier[QAction] ( literal[string] , identifier[self] ) identifier[act] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[save_channels] ) identifier[act] . identifier[setEnabled] ( keyword[False] ) identifier[actions] [ literal[string] ]= identifier[act] identifier[self] . identifier[action] = identifier[actions]
def create_action(self): """Create actions related to channel selection.""" actions = {} act = QAction('Load Montage...', self) act.triggered.connect(self.load_channels) act.setEnabled(False) actions['load_channels'] = act act = QAction('Save Montage...', self) act.triggered.connect(self.save_channels) act.setEnabled(False) actions['save_channels'] = act self.action = actions
def run(cls, row, reader):
    """Invoke the CSV parser on a single row.

    ``row`` must already be a dict produced by the CSV reader; the
    ``reader`` itself is passed along so parsing errors can reference
    the CSV document headers and the current line number.
    """
    cls._parse_keys(row, reader.line_num)
    cls._parse_relationships(row, reader.line_num)
def function[run, parameter[cls, row, reader]]: constant[ Invoke the CSV parser on an individual row The row should already be a dict from the CSV reader. The reader is passed in so we can easily reference the CSV document headers & line number when generating errors. ] call[name[cls]._parse_keys, parameter[name[row], name[reader].line_num]] call[name[cls]._parse_relationships, parameter[name[row], name[reader].line_num]]
keyword[def] identifier[run] ( identifier[cls] , identifier[row] , identifier[reader] ): literal[string] identifier[cls] . identifier[_parse_keys] ( identifier[row] , identifier[reader] . identifier[line_num] ) identifier[cls] . identifier[_parse_relationships] ( identifier[row] , identifier[reader] . identifier[line_num] )
def run(cls, row, reader): """ Invoke the CSV parser on an individual row The row should already be a dict from the CSV reader. The reader is passed in so we can easily reference the CSV document headers & line number when generating errors. """ cls._parse_keys(row, reader.line_num) cls._parse_relationships(row, reader.line_num)
def delete(self, **kwargs):
    """Deletes a member from a license pool

    You need to be careful with this method. When you use it, and
    it succeeds on the remote BIG-IP, the configuration of the BIG-IP
    will be reloaded. During this process, you will not be able to
    access the REST interface.

    This method overrides the Resource class's method because it requires
    that extra json kwargs be supplied. This is not a behavior that is
    part of the normal Resource class's delete method.

    :param kwargs: keyword arguments forwarded as the JSON body of the
        DELETE request; ``id`` (a version-4 UUID string) identifies the
        member and is derived from the resource URI when omitted;
        ``force`` (default True) skips the generation check.
    :return: None. On HTTP 200 the instance's state is replaced with
        ``{'deleted': True}``.
    """
    if 'id' not in kwargs:
        # BIG-IQ requires that you provide the ID of the members to revoke
        # a license from. This ID is already part of the deletion URL though.
        # Therefore, if you do not provide it, we enumerate it for you.
        delete_uri = self._meta_data['uri']
        if delete_uri.endswith('/'):
            delete_uri = delete_uri[0:-1]
        kwargs['id'] = os.path.basename(delete_uri)

    # Validate that the supplied ID is a well-formed version-4 UUID.
    # NOTE(review): a malformed hex string makes uuid.UUID raise
    # ValueError before the F5SDKError below — callers catching only
    # F5SDKError will miss that case; confirm whether that is intended.
    uid = uuid.UUID(kwargs['id'], version=4)
    if uid.hex != kwargs['id'].replace('-', ''):
        raise F5SDKError(
            "The specified ID is invalid"
        )

    # Split transport-level options out of kwargs and sanitize the rest
    # into the JSON request body.
    requests_params = self._handle_requests_params(kwargs)
    kwargs = self._check_for_python_keywords(kwargs)
    kwargs = self._prepare_request_json(kwargs)
    delete_uri = self._meta_data['uri']
    session = self._meta_data['bigip']._meta_data['icr_session']

    # Check the generation for match before delete
    force = self._check_force_arg(kwargs.pop('force', True))
    if not force:
        self._check_generation()
    response = session.delete(delete_uri, json=kwargs, **requests_params)
    if response.status_code == 200:
        self.__dict__ = {'deleted': True}
    # This sleep is necessary to prevent BIG-IQ from being able to remove
    # a license. It happens in certain cases that assignments can be revoked
    # (and license deletion started) too quickly. Therefore, we must introduce
    # an artificial delay here to prevent revoking from returning before
    # BIG-IQ would be ready to remove the license.
    time.sleep(1)
def function[delete, parameter[self]]: constant[Deletes a member from a license pool You need to be careful with this method. When you use it, and it succeeds on the remote BIG-IP, the configuration of the BIG-IP will be reloaded. During this process, you will not be able to access the REST interface. This method overrides the Resource class's method because it requires that extra json kwargs be supplied. This is not a behavior that is part of the normal Resource class's delete method. :param kwargs: :return: ] if compare[constant[id] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] variable[delete_uri] assign[=] call[name[self]._meta_data][constant[uri]] if call[name[delete_uri].endswith, parameter[constant[/]]] begin[:] variable[delete_uri] assign[=] call[name[delete_uri]][<ast.Slice object at 0x7da20e960af0>] call[name[kwargs]][constant[id]] assign[=] call[name[os].path.basename, parameter[name[delete_uri]]] variable[uid] assign[=] call[name[uuid].UUID, parameter[call[name[kwargs]][constant[id]]]] if compare[name[uid].hex not_equal[!=] call[call[name[kwargs]][constant[id]].replace, parameter[constant[-], constant[]]]] begin[:] <ast.Raise object at 0x7da1b151a920> variable[requests_params] assign[=] call[name[self]._handle_requests_params, parameter[name[kwargs]]] variable[kwargs] assign[=] call[name[self]._check_for_python_keywords, parameter[name[kwargs]]] variable[kwargs] assign[=] call[name[self]._prepare_request_json, parameter[name[kwargs]]] variable[delete_uri] assign[=] call[name[self]._meta_data][constant[uri]] variable[session] assign[=] call[call[name[self]._meta_data][constant[bigip]]._meta_data][constant[icr_session]] variable[force] assign[=] call[name[self]._check_force_arg, parameter[call[name[kwargs].pop, parameter[constant[force], constant[True]]]]] if <ast.UnaryOp object at 0x7da1b15197b0> begin[:] call[name[self]._check_generation, parameter[]] variable[response] assign[=] call[name[session].delete, parameter[name[delete_uri]]] if 
compare[name[response].status_code equal[==] constant[200]] begin[:] name[self].__dict__ assign[=] dictionary[[<ast.Constant object at 0x7da204347e20>], [<ast.Constant object at 0x7da2043449a0>]] call[name[time].sleep, parameter[constant[1]]]
keyword[def] identifier[delete] ( identifier[self] ,** identifier[kwargs] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[delete_uri] = identifier[self] . identifier[_meta_data] [ literal[string] ] keyword[if] identifier[delete_uri] . identifier[endswith] ( literal[string] ): identifier[delete_uri] = identifier[delete_uri] [ literal[int] :- literal[int] ] identifier[kwargs] [ literal[string] ]= identifier[os] . identifier[path] . identifier[basename] ( identifier[delete_uri] ) identifier[uid] = identifier[uuid] . identifier[UUID] ( identifier[kwargs] [ literal[string] ], identifier[version] = literal[int] ) keyword[if] identifier[uid] . identifier[hex] != identifier[kwargs] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] ): keyword[raise] identifier[F5SDKError] ( literal[string] ) identifier[requests_params] = identifier[self] . identifier[_handle_requests_params] ( identifier[kwargs] ) identifier[kwargs] = identifier[self] . identifier[_check_for_python_keywords] ( identifier[kwargs] ) identifier[kwargs] = identifier[self] . identifier[_prepare_request_json] ( identifier[kwargs] ) identifier[delete_uri] = identifier[self] . identifier[_meta_data] [ literal[string] ] identifier[session] = identifier[self] . identifier[_meta_data] [ literal[string] ]. identifier[_meta_data] [ literal[string] ] identifier[force] = identifier[self] . identifier[_check_force_arg] ( identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] )) keyword[if] keyword[not] identifier[force] : identifier[self] . identifier[_check_generation] () identifier[response] = identifier[session] . identifier[delete] ( identifier[delete_uri] , identifier[json] = identifier[kwargs] ,** identifier[requests_params] ) keyword[if] identifier[response] . identifier[status_code] == literal[int] : identifier[self] . identifier[__dict__] ={ literal[string] : keyword[True] } identifier[time] . 
identifier[sleep] ( literal[int] )
def delete(self, **kwargs): """Deletes a member from a license pool You need to be careful with this method. When you use it, and it succeeds on the remote BIG-IP, the configuration of the BIG-IP will be reloaded. During this process, you will not be able to access the REST interface. This method overrides the Resource class's method because it requires that extra json kwargs be supplied. This is not a behavior that is part of the normal Resource class's delete method. :param kwargs: :return: """ if 'id' not in kwargs: # BIG-IQ requires that you provide the ID of the members to revoke # a license from. This ID is already part of the deletion URL though. # Therefore, if you do not provide it, we enumerate it for you. delete_uri = self._meta_data['uri'] if delete_uri.endswith('/'): delete_uri = delete_uri[0:-1] kwargs['id'] = os.path.basename(delete_uri) # depends on [control=['if'], data=[]] uid = uuid.UUID(kwargs['id'], version=4) if uid.hex != kwargs['id'].replace('-', ''): raise F5SDKError('The specified ID is invalid') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kwargs']] requests_params = self._handle_requests_params(kwargs) kwargs = self._check_for_python_keywords(kwargs) kwargs = self._prepare_request_json(kwargs) delete_uri = self._meta_data['uri'] session = self._meta_data['bigip']._meta_data['icr_session'] # Check the generation for match before delete force = self._check_force_arg(kwargs.pop('force', True)) if not force: self._check_generation() # depends on [control=['if'], data=[]] response = session.delete(delete_uri, json=kwargs, **requests_params) if response.status_code == 200: self.__dict__ = {'deleted': True} # depends on [control=['if'], data=[]] # This sleep is necessary to prevent BIG-IQ from being able to remove # a license. It happens in certain cases that assignments can be revoked # (and license deletion started) too quickly. 
Therefore, we must introduce # an artificial delay here to prevent revoking from returning before # BIG-IQ would be ready to remove the license. time.sleep(1)
def handle(client_message, handle_event_member=None, handle_event_member_list=None, handle_event_member_attribute_change=None, to_object=None):
    """Dispatch a cluster-membership event message to the matching callback.

    Decodes ``client_message`` according to its message type and invokes
    the corresponding handler, if one was supplied. Field reads below are
    order-sensitive: they must match the wire encoding exactly.
    """
    message_type = client_message.get_message_type()
    if message_type == EVENT_MEMBER and handle_event_member is not None:
        # Single-member event: member descriptor followed by an int event type.
        member = MemberCodec.decode(client_message, to_object)
        event_type = client_message.read_int()
        handle_event_member(member=member, event_type=event_type)
    if message_type == EVENT_MEMBERLIST and handle_event_member_list is not None:
        # Member-list event: a count, then that many member descriptors.
        members_size = client_message.read_int()
        members = []
        for _ in range(0, members_size):
            members_item = MemberCodec.decode(client_message, to_object)
            members.append(members_item)
        handle_event_member_list(members=members)
    if message_type == EVENT_MEMBERATTRIBUTECHANGE and handle_event_member_attribute_change is not None:
        # Attribute-change event: uuid, key, operation type, then an
        # optional value (preceded by a "value is absent" boolean flag).
        uuid = client_message.read_str()
        key = client_message.read_str()
        operation_type = client_message.read_int()
        value = None
        if not client_message.read_bool():
            value = client_message.read_str()
        handle_event_member_attribute_change(uuid=uuid, key=key, operation_type=operation_type, value=value)
def function[handle, parameter[client_message, handle_event_member, handle_event_member_list, handle_event_member_attribute_change, to_object]]: constant[ Event handler ] variable[message_type] assign[=] call[name[client_message].get_message_type, parameter[]] if <ast.BoolOp object at 0x7da1b16a4070> begin[:] variable[member] assign[=] call[name[MemberCodec].decode, parameter[name[client_message], name[to_object]]] variable[event_type] assign[=] call[name[client_message].read_int, parameter[]] call[name[handle_event_member], parameter[]] if <ast.BoolOp object at 0x7da1b16a55a0> begin[:] variable[members_size] assign[=] call[name[client_message].read_int, parameter[]] variable[members] assign[=] list[[]] for taget[name[_]] in starred[call[name[range], parameter[constant[0], name[members_size]]]] begin[:] variable[members_item] assign[=] call[name[MemberCodec].decode, parameter[name[client_message], name[to_object]]] call[name[members].append, parameter[name[members_item]]] call[name[handle_event_member_list], parameter[]] if <ast.BoolOp object at 0x7da1b16a6020> begin[:] variable[uuid] assign[=] call[name[client_message].read_str, parameter[]] variable[key] assign[=] call[name[client_message].read_str, parameter[]] variable[operation_type] assign[=] call[name[client_message].read_int, parameter[]] variable[value] assign[=] constant[None] if <ast.UnaryOp object at 0x7da1b16a4670> begin[:] variable[value] assign[=] call[name[client_message].read_str, parameter[]] call[name[handle_event_member_attribute_change], parameter[]]
keyword[def] identifier[handle] ( identifier[client_message] , identifier[handle_event_member] = keyword[None] , identifier[handle_event_member_list] = keyword[None] , identifier[handle_event_member_attribute_change] = keyword[None] , identifier[to_object] = keyword[None] ): literal[string] identifier[message_type] = identifier[client_message] . identifier[get_message_type] () keyword[if] identifier[message_type] == identifier[EVENT_MEMBER] keyword[and] identifier[handle_event_member] keyword[is] keyword[not] keyword[None] : identifier[member] = identifier[MemberCodec] . identifier[decode] ( identifier[client_message] , identifier[to_object] ) identifier[event_type] = identifier[client_message] . identifier[read_int] () identifier[handle_event_member] ( identifier[member] = identifier[member] , identifier[event_type] = identifier[event_type] ) keyword[if] identifier[message_type] == identifier[EVENT_MEMBERLIST] keyword[and] identifier[handle_event_member_list] keyword[is] keyword[not] keyword[None] : identifier[members_size] = identifier[client_message] . identifier[read_int] () identifier[members] =[] keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[members_size] ): identifier[members_item] = identifier[MemberCodec] . identifier[decode] ( identifier[client_message] , identifier[to_object] ) identifier[members] . identifier[append] ( identifier[members_item] ) identifier[handle_event_member_list] ( identifier[members] = identifier[members] ) keyword[if] identifier[message_type] == identifier[EVENT_MEMBERATTRIBUTECHANGE] keyword[and] identifier[handle_event_member_attribute_change] keyword[is] keyword[not] keyword[None] : identifier[uuid] = identifier[client_message] . identifier[read_str] () identifier[key] = identifier[client_message] . identifier[read_str] () identifier[operation_type] = identifier[client_message] . identifier[read_int] () identifier[value] = keyword[None] keyword[if] keyword[not] identifier[client_message] . 
identifier[read_bool] (): identifier[value] = identifier[client_message] . identifier[read_str] () identifier[handle_event_member_attribute_change] ( identifier[uuid] = identifier[uuid] , identifier[key] = identifier[key] , identifier[operation_type] = identifier[operation_type] , identifier[value] = identifier[value] )
def handle(client_message, handle_event_member=None, handle_event_member_list=None, handle_event_member_attribute_change=None, to_object=None): """ Event handler """ message_type = client_message.get_message_type() if message_type == EVENT_MEMBER and handle_event_member is not None: member = MemberCodec.decode(client_message, to_object) event_type = client_message.read_int() handle_event_member(member=member, event_type=event_type) # depends on [control=['if'], data=[]] if message_type == EVENT_MEMBERLIST and handle_event_member_list is not None: members_size = client_message.read_int() members = [] for _ in range(0, members_size): members_item = MemberCodec.decode(client_message, to_object) members.append(members_item) # depends on [control=['for'], data=[]] handle_event_member_list(members=members) # depends on [control=['if'], data=[]] if message_type == EVENT_MEMBERATTRIBUTECHANGE and handle_event_member_attribute_change is not None: uuid = client_message.read_str() key = client_message.read_str() operation_type = client_message.read_int() value = None if not client_message.read_bool(): value = client_message.read_str() # depends on [control=['if'], data=[]] handle_event_member_attribute_change(uuid=uuid, key=key, operation_type=operation_type, value=value) # depends on [control=['if'], data=[]]
def file_dict(*packages, **kwargs):  # pylint: disable=unused-argument
    '''
    List the files that belong to a package, grouped by package. Not
    specifying any packages will return a list of _every_ file on the
    system's package database (not generally recommended).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.file_dict httpd
        salt '*' pkg.file_dict httpd postfix
        salt '*' pkg.file_dict

    Returns a dict with two keys: ``errors`` (messages reported by opkg)
    and ``packages`` (mapping of package name to its file list).
    '''
    errors = []
    ret = {}
    if not packages:
        packages = list(list_pkgs().keys())

    for package in packages:
        files = []
        # Build the command fresh per package instead of copying a template.
        cmd = ['opkg', 'files', package]
        out = __salt__['cmd.run_all'](cmd,
                                      output_loglevel='trace',
                                      python_shell=False)
        for line in out['stdout'].splitlines():
            if line.startswith('/'):
                files.append(line)
            elif line.startswith(' * '):
                # opkg reports errors as ' * message'; record and stop
                # scanning this package's output.
                errors.append(line[3:])
                break
            # Any other line (headers, blanks) is simply skipped; the
            # original dead `else: continue` branch has been removed.
        if files:
            ret[package] = files
    return {'errors': errors, 'packages': ret}
def function[file_dict, parameter[]]: constant[ List the files that belong to a package, grouped by package. Not specifying any packages will return a list of _every_ file on the system's package database (not generally recommended). CLI Examples: .. code-block:: bash salt '*' pkg.file_list httpd salt '*' pkg.file_list httpd postfix salt '*' pkg.file_list ] variable[errors] assign[=] list[[]] variable[ret] assign[=] dictionary[[], []] variable[cmd_files] assign[=] list[[<ast.Constant object at 0x7da18bc73c70>, <ast.Constant object at 0x7da18bc719c0>]] if <ast.UnaryOp object at 0x7da18bc73eb0> begin[:] variable[packages] assign[=] call[name[list], parameter[call[call[name[list_pkgs], parameter[]].keys, parameter[]]]] for taget[name[package]] in starred[name[packages]] begin[:] variable[files] assign[=] list[[]] variable[cmd] assign[=] call[name[cmd_files]][<ast.Slice object at 0x7da18bc71720>] call[name[cmd].append, parameter[name[package]]] variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]] for taget[name[line]] in starred[call[call[name[out]][constant[stdout]].splitlines, parameter[]]] begin[:] if call[name[line].startswith, parameter[constant[/]]] begin[:] call[name[files].append, parameter[name[line]]] if name[files] begin[:] call[name[ret]][name[package]] assign[=] name[files] return[dictionary[[<ast.Constant object at 0x7da18bc73dc0>, <ast.Constant object at 0x7da18bc72ad0>], [<ast.Name object at 0x7da18bc73040>, <ast.Name object at 0x7da18bc72590>]]]
keyword[def] identifier[file_dict] (* identifier[packages] ,** identifier[kwargs] ): literal[string] identifier[errors] =[] identifier[ret] ={} identifier[cmd_files] =[ literal[string] , literal[string] ] keyword[if] keyword[not] identifier[packages] : identifier[packages] = identifier[list] ( identifier[list_pkgs] (). identifier[keys] ()) keyword[for] identifier[package] keyword[in] identifier[packages] : identifier[files] =[] identifier[cmd] = identifier[cmd_files] [:] identifier[cmd] . identifier[append] ( identifier[package] ) identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[output_loglevel] = literal[string] , identifier[python_shell] = keyword[False] ) keyword[for] identifier[line] keyword[in] identifier[out] [ literal[string] ]. identifier[splitlines] (): keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[files] . identifier[append] ( identifier[line] ) keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ): identifier[errors] . identifier[append] ( identifier[line] [ literal[int] :]) keyword[break] keyword[else] : keyword[continue] keyword[if] identifier[files] : identifier[ret] [ identifier[package] ]= identifier[files] keyword[return] { literal[string] : identifier[errors] , literal[string] : identifier[ret] }
def file_dict(*packages, **kwargs): # pylint: disable=unused-argument "\n List the files that belong to a package, grouped by package. Not\n specifying any packages will return a list of _every_ file on the system's\n package database (not generally recommended).\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt '*' pkg.file_list httpd\n salt '*' pkg.file_list httpd postfix\n salt '*' pkg.file_list\n " errors = [] ret = {} cmd_files = ['opkg', 'files'] if not packages: packages = list(list_pkgs().keys()) # depends on [control=['if'], data=[]] for package in packages: files = [] cmd = cmd_files[:] cmd.append(package) out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) for line in out['stdout'].splitlines(): if line.startswith('/'): files.append(line) # depends on [control=['if'], data=[]] elif line.startswith(' * '): errors.append(line[3:]) break # depends on [control=['if'], data=[]] else: continue # depends on [control=['for'], data=['line']] if files: ret[package] = files # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['package']] return {'errors': errors, 'packages': ret}
def get_associated(self, klass_node, level):
    """Yield the class nodes associated with *klass_node*.

    Yields nothing when *level* is 0. Instances are resolved to their
    proxied class; only ClassDef nodes accepted by ``show_node`` are
    produced.
    """
    if level == 0:
        return
    attr_types = list(klass_node.instance_attrs_type.values())
    local_types = list(klass_node.locals_type.values())
    for candidates in attr_types + local_types:
        for node in candidates:
            if isinstance(node, astroid.Instance):
                node = node._proxied
            if isinstance(node, astroid.ClassDef) and self.show_node(node):
                yield node
def function[get_associated, parameter[self, klass_node, level]]: constant[return associated nodes of a class node] if compare[name[level] equal[==] constant[0]] begin[:] return[None] for taget[name[association_nodes]] in starred[binary_operation[call[name[list], parameter[call[name[klass_node].instance_attrs_type.values, parameter[]]]] + call[name[list], parameter[call[name[klass_node].locals_type.values, parameter[]]]]]] begin[:] for taget[name[node]] in starred[name[association_nodes]] begin[:] if call[name[isinstance], parameter[name[node], name[astroid].Instance]] begin[:] variable[node] assign[=] name[node]._proxied if <ast.UnaryOp object at 0x7da1b0314730> begin[:] continue <ast.Yield object at 0x7da1b03399c0>
keyword[def] identifier[get_associated] ( identifier[self] , identifier[klass_node] , identifier[level] ): literal[string] keyword[if] identifier[level] == literal[int] : keyword[return] keyword[for] identifier[association_nodes] keyword[in] identifier[list] ( identifier[klass_node] . identifier[instance_attrs_type] . identifier[values] ())+ identifier[list] ( identifier[klass_node] . identifier[locals_type] . identifier[values] () ): keyword[for] identifier[node] keyword[in] identifier[association_nodes] : keyword[if] identifier[isinstance] ( identifier[node] , identifier[astroid] . identifier[Instance] ): identifier[node] = identifier[node] . identifier[_proxied] keyword[if] keyword[not] ( identifier[isinstance] ( identifier[node] , identifier[astroid] . identifier[ClassDef] ) keyword[and] identifier[self] . identifier[show_node] ( identifier[node] )): keyword[continue] keyword[yield] identifier[node]
def get_associated(self, klass_node, level): """return associated nodes of a class node""" if level == 0: return # depends on [control=['if'], data=[]] for association_nodes in list(klass_node.instance_attrs_type.values()) + list(klass_node.locals_type.values()): for node in association_nodes: if isinstance(node, astroid.Instance): node = node._proxied # depends on [control=['if'], data=[]] if not (isinstance(node, astroid.ClassDef) and self.show_node(node)): continue # depends on [control=['if'], data=[]] yield node # depends on [control=['for'], data=['node']] # depends on [control=['for'], data=['association_nodes']]
def _get_names(names, types): """ Get names, bearing in mind that there might be no name, no type, and that the `:` separator might be wrongly used. """ if types == "": try: names, types = names.split(":") except: pass return names.split(","), types
def function[_get_names, parameter[names, types]]: constant[ Get names, bearing in mind that there might be no name, no type, and that the `:` separator might be wrongly used. ] if compare[name[types] equal[==] constant[]] begin[:] <ast.Try object at 0x7da2041d88b0> return[tuple[[<ast.Call object at 0x7da2041d9de0>, <ast.Name object at 0x7da2041d8d30>]]]
keyword[def] identifier[_get_names] ( identifier[names] , identifier[types] ): literal[string] keyword[if] identifier[types] == literal[string] : keyword[try] : identifier[names] , identifier[types] = identifier[names] . identifier[split] ( literal[string] ) keyword[except] : keyword[pass] keyword[return] identifier[names] . identifier[split] ( literal[string] ), identifier[types]
def _get_names(names, types): """ Get names, bearing in mind that there might be no name, no type, and that the `:` separator might be wrongly used. """ if types == '': try: (names, types) = names.split(':') # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['types']] return (names.split(','), types)
def format_unitary(mat, decimals=None):
    """Format a backend unitary for presentation to the Qiskit user.

    Args:
        mat (list[list]): a list of list of [re, im] complex numbers
        decimals (int): the number of decimals in the statevector.
            If None, no rounding is done.

    Returns:
        list[list[complex]]: a matrix of complex numbers
    """
    dim = len(mat)
    out = np.zeros((dim, dim), dtype=complex)
    for row_index, row in enumerate(mat):
        out[row_index] = format_statevector(row, decimals)
    return out
def function[format_unitary, parameter[mat, decimals]]: constant[Format unitary coming from the backend to present to the Qiskit user. Args: mat (list[list]): a list of list of [re, im] complex numbers decimals (int): the number of decimals in the statevector. If None, no rounding is done. Returns: list[list[complex]]: a matrix of complex numbers ] variable[num_basis] assign[=] call[name[len], parameter[name[mat]]] variable[mat_complex] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b0531e10>, <ast.Name object at 0x7da1b0533760>]]]] for taget[tuple[[<ast.Name object at 0x7da1b0532980>, <ast.Name object at 0x7da1b05330d0>]]] in starred[call[name[enumerate], parameter[name[mat]]]] begin[:] call[name[mat_complex]][name[i]] assign[=] call[name[format_statevector], parameter[name[vec], name[decimals]]] return[name[mat_complex]]
keyword[def] identifier[format_unitary] ( identifier[mat] , identifier[decimals] = keyword[None] ): literal[string] identifier[num_basis] = identifier[len] ( identifier[mat] ) identifier[mat_complex] = identifier[np] . identifier[zeros] (( identifier[num_basis] , identifier[num_basis] ), identifier[dtype] = identifier[complex] ) keyword[for] identifier[i] , identifier[vec] keyword[in] identifier[enumerate] ( identifier[mat] ): identifier[mat_complex] [ identifier[i] ]= identifier[format_statevector] ( identifier[vec] , identifier[decimals] ) keyword[return] identifier[mat_complex]
def format_unitary(mat, decimals=None): """Format unitary coming from the backend to present to the Qiskit user. Args: mat (list[list]): a list of list of [re, im] complex numbers decimals (int): the number of decimals in the statevector. If None, no rounding is done. Returns: list[list[complex]]: a matrix of complex numbers """ num_basis = len(mat) mat_complex = np.zeros((num_basis, num_basis), dtype=complex) for (i, vec) in enumerate(mat): mat_complex[i] = format_statevector(vec, decimals) # depends on [control=['for'], data=[]] return mat_complex
def messages(fp, key='@message'):
    """
    Read lines of UTF-8 from the file-like object given in ``fp``, with the
    same fault-tolerance as :function:`tagalog.io.lines`, but instead yield
    dicts with the line data stored in the key given by ``key`` (default:
    "@message").
    """
    for raw in lines(fp):
        yield {key: raw.rstrip('\n')}
def function[messages, parameter[fp, key]]: constant[ Read lines of UTF-8 from the file-like object given in ``fp``, with the same fault-tolerance as :function:`tagalog.io.lines`, but instead yield dicts with the line data stored in the key given by ``key`` (default: "@message"). ] for taget[name[line]] in starred[call[name[lines], parameter[name[fp]]]] begin[:] variable[txt] assign[=] call[name[line].rstrip, parameter[constant[ ]]] <ast.Yield object at 0x7da18bccb5e0>
keyword[def] identifier[messages] ( identifier[fp] , identifier[key] = literal[string] ): literal[string] keyword[for] identifier[line] keyword[in] identifier[lines] ( identifier[fp] ): identifier[txt] = identifier[line] . identifier[rstrip] ( literal[string] ) keyword[yield] { identifier[key] : identifier[txt] }
def messages(fp, key='@message'): """ Read lines of UTF-8 from the file-like object given in ``fp``, with the same fault-tolerance as :function:`tagalog.io.lines`, but instead yield dicts with the line data stored in the key given by ``key`` (default: "@message"). """ for line in lines(fp): txt = line.rstrip('\n') yield {key: txt} # depends on [control=['for'], data=['line']]
def com_google_fonts_check_metadata_nameid_copyright(ttFont, font_metadata):
  """Copyright field for this font on METADATA.pb matches
     all copyright notice entries on the name table ?"""
  # Compare every COPYRIGHT_NOTICE entry on the name table against the
  # copyright declared in METADATA.pb; FAIL per mismatching entry.
  mismatches = 0
  for record in ttFont['name'].names:
    if record.nameID != NameID.COPYRIGHT_NOTICE:
      continue
    decoded = record.string.decode(record.getEncoding())
    if decoded == font_metadata.copyright:
      continue
    mismatches += 1
    yield FAIL, ("Copyright field for this font on METADATA.pb ('{}')"
                 " differs from a copyright notice entry"
                 " on the name table:"
                 " '{}'").format(font_metadata.copyright, decoded)
  if mismatches == 0:
    yield PASS, ("Copyright field for this font on METADATA.pb matches"
                 " copyright notice entries on the name table.")
def function[com_google_fonts_check_metadata_nameid_copyright, parameter[ttFont, font_metadata]]: constant[Copyright field for this font on METADATA.pb matches all copyright notice entries on the name table ?] variable[failed] assign[=] constant[False] for taget[name[nameRecord]] in starred[call[name[ttFont]][constant[name]].names] begin[:] variable[string] assign[=] call[name[nameRecord].string.decode, parameter[call[name[nameRecord].getEncoding, parameter[]]]] if <ast.BoolOp object at 0x7da1b1213e50> begin[:] variable[failed] assign[=] constant[True] <ast.Yield object at 0x7da1b12f1f00> if <ast.UnaryOp object at 0x7da1b12f25f0> begin[:] <ast.Yield object at 0x7da1b12f2350>
keyword[def] identifier[com_google_fonts_check_metadata_nameid_copyright] ( identifier[ttFont] , identifier[font_metadata] ): literal[string] identifier[failed] = keyword[False] keyword[for] identifier[nameRecord] keyword[in] identifier[ttFont] [ literal[string] ]. identifier[names] : identifier[string] = identifier[nameRecord] . identifier[string] . identifier[decode] ( identifier[nameRecord] . identifier[getEncoding] ()) keyword[if] identifier[nameRecord] . identifier[nameID] == identifier[NameID] . identifier[COPYRIGHT_NOTICE] keyword[and] identifier[string] != identifier[font_metadata] . identifier[copyright] : identifier[failed] = keyword[True] keyword[yield] identifier[FAIL] ,( literal[string] literal[string] literal[string] literal[string] ). identifier[format] ( identifier[font_metadata] . identifier[copyright] , identifier[string] ) keyword[if] keyword[not] identifier[failed] : keyword[yield] identifier[PASS] ,( literal[string] literal[string] )
def com_google_fonts_check_metadata_nameid_copyright(ttFont, font_metadata): """Copyright field for this font on METADATA.pb matches all copyright notice entries on the name table ?""" failed = False for nameRecord in ttFont['name'].names: string = nameRecord.string.decode(nameRecord.getEncoding()) if nameRecord.nameID == NameID.COPYRIGHT_NOTICE and string != font_metadata.copyright: failed = True yield (FAIL, "Copyright field for this font on METADATA.pb ('{}') differs from a copyright notice entry on the name table: '{}'".format(font_metadata.copyright, string)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['nameRecord']] if not failed: yield (PASS, 'Copyright field for this font on METADATA.pb matches copyright notice entries on the name table.') # depends on [control=['if'], data=[]]
def cpp_flag(compiler):
    """Return the -std=c++[0x/11/14] compiler flag.

    Standards are probed from newest to oldest, so c++14 is preferred over
    c++0x/11 when the compiler supports it.

    Raises:
        RuntimeError: if the compiler accepts none of the probed standards.
    """
    for std in ('-std=c++14', '-std=c++11', '-std=c++0x'):
        if has_flag(compiler, [std]):
            return std
    raise RuntimeError(
        'Unsupported compiler -- at least C++0x support '
        'is needed!'
    )
def function[cpp_flag, parameter[compiler]]: constant[Return the -std=c++[0x/11/14] compiler flag. The c++14 is preferred over c++0x/11 (when it is available). ] variable[standards] assign[=] list[[<ast.Constant object at 0x7da20c7c98a0>, <ast.Constant object at 0x7da20c7cb130>, <ast.Constant object at 0x7da20c7ca440>]] for taget[name[standard]] in starred[name[standards]] begin[:] if call[name[has_flag], parameter[name[compiler], list[[<ast.Name object at 0x7da20c7c8af0>]]]] begin[:] return[name[standard]] <ast.Raise object at 0x7da20c7cb8b0>
keyword[def] identifier[cpp_flag] ( identifier[compiler] ): literal[string] identifier[standards] =[ literal[string] , literal[string] , literal[string] ] keyword[for] identifier[standard] keyword[in] identifier[standards] : keyword[if] identifier[has_flag] ( identifier[compiler] ,[ identifier[standard] ]): keyword[return] identifier[standard] keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] )
def cpp_flag(compiler): """Return the -std=c++[0x/11/14] compiler flag. The c++14 is preferred over c++0x/11 (when it is available). """ standards = ['-std=c++14', '-std=c++11', '-std=c++0x'] for standard in standards: if has_flag(compiler, [standard]): return standard # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['standard']] raise RuntimeError('Unsupported compiler -- at least C++0x support is needed!')
def _node_add_without_peer_leaflist(self, node_sum, child_other):
    '''_node_add_without_peer_leaflist

    Low-level api: Apply delta child_other to node_sum when there is no
    peer of child_other can be found under node_sum. child_other is a
    leaf-list node. Element node_sum will be modified during the process.

    Parameters
    ----------

    node_sum : `Element`
        A config node in a config tree.

    child_other : `Element`
        A child of a config node in another config tree. This child has
        no peer under node_sum.

    Returns
    -------

    None
        There is no return of this method.
    '''
    # Schema node for the leaf-list; needed to check 'ordered-by user'.
    s_node = self.device.get_schema_node(child_other)
    # Deep copy so the tree that child_other belongs to is never mutated.
    e = deepcopy(child_other)
    scope = node_sum.getchildren()
    # Existing entries of the same leaf-list currently under node_sum.
    siblings = self._get_sequence(scope, child_other.tag, node_sum)
    if s_node.get('ordered-by') == 'user' and \
       child_other.get(insert_tag) is not None:
        # 'ordered-by user' leaf-list with an explicit insert position
        # (presumably NETCONF/YANG 'insert'/'value' attributes -- confirm
        # against the insert_tag/value_tag definitions).
        if child_other.get(insert_tag) == 'first':
            # Insert before the first existing entry; append if the
            # leaf-list is currently empty.
            if siblings:
                siblings[0].addprevious(self._del_attrib(e))
            else:
                node_sum.append(self._del_attrib(e))
        elif child_other.get(insert_tag) == 'last':
            # Insert after the last existing entry; append if empty.
            if siblings:
                siblings[-1].addnext(self._del_attrib(e))
            else:
                node_sum.append(self._del_attrib(e))
        elif child_other.get(insert_tag) == 'before':
            # 'before' requires a 'value' attribute naming the anchor
            # entry (matched by element text below).
            if child_other.get(value_tag) is None:
                _inserterror('before', self.device.get_xpath(child_other),
                             'value')
            siblings = node_sum.findall(child_other.tag)
            sibling = [s for s in siblings
                       if s.text == child_other.get(value_tag)]
            if not sibling:
                # Anchor entry not found under node_sum.
                path = self.device.get_xpath(child_other)
                value = child_other.get(value_tag)
                _inserterror('before', path, 'value', value)
            sibling[0].addprevious(self._del_attrib(e))
        elif child_other.get(insert_tag) == 'after':
            # 'after' mirrors 'before' but inserts on the other side of
            # the anchor entry.
            if child_other.get(value_tag) is None:
                _inserterror('after', self.device.get_xpath(child_other),
                             'value')
            siblings = node_sum.findall(child_other.tag)
            sibling = [s for s in siblings
                       if s.text == child_other.get(value_tag)]
            if not sibling:
                path = self.device.get_xpath(child_other)
                value = child_other.get(value_tag)
                _inserterror('after', path, 'value', value)
            sibling[0].addnext(self._del_attrib(e))
    else:
        # No user-specified position: append after the last existing
        # entry, or directly under node_sum if none exist yet.
        if siblings:
            siblings[-1].addnext(self._del_attrib(e))
        else:
            node_sum.append(self._del_attrib(e))
def function[_node_add_without_peer_leaflist, parameter[self, node_sum, child_other]]: constant[_node_add_without_peer_leaflist Low-level api: Apply delta child_other to node_sum when there is no peer of child_other can be found under node_sum. child_other is a leaf-list node. Element node_sum will be modified during the process. Parameters ---------- node_sum : `Element` A config node in a config tree. child_other : `Element` A child of a config node in another config tree. This child has no peer under node_sum. Returns ------- None There is no return of this method. ] variable[s_node] assign[=] call[name[self].device.get_schema_node, parameter[name[child_other]]] variable[e] assign[=] call[name[deepcopy], parameter[name[child_other]]] variable[scope] assign[=] call[name[node_sum].getchildren, parameter[]] variable[siblings] assign[=] call[name[self]._get_sequence, parameter[name[scope], name[child_other].tag, name[node_sum]]] if <ast.BoolOp object at 0x7da1b2661360> begin[:] if compare[call[name[child_other].get, parameter[name[insert_tag]]] equal[==] constant[first]] begin[:] if name[siblings] begin[:] call[call[name[siblings]][constant[0]].addprevious, parameter[call[name[self]._del_attrib, parameter[name[e]]]]]
keyword[def] identifier[_node_add_without_peer_leaflist] ( identifier[self] , identifier[node_sum] , identifier[child_other] ): literal[string] identifier[s_node] = identifier[self] . identifier[device] . identifier[get_schema_node] ( identifier[child_other] ) identifier[e] = identifier[deepcopy] ( identifier[child_other] ) identifier[scope] = identifier[node_sum] . identifier[getchildren] () identifier[siblings] = identifier[self] . identifier[_get_sequence] ( identifier[scope] , identifier[child_other] . identifier[tag] , identifier[node_sum] ) keyword[if] identifier[s_node] . identifier[get] ( literal[string] )== literal[string] keyword[and] identifier[child_other] . identifier[get] ( identifier[insert_tag] ) keyword[is] keyword[not] keyword[None] : keyword[if] identifier[child_other] . identifier[get] ( identifier[insert_tag] )== literal[string] : keyword[if] identifier[siblings] : identifier[siblings] [ literal[int] ]. identifier[addprevious] ( identifier[self] . identifier[_del_attrib] ( identifier[e] )) keyword[else] : identifier[node_sum] . identifier[append] ( identifier[self] . identifier[_del_attrib] ( identifier[e] )) keyword[elif] identifier[child_other] . identifier[get] ( identifier[insert_tag] )== literal[string] : keyword[if] identifier[siblings] : identifier[siblings] [- literal[int] ]. identifier[addnext] ( identifier[self] . identifier[_del_attrib] ( identifier[e] )) keyword[else] : identifier[node_sum] . identifier[append] ( identifier[self] . identifier[_del_attrib] ( identifier[e] )) keyword[elif] identifier[child_other] . identifier[get] ( identifier[insert_tag] )== literal[string] : keyword[if] identifier[child_other] . identifier[get] ( identifier[value_tag] ) keyword[is] keyword[None] : identifier[_inserterror] ( literal[string] , identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[child_other] ), literal[string] ) identifier[siblings] = identifier[node_sum] . identifier[findall] ( identifier[child_other] . 
identifier[tag] ) identifier[sibling] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[siblings] keyword[if] identifier[s] . identifier[text] == identifier[child_other] . identifier[get] ( identifier[value_tag] )] keyword[if] keyword[not] identifier[sibling] : identifier[path] = identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[child_other] ) identifier[value] = identifier[child_other] . identifier[get] ( identifier[value_tag] ) identifier[_inserterror] ( literal[string] , identifier[path] , literal[string] , identifier[value] ) identifier[sibling] [ literal[int] ]. identifier[addprevious] ( identifier[self] . identifier[_del_attrib] ( identifier[e] )) keyword[elif] identifier[child_other] . identifier[get] ( identifier[insert_tag] )== literal[string] : keyword[if] identifier[child_other] . identifier[get] ( identifier[value_tag] ) keyword[is] keyword[None] : identifier[_inserterror] ( literal[string] , identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[child_other] ), literal[string] ) identifier[siblings] = identifier[node_sum] . identifier[findall] ( identifier[child_other] . identifier[tag] ) identifier[sibling] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[siblings] keyword[if] identifier[s] . identifier[text] == identifier[child_other] . identifier[get] ( identifier[value_tag] )] keyword[if] keyword[not] identifier[sibling] : identifier[path] = identifier[self] . identifier[device] . identifier[get_xpath] ( identifier[child_other] ) identifier[value] = identifier[child_other] . identifier[get] ( identifier[value_tag] ) identifier[_inserterror] ( literal[string] , identifier[path] , literal[string] , identifier[value] ) identifier[sibling] [ literal[int] ]. identifier[addnext] ( identifier[self] . identifier[_del_attrib] ( identifier[e] )) keyword[else] : keyword[if] identifier[siblings] : identifier[siblings] [- literal[int] ]. identifier[addnext] ( identifier[self] . 
identifier[_del_attrib] ( identifier[e] )) keyword[else] : identifier[node_sum] . identifier[append] ( identifier[self] . identifier[_del_attrib] ( identifier[e] ))
def _node_add_without_peer_leaflist(self, node_sum, child_other): """_node_add_without_peer_leaflist Low-level api: Apply delta child_other to node_sum when there is no peer of child_other can be found under node_sum. child_other is a leaf-list node. Element node_sum will be modified during the process. Parameters ---------- node_sum : `Element` A config node in a config tree. child_other : `Element` A child of a config node in another config tree. This child has no peer under node_sum. Returns ------- None There is no return of this method. """ s_node = self.device.get_schema_node(child_other) e = deepcopy(child_other) scope = node_sum.getchildren() siblings = self._get_sequence(scope, child_other.tag, node_sum) if s_node.get('ordered-by') == 'user' and child_other.get(insert_tag) is not None: if child_other.get(insert_tag) == 'first': if siblings: siblings[0].addprevious(self._del_attrib(e)) # depends on [control=['if'], data=[]] else: node_sum.append(self._del_attrib(e)) # depends on [control=['if'], data=[]] elif child_other.get(insert_tag) == 'last': if siblings: siblings[-1].addnext(self._del_attrib(e)) # depends on [control=['if'], data=[]] else: node_sum.append(self._del_attrib(e)) # depends on [control=['if'], data=[]] elif child_other.get(insert_tag) == 'before': if child_other.get(value_tag) is None: _inserterror('before', self.device.get_xpath(child_other), 'value') # depends on [control=['if'], data=[]] siblings = node_sum.findall(child_other.tag) sibling = [s for s in siblings if s.text == child_other.get(value_tag)] if not sibling: path = self.device.get_xpath(child_other) value = child_other.get(value_tag) _inserterror('before', path, 'value', value) # depends on [control=['if'], data=[]] sibling[0].addprevious(self._del_attrib(e)) # depends on [control=['if'], data=[]] elif child_other.get(insert_tag) == 'after': if child_other.get(value_tag) is None: _inserterror('after', self.device.get_xpath(child_other), 'value') # depends on [control=['if'], 
data=[]] siblings = node_sum.findall(child_other.tag) sibling = [s for s in siblings if s.text == child_other.get(value_tag)] if not sibling: path = self.device.get_xpath(child_other) value = child_other.get(value_tag) _inserterror('after', path, 'value', value) # depends on [control=['if'], data=[]] sibling[0].addnext(self._del_attrib(e)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif siblings: siblings[-1].addnext(self._del_attrib(e)) # depends on [control=['if'], data=[]] else: node_sum.append(self._del_attrib(e))
def compute_best_path(local_asn, path1, path2):
    """Compares given paths and returns best path.

    Parameters:
        -`local_asn`: asn of local bgpspeaker
        -`path1`: first path to compare
        -`path2`: second path to compare

    Best path processing will involve following steps:
    1. Select a path with a reachable next hop.
    2. Select the path with the highest weight.
    3. If path weights are the same, select the path with the highest
       local preference value.
    4. Prefer locally originated routes (network routes, redistributed
       routes, or aggregated routes) over received routes.
    5. Select the route with the shortest AS-path length.
    6. If all paths have the same AS-path length, select the path based
       on origin: IGP is preferred over EGP; EGP is preferred over
       Incomplete.
    7. If the origins are the same, select the path with lowest MED value.
    8. If the paths have the same MED values, select the path learned via
       EBGP over one learned via IBGP.
    9. Select the route with the lowest IGP cost to the next hop.
    10. Select the route received from the peer with the lowest BGP
        router ID.
    11. Select the route received from the peer with the shorter
        CLUSTER_LIST length.

    Returns None if best-path among given paths cannot be computed else
    best path.
    Assumes paths from NC has source equal to None.
    """
    # Run the tie-breaking steps in order; the first step that picks a path
    # decides both the result and the reason.  Lambdas keep each comparison
    # lazy so later steps run only when earlier ones are inconclusive.
    steps = (
        (lambda: _cmp_by_reachable_nh(path1, path2), BPR_REACHABLE_NEXT_HOP),
        (lambda: _cmp_by_highest_wg(path1, path2), BPR_HIGHEST_WEIGHT),
        (lambda: _cmp_by_local_pref(path1, path2), BPR_LOCAL_PREF),
        (lambda: _cmp_by_local_origin(path1, path2), BPR_LOCAL_ORIGIN),
        (lambda: _cmp_by_aspath(path1, path2), BPR_ASPATH),
        (lambda: _cmp_by_origin(path1, path2), BPR_ORIGIN),
        (lambda: _cmp_by_med(path1, path2), BPR_MED),
        (lambda: _cmp_by_asn(local_asn, path1, path2), BPR_ASN),
        (lambda: _cmp_by_igp_cost(path1, path2), BPR_IGP_COST),
        (lambda: _cmp_by_router_id(local_asn, path1, path2), BPR_ROUTER_ID),
        (lambda: _cmp_by_cluster_list(path1, path2), BPR_CLUSTER_LIST),
    )
    for step, reason in steps:
        best_path = step()
        if best_path is not None:
            return best_path, reason
    # No step could discriminate between the paths.
    return None, BPR_UNKNOWN
def function[compute_best_path, parameter[local_asn, path1, path2]]: constant[Compares given paths and returns best path. Parameters: -`local_asn`: asn of local bgpspeaker -`path1`: first path to compare -`path2`: second path to compare Best path processing will involve following steps: 1. Select a path with a reachable next hop. 2. Select the path with the highest weight. 3. If path weights are the same, select the path with the highest local preference value. 4. Prefer locally originated routes (network routes, redistributed routes, or aggregated routes) over received routes. 5. Select the route with the shortest AS-path length. 6. If all paths have the same AS-path length, select the path based on origin: IGP is preferred over EGP; EGP is preferred over Incomplete. 7. If the origins are the same, select the path with lowest MED value. 8. If the paths have the same MED values, select the path learned via EBGP over one learned via IBGP. 9. Select the route with the lowest IGP cost to the next hop. 10. Select the route received from the peer with the lowest BGP router ID. 11. Select the route received from the peer with the shorter CLUSTER_LIST length. Returns None if best-path among given paths cannot be computed else best path. Assumes paths from NC has source equal to None. 
] variable[best_path] assign[=] constant[None] variable[best_path_reason] assign[=] name[BPR_UNKNOWN] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_reachable_nh], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_REACHABLE_NEXT_HOP] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_highest_wg], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_HIGHEST_WEIGHT] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_local_pref], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_LOCAL_PREF] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_local_origin], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_LOCAL_ORIGIN] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_aspath], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_ASPATH] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_origin], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_ORIGIN] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_med], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_MED] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_asn], parameter[name[local_asn], name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_ASN] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_igp_cost], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_IGP_COST] if 
compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_router_id], parameter[name[local_asn], name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_ROUTER_ID] if compare[name[best_path] is constant[None]] begin[:] variable[best_path] assign[=] call[name[_cmp_by_cluster_list], parameter[name[path1], name[path2]]] variable[best_path_reason] assign[=] name[BPR_CLUSTER_LIST] if compare[name[best_path] is constant[None]] begin[:] variable[best_path_reason] assign[=] name[BPR_UNKNOWN] return[tuple[[<ast.Name object at 0x7da1b1b0d810>, <ast.Name object at 0x7da1b1b0d7e0>]]]
keyword[def] identifier[compute_best_path] ( identifier[local_asn] , identifier[path1] , identifier[path2] ): literal[string] identifier[best_path] = keyword[None] identifier[best_path_reason] = identifier[BPR_UNKNOWN] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_reachable_nh] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_REACHABLE_NEXT_HOP] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_highest_wg] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_HIGHEST_WEIGHT] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_local_pref] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_LOCAL_PREF] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_local_origin] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_LOCAL_ORIGIN] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_aspath] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_ASPATH] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_origin] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_ORIGIN] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_med] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_MED] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_asn] ( identifier[local_asn] , identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_ASN] keyword[if] identifier[best_path] keyword[is] 
keyword[None] : identifier[best_path] = identifier[_cmp_by_igp_cost] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_IGP_COST] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_router_id] ( identifier[local_asn] , identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_ROUTER_ID] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path] = identifier[_cmp_by_cluster_list] ( identifier[path1] , identifier[path2] ) identifier[best_path_reason] = identifier[BPR_CLUSTER_LIST] keyword[if] identifier[best_path] keyword[is] keyword[None] : identifier[best_path_reason] = identifier[BPR_UNKNOWN] keyword[return] identifier[best_path] , identifier[best_path_reason]
def compute_best_path(local_asn, path1, path2): """Compares given paths and returns best path. Parameters: -`local_asn`: asn of local bgpspeaker -`path1`: first path to compare -`path2`: second path to compare Best path processing will involve following steps: 1. Select a path with a reachable next hop. 2. Select the path with the highest weight. 3. If path weights are the same, select the path with the highest local preference value. 4. Prefer locally originated routes (network routes, redistributed routes, or aggregated routes) over received routes. 5. Select the route with the shortest AS-path length. 6. If all paths have the same AS-path length, select the path based on origin: IGP is preferred over EGP; EGP is preferred over Incomplete. 7. If the origins are the same, select the path with lowest MED value. 8. If the paths have the same MED values, select the path learned via EBGP over one learned via IBGP. 9. Select the route with the lowest IGP cost to the next hop. 10. Select the route received from the peer with the lowest BGP router ID. 11. Select the route received from the peer with the shorter CLUSTER_LIST length. Returns None if best-path among given paths cannot be computed else best path. Assumes paths from NC has source equal to None. """ best_path = None best_path_reason = BPR_UNKNOWN # Follow best path calculation algorithm steps. 
if best_path is None: best_path = _cmp_by_reachable_nh(path1, path2) best_path_reason = BPR_REACHABLE_NEXT_HOP # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_highest_wg(path1, path2) best_path_reason = BPR_HIGHEST_WEIGHT # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_local_pref(path1, path2) best_path_reason = BPR_LOCAL_PREF # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_local_origin(path1, path2) best_path_reason = BPR_LOCAL_ORIGIN # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_aspath(path1, path2) best_path_reason = BPR_ASPATH # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_origin(path1, path2) best_path_reason = BPR_ORIGIN # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_med(path1, path2) best_path_reason = BPR_MED # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_asn(local_asn, path1, path2) best_path_reason = BPR_ASN # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_igp_cost(path1, path2) best_path_reason = BPR_IGP_COST # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_router_id(local_asn, path1, path2) best_path_reason = BPR_ROUTER_ID # depends on [control=['if'], data=['best_path']] if best_path is None: best_path = _cmp_by_cluster_list(path1, path2) best_path_reason = BPR_CLUSTER_LIST # depends on [control=['if'], data=['best_path']] if best_path is None: best_path_reason = BPR_UNKNOWN # depends on [control=['if'], data=[]] return (best_path, best_path_reason)
def sh_report(self, full=True, latest=False):
    """
    Show shell commands necessary to clone this repository.

    If there is no primary remote url, the clone command is
    prefix-commented with '#'.

    Keyword Arguments:
        full (bool): also include commands to recreate branches and remotes
        latest (bool): checkout repo.branch instead of repo.current_id

    Yields:
        str: shell command necessary to clone this repository
    """
    def pathvar_repr(var):
        # Tolerate missing values (e.g. no remote url) instead of raising
        # AttributeError, and escape embedded double quotes so the value
        # survives inside a double-quoted shell argument.
        # BUGFIX: the previous ``var.replace('"', '\"')`` was a no-op,
        # because '\"' is the same string as '"'.
        if var is None:
            var = ''
        return '"%s"' % var.replace('"', '\\"')

    output = []
    if not self.remote_url:
        # No primary remote: still emit the command, but commented out.
        output.append('#')
    output = output + (
        [self.label]
        + self.clone_cmd
        + [pathvar_repr(self.remote_url)]  # TODO: shell quote?
        + [pathvar_repr(self.relpath)]
    )
    yield ''
    yield "## %s" % pathvar_repr(self.relpath)
    yield ' '.join(output)

    if full:
        checkout_rev = self.current_id
        relpath = pathvar_repr(self.relpath) if self.relpath else ''
        checkout_branch_cmd = (
            [self.label] + self.checkout_branch_cmd + [self.branch]
            + self.repo_abspath_cmd + [relpath])
        checkout_rev_cmd = (
            [self.label] + self.checkout_rev_cmd + [checkout_rev]
            + self.repo_abspath_cmd + [relpath])
        # The selected command is emitted; the alternative is emitted as a
        # '###' comment so either checkout strategy is visible.
        if latest:
            checkout_cmd = checkout_branch_cmd
            comment = checkout_rev_cmd
        else:
            checkout_cmd = checkout_rev_cmd
            comment = checkout_branch_cmd
        yield ' '.join(c for c in checkout_cmd if c is not None)
        yield '### %s' % ' '.join(c for c in comment if c is not None)

    for x in self.recreate_remotes_shellcmd():
        yield x
def function[sh_report, parameter[self, full, latest]]: constant[ Show shell command necessary to clone this repository If there is no primary remote url, prefix-comment the command Keyword Arguments: full (bool): also include commands to recreate branches and remotes latest (bool): checkout repo.branch instead of repo.current_id Yields: str: shell command necessary to clone this repository ] def function[pathvar_repr, parameter[var]]: variable[_var] assign[=] call[name[var].replace, parameter[constant["], constant["]]] return[binary_operation[constant["%s"] <ast.Mod object at 0x7da2590d6920> name[_var]]] variable[output] assign[=] list[[]] if <ast.UnaryOp object at 0x7da20e957670> begin[:] call[name[output].append, parameter[constant[#]]] variable[output] assign[=] binary_operation[name[output] + binary_operation[binary_operation[binary_operation[list[[<ast.Attribute object at 0x7da20e956d10>]] + name[self].clone_cmd] + list[[<ast.Call object at 0x7da20e954fd0>]]] + list[[<ast.Call object at 0x7da20e9546d0>]]]] <ast.Yield object at 0x7da20e9572b0> <ast.Yield object at 0x7da20e955c30> <ast.Yield object at 0x7da20e955e40> if name[full] begin[:] variable[checkout_rev] assign[=] name[self].current_id variable[relpath] assign[=] <ast.IfExp object at 0x7da20e9553f0> variable[relpath] assign[=] <ast.IfExp object at 0x7da20e957b20> variable[checkout_branch_cmd] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.Attribute object at 0x7da20e954bb0>]] + name[self].checkout_branch_cmd] + list[[<ast.Attribute object at 0x7da20e956e00>]]] + name[self].repo_abspath_cmd] + list[[<ast.Name object at 0x7da20e956a40>]]] variable[checkout_rev_cmd] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.Attribute object at 0x7da20e9576d0>]] + name[self].checkout_rev_cmd] + list[[<ast.Name object at 0x7da20e9542b0>]]] + name[self].repo_abspath_cmd] + list[[<ast.Name object at 0x7da20e9550f0>]]] if name[latest] 
begin[:] variable[checkout_cmd] assign[=] name[checkout_branch_cmd] variable[comment] assign[=] name[checkout_rev_cmd] <ast.Yield object at 0x7da20e9562f0> <ast.Yield object at 0x7da20e9562c0> for taget[name[x]] in starred[call[name[self].recreate_remotes_shellcmd, parameter[]]] begin[:] <ast.Yield object at 0x7da20e9549a0>
keyword[def] identifier[sh_report] ( identifier[self] , identifier[full] = keyword[True] , identifier[latest] = keyword[False] ): literal[string] keyword[def] identifier[pathvar_repr] ( identifier[var] ): identifier[_var] = identifier[var] . identifier[replace] ( literal[string] , literal[string] ) keyword[return] literal[string] % identifier[_var] identifier[output] =[] keyword[if] keyword[not] identifier[self] . identifier[remote_url] : identifier[output] . identifier[append] ( literal[string] ) identifier[output] = identifier[output] +( [ identifier[self] . identifier[label] ] + identifier[self] . identifier[clone_cmd] +[ identifier[pathvar_repr] ( identifier[self] . identifier[remote_url] )] +[ identifier[pathvar_repr] ( identifier[self] . identifier[relpath] )] ) keyword[yield] literal[string] keyword[yield] literal[string] % identifier[pathvar_repr] ( identifier[self] . identifier[relpath] ) keyword[yield] literal[string] . identifier[join] ( identifier[output] ) keyword[if] identifier[full] : identifier[checkout_rev] = identifier[self] . identifier[current_id] identifier[relpath] = identifier[pathvar_repr] ( identifier[self] . identifier[relpath] ) keyword[if] identifier[self] . identifier[relpath] keyword[else] keyword[None] identifier[relpath] = identifier[relpath] keyword[if] identifier[relpath] keyword[else] literal[string] identifier[checkout_branch_cmd] =( [ identifier[self] . identifier[label] ] + identifier[self] . identifier[checkout_branch_cmd] +[ identifier[self] . identifier[branch] ] + identifier[self] . identifier[repo_abspath_cmd] +[ identifier[relpath] ]) identifier[checkout_rev_cmd] =( [ identifier[self] . identifier[label] ] + identifier[self] . identifier[checkout_rev_cmd] +[ identifier[checkout_rev] ] + identifier[self] . 
identifier[repo_abspath_cmd] +[ identifier[relpath] ]) keyword[if] identifier[latest] : identifier[checkout_cmd] = identifier[checkout_branch_cmd] identifier[comment] = identifier[checkout_rev_cmd] keyword[else] : identifier[checkout_cmd] = identifier[checkout_rev_cmd] identifier[comment] = identifier[checkout_branch_cmd] keyword[yield] literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[checkout_cmd] keyword[if] identifier[c] keyword[is] keyword[not] keyword[None] ) keyword[yield] literal[string] % literal[string] . identifier[join] ( identifier[c] keyword[for] identifier[c] keyword[in] identifier[comment] keyword[if] identifier[c] keyword[is] keyword[not] keyword[None] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[recreate_remotes_shellcmd] (): keyword[yield] identifier[x]
def sh_report(self, full=True, latest=False): """ Show shell command necessary to clone this repository If there is no primary remote url, prefix-comment the command Keyword Arguments: full (bool): also include commands to recreate branches and remotes latest (bool): checkout repo.branch instead of repo.current_id Yields: str: shell command necessary to clone this repository """ def pathvar_repr(var): _var = var.replace('"', '"') return '"%s"' % _var output = [] if not self.remote_url: output.append('#') # depends on [control=['if'], data=[]] # TODO: shell quote? output = output + ([self.label] + self.clone_cmd + [pathvar_repr(self.remote_url)] + [pathvar_repr(self.relpath)]) yield '' yield ('## %s' % pathvar_repr(self.relpath)) yield ' '.join(output) if full: checkout_rev = self.current_id # if latest: checkout_rev = self.branch relpath = pathvar_repr(self.relpath) if self.relpath else None relpath = relpath if relpath else '' checkout_branch_cmd = [self.label] + self.checkout_branch_cmd + [self.branch] + self.repo_abspath_cmd + [relpath] checkout_rev_cmd = [self.label] + self.checkout_rev_cmd + [checkout_rev] + self.repo_abspath_cmd + [relpath] if latest: checkout_cmd = checkout_branch_cmd comment = checkout_rev_cmd # depends on [control=['if'], data=[]] else: checkout_cmd = checkout_rev_cmd comment = checkout_branch_cmd yield ' '.join((c for c in checkout_cmd if c is not None)) yield ('### %s' % ' '.join((c for c in comment if c is not None))) # output.extend([checkout_cmd, ';', ' ###', comment]) for x in self.recreate_remotes_shellcmd(): yield x # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=[]]
def register_list(self): """Returns a list of the indices for the CPU registers. The returned indices can be used to read the register content or grab the register name. Args: self (JLink): the ``JLink`` instance Returns: List of registers. """ num_items = self.MAX_NUM_CPU_REGISTERS buf = (ctypes.c_uint32 * num_items)() num_regs = self._dll.JLINKARM_GetRegisterList(buf, num_items) return buf[:num_regs]
def function[register_list, parameter[self]]: constant[Returns a list of the indices for the CPU registers. The returned indices can be used to read the register content or grab the register name. Args: self (JLink): the ``JLink`` instance Returns: List of registers. ] variable[num_items] assign[=] name[self].MAX_NUM_CPU_REGISTERS variable[buf] assign[=] call[binary_operation[name[ctypes].c_uint32 * name[num_items]], parameter[]] variable[num_regs] assign[=] call[name[self]._dll.JLINKARM_GetRegisterList, parameter[name[buf], name[num_items]]] return[call[name[buf]][<ast.Slice object at 0x7da1b17f8880>]]
keyword[def] identifier[register_list] ( identifier[self] ): literal[string] identifier[num_items] = identifier[self] . identifier[MAX_NUM_CPU_REGISTERS] identifier[buf] =( identifier[ctypes] . identifier[c_uint32] * identifier[num_items] )() identifier[num_regs] = identifier[self] . identifier[_dll] . identifier[JLINKARM_GetRegisterList] ( identifier[buf] , identifier[num_items] ) keyword[return] identifier[buf] [: identifier[num_regs] ]
def register_list(self): """Returns a list of the indices for the CPU registers. The returned indices can be used to read the register content or grab the register name. Args: self (JLink): the ``JLink`` instance Returns: List of registers. """ num_items = self.MAX_NUM_CPU_REGISTERS buf = (ctypes.c_uint32 * num_items)() num_regs = self._dll.JLINKARM_GetRegisterList(buf, num_items) return buf[:num_regs]
def selenium(self): """Get the instance of webdriver, it starts the browser if the webdriver is not yet instantied :return: a `selenium instance <http://selenium-python.readthedocs.org/ api.html#module-selenium.webdriver.remote.webdriver>` """ if not self._web_driver: self._web_driver = self._start_driver() return self._web_driver
def function[selenium, parameter[self]]: constant[Get the instance of webdriver, it starts the browser if the webdriver is not yet instantied :return: a `selenium instance <http://selenium-python.readthedocs.org/ api.html#module-selenium.webdriver.remote.webdriver>` ] if <ast.UnaryOp object at 0x7da1b0915390> begin[:] name[self]._web_driver assign[=] call[name[self]._start_driver, parameter[]] return[name[self]._web_driver]
keyword[def] identifier[selenium] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_web_driver] : identifier[self] . identifier[_web_driver] = identifier[self] . identifier[_start_driver] () keyword[return] identifier[self] . identifier[_web_driver]
def selenium(self): """Get the instance of webdriver, it starts the browser if the webdriver is not yet instantied :return: a `selenium instance <http://selenium-python.readthedocs.org/ api.html#module-selenium.webdriver.remote.webdriver>` """ if not self._web_driver: self._web_driver = self._start_driver() # depends on [control=['if'], data=[]] return self._web_driver
def statistical_inefficiency(X, truncate_acf=True): """ Estimates the statistical inefficiency from univariate time series X The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal. Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should be used in order to compute statistical uncertainties. See [2]_ for a review. The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}` using the damped autocorrelation time ..1: \tau = \frac{1}{2}+\sum_{K=1}^{N} A(k) \left(1-\frac{k}{N}\right) where ..1: A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x^2 \rangle_t}{\mathrm{var}(x)} is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple trajectories. Parameters ---------- X : float array or list of float arrays Univariate time series (single or multiple trajectories) truncate_acf : bool, optional, default=True When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating random noise References ---------- .. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971) .. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes, J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich NIC Series 10, pp. 423-445, 2002. 
""" # check input assert np.ndim(X[0]) == 1, 'Data must be 1-dimensional' N = _maxlength(X) # length # mean-free data xflat = np.concatenate(X) Xmean = np.mean(xflat) X0 = [x-Xmean for x in X] # moments x2m = np.mean(xflat ** 2) # integrate damped autocorrelation corrsum = 0.0 for lag in range(N): acf = 0.0 n = 0.0 for x in X0: Nx = len(x) # length of this trajectory if (Nx > lag): # only use trajectories that are long enough acf += np.sum(x[0:Nx-lag] * x[lag:Nx]) n += float(Nx-lag) acf /= n if acf <= 0 and truncate_acf: # zero autocorrelation. Exit break elif lag > 0: # start integrating at lag 1 (effect of lag 0 is contained in the 0.5 below corrsum += acf * (1.0 - (float(lag)/float(N))) # compute damped correlation time corrtime = 0.5 + corrsum / x2m # return statistical inefficiency return 1.0 / (2 * corrtime)
def function[statistical_inefficiency, parameter[X, truncate_acf]]: constant[ Estimates the statistical inefficiency from univariate time series X The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal. Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should be used in order to compute statistical uncertainties. See [2]_ for a review. The statistical inefficiency is computed as :math:`I = (2 au)^{-1}` using the damped autocorrelation time ..1: au = rac{1}{2}+\sum_{K=1}^{N} A(k) \left(1- rac{k}{N} ight) where ..1: A(k) = rac{\langle x_t x_{t+k} angle_t - \langle x^2 angle_t}{\mathrm{var}(x)} is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple trajectories. Parameters ---------- X : float array or list of float arrays Univariate time series (single or multiple trajectories) truncate_acf : bool, optional, default=True When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating random noise References ---------- .. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971) .. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes, J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich NIC Series 10, pp. 423-445, 2002. 
] assert[compare[call[name[np].ndim, parameter[call[name[X]][constant[0]]]] equal[==] constant[1]]] variable[N] assign[=] call[name[_maxlength], parameter[name[X]]] variable[xflat] assign[=] call[name[np].concatenate, parameter[name[X]]] variable[Xmean] assign[=] call[name[np].mean, parameter[name[xflat]]] variable[X0] assign[=] <ast.ListComp object at 0x7da20c6ab0d0> variable[x2m] assign[=] call[name[np].mean, parameter[binary_operation[name[xflat] ** constant[2]]]] variable[corrsum] assign[=] constant[0.0] for taget[name[lag]] in starred[call[name[range], parameter[name[N]]]] begin[:] variable[acf] assign[=] constant[0.0] variable[n] assign[=] constant[0.0] for taget[name[x]] in starred[name[X0]] begin[:] variable[Nx] assign[=] call[name[len], parameter[name[x]]] if compare[name[Nx] greater[>] name[lag]] begin[:] <ast.AugAssign object at 0x7da18f00d750> <ast.AugAssign object at 0x7da18f00c430> <ast.AugAssign object at 0x7da18f00c6a0> if <ast.BoolOp object at 0x7da18f00c7c0> begin[:] break variable[corrtime] assign[=] binary_operation[constant[0.5] + binary_operation[name[corrsum] / name[x2m]]] return[binary_operation[constant[1.0] / binary_operation[constant[2] * name[corrtime]]]]
keyword[def] identifier[statistical_inefficiency] ( identifier[X] , identifier[truncate_acf] = keyword[True] ): literal[string] keyword[assert] identifier[np] . identifier[ndim] ( identifier[X] [ literal[int] ])== literal[int] , literal[string] identifier[N] = identifier[_maxlength] ( identifier[X] ) identifier[xflat] = identifier[np] . identifier[concatenate] ( identifier[X] ) identifier[Xmean] = identifier[np] . identifier[mean] ( identifier[xflat] ) identifier[X0] =[ identifier[x] - identifier[Xmean] keyword[for] identifier[x] keyword[in] identifier[X] ] identifier[x2m] = identifier[np] . identifier[mean] ( identifier[xflat] ** literal[int] ) identifier[corrsum] = literal[int] keyword[for] identifier[lag] keyword[in] identifier[range] ( identifier[N] ): identifier[acf] = literal[int] identifier[n] = literal[int] keyword[for] identifier[x] keyword[in] identifier[X0] : identifier[Nx] = identifier[len] ( identifier[x] ) keyword[if] ( identifier[Nx] > identifier[lag] ): identifier[acf] += identifier[np] . identifier[sum] ( identifier[x] [ literal[int] : identifier[Nx] - identifier[lag] ]* identifier[x] [ identifier[lag] : identifier[Nx] ]) identifier[n] += identifier[float] ( identifier[Nx] - identifier[lag] ) identifier[acf] /= identifier[n] keyword[if] identifier[acf] <= literal[int] keyword[and] identifier[truncate_acf] : keyword[break] keyword[elif] identifier[lag] > literal[int] : identifier[corrsum] += identifier[acf] *( literal[int] -( identifier[float] ( identifier[lag] )/ identifier[float] ( identifier[N] ))) identifier[corrtime] = literal[int] + identifier[corrsum] / identifier[x2m] keyword[return] literal[int] /( literal[int] * identifier[corrtime] )
def statistical_inefficiency(X, truncate_acf=True): """ Estimates the statistical inefficiency from univariate time series X The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal. Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \\in (0,1]`, there are only :math:`I \\cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \\cdot N` should be used in order to compute statistical uncertainties. See [2]_ for a review. The statistical inefficiency is computed as :math:`I = (2 au)^{-1}` using the damped autocorrelation time ..1: au = \x0crac{1}{2}+\\sum_{K=1}^{N} A(k) \\left(1-\x0crac{k}{N}\right) where ..1: A(k) = \x0crac{\\langle x_t x_{t+k} \rangle_t - \\langle x^2 \rangle_t}{\\mathrm{var}(x)} is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple trajectories. Parameters ---------- X : float array or list of float arrays Univariate time series (single or multiple trajectories) truncate_acf : bool, optional, default=True When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating random noise References ---------- .. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971) .. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes, J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich NIC Series 10, pp. 423-445, 2002. 
""" # check input assert np.ndim(X[0]) == 1, 'Data must be 1-dimensional' N = _maxlength(X) # length # mean-free data xflat = np.concatenate(X) Xmean = np.mean(xflat) X0 = [x - Xmean for x in X] # moments x2m = np.mean(xflat ** 2) # integrate damped autocorrelation corrsum = 0.0 for lag in range(N): acf = 0.0 n = 0.0 for x in X0: Nx = len(x) # length of this trajectory if Nx > lag: # only use trajectories that are long enough acf += np.sum(x[0:Nx - lag] * x[lag:Nx]) n += float(Nx - lag) # depends on [control=['if'], data=['Nx', 'lag']] # depends on [control=['for'], data=['x']] acf /= n if acf <= 0 and truncate_acf: # zero autocorrelation. Exit break # depends on [control=['if'], data=[]] elif lag > 0: # start integrating at lag 1 (effect of lag 0 is contained in the 0.5 below corrsum += acf * (1.0 - float(lag) / float(N)) # depends on [control=['if'], data=['lag']] # depends on [control=['for'], data=['lag']] # compute damped correlation time corrtime = 0.5 + corrsum / x2m # return statistical inefficiency return 1.0 / (2 * corrtime)
def yticks(self): """Compute the yticks labels of this grid, used for plotting the y-axis ticks when visualizing a regular""" return np.linspace(np.min(self[:, 0]), np.max(self[:, 0]), 4)
def function[yticks, parameter[self]]: constant[Compute the yticks labels of this grid, used for plotting the y-axis ticks when visualizing a regular] return[call[name[np].linspace, parameter[call[name[np].min, parameter[call[name[self]][tuple[[<ast.Slice object at 0x7da20cabc400>, <ast.Constant object at 0x7da20cabfa90>]]]]], call[name[np].max, parameter[call[name[self]][tuple[[<ast.Slice object at 0x7da20cabc640>, <ast.Constant object at 0x7da20cabdfc0>]]]]], constant[4]]]]
keyword[def] identifier[yticks] ( identifier[self] ): literal[string] keyword[return] identifier[np] . identifier[linspace] ( identifier[np] . identifier[min] ( identifier[self] [:, literal[int] ]), identifier[np] . identifier[max] ( identifier[self] [:, literal[int] ]), literal[int] )
def yticks(self): """Compute the yticks labels of this grid, used for plotting the y-axis ticks when visualizing a regular""" return np.linspace(np.min(self[:, 0]), np.max(self[:, 0]), 4)
def instruction_list(self): """Return a list of instructions for this CompositeGate. If the CompositeGate itself contains composites, call this method recursively. """ instruction_list = [] for instruction in self.data: if isinstance(instruction, CompositeGate): instruction_list.extend(instruction.instruction_list()) else: instruction_list.append(instruction) return instruction_list
def function[instruction_list, parameter[self]]: constant[Return a list of instructions for this CompositeGate. If the CompositeGate itself contains composites, call this method recursively. ] variable[instruction_list] assign[=] list[[]] for taget[name[instruction]] in starred[name[self].data] begin[:] if call[name[isinstance], parameter[name[instruction], name[CompositeGate]]] begin[:] call[name[instruction_list].extend, parameter[call[name[instruction].instruction_list, parameter[]]]] return[name[instruction_list]]
keyword[def] identifier[instruction_list] ( identifier[self] ): literal[string] identifier[instruction_list] =[] keyword[for] identifier[instruction] keyword[in] identifier[self] . identifier[data] : keyword[if] identifier[isinstance] ( identifier[instruction] , identifier[CompositeGate] ): identifier[instruction_list] . identifier[extend] ( identifier[instruction] . identifier[instruction_list] ()) keyword[else] : identifier[instruction_list] . identifier[append] ( identifier[instruction] ) keyword[return] identifier[instruction_list]
def instruction_list(self): """Return a list of instructions for this CompositeGate. If the CompositeGate itself contains composites, call this method recursively. """ instruction_list = [] for instruction in self.data: if isinstance(instruction, CompositeGate): instruction_list.extend(instruction.instruction_list()) # depends on [control=['if'], data=[]] else: instruction_list.append(instruction) # depends on [control=['for'], data=['instruction']] return instruction_list
def _update_crypto(self): """ 根据当前配置内容更新 Crypto 类 """ if self.__encrypt_mode in ['compatible', 'safe'] and self.__encoding_aes_key is not None: if self.__token is None or self.__appid is None: raise NeedParamError('Please provide token and appid parameters in the construction of class.') self.__crypto = BasicCrypto(self.__token, self.__encoding_aes_key, self.__appid) else: self.__crypto = None
def function[_update_crypto, parameter[self]]: constant[ 根据当前配置内容更新 Crypto 类 ] if <ast.BoolOp object at 0x7da2054a6c80> begin[:] if <ast.BoolOp object at 0x7da2054a5c90> begin[:] <ast.Raise object at 0x7da2054a6470> name[self].__crypto assign[=] call[name[BasicCrypto], parameter[name[self].__token, name[self].__encoding_aes_key, name[self].__appid]]
keyword[def] identifier[_update_crypto] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[__encrypt_mode] keyword[in] [ literal[string] , literal[string] ] keyword[and] identifier[self] . identifier[__encoding_aes_key] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[self] . identifier[__token] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[__appid] keyword[is] keyword[None] : keyword[raise] identifier[NeedParamError] ( literal[string] ) identifier[self] . identifier[__crypto] = identifier[BasicCrypto] ( identifier[self] . identifier[__token] , identifier[self] . identifier[__encoding_aes_key] , identifier[self] . identifier[__appid] ) keyword[else] : identifier[self] . identifier[__crypto] = keyword[None]
def _update_crypto(self): """ 根据当前配置内容更新 Crypto 类 """ if self.__encrypt_mode in ['compatible', 'safe'] and self.__encoding_aes_key is not None: if self.__token is None or self.__appid is None: raise NeedParamError('Please provide token and appid parameters in the construction of class.') # depends on [control=['if'], data=[]] self.__crypto = BasicCrypto(self.__token, self.__encoding_aes_key, self.__appid) # depends on [control=['if'], data=[]] else: self.__crypto = None
def feature_detector(self, tokens, index, history): """Implementing a slightly modified feature detector. @param tokens: The tokens from the sentence to tag. @param index: The current token index to tag. @param history: The previous tagged tokens. """ word = tokens[index] if index == 0: # At the beginning of the sentence prevword = prevprevword = None prevtag = prevprevtag = None #word = word.lower() # Lowercase at the beginning of sentence elif index == 1: prevword = tokens[index-1] # Note: no lowercase prevprevword = None prevtag = history[index-1] prevprevtag = None else: prevword = tokens[index-1] prevprevword = tokens[index-2] prevtag = history[index-1] prevprevtag = history[index-2] if re.match('[0-9]+([\.,][0-9]*)?|[0-9]*[\.,][0-9]+$', word): # Included "," as decimal point shape = 'number' elif re.compile('\W+$', re.UNICODE).match(word): # Included unicode flag shape = 'punct' elif re.match('([A-ZÄÖÜ]+[a-zäöüß]*-?)+$', word): # Included dash for dashed words and umlauts shape = 'upcase' elif re.match('[a-zäöüß]+', word): # Included umlauts shape = 'downcase' elif re.compile("\w+", re.UNICODE).match(word): # Included unicode flag shape = 'mixedcase' else: shape = 'other' features = { 'prevtag': prevtag, 'prevprevtag': prevprevtag, 'word': word, 'word.lower': word.lower(), 'suffix3': word.lower()[-3:], #'suffix2': word.lower()[-2:], #'suffix1': word.lower()[-1:], 'preffix1': word[:1], # included 'prevprevword': prevprevword, 'prevword': prevword, 'prevtag+word': '%s+%s' % (prevtag, word), 'prevprevtag+word': '%s+%s' % (prevprevtag, word), 'prevword+word': '%s+%s' % (prevword, word), 'shape': shape } return features
def function[feature_detector, parameter[self, tokens, index, history]]: constant[Implementing a slightly modified feature detector. @param tokens: The tokens from the sentence to tag. @param index: The current token index to tag. @param history: The previous tagged tokens. ] variable[word] assign[=] call[name[tokens]][name[index]] if compare[name[index] equal[==] constant[0]] begin[:] variable[prevword] assign[=] constant[None] variable[prevtag] assign[=] constant[None] if call[name[re].match, parameter[constant[[0-9]+([\.,][0-9]*)?|[0-9]*[\.,][0-9]+$], name[word]]] begin[:] variable[shape] assign[=] constant[number] variable[features] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a9630>, <ast.Constant object at 0x7da20c6aa3e0>, <ast.Constant object at 0x7da20c6a94e0>, <ast.Constant object at 0x7da20c6ab760>, <ast.Constant object at 0x7da20c6aab30>, <ast.Constant object at 0x7da20c6aaaa0>, <ast.Constant object at 0x7da20c6aa7a0>, <ast.Constant object at 0x7da20c6aa290>, <ast.Constant object at 0x7da20c6a8a00>, <ast.Constant object at 0x7da20c6aa920>, <ast.Constant object at 0x7da20c6a98a0>, <ast.Constant object at 0x7da20c6a9b70>], [<ast.Name object at 0x7da20c6a8be0>, <ast.Name object at 0x7da20c6a97b0>, <ast.Name object at 0x7da20c6abe20>, <ast.Call object at 0x7da20c6a9270>, <ast.Subscript object at 0x7da20c6a8790>, <ast.Subscript object at 0x7da20c6ab130>, <ast.Name object at 0x7da2041dbe50>, <ast.Name object at 0x7da2041da590>, <ast.BinOp object at 0x7da2041d9a20>, <ast.BinOp object at 0x7da2041d8790>, <ast.BinOp object at 0x7da2041da5c0>, <ast.Name object at 0x7da2041d9a50>]] return[name[features]]
keyword[def] identifier[feature_detector] ( identifier[self] , identifier[tokens] , identifier[index] , identifier[history] ): literal[string] identifier[word] = identifier[tokens] [ identifier[index] ] keyword[if] identifier[index] == literal[int] : identifier[prevword] = identifier[prevprevword] = keyword[None] identifier[prevtag] = identifier[prevprevtag] = keyword[None] keyword[elif] identifier[index] == literal[int] : identifier[prevword] = identifier[tokens] [ identifier[index] - literal[int] ] identifier[prevprevword] = keyword[None] identifier[prevtag] = identifier[history] [ identifier[index] - literal[int] ] identifier[prevprevtag] = keyword[None] keyword[else] : identifier[prevword] = identifier[tokens] [ identifier[index] - literal[int] ] identifier[prevprevword] = identifier[tokens] [ identifier[index] - literal[int] ] identifier[prevtag] = identifier[history] [ identifier[index] - literal[int] ] identifier[prevprevtag] = identifier[history] [ identifier[index] - literal[int] ] keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[word] ): identifier[shape] = literal[string] keyword[elif] identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[UNICODE] ). identifier[match] ( identifier[word] ): identifier[shape] = literal[string] keyword[elif] identifier[re] . identifier[match] ( literal[string] , identifier[word] ): identifier[shape] = literal[string] keyword[elif] identifier[re] . identifier[match] ( literal[string] , identifier[word] ): identifier[shape] = literal[string] keyword[elif] identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[UNICODE] ). identifier[match] ( identifier[word] ): identifier[shape] = literal[string] keyword[else] : identifier[shape] = literal[string] identifier[features] ={ literal[string] : identifier[prevtag] , literal[string] : identifier[prevprevtag] , literal[string] : identifier[word] , literal[string] : identifier[word] . 
identifier[lower] (), literal[string] : identifier[word] . identifier[lower] ()[- literal[int] :], literal[string] : identifier[word] [: literal[int] ], literal[string] : identifier[prevprevword] , literal[string] : identifier[prevword] , literal[string] : literal[string] %( identifier[prevtag] , identifier[word] ), literal[string] : literal[string] %( identifier[prevprevtag] , identifier[word] ), literal[string] : literal[string] %( identifier[prevword] , identifier[word] ), literal[string] : identifier[shape] } keyword[return] identifier[features]
def feature_detector(self, tokens, index, history): """Implementing a slightly modified feature detector. @param tokens: The tokens from the sentence to tag. @param index: The current token index to tag. @param history: The previous tagged tokens. """ word = tokens[index] if index == 0: # At the beginning of the sentence prevword = prevprevword = None prevtag = prevprevtag = None # depends on [control=['if'], data=[]] #word = word.lower() # Lowercase at the beginning of sentence elif index == 1: prevword = tokens[index - 1] # Note: no lowercase prevprevword = None prevtag = history[index - 1] prevprevtag = None # depends on [control=['if'], data=['index']] else: prevword = tokens[index - 1] prevprevword = tokens[index - 2] prevtag = history[index - 1] prevprevtag = history[index - 2] if re.match('[0-9]+([\\.,][0-9]*)?|[0-9]*[\\.,][0-9]+$', word): # Included "," as decimal point shape = 'number' # depends on [control=['if'], data=[]] elif re.compile('\\W+$', re.UNICODE).match(word): # Included unicode flag shape = 'punct' # depends on [control=['if'], data=[]] elif re.match('([A-ZÄÖÜ]+[a-zäöüß]*-?)+$', word): # Included dash for dashed words and umlauts shape = 'upcase' # depends on [control=['if'], data=[]] elif re.match('[a-zäöüß]+', word): # Included umlauts shape = 'downcase' # depends on [control=['if'], data=[]] elif re.compile('\\w+', re.UNICODE).match(word): # Included unicode flag shape = 'mixedcase' # depends on [control=['if'], data=[]] else: shape = 'other' #'suffix2': word.lower()[-2:], #'suffix1': word.lower()[-1:], # included features = {'prevtag': prevtag, 'prevprevtag': prevprevtag, 'word': word, 'word.lower': word.lower(), 'suffix3': word.lower()[-3:], 'preffix1': word[:1], 'prevprevword': prevprevword, 'prevword': prevword, 'prevtag+word': '%s+%s' % (prevtag, word), 'prevprevtag+word': '%s+%s' % (prevprevtag, word), 'prevword+word': '%s+%s' % (prevword, word), 'shape': shape} return features
def __expire_files(self): """Because files are always unclean""" self.__files = OrderedDict( item for item in self.__files.items() if not item[1].expired )
def function[__expire_files, parameter[self]]: constant[Because files are always unclean] name[self].__files assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da2046219c0>]]
keyword[def] identifier[__expire_files] ( identifier[self] ): literal[string] identifier[self] . identifier[__files] = identifier[OrderedDict] ( identifier[item] keyword[for] identifier[item] keyword[in] identifier[self] . identifier[__files] . identifier[items] () keyword[if] keyword[not] identifier[item] [ literal[int] ]. identifier[expired] )
def __expire_files(self): """Because files are always unclean""" self.__files = OrderedDict((item for item in self.__files.items() if not item[1].expired))
def _remove_deactivated(contexts): """Remove deactivated handlers from the chain""" # Clean ctx handlers stack_contexts = tuple([h for h in contexts[0] if h.active]) # Find new head head = contexts[1] while head is not None and not head.active: head = head.old_contexts[1] # Process chain ctx = head while ctx is not None: parent = ctx.old_contexts[1] while parent is not None: if parent.active: break ctx.old_contexts = parent.old_contexts parent = parent.old_contexts[1] ctx = parent return (stack_contexts, head)
def function[_remove_deactivated, parameter[contexts]]: constant[Remove deactivated handlers from the chain] variable[stack_contexts] assign[=] call[name[tuple], parameter[<ast.ListComp object at 0x7da18c4ce6e0>]] variable[head] assign[=] call[name[contexts]][constant[1]] while <ast.BoolOp object at 0x7da1b1c60760> begin[:] variable[head] assign[=] call[name[head].old_contexts][constant[1]] variable[ctx] assign[=] name[head] while compare[name[ctx] is_not constant[None]] begin[:] variable[parent] assign[=] call[name[ctx].old_contexts][constant[1]] while compare[name[parent] is_not constant[None]] begin[:] if name[parent].active begin[:] break name[ctx].old_contexts assign[=] name[parent].old_contexts variable[parent] assign[=] call[name[parent].old_contexts][constant[1]] variable[ctx] assign[=] name[parent] return[tuple[[<ast.Name object at 0x7da1b1b1bdc0>, <ast.Name object at 0x7da1b1b1ab30>]]]
keyword[def] identifier[_remove_deactivated] ( identifier[contexts] ): literal[string] identifier[stack_contexts] = identifier[tuple] ([ identifier[h] keyword[for] identifier[h] keyword[in] identifier[contexts] [ literal[int] ] keyword[if] identifier[h] . identifier[active] ]) identifier[head] = identifier[contexts] [ literal[int] ] keyword[while] identifier[head] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[head] . identifier[active] : identifier[head] = identifier[head] . identifier[old_contexts] [ literal[int] ] identifier[ctx] = identifier[head] keyword[while] identifier[ctx] keyword[is] keyword[not] keyword[None] : identifier[parent] = identifier[ctx] . identifier[old_contexts] [ literal[int] ] keyword[while] identifier[parent] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[parent] . identifier[active] : keyword[break] identifier[ctx] . identifier[old_contexts] = identifier[parent] . identifier[old_contexts] identifier[parent] = identifier[parent] . identifier[old_contexts] [ literal[int] ] identifier[ctx] = identifier[parent] keyword[return] ( identifier[stack_contexts] , identifier[head] )
def _remove_deactivated(contexts): """Remove deactivated handlers from the chain""" # Clean ctx handlers stack_contexts = tuple([h for h in contexts[0] if h.active]) # Find new head head = contexts[1] while head is not None and (not head.active): head = head.old_contexts[1] # depends on [control=['while'], data=[]] # Process chain ctx = head while ctx is not None: parent = ctx.old_contexts[1] while parent is not None: if parent.active: break # depends on [control=['if'], data=[]] ctx.old_contexts = parent.old_contexts parent = parent.old_contexts[1] # depends on [control=['while'], data=['parent']] ctx = parent # depends on [control=['while'], data=['ctx']] return (stack_contexts, head)
def from_columns(columns, columns_to_ignore=None): """ Creates a mapping from kind names to fc_parameters objects (which are itself mappings from feature calculators to settings) to extract only the features contained in the columns. To do so, for every feature name in columns this method 1. split the column name into col, feature, params part 2. decide which feature we are dealing with (aggregate with/without params or apply) 3. add it to the new name_to_function dict 4. set up the params :param columns: containing the feature names :type columns: list of str :param columns_to_ignore: columns which do not contain tsfresh feature names :type columns_to_ignore: list of str :return: The kind_to_fc_parameters object ready to be used in the extract_features function. :rtype: dict """ kind_to_fc_parameters = {} if columns_to_ignore is None: columns_to_ignore = [] for col in columns: if col in columns_to_ignore: continue if not isinstance(col, basestring): raise TypeError("Column name {} should be a string or unicode".format(col)) # Split according to our separator into <col_name>, <feature_name>, <feature_params> parts = col.split('__') n_parts = len(parts) if n_parts == 1: raise ValueError("Splitting of columnname {} resulted in only one part.".format(col)) kind = parts[0] feature_name = parts[1] if kind not in kind_to_fc_parameters: kind_to_fc_parameters[kind] = {} if not hasattr(feature_calculators, feature_name): raise ValueError("Unknown feature name {}".format(feature_name)) config = get_config_from_string(parts) if config: if feature_name in kind_to_fc_parameters[kind]: kind_to_fc_parameters[kind][feature_name].append(config) else: kind_to_fc_parameters[kind][feature_name] = [config] else: kind_to_fc_parameters[kind][feature_name] = None return kind_to_fc_parameters
def function[from_columns, parameter[columns, columns_to_ignore]]: constant[ Creates a mapping from kind names to fc_parameters objects (which are itself mappings from feature calculators to settings) to extract only the features contained in the columns. To do so, for every feature name in columns this method 1. split the column name into col, feature, params part 2. decide which feature we are dealing with (aggregate with/without params or apply) 3. add it to the new name_to_function dict 4. set up the params :param columns: containing the feature names :type columns: list of str :param columns_to_ignore: columns which do not contain tsfresh feature names :type columns_to_ignore: list of str :return: The kind_to_fc_parameters object ready to be used in the extract_features function. :rtype: dict ] variable[kind_to_fc_parameters] assign[=] dictionary[[], []] if compare[name[columns_to_ignore] is constant[None]] begin[:] variable[columns_to_ignore] assign[=] list[[]] for taget[name[col]] in starred[name[columns]] begin[:] if compare[name[col] in name[columns_to_ignore]] begin[:] continue if <ast.UnaryOp object at 0x7da18bccbdf0> begin[:] <ast.Raise object at 0x7da18bcca0e0> variable[parts] assign[=] call[name[col].split, parameter[constant[__]]] variable[n_parts] assign[=] call[name[len], parameter[name[parts]]] if compare[name[n_parts] equal[==] constant[1]] begin[:] <ast.Raise object at 0x7da18bccb7f0> variable[kind] assign[=] call[name[parts]][constant[0]] variable[feature_name] assign[=] call[name[parts]][constant[1]] if compare[name[kind] <ast.NotIn object at 0x7da2590d7190> name[kind_to_fc_parameters]] begin[:] call[name[kind_to_fc_parameters]][name[kind]] assign[=] dictionary[[], []] if <ast.UnaryOp object at 0x7da18bcc9ab0> begin[:] <ast.Raise object at 0x7da18bcc88e0> variable[config] assign[=] call[name[get_config_from_string], parameter[name[parts]]] if name[config] begin[:] if compare[name[feature_name] in call[name[kind_to_fc_parameters]][name[kind]]] 
begin[:] call[call[call[name[kind_to_fc_parameters]][name[kind]]][name[feature_name]].append, parameter[name[config]]] return[name[kind_to_fc_parameters]]
keyword[def] identifier[from_columns] ( identifier[columns] , identifier[columns_to_ignore] = keyword[None] ): literal[string] identifier[kind_to_fc_parameters] ={} keyword[if] identifier[columns_to_ignore] keyword[is] keyword[None] : identifier[columns_to_ignore] =[] keyword[for] identifier[col] keyword[in] identifier[columns] : keyword[if] identifier[col] keyword[in] identifier[columns_to_ignore] : keyword[continue] keyword[if] keyword[not] identifier[isinstance] ( identifier[col] , identifier[basestring] ): keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[col] )) identifier[parts] = identifier[col] . identifier[split] ( literal[string] ) identifier[n_parts] = identifier[len] ( identifier[parts] ) keyword[if] identifier[n_parts] == literal[int] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[col] )) identifier[kind] = identifier[parts] [ literal[int] ] identifier[feature_name] = identifier[parts] [ literal[int] ] keyword[if] identifier[kind] keyword[not] keyword[in] identifier[kind_to_fc_parameters] : identifier[kind_to_fc_parameters] [ identifier[kind] ]={} keyword[if] keyword[not] identifier[hasattr] ( identifier[feature_calculators] , identifier[feature_name] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[feature_name] )) identifier[config] = identifier[get_config_from_string] ( identifier[parts] ) keyword[if] identifier[config] : keyword[if] identifier[feature_name] keyword[in] identifier[kind_to_fc_parameters] [ identifier[kind] ]: identifier[kind_to_fc_parameters] [ identifier[kind] ][ identifier[feature_name] ]. 
identifier[append] ( identifier[config] ) keyword[else] : identifier[kind_to_fc_parameters] [ identifier[kind] ][ identifier[feature_name] ]=[ identifier[config] ] keyword[else] : identifier[kind_to_fc_parameters] [ identifier[kind] ][ identifier[feature_name] ]= keyword[None] keyword[return] identifier[kind_to_fc_parameters]
def from_columns(columns, columns_to_ignore=None): """ Creates a mapping from kind names to fc_parameters objects (which are itself mappings from feature calculators to settings) to extract only the features contained in the columns. To do so, for every feature name in columns this method 1. split the column name into col, feature, params part 2. decide which feature we are dealing with (aggregate with/without params or apply) 3. add it to the new name_to_function dict 4. set up the params :param columns: containing the feature names :type columns: list of str :param columns_to_ignore: columns which do not contain tsfresh feature names :type columns_to_ignore: list of str :return: The kind_to_fc_parameters object ready to be used in the extract_features function. :rtype: dict """ kind_to_fc_parameters = {} if columns_to_ignore is None: columns_to_ignore = [] # depends on [control=['if'], data=['columns_to_ignore']] for col in columns: if col in columns_to_ignore: continue # depends on [control=['if'], data=[]] if not isinstance(col, basestring): raise TypeError('Column name {} should be a string or unicode'.format(col)) # depends on [control=['if'], data=[]] # Split according to our separator into <col_name>, <feature_name>, <feature_params> parts = col.split('__') n_parts = len(parts) if n_parts == 1: raise ValueError('Splitting of columnname {} resulted in only one part.'.format(col)) # depends on [control=['if'], data=[]] kind = parts[0] feature_name = parts[1] if kind not in kind_to_fc_parameters: kind_to_fc_parameters[kind] = {} # depends on [control=['if'], data=['kind', 'kind_to_fc_parameters']] if not hasattr(feature_calculators, feature_name): raise ValueError('Unknown feature name {}'.format(feature_name)) # depends on [control=['if'], data=[]] config = get_config_from_string(parts) if config: if feature_name in kind_to_fc_parameters[kind]: kind_to_fc_parameters[kind][feature_name].append(config) # depends on [control=['if'], data=['feature_name']] else: 
kind_to_fc_parameters[kind][feature_name] = [config] # depends on [control=['if'], data=[]] else: kind_to_fc_parameters[kind][feature_name] = None # depends on [control=['for'], data=['col']] return kind_to_fc_parameters
def _pstore32(ins):
    """ Stores 2nd parameter at stack pointer (SP) + X, being X 1st parameter.
    1st operand must be a SIGNED integer.
    """
    value = ins.quad[2]
    offset = ins.quad[1]

    # A leading '*' marks an indirect store (through a pointer variable).
    indirect = offset[0] == '*'
    if indirect:
        offset = offset[1:]

    I = int(offset)
    if I >= 0:
        I += 4  # Return Address + "push IX"

    output = _32bit_oper(value)
    output.append('ld bc, %i' % I)

    if indirect:
        output.append('call __PISTORE32')
        REQUIRES.add('pistore32.asm')
    else:
        output.append('call __PSTORE32')
        REQUIRES.add('pstore32.asm')

    return output
def function[_pstore32, parameter[ins]]: constant[ Stores 2nd parameter at stack pointer (SP) + X, being X 1st parameter. 1st operand must be a SIGNED integer. ] variable[value] assign[=] call[name[ins].quad][constant[2]] variable[offset] assign[=] call[name[ins].quad][constant[1]] variable[indirect] assign[=] compare[call[name[offset]][constant[0]] equal[==] constant[*]] if name[indirect] begin[:] variable[offset] assign[=] call[name[offset]][<ast.Slice object at 0x7da1b0652290>] variable[I] assign[=] call[name[int], parameter[name[offset]]] if compare[name[I] greater_or_equal[>=] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b0650a00> variable[output] assign[=] call[name[_32bit_oper], parameter[name[value]]] if name[indirect] begin[:] call[name[output].append, parameter[binary_operation[constant[ld bc, %i] <ast.Mod object at 0x7da2590d6920> name[I]]]] call[name[output].append, parameter[constant[call __PISTORE32]]] call[name[REQUIRES].add, parameter[constant[pistore32.asm]]] return[name[output]] call[name[output].append, parameter[binary_operation[constant[ld bc, %i] <ast.Mod object at 0x7da2590d6920> name[I]]]] call[name[output].append, parameter[constant[call __PSTORE32]]] call[name[REQUIRES].add, parameter[constant[pstore32.asm]]] return[name[output]]
keyword[def] identifier[_pstore32] ( identifier[ins] ): literal[string] identifier[value] = identifier[ins] . identifier[quad] [ literal[int] ] identifier[offset] = identifier[ins] . identifier[quad] [ literal[int] ] identifier[indirect] = identifier[offset] [ literal[int] ]== literal[string] keyword[if] identifier[indirect] : identifier[offset] = identifier[offset] [ literal[int] :] identifier[I] = identifier[int] ( identifier[offset] ) keyword[if] identifier[I] >= literal[int] : identifier[I] += literal[int] identifier[output] = identifier[_32bit_oper] ( identifier[value] ) keyword[if] identifier[indirect] : identifier[output] . identifier[append] ( literal[string] % identifier[I] ) identifier[output] . identifier[append] ( literal[string] ) identifier[REQUIRES] . identifier[add] ( literal[string] ) keyword[return] identifier[output] identifier[output] . identifier[append] ( literal[string] % identifier[I] ) identifier[output] . identifier[append] ( literal[string] ) identifier[REQUIRES] . identifier[add] ( literal[string] ) keyword[return] identifier[output]
def _pstore32(ins): """ Stores 2nd parameter at stack pointer (SP) + X, being X 1st parameter. 1st operand must be a SIGNED integer. """ value = ins.quad[2] offset = ins.quad[1] indirect = offset[0] == '*' if indirect: offset = offset[1:] # depends on [control=['if'], data=[]] I = int(offset) if I >= 0: I += 4 # Return Address + "push IX" # depends on [control=['if'], data=['I']] output = _32bit_oper(value) if indirect: output.append('ld bc, %i' % I) output.append('call __PISTORE32') REQUIRES.add('pistore32.asm') return output # depends on [control=['if'], data=[]] # direct store output.append('ld bc, %i' % I) output.append('call __PSTORE32') REQUIRES.add('pstore32.asm') return output
def element_screen_center(self, element):
    """
    :returns: The center point of the element.
    :rtype: class:`dict` with the field "left" set to the X coordinate
            and the field "top" set to the Y coordinate.
    """
    # Start from the element's top-left screen position and shift by
    # half its size in each dimension.
    center = self.element_screen_position(element)
    dims = element.size
    half_height = int(dims["height"] / 2)
    half_width = int(dims["width"] / 2)
    center["top"] += half_height
    center["left"] += half_width
    return center
def function[element_screen_center, parameter[self, element]]: constant[ :returns: The center point of the element. :rtype: class:`dict` with the field "left" set to the X coordinate and the field "top" set to the Y coordinate. ] variable[pos] assign[=] call[name[self].element_screen_position, parameter[name[element]]] variable[size] assign[=] name[element].size <ast.AugAssign object at 0x7da18bcc8c10> <ast.AugAssign object at 0x7da2047e8190> return[name[pos]]
keyword[def] identifier[element_screen_center] ( identifier[self] , identifier[element] ): literal[string] identifier[pos] = identifier[self] . identifier[element_screen_position] ( identifier[element] ) identifier[size] = identifier[element] . identifier[size] identifier[pos] [ literal[string] ]+= identifier[int] ( identifier[size] [ literal[string] ]/ literal[int] ) identifier[pos] [ literal[string] ]+= identifier[int] ( identifier[size] [ literal[string] ]/ literal[int] ) keyword[return] identifier[pos]
def element_screen_center(self, element): """ :returns: The center point of the element. :rtype: class:`dict` with the field "left" set to the X coordinate and the field "top" set to the Y coordinate. """ pos = self.element_screen_position(element) size = element.size pos['top'] += int(size['height'] / 2) pos['left'] += int(size['width'] / 2) return pos
def add_sparse_covariance_matrix(self,x,y,names,iidx,jidx,data):
    """append the COO-format entries of the covariance matrix implied by
    this Vario2d to the caller-supplied lists.

    Note: this method mutates ``iidx``, ``jidx`` and ``data`` in place
    (appending one diagonal entry per point plus the symmetric
    off-diagonal entries) and returns None.

    Parameters
    ----------
    x : (iterable of floats)
        x-coordinate locations
    y : (iterable of floats)
        y-coordinate locations
    names : (iterable of str)
        names of locations, one per (x, y) pair
    iidx : list
        row indices; extended in place
    jidx : list
        col indices; extended in place
    data : list
        nonzero entries; extended in place

    Returns
    -------
    None

    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    if not isinstance(y, np.ndarray):
        y = np.array(y)
    assert x.shape[0] == y.shape[0]
    assert x.shape[0] == len(names)

    # diagonal: every point gets the variogram contribution (the sill)
    for i,name in enumerate(names):
        iidx.append(i)
        jidx.append(i)
        data.append(self.contribution)

    # off-diagonal: vectorized over all points after i1 (upper triangle),
    # then mirrored below to keep the matrix symmetric
    for i1, (n1, x1, y1) in enumerate(zip(names, x, y)):
        dx = x1 - x[i1 + 1:]
        dy = y1 - y[i1 + 1:]
        # rotate separation vectors into the variogram's principal axes
        dxx, dyy = self._apply_rotation(dx, dy)
        h = np.sqrt(dxx * dxx + dyy * dyy)

        # guard against numerical noise producing negative separations
        h[h < 0.0] = 0.0
        cv = self._h_function(h)
        if np.any(np.isnan(cv)):
            raise Exception("nans in cv for i1 {0}".format(i1))
        #cv[h>self.a] = 0.0
        # upper-triangle entries: row i1, columns i1+1..n-1
        j = list(np.arange(i1+1,x.shape[0]))
        i = [i1] * len(j)
        iidx.extend(i)
        jidx.extend(j)
        data.extend(list(cv))
        # replicate across the diagonal
        iidx.extend(j)
        jidx.extend(i)
        data.extend(list(cv))
def function[add_sparse_covariance_matrix, parameter[self, x, y, names, iidx, jidx, data]]: constant[build a pyemu.SparseMatrix instance implied by Vario2d Parameters ---------- x : (iterable of floats) x-coordinate locations y : (iterable of floats) y-coordinate locations names : (iterable of str) names of locations. If None, cov must not be None iidx : 1-D ndarray i row indices jidx : 1-D ndarray j col indices data : 1-D ndarray nonzero entries Returns ------- None ] if <ast.UnaryOp object at 0x7da1b1d4f730> begin[:] variable[x] assign[=] call[name[np].array, parameter[name[x]]] if <ast.UnaryOp object at 0x7da1b1d4cb50> begin[:] variable[y] assign[=] call[name[np].array, parameter[name[y]]] assert[compare[call[name[x].shape][constant[0]] equal[==] call[name[y].shape][constant[0]]]] assert[compare[call[name[x].shape][constant[0]] equal[==] call[name[len], parameter[name[names]]]]] for taget[tuple[[<ast.Name object at 0x7da1b1d4e590>, <ast.Name object at 0x7da1b1d4dc30>]]] in starred[call[name[enumerate], parameter[name[names]]]] begin[:] call[name[iidx].append, parameter[name[i]]] call[name[jidx].append, parameter[name[i]]] call[name[data].append, parameter[name[self].contribution]] for taget[tuple[[<ast.Name object at 0x7da1b23edde0>, <ast.Tuple object at 0x7da1b23eeb90>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[names], name[x], name[y]]]]]] begin[:] variable[dx] assign[=] binary_operation[name[x1] - call[name[x]][<ast.Slice object at 0x7da1b23ec4f0>]] variable[dy] assign[=] binary_operation[name[y1] - call[name[y]][<ast.Slice object at 0x7da1b23eff70>]] <ast.Tuple object at 0x7da1b23edb70> assign[=] call[name[self]._apply_rotation, parameter[name[dx], name[dy]]] variable[h] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[name[dxx] * name[dxx]] + binary_operation[name[dyy] * name[dyy]]]]] call[name[h]][compare[name[h] less[<] constant[0.0]]] assign[=] constant[0.0] variable[cv] assign[=] 
call[name[self]._h_function, parameter[name[h]]] if call[name[np].any, parameter[call[name[np].isnan, parameter[name[cv]]]]] begin[:] <ast.Raise object at 0x7da1b23ef8e0> variable[j] assign[=] call[name[list], parameter[call[name[np].arange, parameter[binary_operation[name[i1] + constant[1]], call[name[x].shape][constant[0]]]]]] variable[i] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b23ecd00>]] * call[name[len], parameter[name[j]]]] call[name[iidx].extend, parameter[name[i]]] call[name[jidx].extend, parameter[name[j]]] call[name[data].extend, parameter[call[name[list], parameter[name[cv]]]]] call[name[iidx].extend, parameter[name[j]]] call[name[jidx].extend, parameter[name[i]]] call[name[data].extend, parameter[call[name[list], parameter[name[cv]]]]]
keyword[def] identifier[add_sparse_covariance_matrix] ( identifier[self] , identifier[x] , identifier[y] , identifier[names] , identifier[iidx] , identifier[jidx] , identifier[data] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[x] , identifier[np] . identifier[ndarray] ): identifier[x] = identifier[np] . identifier[array] ( identifier[x] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[y] , identifier[np] . identifier[ndarray] ): identifier[y] = identifier[np] . identifier[array] ( identifier[y] ) keyword[assert] identifier[x] . identifier[shape] [ literal[int] ]== identifier[y] . identifier[shape] [ literal[int] ] keyword[assert] identifier[x] . identifier[shape] [ literal[int] ]== identifier[len] ( identifier[names] ) keyword[for] identifier[i] , identifier[name] keyword[in] identifier[enumerate] ( identifier[names] ): identifier[iidx] . identifier[append] ( identifier[i] ) identifier[jidx] . identifier[append] ( identifier[i] ) identifier[data] . identifier[append] ( identifier[self] . identifier[contribution] ) keyword[for] identifier[i1] ,( identifier[n1] , identifier[x1] , identifier[y1] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[names] , identifier[x] , identifier[y] )): identifier[dx] = identifier[x1] - identifier[x] [ identifier[i1] + literal[int] :] identifier[dy] = identifier[y1] - identifier[y] [ identifier[i1] + literal[int] :] identifier[dxx] , identifier[dyy] = identifier[self] . identifier[_apply_rotation] ( identifier[dx] , identifier[dy] ) identifier[h] = identifier[np] . identifier[sqrt] ( identifier[dxx] * identifier[dxx] + identifier[dyy] * identifier[dyy] ) identifier[h] [ identifier[h] < literal[int] ]= literal[int] identifier[cv] = identifier[self] . identifier[_h_function] ( identifier[h] ) keyword[if] identifier[np] . identifier[any] ( identifier[np] . identifier[isnan] ( identifier[cv] )): keyword[raise] identifier[Exception] ( literal[string] . 
identifier[format] ( identifier[i1] )) identifier[j] = identifier[list] ( identifier[np] . identifier[arange] ( identifier[i1] + literal[int] , identifier[x] . identifier[shape] [ literal[int] ])) identifier[i] =[ identifier[i1] ]* identifier[len] ( identifier[j] ) identifier[iidx] . identifier[extend] ( identifier[i] ) identifier[jidx] . identifier[extend] ( identifier[j] ) identifier[data] . identifier[extend] ( identifier[list] ( identifier[cv] )) identifier[iidx] . identifier[extend] ( identifier[j] ) identifier[jidx] . identifier[extend] ( identifier[i] ) identifier[data] . identifier[extend] ( identifier[list] ( identifier[cv] ))
def add_sparse_covariance_matrix(self, x, y, names, iidx, jidx, data): """build a pyemu.SparseMatrix instance implied by Vario2d Parameters ---------- x : (iterable of floats) x-coordinate locations y : (iterable of floats) y-coordinate locations names : (iterable of str) names of locations. If None, cov must not be None iidx : 1-D ndarray i row indices jidx : 1-D ndarray j col indices data : 1-D ndarray nonzero entries Returns ------- None """ if not isinstance(x, np.ndarray): x = np.array(x) # depends on [control=['if'], data=[]] if not isinstance(y, np.ndarray): y = np.array(y) # depends on [control=['if'], data=[]] assert x.shape[0] == y.shape[0] assert x.shape[0] == len(names) # c = np.zeros((len(names), len(names))) # np.fill_diagonal(c, self.contribution) # cov = Cov(x=c, names=names) # elif cov is not None: # assert cov.shape[0] == x.shape[0] # names = cov.row_names # c = np.zeros((len(names), 1)) + self.contribution # cont = Cov(x=c, names=names, isdiagonal=True) # cov += cont # # else: # raise Exception("Vario2d.covariance_matrix() requires either" + # "names or cov arg") # rc = self.rotation_coefs for (i, name) in enumerate(names): iidx.append(i) jidx.append(i) data.append(self.contribution) # depends on [control=['for'], data=[]] for (i1, (n1, x1, y1)) in enumerate(zip(names, x, y)): dx = x1 - x[i1 + 1:] dy = y1 - y[i1 + 1:] (dxx, dyy) = self._apply_rotation(dx, dy) h = np.sqrt(dxx * dxx + dyy * dyy) h[h < 0.0] = 0.0 cv = self._h_function(h) if np.any(np.isnan(cv)): raise Exception('nans in cv for i1 {0}'.format(i1)) # depends on [control=['if'], data=[]] #cv[h>self.a] = 0.0 j = list(np.arange(i1 + 1, x.shape[0])) i = [i1] * len(j) iidx.extend(i) jidx.extend(j) data.extend(list(cv)) # replicate across the diagonal iidx.extend(j) jidx.extend(i) data.extend(list(cv)) # depends on [control=['for'], data=[]]
def get_input(prompt, default=None, exit_msg='bye!'):
    """
    Reads stdin, exiting with a message if interrupted or on EOF.

    :param prompt: Text displayed to the user before reading input.
    :param default: Value returned when the user enters only whitespace.
        If None, the (empty) raw input string is returned instead.
    :param exit_msg: Message printed before exiting on Ctrl-C / Ctrl-D.

    :return: The entered input. Converts to an integer if possible.
    :rtype: ``str`` or ``int``
    """
    try:
        response = six.moves.input(prompt)
    except (KeyboardInterrupt, EOFError):
        print()
        print(exit_msg)
        # `exit()` is only injected by the `site` module and can be missing
        # in embedded/frozen interpreters or under `python -S`; raising
        # SystemExit is what exit() does and is always available.
        raise SystemExit
    try:
        return int(response)
    except ValueError:
        # Non-numeric input: fall back to the default only for blank input.
        if response.strip() == "" and default is not None:
            return default
        else:
            return response
def function[get_input, parameter[prompt, default, exit_msg]]: constant[ Reads stdin, exits with a message if interrupted, EOF, or a quit message. :return: The entered input. Converts to an integer if possible. :rtype: ``str`` or ``int`` ] <ast.Try object at 0x7da18fe90550> <ast.Try object at 0x7da18fe903d0>
keyword[def] identifier[get_input] ( identifier[prompt] , identifier[default] = keyword[None] , identifier[exit_msg] = literal[string] ): literal[string] keyword[try] : identifier[response] = identifier[six] . identifier[moves] . identifier[input] ( identifier[prompt] ) keyword[except] ( identifier[KeyboardInterrupt] , identifier[EOFError] ): identifier[print] () identifier[print] ( identifier[exit_msg] ) identifier[exit] () keyword[try] : keyword[return] identifier[int] ( identifier[response] ) keyword[except] identifier[ValueError] : keyword[if] identifier[response] . identifier[strip] ()== literal[string] keyword[and] identifier[default] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[default] keyword[else] : keyword[return] identifier[response]
def get_input(prompt, default=None, exit_msg='bye!'): """ Reads stdin, exits with a message if interrupted, EOF, or a quit message. :return: The entered input. Converts to an integer if possible. :rtype: ``str`` or ``int`` """ try: response = six.moves.input(prompt) # depends on [control=['try'], data=[]] except (KeyboardInterrupt, EOFError): print() print(exit_msg) exit() # depends on [control=['except'], data=[]] try: return int(response) # depends on [control=['try'], data=[]] except ValueError: if response.strip() == '' and default is not None: return default # depends on [control=['if'], data=[]] else: return response # depends on [control=['except'], data=[]]
def _unpack_case(self, case): """ Returns the contents of the case to be used in the OPF. """ base_mva = case.base_mva b = case.connected_buses l = case.online_branches g = case.online_generators nb = len(b) nl = len(l) ng = len(g) return b, l, g, nb, nl, ng, base_mva
def function[_unpack_case, parameter[self, case]]: constant[ Returns the contents of the case to be used in the OPF. ] variable[base_mva] assign[=] name[case].base_mva variable[b] assign[=] name[case].connected_buses variable[l] assign[=] name[case].online_branches variable[g] assign[=] name[case].online_generators variable[nb] assign[=] call[name[len], parameter[name[b]]] variable[nl] assign[=] call[name[len], parameter[name[l]]] variable[ng] assign[=] call[name[len], parameter[name[g]]] return[tuple[[<ast.Name object at 0x7da1b257ffd0>, <ast.Name object at 0x7da1b257ff40>, <ast.Name object at 0x7da1b257ff10>, <ast.Name object at 0x7da1b257fe80>, <ast.Name object at 0x7da1b257fe50>, <ast.Name object at 0x7da1b257fee0>, <ast.Name object at 0x7da1b257feb0>]]]
keyword[def] identifier[_unpack_case] ( identifier[self] , identifier[case] ): literal[string] identifier[base_mva] = identifier[case] . identifier[base_mva] identifier[b] = identifier[case] . identifier[connected_buses] identifier[l] = identifier[case] . identifier[online_branches] identifier[g] = identifier[case] . identifier[online_generators] identifier[nb] = identifier[len] ( identifier[b] ) identifier[nl] = identifier[len] ( identifier[l] ) identifier[ng] = identifier[len] ( identifier[g] ) keyword[return] identifier[b] , identifier[l] , identifier[g] , identifier[nb] , identifier[nl] , identifier[ng] , identifier[base_mva]
def _unpack_case(self, case): """ Returns the contents of the case to be used in the OPF. """ base_mva = case.base_mva b = case.connected_buses l = case.online_branches g = case.online_generators nb = len(b) nl = len(l) ng = len(g) return (b, l, g, nb, nl, ng, base_mva)
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats,
                   stat_dtype):
  """Creates a batch normalization op.

  Uses tf.nn.batch_normalization by default, or tf.nn.fused_batch_norm
  when fused batch normalization is enabled on this module.

  Args:
    input_batch: A input Tensor of arbitrary dimension.
    mean: A mean tensor, of the same dtype as `input_batch`.
    variance: A variance tensor, of the same dtype as `input_batch`.
    use_batch_stats: A bool value that indicates whether the operation should
      use the batch statistics.
    stat_dtype: TensorFlow datatype used for the moving mean and variance.

  Returns:
    A batch normalization operation.
    The current mean tensor, of datatype `stat_dtype`.
    The current variance tensor, of datatype `stat_dtype`.
  """
  if self._fused:
    # Fused path: hand the moving-statistic variables straight to the
    # fused op. For the non-training case (not using batch stats) these
    # are already stored in the correct dtype, even for float16 input.
    batch_norm_op, mean, variance = self._fused_batch_norm_op(
        input_batch,
        self._moving_mean,
        self._moving_variance,
        use_batch_stats)
  else:
    batch_norm_op = tf.nn.batch_normalization(
        input_batch,
        mean,
        variance,
        self._beta,
        self._gamma,
        self._eps,
        name="batch_norm")

  # Echo the mean/variance actually used so callers can update the moving
  # statistics; cast them to `stat_dtype` if the input dtype differs.
  if input_batch.dtype.base_dtype != stat_dtype:
    mean = tf.cast(mean, stat_dtype)
    variance = tf.cast(variance, stat_dtype)

  return batch_norm_op, mean, variance
def function[_batch_norm_op, parameter[self, input_batch, mean, variance, use_batch_stats, stat_dtype]]: constant[Creates a batch normalization op. It uses the tf.nn.batch_normalization op by default and the tf.nn.fused_batch_norm op to support fused batch normalization. Args: input_batch: A input Tensor of arbitrary dimension. mean: A mean tensor, of the same dtype as `input_batch`. variance: A variance tensor, of the same dtype as `input_batch`. use_batch_stats: A bool value that indicates whether the operation should use the batch statistics. stat_dtype: TensorFlow datatype used for the moving mean and variance. Returns: A batch normalization operation. The current mean tensor, of datatype `stat_dtype`. The current variance tensor, of datatype `stat_dtype`. ] if name[self]._fused begin[:] <ast.Tuple object at 0x7da1b1c78a60> assign[=] call[name[self]._fused_batch_norm_op, parameter[name[input_batch], name[self]._moving_mean, name[self]._moving_variance, name[use_batch_stats]]] return[tuple[[<ast.Name object at 0x7da1b1c840d0>, <ast.Name object at 0x7da1b1c87100>, <ast.Name object at 0x7da1b1c87b80>]]]
keyword[def] identifier[_batch_norm_op] ( identifier[self] , identifier[input_batch] , identifier[mean] , identifier[variance] , identifier[use_batch_stats] , identifier[stat_dtype] ): literal[string] keyword[if] identifier[self] . identifier[_fused] : identifier[batch_norm_op] , identifier[mean] , identifier[variance] = identifier[self] . identifier[_fused_batch_norm_op] ( identifier[input_batch] , identifier[self] . identifier[_moving_mean] , identifier[self] . identifier[_moving_variance] , identifier[use_batch_stats] ) keyword[else] : identifier[batch_norm_op] = identifier[tf] . identifier[nn] . identifier[batch_normalization] ( identifier[input_batch] , identifier[mean] , identifier[variance] , identifier[self] . identifier[_beta] , identifier[self] . identifier[_gamma] , identifier[self] . identifier[_eps] , identifier[name] = literal[string] ) keyword[if] identifier[input_batch] . identifier[dtype] . identifier[base_dtype] != identifier[stat_dtype] : identifier[mean] = identifier[tf] . identifier[cast] ( identifier[mean] , identifier[stat_dtype] ) identifier[variance] = identifier[tf] . identifier[cast] ( identifier[variance] , identifier[stat_dtype] ) keyword[return] identifier[batch_norm_op] , identifier[mean] , identifier[variance]
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats, stat_dtype): """Creates a batch normalization op. It uses the tf.nn.batch_normalization op by default and the tf.nn.fused_batch_norm op to support fused batch normalization. Args: input_batch: A input Tensor of arbitrary dimension. mean: A mean tensor, of the same dtype as `input_batch`. variance: A variance tensor, of the same dtype as `input_batch`. use_batch_stats: A bool value that indicates whether the operation should use the batch statistics. stat_dtype: TensorFlow datatype used for the moving mean and variance. Returns: A batch normalization operation. The current mean tensor, of datatype `stat_dtype`. The current variance tensor, of datatype `stat_dtype`. """ if self._fused: # For the non-training case where not using batch stats, # pass in the moving statistic variables directly. # These will already be in the correct dtype, even for float16 input. (batch_norm_op, mean, variance) = self._fused_batch_norm_op(input_batch, self._moving_mean, self._moving_variance, use_batch_stats) # depends on [control=['if'], data=[]] else: batch_norm_op = tf.nn.batch_normalization(input_batch, mean, variance, self._beta, self._gamma, self._eps, name='batch_norm') # We'll echo the supplied mean and variance so that they can also be used # to update the moving statistics. Cast to matching type if necessary. if input_batch.dtype.base_dtype != stat_dtype: mean = tf.cast(mean, stat_dtype) variance = tf.cast(variance, stat_dtype) # depends on [control=['if'], data=['stat_dtype']] return (batch_norm_op, mean, variance)
def load_character_images(self, verbose=True):
    """
    Generator to load all images in the dataset.

    Yields (image, character) pairs until all images have been loaded.

    :param verbose: Forwarded to ``load_dataset``; controls progress output.
    :raises RuntimeError: If any character set could not be downloaded.
    :return: (Pillow.Image.Image, string) tuples
    """
    # Verify every dataset is present up front, so a missing download
    # surfaces immediately instead of mid-iteration. Note: the original
    # used `assert`, which is silently stripped under `python -O`;
    # raising explicitly keeps the check in optimized runs.
    for dataset in self.character_sets:
        if self.get_dataset(dataset) is not True:
            raise RuntimeError("Datasets aren't properly downloaded, "
                               "rerun to try again or download datasets manually.")

    for dataset in self.character_sets:
        for image, label in self.load_dataset(dataset, verbose=verbose):
            yield image, label
def function[load_character_images, parameter[self, verbose]]: constant[ Generator to load all images in the dataset. Yields (image, character) pairs until all images have been loaded. :return: (Pillow.Image.Image, string) tuples ] for taget[name[dataset]] in starred[name[self].character_sets] begin[:] assert[compare[call[name[self].get_dataset, parameter[name[dataset]]] is constant[True]]] for taget[name[dataset]] in starred[name[self].character_sets] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b0f28280>, <ast.Name object at 0x7da1b0f2a230>]]] in starred[call[name[self].load_dataset, parameter[name[dataset]]]] begin[:] <ast.Yield object at 0x7da1b0f28190>
keyword[def] identifier[load_character_images] ( identifier[self] , identifier[verbose] = keyword[True] ): literal[string] keyword[for] identifier[dataset] keyword[in] identifier[self] . identifier[character_sets] : keyword[assert] identifier[self] . identifier[get_dataset] ( identifier[dataset] ) keyword[is] keyword[True] , literal[string] literal[string] keyword[for] identifier[dataset] keyword[in] identifier[self] . identifier[character_sets] : keyword[for] identifier[image] , identifier[label] keyword[in] identifier[self] . identifier[load_dataset] ( identifier[dataset] , identifier[verbose] = identifier[verbose] ): keyword[yield] identifier[image] , identifier[label]
def load_character_images(self, verbose=True): """ Generator to load all images in the dataset. Yields (image, character) pairs until all images have been loaded. :return: (Pillow.Image.Image, string) tuples """ for dataset in self.character_sets: assert self.get_dataset(dataset) is True, "Datasets aren't properly downloaded, rerun to try again or download datasets manually." # depends on [control=['for'], data=['dataset']] for dataset in self.character_sets: for (image, label) in self.load_dataset(dataset, verbose=verbose): yield (image, label) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['dataset']]
def _to_epoch(self, ts): """ Adds a year to the syslog timestamp because syslog doesn't use years :param ts: The timestamp to add a year to :return: Date/time string that includes a year """ year = self.year tmpts = "%s %s" % (ts, str(self.year)) new_time = int(calendar.timegm(time.strptime(tmpts, "%b %d %H:%M:%S %Y"))) # If adding the year puts it in the future, this log must be from last year if new_time > int(time.time()): year -= 1 tmpts = "%s %s" % (ts, str(year)) new_time = int(calendar.timegm(time.strptime(tmpts, "%b %d %H:%M:%S %Y"))) return new_time
def function[_to_epoch, parameter[self, ts]]: constant[ Adds a year to the syslog timestamp because syslog doesn't use years :param ts: The timestamp to add a year to :return: Date/time string that includes a year ] variable[year] assign[=] name[self].year variable[tmpts] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f99c90>, <ast.Call object at 0x7da207f9b7c0>]]] variable[new_time] assign[=] call[name[int], parameter[call[name[calendar].timegm, parameter[call[name[time].strptime, parameter[name[tmpts], constant[%b %d %H:%M:%S %Y]]]]]]] if compare[name[new_time] greater[>] call[name[int], parameter[call[name[time].time, parameter[]]]]] begin[:] <ast.AugAssign object at 0x7da207f99e10> variable[tmpts] assign[=] binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f9aef0>, <ast.Call object at 0x7da207f9a1d0>]]] variable[new_time] assign[=] call[name[int], parameter[call[name[calendar].timegm, parameter[call[name[time].strptime, parameter[name[tmpts], constant[%b %d %H:%M:%S %Y]]]]]]] return[name[new_time]]
keyword[def] identifier[_to_epoch] ( identifier[self] , identifier[ts] ): literal[string] identifier[year] = identifier[self] . identifier[year] identifier[tmpts] = literal[string] %( identifier[ts] , identifier[str] ( identifier[self] . identifier[year] )) identifier[new_time] = identifier[int] ( identifier[calendar] . identifier[timegm] ( identifier[time] . identifier[strptime] ( identifier[tmpts] , literal[string] ))) keyword[if] identifier[new_time] > identifier[int] ( identifier[time] . identifier[time] ()): identifier[year] -= literal[int] identifier[tmpts] = literal[string] %( identifier[ts] , identifier[str] ( identifier[year] )) identifier[new_time] = identifier[int] ( identifier[calendar] . identifier[timegm] ( identifier[time] . identifier[strptime] ( identifier[tmpts] , literal[string] ))) keyword[return] identifier[new_time]
def _to_epoch(self, ts): """ Adds a year to the syslog timestamp because syslog doesn't use years :param ts: The timestamp to add a year to :return: Date/time string that includes a year """ year = self.year tmpts = '%s %s' % (ts, str(self.year)) new_time = int(calendar.timegm(time.strptime(tmpts, '%b %d %H:%M:%S %Y'))) # If adding the year puts it in the future, this log must be from last year if new_time > int(time.time()): year -= 1 tmpts = '%s %s' % (ts, str(year)) new_time = int(calendar.timegm(time.strptime(tmpts, '%b %d %H:%M:%S %Y'))) # depends on [control=['if'], data=['new_time']] return new_time
def PythonPercentFormat(format_str):
    """Use Python % format strings as template format specifiers.

    Returns a one-argument formatter callable when ``format_str`` begins
    with the ``'printf '`` prefix, and ``None`` otherwise.
    """
    prefix = 'printf '
    if not format_str.startswith(prefix):
        return None
    fmt = format_str[len(prefix):]
    return lambda value: fmt % value
def function[PythonPercentFormat, parameter[format_str]]: constant[Use Python % format strings as template format specifiers.] if call[name[format_str].startswith, parameter[constant[printf ]]] begin[:] variable[fmt] assign[=] call[name[format_str]][<ast.Slice object at 0x7da18f00ed40>] return[<ast.Lambda object at 0x7da18f00d5a0>]
keyword[def] identifier[PythonPercentFormat] ( identifier[format_str] ): literal[string] keyword[if] identifier[format_str] . identifier[startswith] ( literal[string] ): identifier[fmt] = identifier[format_str] [ identifier[len] ( literal[string] ):] keyword[return] keyword[lambda] identifier[value] : identifier[fmt] % identifier[value] keyword[else] : keyword[return] keyword[None]
def PythonPercentFormat(format_str): """Use Python % format strings as template format specifiers.""" if format_str.startswith('printf '): fmt = format_str[len('printf '):] return lambda value: fmt % value # depends on [control=['if'], data=[]] else: return None
def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):
    """Update a list of parsed values with a new value.

    Stores ``next_value`` into ``v_values`` ``n_vals`` times.  Without an
    explicit index (``v_idx`` falsy) values are simply appended.  With an
    index iterator they are written at the multidimensional position it
    yields, growing the nested list structure as needed.

    ``v_idx`` is presumably an index iterator exposing a ``first``
    attribute with the declared start index of each dimension — TODO
    confirm against the caller.
    """
    for _ in range(n_vals):
        if v_idx:
            try:
                v_i = next(v_idx)
            except StopIteration:
                # Repeating commas are null-statements and can be ignored
                # Otherwise, we warn the user that this is a bad namelist
                if next_value is not None:
                    warnings.warn('f90nml: warning: Value {0} is not assigned to '
                                  'any variable and has been removed.'
                                  ''.format(next_value))

                # There are more values than indices, so we stop here
                break

            # Per-dimension start index; dimensions without an explicit
            # start fall back to the configured default.
            v_s = [self.default_start_index if idx is None else idx
                   for idx in v_idx.first]

            # Column-major input: reverse both index and start tuples so
            # the nesting below is always walked in row-major order.
            if not self.row_major:
                v_i = v_i[::-1]
                v_s = v_s[::-1]

            # Multidimensional arrays
            if not self.sparse_arrays:
                pad_array(v_values, list(zip(v_i, v_s)))

            # We iterate inside the v_values and inspect successively
            # deeper lists within the list tree.  If the requested index is
            # missing, we re-size that particular entry.
            # (NOTE: This is unnecessary when sparse_arrays is disabled.)
            v_subval = v_values
            for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):
                try:
                    v_subval = v_subval[i_v - i_s]
                except IndexError:
                    # Grow this level with empty sublists up to the
                    # requested position, then descend into it.
                    size = len(v_subval)
                    v_subval.extend([] for _ in range(size, i_v - i_s + 1))
                    v_subval = v_subval[i_v - i_s]

            # On the deepest level, we explicitly assign the value
            i_v, i_s = v_i[-1], v_s[-1]
            try:
                v_subval[i_v - i_s] = next_value
            except IndexError:
                # Pad the final level with None up to the target slot.
                size = len(v_subval)
                v_subval.extend(None for _ in range(size, i_v - i_s + 1))
                v_subval[i_v - i_s] = next_value
        else:
            v_values.append(next_value)
def function[_append_value, parameter[self, v_values, next_value, v_idx, n_vals]]: constant[Update a list of parsed values with a new value.] for taget[name[_]] in starred[call[name[range], parameter[name[n_vals]]]] begin[:] if name[v_idx] begin[:] <ast.Try object at 0x7da1b033d870> variable[v_s] assign[=] <ast.ListComp object at 0x7da1b033fca0> if <ast.UnaryOp object at 0x7da1b034bf40> begin[:] variable[v_i] assign[=] call[name[v_i]][<ast.Slice object at 0x7da1b0348070>] variable[v_s] assign[=] call[name[v_s]][<ast.Slice object at 0x7da1b0349360>] if <ast.UnaryOp object at 0x7da1b034add0> begin[:] call[name[pad_array], parameter[name[v_values], call[name[list], parameter[call[name[zip], parameter[name[v_i], name[v_s]]]]]]] variable[v_subval] assign[=] name[v_values] for taget[tuple[[<ast.Name object at 0x7da1b034b250>, <ast.Name object at 0x7da1b0348100>]]] in starred[call[name[zip], parameter[call[name[v_i]][<ast.Slice object at 0x7da1b0349ba0>], call[name[v_s]][<ast.Slice object at 0x7da1b0349450>]]]] begin[:] <ast.Try object at 0x7da1b034b100> <ast.Tuple object at 0x7da1b0348f10> assign[=] tuple[[<ast.Subscript object at 0x7da1b0349d20>, <ast.Subscript object at 0x7da1b0349f00>]] <ast.Try object at 0x7da1b034ac80>
keyword[def] identifier[_append_value] ( identifier[self] , identifier[v_values] , identifier[next_value] , identifier[v_idx] = keyword[None] , identifier[n_vals] = literal[int] ): literal[string] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[n_vals] ): keyword[if] identifier[v_idx] : keyword[try] : identifier[v_i] = identifier[next] ( identifier[v_idx] ) keyword[except] identifier[StopIteration] : keyword[if] identifier[next_value] keyword[is] keyword[not] keyword[None] : identifier[warnings] . identifier[warn] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[next_value] )) keyword[break] identifier[v_s] =[ identifier[self] . identifier[default_start_index] keyword[if] identifier[idx] keyword[is] keyword[None] keyword[else] identifier[idx] keyword[for] identifier[idx] keyword[in] identifier[v_idx] . identifier[first] ] keyword[if] keyword[not] identifier[self] . identifier[row_major] : identifier[v_i] = identifier[v_i] [::- literal[int] ] identifier[v_s] = identifier[v_s] [::- literal[int] ] keyword[if] keyword[not] identifier[self] . identifier[sparse_arrays] : identifier[pad_array] ( identifier[v_values] , identifier[list] ( identifier[zip] ( identifier[v_i] , identifier[v_s] ))) identifier[v_subval] = identifier[v_values] keyword[for] ( identifier[i_v] , identifier[i_s] ) keyword[in] identifier[zip] ( identifier[v_i] [:- literal[int] ], identifier[v_s] [:- literal[int] ]): keyword[try] : identifier[v_subval] = identifier[v_subval] [ identifier[i_v] - identifier[i_s] ] keyword[except] identifier[IndexError] : identifier[size] = identifier[len] ( identifier[v_subval] ) identifier[v_subval] . 
identifier[extend] ([] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[size] , identifier[i_v] - identifier[i_s] + literal[int] )) identifier[v_subval] = identifier[v_subval] [ identifier[i_v] - identifier[i_s] ] identifier[i_v] , identifier[i_s] = identifier[v_i] [- literal[int] ], identifier[v_s] [- literal[int] ] keyword[try] : identifier[v_subval] [ identifier[i_v] - identifier[i_s] ]= identifier[next_value] keyword[except] identifier[IndexError] : identifier[size] = identifier[len] ( identifier[v_subval] ) identifier[v_subval] . identifier[extend] ( keyword[None] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[size] , identifier[i_v] - identifier[i_s] + literal[int] )) identifier[v_subval] [ identifier[i_v] - identifier[i_s] ]= identifier[next_value] keyword[else] : identifier[v_values] . identifier[append] ( identifier[next_value] )
def _append_value(self, v_values, next_value, v_idx=None, n_vals=1): """Update a list of parsed values with a new value.""" for _ in range(n_vals): if v_idx: try: v_i = next(v_idx) # depends on [control=['try'], data=[]] except StopIteration: # Repeating commas are null-statements and can be ignored # Otherwise, we warn the user that this is a bad namelist if next_value is not None: warnings.warn('f90nml: warning: Value {0} is not assigned to any variable and has been removed.'.format(next_value)) # depends on [control=['if'], data=['next_value']] # There are more values than indices, so we stop here break # depends on [control=['except'], data=[]] v_s = [self.default_start_index if idx is None else idx for idx in v_idx.first] if not self.row_major: v_i = v_i[::-1] v_s = v_s[::-1] # depends on [control=['if'], data=[]] # Multidimensional arrays if not self.sparse_arrays: pad_array(v_values, list(zip(v_i, v_s))) # depends on [control=['if'], data=[]] # We iterate inside the v_values and inspect successively # deeper lists within the list tree. If the requested index is # missing, we re-size that particular entry. # (NOTE: This is unnecessary when sparse_arrays is disabled.) v_subval = v_values for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]): try: v_subval = v_subval[i_v - i_s] # depends on [control=['try'], data=[]] except IndexError: size = len(v_subval) v_subval.extend(([] for _ in range(size, i_v - i_s + 1))) v_subval = v_subval[i_v - i_s] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # On the deepest level, we explicitly assign the value (i_v, i_s) = (v_i[-1], v_s[-1]) try: v_subval[i_v - i_s] = next_value # depends on [control=['try'], data=[]] except IndexError: size = len(v_subval) v_subval.extend((None for _ in range(size, i_v - i_s + 1))) v_subval[i_v - i_s] = next_value # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: v_values.append(next_value) # depends on [control=['for'], data=['_']]
def get_latex_maybe_optional_arg(s, pos, **parse_flags):
    """
    Attempts to parse an optional argument.  Returns a tuple
    `(groupnode, pos, len)` if success, otherwise returns None.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_maybe_optional_arg()`
       instead.
    """
    # Thin compatibility shim: build a walker and delegate to its method.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_maybe_optional_arg(pos=pos)
def function[get_latex_maybe_optional_arg, parameter[s, pos]]: constant[ Attempts to parse an optional argument. Returns a tuple `(groupnode, pos, len)` if success, otherwise returns None. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_maybe_optional_arg()` instead. ] return[call[call[name[LatexWalker], parameter[name[s]]].get_latex_maybe_optional_arg, parameter[]]]
keyword[def] identifier[get_latex_maybe_optional_arg] ( identifier[s] , identifier[pos] ,** identifier[parse_flags] ): literal[string] keyword[return] identifier[LatexWalker] ( identifier[s] ,** identifier[parse_flags] ). identifier[get_latex_maybe_optional_arg] ( identifier[pos] = identifier[pos] )
def get_latex_maybe_optional_arg(s, pos, **parse_flags): """ Attempts to parse an optional argument. Returns a tuple `(groupnode, pos, len)` if success, otherwise returns None. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_maybe_optional_arg()` instead. """ return LatexWalker(s, **parse_flags).get_latex_maybe_optional_arg(pos=pos)
def command_ack_send(self, command, result, force_mavlink1=False):
    '''
    Report status of a command. Includes feedback whether the command
    was executed.

    command : Command ID, as defined by MAV_CMD enum. (uint16_t)
    result  : See MAV_RESULT enum (uint8_t)
    '''
    # Encode the ACK message first, then hand it to the transport layer.
    ack_message = self.command_ack_encode(command, result)
    return self.send(ack_message, force_mavlink1=force_mavlink1)
def function[command_ack_send, parameter[self, command, result, force_mavlink1]]: constant[ Report status of a command. Includes feedback wether the command was executed. command : Command ID, as defined by MAV_CMD enum. (uint16_t) result : See MAV_RESULT enum (uint8_t) ] return[call[name[self].send, parameter[call[name[self].command_ack_encode, parameter[name[command], name[result]]]]]]
keyword[def] identifier[command_ack_send] ( identifier[self] , identifier[command] , identifier[result] , identifier[force_mavlink1] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[command_ack_encode] ( identifier[command] , identifier[result] ), identifier[force_mavlink1] = identifier[force_mavlink1] )
def command_ack_send(self, command, result, force_mavlink1=False): """ Report status of a command. Includes feedback wether the command was executed. command : Command ID, as defined by MAV_CMD enum. (uint16_t) result : See MAV_RESULT enum (uint8_t) """ return self.send(self.command_ack_encode(command, result), force_mavlink1=force_mavlink1)
def get_instances(self):
    """
    Return an iterator over the members of the LazyDict, as
    (key, value) pairs sorted by key.  Refreshes the member list
    first when an update is pending.
    """
    if self._update_members:
        self.update()
    sorted_members = sorted(self.members.iteritems())
    return iter(sorted_members)
def function[get_instances, parameter[self]]: constant[ Returns the members of the LazyDict ] if name[self]._update_members begin[:] call[name[self].update, parameter[]] return[call[name[iter], parameter[call[name[sorted], parameter[call[name[self].members.iteritems, parameter[]]]]]]]
keyword[def] identifier[get_instances] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_update_members] : identifier[self] . identifier[update] () keyword[return] identifier[iter] ( identifier[sorted] ( identifier[self] . identifier[members] . identifier[iteritems] ()))
def get_instances(self): """ Returns the members of the LazyDict """ if self._update_members: self.update() # depends on [control=['if'], data=[]] return iter(sorted(self.members.iteritems()))
def get_families(data_dir=None):
    '''Return a sorted list of all basis set family names.

    Parameters
    ----------
    data_dir : str or None
        Directory containing the basis-set data files.  When None, the
        default location (as resolved by ``fix_data_dir``) is used.
    '''
    data_dir = fix_data_dir(data_dir)
    metadata = get_metadata(data_dir)

    # Collect each basis set's family once; a set comprehension replaces
    # the former manual loop, and sorted() accepts the set directly
    # (no intermediate list needed).
    families = {v['family'] for v in metadata.values()}
    return sorted(families)
def function[get_families, parameter[data_dir]]: constant[Return a list of all basis set families] variable[data_dir] assign[=] call[name[fix_data_dir], parameter[name[data_dir]]] variable[metadata] assign[=] call[name[get_metadata], parameter[name[data_dir]]] variable[families] assign[=] call[name[set], parameter[]] for taget[name[v]] in starred[call[name[metadata].values, parameter[]]] begin[:] call[name[families].add, parameter[call[name[v]][constant[family]]]] return[call[name[sorted], parameter[call[name[list], parameter[name[families]]]]]]
keyword[def] identifier[get_families] ( identifier[data_dir] = keyword[None] ): literal[string] identifier[data_dir] = identifier[fix_data_dir] ( identifier[data_dir] ) identifier[metadata] = identifier[get_metadata] ( identifier[data_dir] ) identifier[families] = identifier[set] () keyword[for] identifier[v] keyword[in] identifier[metadata] . identifier[values] (): identifier[families] . identifier[add] ( identifier[v] [ literal[string] ]) keyword[return] identifier[sorted] ( identifier[list] ( identifier[families] ))
def get_families(data_dir=None): """Return a list of all basis set families""" data_dir = fix_data_dir(data_dir) metadata = get_metadata(data_dir) families = set() for v in metadata.values(): families.add(v['family']) # depends on [control=['for'], data=['v']] return sorted(list(families))
def json_request(endpoint, verb='GET', session_options=None, **options):
    """Like :func:`molotov.request` but extracts json from the response.

    Builds the underlying ``_request`` call with ``json=True`` and runs
    it inside a fresh event loop.
    """
    json_call = functools.partial(
        _request, endpoint, verb, session_options, json=True, **options)
    return _run_in_fresh_loop(json_call)
def function[json_request, parameter[endpoint, verb, session_options]]: constant[Like :func:`molotov.request` but extracts json from the response. ] variable[req] assign[=] call[name[functools].partial, parameter[name[_request], name[endpoint], name[verb], name[session_options]]] return[call[name[_run_in_fresh_loop], parameter[name[req]]]]
keyword[def] identifier[json_request] ( identifier[endpoint] , identifier[verb] = literal[string] , identifier[session_options] = keyword[None] ,** identifier[options] ): literal[string] identifier[req] = identifier[functools] . identifier[partial] ( identifier[_request] , identifier[endpoint] , identifier[verb] , identifier[session_options] , identifier[json] = keyword[True] ,** identifier[options] ) keyword[return] identifier[_run_in_fresh_loop] ( identifier[req] )
def json_request(endpoint, verb='GET', session_options=None, **options): """Like :func:`molotov.request` but extracts json from the response. """ req = functools.partial(_request, endpoint, verb, session_options, json=True, **options) return _run_in_fresh_loop(req)
def algorithm(G, method_name, **kwargs):
    """
    Apply a ``method`` from NetworkX to all :ref:`networkx.Graph
    <networkx:graph>` objects in the :class:`.GraphCollection` ``G``.

    For options, see the `list of algorithms
    <http://networkx.github.io/documentation/networkx-1.9/reference/algorithms.html>`_
    in the NetworkX documentation. Not all of these have been tested.

    Parameters
    ----------
    G : :class:`.GraphCollection`
        The collection to analyze; the method is applied to each graph.
    method_name : string
        Name of a method in NetworkX to execute on the graph collection.
    **kwargs
        Keyword arguments corresponding to the parameters of the
        specified method.

    Returns
    -------
    results : dict
        Indexed by element (node or edge) and graph index (e.g. ``date``).

    Raises
    ------
    ValueError
        If no such method exists.

    Examples
    --------
    *Betweenness centrality:* (``G`` is a :class:`.GraphCollection`\\)

    .. code-block:: python

       >>> from tethne.analyze import collection
       >>> BC = collection.algorithm(G, 'betweenness_centrality')
       >>> print BC[0]
       {1999: 0.010101651117889644,
       2000: 0.0008689093723107329,
       2001: 0.010504898852426189,
       2002: 0.009338654511194512,
       2003: 0.007519105636349891}
    """
    # Deprecated shim: flag the call, then delegate to the collection itself.
    warnings.warn(
        "To be removed in 0.8. Use GraphCollection.analyze instead.",
        DeprecationWarning)
    return G.analyze(method_name, **kwargs)
def function[algorithm, parameter[G, method_name]]: constant[ Apply a ``method`` from NetworkX to all :ref:`networkx.Graph <networkx:graph>` objects in the :class:`.GraphCollection` ``G``. For options, see the `list of algorithms <http://networkx.github.io/documentation/networkx-1.9/reference/algorithms.html>`_ in the NetworkX documentation. Not all of these have been tested. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method_name : string Name of a method in NetworkX to execute on graph collection. **kwargs A list of keyword arguments that should correspond to the parameters of the specified method. Returns ------- results : dict Indexed by element (node or edge) and graph index (e.g. ``date``). Raises ------ ValueError If no such method exists. Examples -------- *Betweenness centrality:* (``G`` is a :class:`.GraphCollection`\) .. code-block:: python >>> from tethne.analyze import collection >>> BC = collection.algorithm(G, 'betweenness_centrality') >>> print BC[0] {1999: 0.010101651117889644, 2000: 0.0008689093723107329, 2001: 0.010504898852426189, 2002: 0.009338654511194512, 2003: 0.007519105636349891} ] call[name[warnings].warn, parameter[constant[To be removed in 0.8. Use GraphCollection.analyze instead.], name[DeprecationWarning]]] return[call[name[G].analyze, parameter[name[method_name]]]]
keyword[def] identifier[algorithm] ( identifier[G] , identifier[method_name] ,** identifier[kwargs] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] ) keyword[return] identifier[G] . identifier[analyze] ( identifier[method_name] ,** identifier[kwargs] )
def algorithm(G, method_name, **kwargs): """ Apply a ``method`` from NetworkX to all :ref:`networkx.Graph <networkx:graph>` objects in the :class:`.GraphCollection` ``G``. For options, see the `list of algorithms <http://networkx.github.io/documentation/networkx-1.9/reference/algorithms.html>`_ in the NetworkX documentation. Not all of these have been tested. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method_name : string Name of a method in NetworkX to execute on graph collection. **kwargs A list of keyword arguments that should correspond to the parameters of the specified method. Returns ------- results : dict Indexed by element (node or edge) and graph index (e.g. ``date``). Raises ------ ValueError If no such method exists. Examples -------- *Betweenness centrality:* (``G`` is a :class:`.GraphCollection`\\) .. code-block:: python >>> from tethne.analyze import collection >>> BC = collection.algorithm(G, 'betweenness_centrality') >>> print BC[0] {1999: 0.010101651117889644, 2000: 0.0008689093723107329, 2001: 0.010504898852426189, 2002: 0.009338654511194512, 2003: 0.007519105636349891} """ warnings.warn('To be removed in 0.8. Use GraphCollection.analyze instead.', DeprecationWarning) return G.analyze(method_name, **kwargs)
def get_annotations(self):
    '''
    Build the queryset annotations used for ordering event registrations.

    Unlike get_ordering() below, it passes the actual Case() and F()
    objects that will be evaluated with the queryset, returned in a
    dictionary that is compatible with get_ordering().

    The active organization rule (``registration__orgRule`` constant)
    selects which fields populate ``nullParam`` (a Case ranking rows by
    which of the relevant fields are non-null), ``paramOne`` and
    ``paramTwo`` (the primary/secondary sort expressions).  Unused slots
    keep a null-valued Case placeholder.
    '''
    rule = getConstant('registration__orgRule')

    # Initialize with null values that get filled in based on the logic below.
    annotations = {
        'nullParam': Case(default_value=None,output_field=IntegerField()),
        'paramOne': Case(default_value=None,output_field=IntegerField()),
        'paramTwo': Case(default_value=None,output_field=IntegerField()),
    }

    if rule == 'SessionFirst':
        # Session start time first, falling back to (year, month).
        annotations.update({
            'nullParam': Case(
                When(session__startTime__isnull=False, then=0),
                When(month__isnull=False, then=1),
                default_value=2,
                output_field=IntegerField()
            ),
            'paramOne': F('session__startTime'),
            'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
        })
    elif rule == 'SessionAlphaFirst':
        # Session name (alphabetical) first, falling back to (year, month).
        annotations.update({
            'nullParam': Case(
                When(session__name__isnull=False, then=0),
                When(month__isnull=False, then=1),
                default_value=2,
                output_field=IntegerField()
            ),
            'paramOne': F('session__name'),
            'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
        })
    elif rule == 'Month':
        # Order purely by month, linearized as 12*year + month.
        annotations.update({
            'nullParam': Case(
                When(month__isnull=False, then=0),
                default_value=1,
                output_field=IntegerField()
            ),
            'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
        })
    elif rule == 'Session':
        # Order purely by session start time.
        annotations.update({
            'nullParam': Case(
                When(session__startTime__isnull=False, then=0),
                default_value=1,
                output_field=IntegerField()
            ),
            'paramOne': F('session__startTime'),
        })
    elif rule == 'SessionAlpha':
        # Order purely by session name.
        annotations.update({
            'nullParam': Case(
                When(session__name__isnull=False, then=0),
                default_value=1,
                output_field=IntegerField()
            ),
            'paramOne': F('session__name'),
        })
    elif rule == 'SessionMonth':
        # Month primary, session start time secondary; rows missing one
        # or both fields sort after complete rows.
        annotations.update({
            'nullParam': Case(
                When(Q(session__startTime__isnull=False) & Q(month__isnull=False), then=0),
                When(Q(session__startTime__isnull=True) & Q(month__isnull=False), then=1),
                When(Q(session__startTime__isnull=False) & Q(month__isnull=True), then=2),
                default_value=3,
                output_field=IntegerField()
            ),
            'paramOne': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
            'paramTwo': F('session__startTime'),
        })
    elif rule == 'SessionAlphaMonth':
        # Month primary, session name secondary; same null-ranking scheme.
        annotations.update({
            'nullParam': Case(
                When(Q(session__name__isnull=False) & Q(month__isnull=False), then=0),
                When(Q(session__name__isnull=True) & Q(month__isnull=False), then=1),
                When(Q(session__name__isnull=False) & Q(month__isnull=True), then=2),
                default_value=3,
                output_field=IntegerField()
            ),
            'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
            'paramTwo': F('session__name'),
        })
    elif rule == 'Weekday':
        # Order by day of week extracted from the start time.
        annotations.update({
            'nullParam': Case(
                When(startTime__week_day__isnull=False, then=0),
                default_value=1,
                output_field=IntegerField()
            ),
            'paramOne': ExtractWeekDay('startTime'),
        })
    elif rule == 'MonthWeekday':
        # Month primary, weekday secondary.
        annotations.update({
            'nullParam': Case(
                When(Q(month__isnull=False) & Q(startTime__week_day__isnull=False), then=0),
                default_value=1,
                output_field=IntegerField()
            ),
            'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
            'paramTwo': ExtractWeekDay('startTime'),
        })

    return annotations
def function[get_annotations, parameter[self]]: constant[ This method gets the annotations for the queryset. Unlike get_ordering() below, it passes the actual Case() and F() objects that will be evaluated with the queryset, returned in a dictionary that is compatible with get_ordering(). ] variable[rule] assign[=] call[name[getConstant], parameter[constant[registration__orgRule]]] variable[annotations] assign[=] dictionary[[<ast.Constant object at 0x7da18eb566b0>, <ast.Constant object at 0x7da18eb567a0>, <ast.Constant object at 0x7da18eb57370>], [<ast.Call object at 0x7da18eb55e10>, <ast.Call object at 0x7da18eb57cd0>, <ast.Call object at 0x7da18eb55f90>]] if compare[name[rule] equal[==] constant[SessionFirst]] begin[:] call[name[annotations].update, parameter[dictionary[[<ast.Constant object at 0x7da18eb57070>, <ast.Constant object at 0x7da18eb55d80>, <ast.Constant object at 0x7da18eb542e0>], [<ast.Call object at 0x7da18eb57df0>, <ast.Call object at 0x7da18eb57190>, <ast.Call object at 0x7da18eb56560>]]]] return[name[annotations]]
keyword[def] identifier[get_annotations] ( identifier[self] ): literal[string] identifier[rule] = identifier[getConstant] ( literal[string] ) identifier[annotations] ={ literal[string] : identifier[Case] ( identifier[default_value] = keyword[None] , identifier[output_field] = identifier[IntegerField] ()), literal[string] : identifier[Case] ( identifier[default_value] = keyword[None] , identifier[output_field] = identifier[IntegerField] ()), literal[string] : identifier[Case] ( identifier[default_value] = keyword[None] , identifier[output_field] = identifier[IntegerField] ()), } keyword[if] identifier[rule] == literal[string] : identifier[annotations] . identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[session__startTime__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[When] ( identifier[month__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[F] ( literal[string] ), literal[string] : identifier[ExpressionWrapper] ( literal[int] * identifier[F] ( literal[string] )+ identifier[F] ( literal[string] ), identifier[output_field] = identifier[IntegerField] ()), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . 
identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[session__name__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[When] ( identifier[month__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[F] ( literal[string] ), literal[string] : identifier[ExpressionWrapper] ( literal[int] * identifier[F] ( literal[string] )+ identifier[F] ( literal[string] ), identifier[output_field] = identifier[IntegerField] ()), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[month__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[ExpressionWrapper] ( literal[int] * identifier[F] ( literal[string] )+ identifier[F] ( literal[string] ), identifier[output_field] = identifier[IntegerField] ()), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[session__startTime__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[F] ( literal[string] ), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . 
identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[session__name__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[F] ( literal[string] ), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[Q] ( identifier[session__startTime__isnull] = keyword[False] )& identifier[Q] ( identifier[month__isnull] = keyword[False] ), identifier[then] = literal[int] ), identifier[When] ( identifier[Q] ( identifier[session__startTime__isnull] = keyword[True] )& identifier[Q] ( identifier[month__isnull] = keyword[False] ), identifier[then] = literal[int] ), identifier[When] ( identifier[Q] ( identifier[session__startTime__isnull] = keyword[False] )& identifier[Q] ( identifier[month__isnull] = keyword[True] ), identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[ExpressionWrapper] ( literal[int] * identifier[F] ( literal[string] )+ identifier[F] ( literal[string] ), identifier[output_field] = identifier[IntegerField] ()), literal[string] : identifier[F] ( literal[string] ), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . 
identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[Q] ( identifier[session__name__isnull] = keyword[False] )& identifier[Q] ( identifier[month__isnull] = keyword[False] ), identifier[then] = literal[int] ), identifier[When] ( identifier[Q] ( identifier[session__name__isnull] = keyword[True] )& identifier[Q] ( identifier[month__isnull] = keyword[False] ), identifier[then] = literal[int] ), identifier[When] ( identifier[Q] ( identifier[session__name__isnull] = keyword[False] )& identifier[Q] ( identifier[month__isnull] = keyword[True] ), identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[ExpressionWrapper] ( literal[int] * identifier[F] ( literal[string] )+ identifier[F] ( literal[string] ), identifier[output_field] = identifier[IntegerField] ()), literal[string] : identifier[F] ( literal[string] ), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[startTime__week_day__isnull] = keyword[False] , identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[ExtractWeekDay] ( literal[string] ), }) keyword[elif] identifier[rule] == literal[string] : identifier[annotations] . 
identifier[update] ({ literal[string] : identifier[Case] ( identifier[When] ( identifier[Q] ( identifier[month__isnull] = keyword[False] )& identifier[Q] ( identifier[startTime__week_day__isnull] = keyword[False] ), identifier[then] = literal[int] ), identifier[default_value] = literal[int] , identifier[output_field] = identifier[IntegerField] () ), literal[string] : identifier[ExpressionWrapper] ( literal[int] * identifier[F] ( literal[string] )+ identifier[F] ( literal[string] ), identifier[output_field] = identifier[IntegerField] ()), literal[string] : identifier[ExtractWeekDay] ( literal[string] ), }) keyword[return] identifier[annotations]
def get_annotations(self): """ This method gets the annotations for the queryset. Unlike get_ordering() below, it passes the actual Case() and F() objects that will be evaluated with the queryset, returned in a dictionary that is compatible with get_ordering(). """ rule = getConstant('registration__orgRule') # Initialize with null values that get filled in based on the logic below. annotations = {'nullParam': Case(default_value=None, output_field=IntegerField()), 'paramOne': Case(default_value=None, output_field=IntegerField()), 'paramTwo': Case(default_value=None, output_field=IntegerField())} if rule == 'SessionFirst': annotations.update({'nullParam': Case(When(session__startTime__isnull=False, then=0), When(month__isnull=False, then=1), default_value=2, output_field=IntegerField()), 'paramOne': F('session__startTime'), 'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField())}) # depends on [control=['if'], data=[]] elif rule == 'SessionAlphaFirst': annotations.update({'nullParam': Case(When(session__name__isnull=False, then=0), When(month__isnull=False, then=1), default_value=2, output_field=IntegerField()), 'paramOne': F('session__name'), 'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField())}) # depends on [control=['if'], data=[]] elif rule == 'Month': annotations.update({'nullParam': Case(When(month__isnull=False, then=0), default_value=1, output_field=IntegerField()), 'paramOne': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField())}) # depends on [control=['if'], data=[]] elif rule == 'Session': annotations.update({'nullParam': Case(When(session__startTime__isnull=False, then=0), default_value=1, output_field=IntegerField()), 'paramOne': F('session__startTime')}) # depends on [control=['if'], data=[]] elif rule == 'SessionAlpha': annotations.update({'nullParam': Case(When(session__name__isnull=False, then=0), default_value=1, output_field=IntegerField()), 'paramOne': 
F('session__name')}) # depends on [control=['if'], data=[]] elif rule == 'SessionMonth': annotations.update({'nullParam': Case(When(Q(session__startTime__isnull=False) & Q(month__isnull=False), then=0), When(Q(session__startTime__isnull=True) & Q(month__isnull=False), then=1), When(Q(session__startTime__isnull=False) & Q(month__isnull=True), then=2), default_value=3, output_field=IntegerField()), 'paramOne': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()), 'paramTwo': F('session__startTime')}) # depends on [control=['if'], data=[]] elif rule == 'SessionAlphaMonth': annotations.update({'nullParam': Case(When(Q(session__name__isnull=False) & Q(month__isnull=False), then=0), When(Q(session__name__isnull=True) & Q(month__isnull=False), then=1), When(Q(session__name__isnull=False) & Q(month__isnull=True), then=2), default_value=3, output_field=IntegerField()), 'paramOne': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()), 'paramTwo': F('session__name')}) # depends on [control=['if'], data=[]] elif rule == 'Weekday': annotations.update({'nullParam': Case(When(startTime__week_day__isnull=False, then=0), default_value=1, output_field=IntegerField()), 'paramOne': ExtractWeekDay('startTime')}) # depends on [control=['if'], data=[]] elif rule == 'MonthWeekday': annotations.update({'nullParam': Case(When(Q(month__isnull=False) & Q(startTime__week_day__isnull=False), then=0), default_value=1, output_field=IntegerField()), 'paramOne': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()), 'paramTwo': ExtractWeekDay('startTime')}) # depends on [control=['if'], data=[]] return annotations
def detect_with_url( self, url, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model="recognition_01", return_recognition_model=False, custom_headers=None, raw=False, **operation_config): """Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.<br /> * Optional parameters including faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. * The extracted face feature, instead of the actual image, will be stored on server. The faceId is an identifier of the face feature and will be used in [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). It will expire 24 hours after the detection call. * Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. * Faces are detectable when its size is 36x36 to 4096x4096 pixels. If need to detect very small but clear faces, please try to enlarge the input image. * Up to 64 faces can be returned for an image. Faces are ranked by face rectangle size from large to small. * Face detector prefer frontal and near-frontal faces. There are cases that faces may not be detected, e.g. exceptionally large face angles (head-pose) or being occluded, or wrong image orientation. 
* Attributes (age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise) may not be perfectly accurate. HeadPose's pitch value is a reserved field and will always return 0. * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, Find Similar are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [How to specify a recognition model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) . :param url: Publicly reachable URL of an image :type url: str :param return_face_id: A value indicating whether the operation should return faceIds of detected faces. :type return_face_id: bool :param return_face_landmarks: A value indicating whether the operation should return landmarks of the detected faces. :type return_face_landmarks: bool :param return_face_attributes: Analyze and return the one or more specified face attributes in the comma-separated string like "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost. :type return_face_attributes: list[str or ~azure.cognitiveservices.vision.face.models.FaceAttributeType] :param recognition_model: Name of recognition model. Recognition model is used when the face features are extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. 
The default value is 'recognition_01', if latest model needed, please explicitly specify the model you need. Possible values include: 'recognition_01', 'recognition_02' :type recognition_model: str or ~azure.cognitiveservices.vision.face.models.RecognitionModel :param return_recognition_model: A value indicating whether the operation should return 'recognitionModel' in response. :type return_recognition_model: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: list or ClientRawResponse if raw=true :rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace] or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.detect_with_url.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if return_face_id is not None: query_parameters['returnFaceId'] = self._serialize.query("return_face_id", return_face_id, 'bool') if return_face_landmarks is not None: query_parameters['returnFaceLandmarks'] = self._serialize.query("return_face_landmarks", return_face_landmarks, 'bool') if return_face_attributes is not None: query_parameters['returnFaceAttributes'] = self._serialize.query("return_face_attributes", return_face_attributes, '[FaceAttributeType]', div=',') if recognition_model is not None: query_parameters['recognitionModel'] = self._serialize.query("recognition_model", recognition_model, 'str') if return_recognition_model is not None: query_parameters['returnRecognitionModel'] = 
self._serialize.query("return_recognition_model", return_recognition_model, 'bool') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.APIErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('[DetectedFace]', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
def function[detect_with_url, parameter[self, url, return_face_id, return_face_landmarks, return_face_attributes, recognition_model, return_recognition_model, custom_headers, raw]]: constant[Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.<br /> * Optional parameters including faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. * The extracted face feature, instead of the actual image, will be stored on server. The faceId is an identifier of the face feature and will be used in [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). It will expire 24 hours after the detection call. * Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. * Faces are detectable when its size is 36x36 to 4096x4096 pixels. If need to detect very small but clear faces, please try to enlarge the input image. * Up to 64 faces can be returned for an image. Faces are ranked by face rectangle size from large to small. * Face detector prefer frontal and near-frontal faces. There are cases that faces may not be detected, e.g. exceptionally large face angles (head-pose) or being occluded, or wrong image orientation. * Attributes (age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise) may not be perfectly accurate. 
HeadPose's pitch value is a reserved field and will always return 0. * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, Find Similar are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [How to specify a recognition model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) . :param url: Publicly reachable URL of an image :type url: str :param return_face_id: A value indicating whether the operation should return faceIds of detected faces. :type return_face_id: bool :param return_face_landmarks: A value indicating whether the operation should return landmarks of the detected faces. :type return_face_landmarks: bool :param return_face_attributes: Analyze and return the one or more specified face attributes in the comma-separated string like "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost. :type return_face_attributes: list[str or ~azure.cognitiveservices.vision.face.models.FaceAttributeType] :param recognition_model: Name of recognition model. Recognition model is used when the face features are extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. The default value is 'recognition_01', if latest model needed, please explicitly specify the model you need. 
Possible values include: 'recognition_01', 'recognition_02' :type recognition_model: str or ~azure.cognitiveservices.vision.face.models.RecognitionModel :param return_recognition_model: A value indicating whether the operation should return 'recognitionModel' in response. :type return_recognition_model: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: list or ClientRawResponse if raw=true :rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace] or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>` ] variable[image_url] assign[=] call[name[models].ImageUrl, parameter[]] variable[url] assign[=] call[name[self].detect_with_url.metadata][constant[url]] variable[path_format_arguments] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e6110>], [<ast.Call object at 0x7da20c6e5270>]] variable[url] assign[=] call[name[self]._client.format_url, parameter[name[url]]] variable[query_parameters] assign[=] dictionary[[], []] if compare[name[return_face_id] is_not constant[None]] begin[:] call[name[query_parameters]][constant[returnFaceId]] assign[=] call[name[self]._serialize.query, parameter[constant[return_face_id], name[return_face_id], constant[bool]]] if compare[name[return_face_landmarks] is_not constant[None]] begin[:] call[name[query_parameters]][constant[returnFaceLandmarks]] assign[=] call[name[self]._serialize.query, parameter[constant[return_face_landmarks], name[return_face_landmarks], constant[bool]]] if compare[name[return_face_attributes] is_not constant[None]] begin[:] call[name[query_parameters]][constant[returnFaceAttributes]] assign[=] call[name[self]._serialize.query, parameter[constant[return_face_attributes], name[return_face_attributes], 
constant[[FaceAttributeType]]]] if compare[name[recognition_model] is_not constant[None]] begin[:] call[name[query_parameters]][constant[recognitionModel]] assign[=] call[name[self]._serialize.query, parameter[constant[recognition_model], name[recognition_model], constant[str]]] if compare[name[return_recognition_model] is_not constant[None]] begin[:] call[name[query_parameters]][constant[returnRecognitionModel]] assign[=] call[name[self]._serialize.query, parameter[constant[return_recognition_model], name[return_recognition_model], constant[bool]]] variable[header_parameters] assign[=] dictionary[[], []] call[name[header_parameters]][constant[Accept]] assign[=] constant[application/json] call[name[header_parameters]][constant[Content-Type]] assign[=] constant[application/json; charset=utf-8] if name[custom_headers] begin[:] call[name[header_parameters].update, parameter[name[custom_headers]]] variable[body_content] assign[=] call[name[self]._serialize.body, parameter[name[image_url], constant[ImageUrl]]] variable[request] assign[=] call[name[self]._client.post, parameter[name[url], name[query_parameters], name[header_parameters], name[body_content]]] variable[response] assign[=] call[name[self]._client.send, parameter[name[request]]] if compare[name[response].status_code <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18dc9b6d0>]]] begin[:] <ast.Raise object at 0x7da18dc9b880> variable[deserialized] assign[=] constant[None] if compare[name[response].status_code equal[==] constant[200]] begin[:] variable[deserialized] assign[=] call[name[self]._deserialize, parameter[constant[[DetectedFace]], name[response]]] if name[raw] begin[:] variable[client_raw_response] assign[=] call[name[ClientRawResponse], parameter[name[deserialized], name[response]]] return[name[client_raw_response]] return[name[deserialized]]
keyword[def] identifier[detect_with_url] ( identifier[self] , identifier[url] , identifier[return_face_id] = keyword[True] , identifier[return_face_landmarks] = keyword[False] , identifier[return_face_attributes] = keyword[None] , identifier[recognition_model] = literal[string] , identifier[return_recognition_model] = keyword[False] , identifier[custom_headers] = keyword[None] , identifier[raw] = keyword[False] ,** identifier[operation_config] ): literal[string] identifier[image_url] = identifier[models] . identifier[ImageUrl] ( identifier[url] = identifier[url] ) identifier[url] = identifier[self] . identifier[detect_with_url] . identifier[metadata] [ literal[string] ] identifier[path_format_arguments] ={ literal[string] : identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[self] . identifier[config] . identifier[endpoint] , literal[string] , identifier[skip_quote] = keyword[True] ) } identifier[url] = identifier[self] . identifier[_client] . identifier[format_url] ( identifier[url] ,** identifier[path_format_arguments] ) identifier[query_parameters] ={} keyword[if] identifier[return_face_id] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[return_face_id] , literal[string] ) keyword[if] identifier[return_face_landmarks] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[return_face_landmarks] , literal[string] ) keyword[if] identifier[return_face_attributes] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . 
identifier[query] ( literal[string] , identifier[return_face_attributes] , literal[string] , identifier[div] = literal[string] ) keyword[if] identifier[recognition_model] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[recognition_model] , literal[string] ) keyword[if] identifier[return_recognition_model] keyword[is] keyword[not] keyword[None] : identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[return_recognition_model] , literal[string] ) identifier[header_parameters] ={} identifier[header_parameters] [ literal[string] ]= literal[string] identifier[header_parameters] [ literal[string] ]= literal[string] keyword[if] identifier[custom_headers] : identifier[header_parameters] . identifier[update] ( identifier[custom_headers] ) identifier[body_content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[image_url] , literal[string] ) identifier[request] = identifier[self] . identifier[_client] . identifier[post] ( identifier[url] , identifier[query_parameters] , identifier[header_parameters] , identifier[body_content] ) identifier[response] = identifier[self] . identifier[_client] . identifier[send] ( identifier[request] , identifier[stream] = keyword[False] ,** identifier[operation_config] ) keyword[if] identifier[response] . identifier[status_code] keyword[not] keyword[in] [ literal[int] ]: keyword[raise] identifier[models] . identifier[APIErrorException] ( identifier[self] . identifier[_deserialize] , identifier[response] ) identifier[deserialized] = keyword[None] keyword[if] identifier[response] . identifier[status_code] == literal[int] : identifier[deserialized] = identifier[self] . 
identifier[_deserialize] ( literal[string] , identifier[response] ) keyword[if] identifier[raw] : identifier[client_raw_response] = identifier[ClientRawResponse] ( identifier[deserialized] , identifier[response] ) keyword[return] identifier[client_raw_response] keyword[return] identifier[deserialized]
def detect_with_url(self, url, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model='recognition_01', return_recognition_model=False, custom_headers=None, raw=False, **operation_config): """Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.<br /> * Optional parameters including faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. * The extracted face feature, instead of the actual image, will be stored on server. The faceId is an identifier of the face feature and will be used in [Face - Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), [Face - Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). It will expire 24 hours after the detection call. * Higher face image quality means better detection and recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. * Faces are detectable when its size is 36x36 to 4096x4096 pixels. If need to detect very small but clear faces, please try to enlarge the input image. * Up to 64 faces can be returned for an image. Faces are ranked by face rectangle size from large to small. * Face detector prefer frontal and near-frontal faces. There are cases that faces may not be detected, e.g. exceptionally large face angles (head-pose) or being occluded, or wrong image orientation. * Attributes (age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise) may not be perfectly accurate. 
HeadPose's pitch value is a reserved field and will always return 0. * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, Find Similar are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [How to specify a recognition model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) . :param url: Publicly reachable URL of an image :type url: str :param return_face_id: A value indicating whether the operation should return faceIds of detected faces. :type return_face_id: bool :param return_face_landmarks: A value indicating whether the operation should return landmarks of the detected faces. :type return_face_landmarks: bool :param return_face_attributes: Analyze and return the one or more specified face attributes in the comma-separated string like "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose, smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational and time cost. :type return_face_attributes: list[str or ~azure.cognitiveservices.vision.face.models.FaceAttributeType] :param recognition_model: Name of recognition model. Recognition model is used when the face features are extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. The default value is 'recognition_01', if latest model needed, please explicitly specify the model you need. 
Possible values include: 'recognition_01', 'recognition_02' :type recognition_model: str or ~azure.cognitiveservices.vision.face.models.RecognitionModel :param return_recognition_model: A value indicating whether the operation should return 'recognitionModel' in response. :type return_recognition_model: bool :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: list or ClientRawResponse if raw=true :rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace] or ~msrest.pipeline.ClientRawResponse :raises: :class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>` """ image_url = models.ImageUrl(url=url) # Construct URL url = self.detect_with_url.metadata['url'] path_format_arguments = {'Endpoint': self._serialize.url('self.config.endpoint', self.config.endpoint, 'str', skip_quote=True)} url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} if return_face_id is not None: query_parameters['returnFaceId'] = self._serialize.query('return_face_id', return_face_id, 'bool') # depends on [control=['if'], data=['return_face_id']] if return_face_landmarks is not None: query_parameters['returnFaceLandmarks'] = self._serialize.query('return_face_landmarks', return_face_landmarks, 'bool') # depends on [control=['if'], data=['return_face_landmarks']] if return_face_attributes is not None: query_parameters['returnFaceAttributes'] = self._serialize.query('return_face_attributes', return_face_attributes, '[FaceAttributeType]', div=',') # depends on [control=['if'], data=['return_face_attributes']] if recognition_model is not None: query_parameters['recognitionModel'] = self._serialize.query('recognition_model', recognition_model, 'str') # depends on [control=['if'], data=['recognition_model']] 
if return_recognition_model is not None: query_parameters['returnRecognitionModel'] = self._serialize.query('return_recognition_model', return_recognition_model, 'bool') # depends on [control=['if'], data=['return_recognition_model']] # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # depends on [control=['if'], data=[]] # Construct body body_content = self._serialize.body(image_url, 'ImageUrl') # Construct and send request request = self._client.post(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.APIErrorException(self._deserialize, response) # depends on [control=['if'], data=[]] deserialized = None if response.status_code == 200: deserialized = self._deserialize('[DetectedFace]', response) # depends on [control=['if'], data=[]] if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response # depends on [control=['if'], data=[]] return deserialized
def execute(self, portfolio_name=None): """ Place the order with LendingClub Parameters ---------- portfolio_name : string The name of the portfolio to add the invested loan notes to. This can be a new or existing portfolio name. Raises ------ LendingClubError Returns ------- int The completed order ID """ assert self.order_id == 0, 'This order has already been place. Start a new order.' assert len(self.loans) > 0, 'There aren\'t any loans in your order' # Place the order self.__stage_order() token = self.__get_strut_token() self.order_id = self.__place_order(token) self.__log('Order #{0} was successfully submitted'.format(self.order_id)) # Assign to portfolio if portfolio_name: return self.assign_to_portfolio(portfolio_name) return self.order_id
def function[execute, parameter[self, portfolio_name]]: constant[ Place the order with LendingClub Parameters ---------- portfolio_name : string The name of the portfolio to add the invested loan notes to. This can be a new or existing portfolio name. Raises ------ LendingClubError Returns ------- int The completed order ID ] assert[compare[name[self].order_id equal[==] constant[0]]] assert[compare[call[name[len], parameter[name[self].loans]] greater[>] constant[0]]] call[name[self].__stage_order, parameter[]] variable[token] assign[=] call[name[self].__get_strut_token, parameter[]] name[self].order_id assign[=] call[name[self].__place_order, parameter[name[token]]] call[name[self].__log, parameter[call[constant[Order #{0} was successfully submitted].format, parameter[name[self].order_id]]]] if name[portfolio_name] begin[:] return[call[name[self].assign_to_portfolio, parameter[name[portfolio_name]]]] return[name[self].order_id]
keyword[def] identifier[execute] ( identifier[self] , identifier[portfolio_name] = keyword[None] ): literal[string] keyword[assert] identifier[self] . identifier[order_id] == literal[int] , literal[string] keyword[assert] identifier[len] ( identifier[self] . identifier[loans] )> literal[int] , literal[string] identifier[self] . identifier[__stage_order] () identifier[token] = identifier[self] . identifier[__get_strut_token] () identifier[self] . identifier[order_id] = identifier[self] . identifier[__place_order] ( identifier[token] ) identifier[self] . identifier[__log] ( literal[string] . identifier[format] ( identifier[self] . identifier[order_id] )) keyword[if] identifier[portfolio_name] : keyword[return] identifier[self] . identifier[assign_to_portfolio] ( identifier[portfolio_name] ) keyword[return] identifier[self] . identifier[order_id]
def execute(self, portfolio_name=None): """ Place the order with LendingClub Parameters ---------- portfolio_name : string The name of the portfolio to add the invested loan notes to. This can be a new or existing portfolio name. Raises ------ LendingClubError Returns ------- int The completed order ID """ assert self.order_id == 0, 'This order has already been place. Start a new order.' assert len(self.loans) > 0, "There aren't any loans in your order" # Place the order self.__stage_order() token = self.__get_strut_token() self.order_id = self.__place_order(token) self.__log('Order #{0} was successfully submitted'.format(self.order_id)) # Assign to portfolio if portfolio_name: return self.assign_to_portfolio(portfolio_name) # depends on [control=['if'], data=[]] return self.order_id
def read_loom(filename: PathLike, sparse: bool = True, cleanup: bool = False, X_name: str = 'spliced', obs_names: str = 'CellID', var_names: str = 'Gene', dtype: str='float32', **kwargs) -> AnnData: """Read ``.loom``-formatted hdf5 file. This reads the whole file into memory. Beware that you have to explicitly state when you want to read the file as sparse data. Parameters ---------- filename The filename. sparse Whether to read the data matrix as sparse. cleanup: Whether to remove all obs/var keys that do not store more than one unique value. X_name: Loompy key where the data matrix is stored. obs_names: Loompy key where the observation/cell names are stored. var_names: Loompy key where the variable/gene names are stored. **kwargs: Arguments to loompy.connect """ filename = fspath(filename) # allow passing pathlib.Path objects from loompy import connect with connect(filename, 'r', **kwargs) as lc: if X_name not in lc.layers.keys(): X_name = '' X = lc.layers[X_name].sparse().T.tocsr() if sparse else lc.layers[X_name][()].T layers = OrderedDict() if X_name != '': layers['matrix'] = lc.layers[''].sparse().T.tocsr() if sparse else lc.layers[''][()].T for key in lc.layers.keys(): if key != '': layers[key] = lc.layers[key].sparse().T.tocsr() if sparse else lc.layers[key][()].T obs = dict(lc.col_attrs) if obs_names in obs.keys(): obs['obs_names'] = obs.pop(obs_names) obsm_attrs = [k for k, v in obs.items() if v.ndim > 1 and v.shape[1] > 1] obsm = {} for key in obsm_attrs: obsm[key] = obs.pop(key) var = dict(lc.row_attrs) if var_names in var.keys(): var['var_names'] = var.pop(var_names) varm_attrs = [k for k, v in var.items() if v.ndim > 1 and v.shape[1] > 1] varm = {} for key in varm_attrs: varm[key] = var.pop(key) if cleanup: for key in list(obs.keys()): if len(set(obs[key])) == 1: del obs[key] for key in list(var.keys()): if len(set(var[key])) == 1: del var[key] adata = AnnData( X, obs=obs, # not ideal: make the generator a dict... 
var=var, layers=layers, obsm=obsm if obsm else None, varm=varm if varm else None, dtype=dtype) return adata
def function[read_loom, parameter[filename, sparse, cleanup, X_name, obs_names, var_names, dtype]]: constant[Read ``.loom``-formatted hdf5 file. This reads the whole file into memory. Beware that you have to explicitly state when you want to read the file as sparse data. Parameters ---------- filename The filename. sparse Whether to read the data matrix as sparse. cleanup: Whether to remove all obs/var keys that do not store more than one unique value. X_name: Loompy key where the data matrix is stored. obs_names: Loompy key where the observation/cell names are stored. var_names: Loompy key where the variable/gene names are stored. **kwargs: Arguments to loompy.connect ] variable[filename] assign[=] call[name[fspath], parameter[name[filename]]] from relative_module[loompy] import module[connect] with call[name[connect], parameter[name[filename], constant[r]]] begin[:] if compare[name[X_name] <ast.NotIn object at 0x7da2590d7190> call[name[lc].layers.keys, parameter[]]] begin[:] variable[X_name] assign[=] constant[] variable[X] assign[=] <ast.IfExp object at 0x7da20c794850> variable[layers] assign[=] call[name[OrderedDict], parameter[]] if compare[name[X_name] not_equal[!=] constant[]] begin[:] call[name[layers]][constant[matrix]] assign[=] <ast.IfExp object at 0x7da204960880> for taget[name[key]] in starred[call[name[lc].layers.keys, parameter[]]] begin[:] if compare[name[key] not_equal[!=] constant[]] begin[:] call[name[layers]][name[key]] assign[=] <ast.IfExp object at 0x7da204960940> variable[obs] assign[=] call[name[dict], parameter[name[lc].col_attrs]] if compare[name[obs_names] in call[name[obs].keys, parameter[]]] begin[:] call[name[obs]][constant[obs_names]] assign[=] call[name[obs].pop, parameter[name[obs_names]]] variable[obsm_attrs] assign[=] <ast.ListComp object at 0x7da204961d80> variable[obsm] assign[=] dictionary[[], []] for taget[name[key]] in starred[name[obsm_attrs]] begin[:] call[name[obsm]][name[key]] assign[=] call[name[obs].pop, 
parameter[name[key]]] variable[var] assign[=] call[name[dict], parameter[name[lc].row_attrs]] if compare[name[var_names] in call[name[var].keys, parameter[]]] begin[:] call[name[var]][constant[var_names]] assign[=] call[name[var].pop, parameter[name[var_names]]] variable[varm_attrs] assign[=] <ast.ListComp object at 0x7da20c991390> variable[varm] assign[=] dictionary[[], []] for taget[name[key]] in starred[name[varm_attrs]] begin[:] call[name[varm]][name[key]] assign[=] call[name[var].pop, parameter[name[key]]] if name[cleanup] begin[:] for taget[name[key]] in starred[call[name[list], parameter[call[name[obs].keys, parameter[]]]]] begin[:] if compare[call[name[len], parameter[call[name[set], parameter[call[name[obs]][name[key]]]]]] equal[==] constant[1]] begin[:] <ast.Delete object at 0x7da20c990400> for taget[name[key]] in starred[call[name[list], parameter[call[name[var].keys, parameter[]]]]] begin[:] if compare[call[name[len], parameter[call[name[set], parameter[call[name[var]][name[key]]]]]] equal[==] constant[1]] begin[:] <ast.Delete object at 0x7da20c9924d0> variable[adata] assign[=] call[name[AnnData], parameter[name[X]]] return[name[adata]]
keyword[def] identifier[read_loom] ( identifier[filename] : identifier[PathLike] , identifier[sparse] : identifier[bool] = keyword[True] , identifier[cleanup] : identifier[bool] = keyword[False] , identifier[X_name] : identifier[str] = literal[string] , identifier[obs_names] : identifier[str] = literal[string] , identifier[var_names] : identifier[str] = literal[string] , identifier[dtype] : identifier[str] = literal[string] ,** identifier[kwargs] )-> identifier[AnnData] : literal[string] identifier[filename] = identifier[fspath] ( identifier[filename] ) keyword[from] identifier[loompy] keyword[import] identifier[connect] keyword[with] identifier[connect] ( identifier[filename] , literal[string] ,** identifier[kwargs] ) keyword[as] identifier[lc] : keyword[if] identifier[X_name] keyword[not] keyword[in] identifier[lc] . identifier[layers] . identifier[keys] (): identifier[X_name] = literal[string] identifier[X] = identifier[lc] . identifier[layers] [ identifier[X_name] ]. identifier[sparse] (). identifier[T] . identifier[tocsr] () keyword[if] identifier[sparse] keyword[else] identifier[lc] . identifier[layers] [ identifier[X_name] ][()]. identifier[T] identifier[layers] = identifier[OrderedDict] () keyword[if] identifier[X_name] != literal[string] : identifier[layers] [ literal[string] ]= identifier[lc] . identifier[layers] [ literal[string] ]. identifier[sparse] (). identifier[T] . identifier[tocsr] () keyword[if] identifier[sparse] keyword[else] identifier[lc] . identifier[layers] [ literal[string] ][()]. identifier[T] keyword[for] identifier[key] keyword[in] identifier[lc] . identifier[layers] . identifier[keys] (): keyword[if] identifier[key] != literal[string] : identifier[layers] [ identifier[key] ]= identifier[lc] . identifier[layers] [ identifier[key] ]. identifier[sparse] (). identifier[T] . identifier[tocsr] () keyword[if] identifier[sparse] keyword[else] identifier[lc] . identifier[layers] [ identifier[key] ][()]. 
identifier[T] identifier[obs] = identifier[dict] ( identifier[lc] . identifier[col_attrs] ) keyword[if] identifier[obs_names] keyword[in] identifier[obs] . identifier[keys] (): identifier[obs] [ literal[string] ]= identifier[obs] . identifier[pop] ( identifier[obs_names] ) identifier[obsm_attrs] =[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[obs] . identifier[items] () keyword[if] identifier[v] . identifier[ndim] > literal[int] keyword[and] identifier[v] . identifier[shape] [ literal[int] ]> literal[int] ] identifier[obsm] ={} keyword[for] identifier[key] keyword[in] identifier[obsm_attrs] : identifier[obsm] [ identifier[key] ]= identifier[obs] . identifier[pop] ( identifier[key] ) identifier[var] = identifier[dict] ( identifier[lc] . identifier[row_attrs] ) keyword[if] identifier[var_names] keyword[in] identifier[var] . identifier[keys] (): identifier[var] [ literal[string] ]= identifier[var] . identifier[pop] ( identifier[var_names] ) identifier[varm_attrs] =[ identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[var] . identifier[items] () keyword[if] identifier[v] . identifier[ndim] > literal[int] keyword[and] identifier[v] . identifier[shape] [ literal[int] ]> literal[int] ] identifier[varm] ={} keyword[for] identifier[key] keyword[in] identifier[varm_attrs] : identifier[varm] [ identifier[key] ]= identifier[var] . identifier[pop] ( identifier[key] ) keyword[if] identifier[cleanup] : keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[obs] . identifier[keys] ()): keyword[if] identifier[len] ( identifier[set] ( identifier[obs] [ identifier[key] ]))== literal[int] : keyword[del] identifier[obs] [ identifier[key] ] keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[var] . 
identifier[keys] ()): keyword[if] identifier[len] ( identifier[set] ( identifier[var] [ identifier[key] ]))== literal[int] : keyword[del] identifier[var] [ identifier[key] ] identifier[adata] = identifier[AnnData] ( identifier[X] , identifier[obs] = identifier[obs] , identifier[var] = identifier[var] , identifier[layers] = identifier[layers] , identifier[obsm] = identifier[obsm] keyword[if] identifier[obsm] keyword[else] keyword[None] , identifier[varm] = identifier[varm] keyword[if] identifier[varm] keyword[else] keyword[None] , identifier[dtype] = identifier[dtype] ) keyword[return] identifier[adata]
def read_loom(filename: PathLike, sparse: bool=True, cleanup: bool=False, X_name: str='spliced', obs_names: str='CellID', var_names: str='Gene', dtype: str='float32', **kwargs) -> AnnData: """Read ``.loom``-formatted hdf5 file. This reads the whole file into memory. Beware that you have to explicitly state when you want to read the file as sparse data. Parameters ---------- filename The filename. sparse Whether to read the data matrix as sparse. cleanup: Whether to remove all obs/var keys that do not store more than one unique value. X_name: Loompy key where the data matrix is stored. obs_names: Loompy key where the observation/cell names are stored. var_names: Loompy key where the variable/gene names are stored. **kwargs: Arguments to loompy.connect """ filename = fspath(filename) # allow passing pathlib.Path objects from loompy import connect with connect(filename, 'r', **kwargs) as lc: if X_name not in lc.layers.keys(): X_name = '' # depends on [control=['if'], data=['X_name']] X = lc.layers[X_name].sparse().T.tocsr() if sparse else lc.layers[X_name][()].T layers = OrderedDict() if X_name != '': layers['matrix'] = lc.layers[''].sparse().T.tocsr() if sparse else lc.layers[''][()].T # depends on [control=['if'], data=[]] for key in lc.layers.keys(): if key != '': layers[key] = lc.layers[key].sparse().T.tocsr() if sparse else lc.layers[key][()].T # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']] obs = dict(lc.col_attrs) if obs_names in obs.keys(): obs['obs_names'] = obs.pop(obs_names) # depends on [control=['if'], data=['obs_names']] obsm_attrs = [k for (k, v) in obs.items() if v.ndim > 1 and v.shape[1] > 1] obsm = {} for key in obsm_attrs: obsm[key] = obs.pop(key) # depends on [control=['for'], data=['key']] var = dict(lc.row_attrs) if var_names in var.keys(): var['var_names'] = var.pop(var_names) # depends on [control=['if'], data=['var_names']] varm_attrs = [k for (k, v) in var.items() if v.ndim > 1 and v.shape[1] > 1] varm 
= {} for key in varm_attrs: varm[key] = var.pop(key) # depends on [control=['for'], data=['key']] if cleanup: for key in list(obs.keys()): if len(set(obs[key])) == 1: del obs[key] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] for key in list(var.keys()): if len(set(var[key])) == 1: del var[key] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] # depends on [control=['if'], data=[]] # not ideal: make the generator a dict... adata = AnnData(X, obs=obs, var=var, layers=layers, obsm=obsm if obsm else None, varm=varm if varm else None, dtype=dtype) # depends on [control=['with'], data=['lc']] return adata
def get_engine_from_session(dbsession: Session) -> Engine: """ Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`. """ engine = dbsession.bind assert isinstance(engine, Engine) return engine
def function[get_engine_from_session, parameter[dbsession]]: constant[ Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`. ] variable[engine] assign[=] name[dbsession].bind assert[call[name[isinstance], parameter[name[engine], name[Engine]]]] return[name[engine]]
keyword[def] identifier[get_engine_from_session] ( identifier[dbsession] : identifier[Session] )-> identifier[Engine] : literal[string] identifier[engine] = identifier[dbsession] . identifier[bind] keyword[assert] identifier[isinstance] ( identifier[engine] , identifier[Engine] ) keyword[return] identifier[engine]
def get_engine_from_session(dbsession: Session) -> Engine: """ Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`. """ engine = dbsession.bind assert isinstance(engine, Engine) return engine
def run(self, arguments=None): """ Parse the logfile. Ask each filter if it accepts the line. It will only be printed if all filters accept the line. """ # add arguments from filter classes before calling superclass run for f in self.filters: for fa in f.filterArgs: self.argparser.add_argument(fa[0], **fa[1]) # now parse arguments and post-process LogFileTool.run(self, arguments) self.args = dict((k, self.args[k] if k in ['logfile', 'markers', 'timezone'] else self._arrayToString(self.args[k])) for k in self.args) # make sure logfile is always a list, even if 1 is provided # through sys.stdin if not isinstance(self.args['logfile'], list): self.args['logfile'] = [self.args['logfile']] # require at least 1 log file (either through stdin or as parameter) if len(self.args['logfile']) == 0: raise SystemExit('Error: Need at least 1 log file, either as ' 'command line parameter or through stdin.') # handle timezone parameter if len(self.args['timezone']) == 1: self.args['timezone'] = (self.args['timezone'] * len(self.args['logfile'])) elif len(self.args['timezone']) == len(self.args['logfile']): pass elif len(self.args['timezone']) == 0: self.args['timezone'] = [0] * len(self.args['logfile']) else: raise SystemExit('Error: Invalid number of timezone parameters. 
' 'Use either one parameter (for global ' 'adjustment) or the number of log files ' '(for individual adjustments).') # create filter objects from classes and pass args self.filters = [f(self) for f in self.filters] # remove non-active filter objects self.filters = [f for f in self.filters if f.active] # call setup for each active filter for f in self.filters: f.setup() if self.args['shorten'] is not False: if self.args['shorten'] is None: self.args['shorten'] = 200 if self.args['verbose']: print("command line arguments") for a in self.args: print(" %s: %s" % (a, self.args[a])) print("\nactive filters: " + ", ".join([f.__class__.__name__ for f in self.filters])) print('\n====================') # handle markers parameter if len(self.args['markers']) == 1: marker = self.args['markers'][0] if marker == 'enum': self.args['markers'] = ['{%i}' % (i + 1) for i in range(len(self .args['logfile']))] elif marker == 'alpha': self.args['markers'] = ['{%s}' % chr(97 + i) for i in range(len(self .args['logfile']))] elif marker == 'none': self.args['markers'] = [None for _ in self.args['logfile']] elif marker == 'filename': self.args['markers'] = ['{%s}' % logfile.name for logfile in self.args['logfile']] elif len(self.args['markers']) == len(self.args['logfile']): pass else: raise SystemExit('Error: Number of markers not the same as ' 'number of files.') # with --human, change to ctime format if not specified otherwise if self.args['timestamp_format'] == 'none' and self.args['human']: self.args['timestamp_format'] = 'ctime' # go through each line and ask each filter if it accepts if 'logfile' not in self.args or not self.args['logfile']: raise SystemExit('no logfile found.') for logevent in self.logfile_generator(): if self.args['exclude']: # print line if any filter disagrees if any([not f.accept(logevent) for f in self.filters]): self._outputLine(logevent, self.args['shorten'], self.args['human']) else: # only print line if all filters agree if all([f.accept(logevent) for f in 
self.filters]): self._outputLine(logevent, self.args['shorten'], self.args['human']) # if at least one filter refuses to accept any # remaining lines, stop if any([f.skipRemaining() for f in self.filters]): # if input is not stdin if sys.stdin.isatty(): break
def function[run, parameter[self, arguments]]: constant[ Parse the logfile. Ask each filter if it accepts the line. It will only be printed if all filters accept the line. ] for taget[name[f]] in starred[name[self].filters] begin[:] for taget[name[fa]] in starred[name[f].filterArgs] begin[:] call[name[self].argparser.add_argument, parameter[call[name[fa]][constant[0]]]] call[name[LogFileTool].run, parameter[name[self], name[arguments]]] name[self].args assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b16a4a90>]] if <ast.UnaryOp object at 0x7da1b16a6260> begin[:] call[name[self].args][constant[logfile]] assign[=] list[[<ast.Subscript object at 0x7da1b16a5bd0>]] if compare[call[name[len], parameter[call[name[self].args][constant[logfile]]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da1b16a7e20> if compare[call[name[len], parameter[call[name[self].args][constant[timezone]]]] equal[==] constant[1]] begin[:] call[name[self].args][constant[timezone]] assign[=] binary_operation[call[name[self].args][constant[timezone]] * call[name[len], parameter[call[name[self].args][constant[logfile]]]]] name[self].filters assign[=] <ast.ListComp object at 0x7da1b17d5c30> name[self].filters assign[=] <ast.ListComp object at 0x7da1b16a6470> for taget[name[f]] in starred[name[self].filters] begin[:] call[name[f].setup, parameter[]] if compare[call[name[self].args][constant[shorten]] is_not constant[False]] begin[:] if compare[call[name[self].args][constant[shorten]] is constant[None]] begin[:] call[name[self].args][constant[shorten]] assign[=] constant[200] if call[name[self].args][constant[verbose]] begin[:] call[name[print], parameter[constant[command line arguments]]] for taget[name[a]] in starred[name[self].args] begin[:] call[name[print], parameter[binary_operation[constant[ %s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1642ef0>, <ast.Subscript object at 0x7da1b16418d0>]]]]] call[name[print], 
parameter[binary_operation[constant[ active filters: ] + call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b1643eb0>]]]]] call[name[print], parameter[constant[ ====================]]] if compare[call[name[len], parameter[call[name[self].args][constant[markers]]]] equal[==] constant[1]] begin[:] variable[marker] assign[=] call[call[name[self].args][constant[markers]]][constant[0]] if compare[name[marker] equal[==] constant[enum]] begin[:] call[name[self].args][constant[markers]] assign[=] <ast.ListComp object at 0x7da1b1640040> if <ast.BoolOp object at 0x7da1b1641060> begin[:] call[name[self].args][constant[timestamp_format]] assign[=] constant[ctime] if <ast.BoolOp object at 0x7da1b1641090> begin[:] <ast.Raise object at 0x7da1b1642c50> for taget[name[logevent]] in starred[call[name[self].logfile_generator, parameter[]]] begin[:] if call[name[self].args][constant[exclude]] begin[:] if call[name[any], parameter[<ast.ListComp object at 0x7da1b16416c0>]] begin[:] call[name[self]._outputLine, parameter[name[logevent], call[name[self].args][constant[shorten]], call[name[self].args][constant[human]]]]
keyword[def] identifier[run] ( identifier[self] , identifier[arguments] = keyword[None] ): literal[string] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] : keyword[for] identifier[fa] keyword[in] identifier[f] . identifier[filterArgs] : identifier[self] . identifier[argparser] . identifier[add_argument] ( identifier[fa] [ literal[int] ],** identifier[fa] [ literal[int] ]) identifier[LogFileTool] . identifier[run] ( identifier[self] , identifier[arguments] ) identifier[self] . identifier[args] = identifier[dict] (( identifier[k] , identifier[self] . identifier[args] [ identifier[k] ] keyword[if] identifier[k] keyword[in] [ literal[string] , literal[string] , literal[string] ] keyword[else] identifier[self] . identifier[_arrayToString] ( identifier[self] . identifier[args] [ identifier[k] ])) keyword[for] identifier[k] keyword[in] identifier[self] . identifier[args] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[args] [ literal[string] ], identifier[list] ): identifier[self] . identifier[args] [ literal[string] ]=[ identifier[self] . identifier[args] [ literal[string] ]] keyword[if] identifier[len] ( identifier[self] . identifier[args] [ literal[string] ])== literal[int] : keyword[raise] identifier[SystemExit] ( literal[string] literal[string] ) keyword[if] identifier[len] ( identifier[self] . identifier[args] [ literal[string] ])== literal[int] : identifier[self] . identifier[args] [ literal[string] ]=( identifier[self] . identifier[args] [ literal[string] ]* identifier[len] ( identifier[self] . identifier[args] [ literal[string] ])) keyword[elif] identifier[len] ( identifier[self] . identifier[args] [ literal[string] ])== identifier[len] ( identifier[self] . identifier[args] [ literal[string] ]): keyword[pass] keyword[elif] identifier[len] ( identifier[self] . identifier[args] [ literal[string] ])== literal[int] : identifier[self] . 
identifier[args] [ literal[string] ]=[ literal[int] ]* identifier[len] ( identifier[self] . identifier[args] [ literal[string] ]) keyword[else] : keyword[raise] identifier[SystemExit] ( literal[string] literal[string] literal[string] literal[string] ) identifier[self] . identifier[filters] =[ identifier[f] ( identifier[self] ) keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] ] identifier[self] . identifier[filters] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] keyword[if] identifier[f] . identifier[active] ] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] : identifier[f] . identifier[setup] () keyword[if] identifier[self] . identifier[args] [ literal[string] ] keyword[is] keyword[not] keyword[False] : keyword[if] identifier[self] . identifier[args] [ literal[string] ] keyword[is] keyword[None] : identifier[self] . identifier[args] [ literal[string] ]= literal[int] keyword[if] identifier[self] . identifier[args] [ literal[string] ]: identifier[print] ( literal[string] ) keyword[for] identifier[a] keyword[in] identifier[self] . identifier[args] : identifier[print] ( literal[string] %( identifier[a] , identifier[self] . identifier[args] [ identifier[a] ])) identifier[print] ( literal[string] + literal[string] . identifier[join] ([ identifier[f] . identifier[__class__] . identifier[__name__] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] ])) identifier[print] ( literal[string] ) keyword[if] identifier[len] ( identifier[self] . identifier[args] [ literal[string] ])== literal[int] : identifier[marker] = identifier[self] . identifier[args] [ literal[string] ][ literal[int] ] keyword[if] identifier[marker] == literal[string] : identifier[self] . identifier[args] [ literal[string] ]=[ literal[string] %( identifier[i] + literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . 
identifier[args] [ literal[string] ]))] keyword[elif] identifier[marker] == literal[string] : identifier[self] . identifier[args] [ literal[string] ]=[ literal[string] % identifier[chr] ( literal[int] + identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[args] [ literal[string] ]))] keyword[elif] identifier[marker] == literal[string] : identifier[self] . identifier[args] [ literal[string] ]=[ keyword[None] keyword[for] identifier[_] keyword[in] identifier[self] . identifier[args] [ literal[string] ]] keyword[elif] identifier[marker] == literal[string] : identifier[self] . identifier[args] [ literal[string] ]=[ literal[string] % identifier[logfile] . identifier[name] keyword[for] identifier[logfile] keyword[in] identifier[self] . identifier[args] [ literal[string] ]] keyword[elif] identifier[len] ( identifier[self] . identifier[args] [ literal[string] ])== identifier[len] ( identifier[self] . identifier[args] [ literal[string] ]): keyword[pass] keyword[else] : keyword[raise] identifier[SystemExit] ( literal[string] literal[string] ) keyword[if] identifier[self] . identifier[args] [ literal[string] ]== literal[string] keyword[and] identifier[self] . identifier[args] [ literal[string] ]: identifier[self] . identifier[args] [ literal[string] ]= literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[args] keyword[or] keyword[not] identifier[self] . identifier[args] [ literal[string] ]: keyword[raise] identifier[SystemExit] ( literal[string] ) keyword[for] identifier[logevent] keyword[in] identifier[self] . identifier[logfile_generator] (): keyword[if] identifier[self] . identifier[args] [ literal[string] ]: keyword[if] identifier[any] ([ keyword[not] identifier[f] . identifier[accept] ( identifier[logevent] ) keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] ]): identifier[self] . 
identifier[_outputLine] ( identifier[logevent] , identifier[self] . identifier[args] [ literal[string] ], identifier[self] . identifier[args] [ literal[string] ]) keyword[else] : keyword[if] identifier[all] ([ identifier[f] . identifier[accept] ( identifier[logevent] ) keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] ]): identifier[self] . identifier[_outputLine] ( identifier[logevent] , identifier[self] . identifier[args] [ literal[string] ], identifier[self] . identifier[args] [ literal[string] ]) keyword[if] identifier[any] ([ identifier[f] . identifier[skipRemaining] () keyword[for] identifier[f] keyword[in] identifier[self] . identifier[filters] ]): keyword[if] identifier[sys] . identifier[stdin] . identifier[isatty] (): keyword[break]
def run(self, arguments=None): """ Parse the logfile. Ask each filter if it accepts the line. It will only be printed if all filters accept the line. """ # add arguments from filter classes before calling superclass run for f in self.filters: for fa in f.filterArgs: self.argparser.add_argument(fa[0], **fa[1]) # depends on [control=['for'], data=['fa']] # depends on [control=['for'], data=['f']] # now parse arguments and post-process LogFileTool.run(self, arguments) self.args = dict(((k, self.args[k] if k in ['logfile', 'markers', 'timezone'] else self._arrayToString(self.args[k])) for k in self.args)) # make sure logfile is always a list, even if 1 is provided # through sys.stdin if not isinstance(self.args['logfile'], list): self.args['logfile'] = [self.args['logfile']] # depends on [control=['if'], data=[]] # require at least 1 log file (either through stdin or as parameter) if len(self.args['logfile']) == 0: raise SystemExit('Error: Need at least 1 log file, either as command line parameter or through stdin.') # depends on [control=['if'], data=[]] # handle timezone parameter if len(self.args['timezone']) == 1: self.args['timezone'] = self.args['timezone'] * len(self.args['logfile']) # depends on [control=['if'], data=[]] elif len(self.args['timezone']) == len(self.args['logfile']): pass # depends on [control=['if'], data=[]] elif len(self.args['timezone']) == 0: self.args['timezone'] = [0] * len(self.args['logfile']) # depends on [control=['if'], data=[]] else: raise SystemExit('Error: Invalid number of timezone parameters. 
Use either one parameter (for global adjustment) or the number of log files (for individual adjustments).') # create filter objects from classes and pass args self.filters = [f(self) for f in self.filters] # remove non-active filter objects self.filters = [f for f in self.filters if f.active] # call setup for each active filter for f in self.filters: f.setup() # depends on [control=['for'], data=['f']] if self.args['shorten'] is not False: if self.args['shorten'] is None: self.args['shorten'] = 200 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if self.args['verbose']: print('command line arguments') for a in self.args: print(' %s: %s' % (a, self.args[a])) # depends on [control=['for'], data=['a']] print('\nactive filters: ' + ', '.join([f.__class__.__name__ for f in self.filters])) print('\n====================') # depends on [control=['if'], data=[]] # handle markers parameter if len(self.args['markers']) == 1: marker = self.args['markers'][0] if marker == 'enum': self.args['markers'] = ['{%i}' % (i + 1) for i in range(len(self.args['logfile']))] # depends on [control=['if'], data=[]] elif marker == 'alpha': self.args['markers'] = ['{%s}' % chr(97 + i) for i in range(len(self.args['logfile']))] # depends on [control=['if'], data=[]] elif marker == 'none': self.args['markers'] = [None for _ in self.args['logfile']] # depends on [control=['if'], data=[]] elif marker == 'filename': self.args['markers'] = ['{%s}' % logfile.name for logfile in self.args['logfile']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif len(self.args['markers']) == len(self.args['logfile']): pass # depends on [control=['if'], data=[]] else: raise SystemExit('Error: Number of markers not the same as number of files.') # with --human, change to ctime format if not specified otherwise if self.args['timestamp_format'] == 'none' and self.args['human']: self.args['timestamp_format'] = 'ctime' # depends on [control=['if'], data=[]] # 
go through each line and ask each filter if it accepts if 'logfile' not in self.args or not self.args['logfile']: raise SystemExit('no logfile found.') # depends on [control=['if'], data=[]] for logevent in self.logfile_generator(): if self.args['exclude']: # print line if any filter disagrees if any([not f.accept(logevent) for f in self.filters]): self._outputLine(logevent, self.args['shorten'], self.args['human']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # only print line if all filters agree if all([f.accept(logevent) for f in self.filters]): self._outputLine(logevent, self.args['shorten'], self.args['human']) # depends on [control=['if'], data=[]] # if at least one filter refuses to accept any # remaining lines, stop if any([f.skipRemaining() for f in self.filters]): # if input is not stdin if sys.stdin.isatty(): break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['logevent']]
def check_causatives(self, case_obj=None, institute_obj=None): """Check if there are any variants that are previously marked causative Loop through all variants that are marked 'causative' for an institute and check if any of the variants are present in the current case. Args: case_obj (dict): A Case object institute_obj (dict): check across the whole institute Returns: causatives(iterable(Variant)) """ institute_id = case_obj['owner'] if case_obj else institute_obj['_id'] institute_causative_variant_ids = self.get_causatives(institute_id) if len(institute_causative_variant_ids) == 0: return [] if case_obj: # exclude variants that are marked causative in "case_obj" case_causative_ids = set(case_obj.get('causatives', [])) institute_causative_variant_ids = list( set(institute_causative_variant_ids).difference(case_causative_ids) ) # convert from unique ids to general "variant_id" query = self.variant_collection.find( {'_id': {'$in': institute_causative_variant_ids}}, {'variant_id': 1} ) positional_variant_ids = [item['variant_id'] for item in query] filters = {'variant_id': {'$in': positional_variant_ids}} if case_obj: filters['case_id'] = case_obj['_id'] else: filters['institute'] = institute_obj['_id'] return self.variant_collection.find(filters)
def function[check_causatives, parameter[self, case_obj, institute_obj]]: constant[Check if there are any variants that are previously marked causative Loop through all variants that are marked 'causative' for an institute and check if any of the variants are present in the current case. Args: case_obj (dict): A Case object institute_obj (dict): check across the whole institute Returns: causatives(iterable(Variant)) ] variable[institute_id] assign[=] <ast.IfExp object at 0x7da2043447f0> variable[institute_causative_variant_ids] assign[=] call[name[self].get_causatives, parameter[name[institute_id]]] if compare[call[name[len], parameter[name[institute_causative_variant_ids]]] equal[==] constant[0]] begin[:] return[list[[]]] if name[case_obj] begin[:] variable[case_causative_ids] assign[=] call[name[set], parameter[call[name[case_obj].get, parameter[constant[causatives], list[[]]]]]] variable[institute_causative_variant_ids] assign[=] call[name[list], parameter[call[call[name[set], parameter[name[institute_causative_variant_ids]]].difference, parameter[name[case_causative_ids]]]]] variable[query] assign[=] call[name[self].variant_collection.find, parameter[dictionary[[<ast.Constant object at 0x7da18fe92320>], [<ast.Dict object at 0x7da18fe904f0>]], dictionary[[<ast.Constant object at 0x7da18fe926e0>], [<ast.Constant object at 0x7da18fe93a00>]]]] variable[positional_variant_ids] assign[=] <ast.ListComp object at 0x7da18fe92c20> variable[filters] assign[=] dictionary[[<ast.Constant object at 0x7da18fe915a0>], [<ast.Dict object at 0x7da18fe90790>]] if name[case_obj] begin[:] call[name[filters]][constant[case_id]] assign[=] call[name[case_obj]][constant[_id]] return[call[name[self].variant_collection.find, parameter[name[filters]]]]
keyword[def] identifier[check_causatives] ( identifier[self] , identifier[case_obj] = keyword[None] , identifier[institute_obj] = keyword[None] ): literal[string] identifier[institute_id] = identifier[case_obj] [ literal[string] ] keyword[if] identifier[case_obj] keyword[else] identifier[institute_obj] [ literal[string] ] identifier[institute_causative_variant_ids] = identifier[self] . identifier[get_causatives] ( identifier[institute_id] ) keyword[if] identifier[len] ( identifier[institute_causative_variant_ids] )== literal[int] : keyword[return] [] keyword[if] identifier[case_obj] : identifier[case_causative_ids] = identifier[set] ( identifier[case_obj] . identifier[get] ( literal[string] ,[])) identifier[institute_causative_variant_ids] = identifier[list] ( identifier[set] ( identifier[institute_causative_variant_ids] ). identifier[difference] ( identifier[case_causative_ids] ) ) identifier[query] = identifier[self] . identifier[variant_collection] . identifier[find] ( { literal[string] :{ literal[string] : identifier[institute_causative_variant_ids] }}, { literal[string] : literal[int] } ) identifier[positional_variant_ids] =[ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[query] ] identifier[filters] ={ literal[string] :{ literal[string] : identifier[positional_variant_ids] }} keyword[if] identifier[case_obj] : identifier[filters] [ literal[string] ]= identifier[case_obj] [ literal[string] ] keyword[else] : identifier[filters] [ literal[string] ]= identifier[institute_obj] [ literal[string] ] keyword[return] identifier[self] . identifier[variant_collection] . identifier[find] ( identifier[filters] )
def check_causatives(self, case_obj=None, institute_obj=None): """Check if there are any variants that are previously marked causative Loop through all variants that are marked 'causative' for an institute and check if any of the variants are present in the current case. Args: case_obj (dict): A Case object institute_obj (dict): check across the whole institute Returns: causatives(iterable(Variant)) """ institute_id = case_obj['owner'] if case_obj else institute_obj['_id'] institute_causative_variant_ids = self.get_causatives(institute_id) if len(institute_causative_variant_ids) == 0: return [] # depends on [control=['if'], data=[]] if case_obj: # exclude variants that are marked causative in "case_obj" case_causative_ids = set(case_obj.get('causatives', [])) institute_causative_variant_ids = list(set(institute_causative_variant_ids).difference(case_causative_ids)) # depends on [control=['if'], data=[]] # convert from unique ids to general "variant_id" query = self.variant_collection.find({'_id': {'$in': institute_causative_variant_ids}}, {'variant_id': 1}) positional_variant_ids = [item['variant_id'] for item in query] filters = {'variant_id': {'$in': positional_variant_ids}} if case_obj: filters['case_id'] = case_obj['_id'] # depends on [control=['if'], data=[]] else: filters['institute'] = institute_obj['_id'] return self.variant_collection.find(filters)
def current_version(self, object, relations_as_of=None, check_db=False):
    """
    Return the current version of the given object.

    The current version is the one having its version_end_date set to
    NULL.  If there is no such version, the object has been 'deleted'
    and there is no current version available; in that case None is
    returned.

    Note that if check_db is False and the object's version_end_date is
    None, the database is NOT consulted for a newer version (perhaps
    created by some other code) -- the passed object is used as-is.

    ``relations_as_of`` fixes the point in time for the version, which
    affects which related objects are returned when querying for object
    relations.  See ``VersionManager.version_as_of`` for the valid
    values.

    :param Versionable object: object whose current version will be
        returned.
    :param mixed relations_as_of: determines point in time used to
        access relations. 'start'|'end'|datetime|None
    :param bool check_db: Whether or not to look in the database for a
        more recent version
    :return: Versionable
    """
    if check_db or object.version_end_date is not None:
        # either forced to, or the object is visibly outdated:
        # fetch the live version from the database
        current = self.current.filter(identity=object.identity).first()
    else:
        current = object
    return self.adjust_version_as_of(current, relations_as_of)
def function[current_version, parameter[self, object, relations_as_of, check_db]]: constant[ Return the current version of the given object. The current version is the one having its version_end_date set to NULL. If there is not such a version then it means the object has been 'deleted' and so there is no current version available. In this case the function returns None. Note that if check_db is False and object's version_end_date is None, this does not check the database to see if there is a newer version (perhaps created by some other code), it simply returns the passed object. ``relations_as_of`` is used to fix the point in time for the version; this affects which related objects are returned when querying for object relations. See ``VersionManager.version_as_of`` for details on valid ``relations_as_of`` values. :param Versionable object: object whose current version will be returned. :param mixed relations_as_of: determines point in time used to access relations. 'start'|'end'|datetime|None :param bool check_db: Whether or not to look in the database for a more recent version :return: Versionable ] if <ast.BoolOp object at 0x7da1b1042d70> begin[:] variable[current] assign[=] name[object] return[call[name[self].adjust_version_as_of, parameter[name[current], name[relations_as_of]]]]
keyword[def] identifier[current_version] ( identifier[self] , identifier[object] , identifier[relations_as_of] = keyword[None] , identifier[check_db] = keyword[False] ): literal[string] keyword[if] identifier[object] . identifier[version_end_date] keyword[is] keyword[None] keyword[and] keyword[not] identifier[check_db] : identifier[current] = identifier[object] keyword[else] : identifier[current] = identifier[self] . identifier[current] . identifier[filter] ( identifier[identity] = identifier[object] . identifier[identity] ). identifier[first] () keyword[return] identifier[self] . identifier[adjust_version_as_of] ( identifier[current] , identifier[relations_as_of] )
def current_version(self, object, relations_as_of=None, check_db=False): """ Return the current version of the given object. The current version is the one having its version_end_date set to NULL. If there is not such a version then it means the object has been 'deleted' and so there is no current version available. In this case the function returns None. Note that if check_db is False and object's version_end_date is None, this does not check the database to see if there is a newer version (perhaps created by some other code), it simply returns the passed object. ``relations_as_of`` is used to fix the point in time for the version; this affects which related objects are returned when querying for object relations. See ``VersionManager.version_as_of`` for details on valid ``relations_as_of`` values. :param Versionable object: object whose current version will be returned. :param mixed relations_as_of: determines point in time used to access relations. 'start'|'end'|datetime|None :param bool check_db: Whether or not to look in the database for a more recent version :return: Versionable """ if object.version_end_date is None and (not check_db): current = object # depends on [control=['if'], data=[]] else: current = self.current.filter(identity=object.identity).first() return self.adjust_version_as_of(current, relations_as_of)
def new(self):
    # type: () -> None
    '''
    Create a new UDF Logical Volume Header Descriptor.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    # refuse to re-initialize an already-initialized descriptor
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Header Descriptor already initialized')

    self._initialized = True
    self.unique_id = 261
def function[new, parameter[self]]: constant[ A method to create a new UDF Logical Volume Header Descriptor. Parameters: None. Returns: Nothing. ] if name[self]._initialized begin[:] <ast.Raise object at 0x7da1b0f0d8a0> name[self].unique_id assign[=] constant[261] name[self]._initialized assign[=] constant[True]
keyword[def] identifier[new] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_initialized] : keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] ) identifier[self] . identifier[unique_id] = literal[int] identifier[self] . identifier[_initialized] = keyword[True]
def new(self): # type: () -> None '\n A method to create a new UDF Logical Volume Header Descriptor.\n\n Parameters:\n None.\n Returns:\n Nothing.\n ' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Header Descriptor already initialized') # depends on [control=['if'], data=[]] self.unique_id = 261 self._initialized = True
def bed(args):
    """
    %prog fasta map.out

    Convert MSTMAP output into bed format.
    """
    p = OptionParser(bed.__doc__)
    p.add_option("--switch", default=False, action="store_true",
                 help="Switch reference and aligned map elements [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    (mapout,) = args
    # output file shares the input's basename, with a .bed extension
    prefix = mapout.split(".")[0]
    mapbed = prefix + ".bed"
    binmap = BinMap(mapout)
    binmap.print_to_bed(mapbed, switch=opts.switch)

    return mapbed
def function[bed, parameter[args]]: constant[ %prog fasta map.out Convert MSTMAP output into bed format. ] variable[p] assign[=] call[name[OptionParser], parameter[name[bed].__doc__]] call[name[p].add_option, parameter[constant[--switch]]] <ast.Tuple object at 0x7da1b0900640> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b09bd9c0>]] <ast.Tuple object at 0x7da1b09bd690> assign[=] name[args] variable[pf] assign[=] call[call[name[mapout].split, parameter[constant[.]]]][constant[0]] variable[mapbed] assign[=] binary_operation[name[pf] + constant[.bed]] variable[bm] assign[=] call[name[BinMap], parameter[name[mapout]]] call[name[bm].print_to_bed, parameter[name[mapbed]]] return[name[mapbed]]
keyword[def] identifier[bed] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[bed] . identifier[__doc__] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[mapout] ,= identifier[args] identifier[pf] = identifier[mapout] . identifier[split] ( literal[string] )[ literal[int] ] identifier[mapbed] = identifier[pf] + literal[string] identifier[bm] = identifier[BinMap] ( identifier[mapout] ) identifier[bm] . identifier[print_to_bed] ( identifier[mapbed] , identifier[switch] = identifier[opts] . identifier[switch] ) keyword[return] identifier[mapbed]
def bed(args): """ %prog fasta map.out Convert MSTMAP output into bed format. """ p = OptionParser(bed.__doc__) p.add_option('--switch', default=False, action='store_true', help='Switch reference and aligned map elements [default: %default]') (opts, args) = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] (mapout,) = args pf = mapout.split('.')[0] mapbed = pf + '.bed' bm = BinMap(mapout) bm.print_to_bed(mapbed, switch=opts.switch) return mapbed
def add_lock(packages, root=None, **kwargs):  # pylint: disable=unused-argument
    '''
    Add a package lock. Specify packages to lock by exact name.

    root
        operate on a different root directory.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.add_lock <package name>
        salt '*' pkg.add_lock <package1>,<package2>,<package3>
        salt '*' pkg.add_lock pkgs='["foo", "bar"]'
    '''
    salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use hold() instead.')
    locks = list_locks(root)

    try:
        targets = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys())
    except MinionError as exc:
        raise CommandExecutionError(exc)

    # only lock packages that do not already carry a lock
    added = [target for target in targets if not locks.get(target)]

    if added:
        __zypper__(root=root).call('al', *added)

    return {'added': len(added), 'packages': added}
def function[add_lock, parameter[packages, root]]: constant[ Add a package lock. Specify packages to lock by exact name. root operate on a different root directory. CLI Example: .. code-block:: bash salt '*' pkg.add_lock <package name> salt '*' pkg.add_lock <package1>,<package2>,<package3> salt '*' pkg.add_lock pkgs='["foo", "bar"]' ] call[name[salt].utils.versions.warn_until, parameter[constant[Sodium], constant[This function is deprecated. Please use hold() instead.]]] variable[locks] assign[=] call[name[list_locks], parameter[name[root]]] variable[added] assign[=] list[[]] <ast.Try object at 0x7da1b1f2ada0> for taget[name[pkg]] in starred[name[packages]] begin[:] if <ast.UnaryOp object at 0x7da1b1f2a920> begin[:] call[name[added].append, parameter[name[pkg]]] if name[added] begin[:] call[call[name[__zypper__], parameter[]].call, parameter[constant[al], <ast.Starred object at 0x7da1b1f2b790>]] return[dictionary[[<ast.Constant object at 0x7da1b1f2a680>, <ast.Constant object at 0x7da1b1f2b340>], [<ast.Call object at 0x7da1b1f2a170>, <ast.Name object at 0x7da1b1f2b8e0>]]]
keyword[def] identifier[add_lock] ( identifier[packages] , identifier[root] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[salt] . identifier[utils] . identifier[versions] . identifier[warn_until] ( literal[string] , literal[string] ) identifier[locks] = identifier[list_locks] ( identifier[root] ) identifier[added] =[] keyword[try] : identifier[packages] = identifier[list] ( identifier[__salt__] [ literal[string] ]( identifier[packages] )[ literal[int] ]. identifier[keys] ()) keyword[except] identifier[MinionError] keyword[as] identifier[exc] : keyword[raise] identifier[CommandExecutionError] ( identifier[exc] ) keyword[for] identifier[pkg] keyword[in] identifier[packages] : keyword[if] keyword[not] identifier[locks] . identifier[get] ( identifier[pkg] ): identifier[added] . identifier[append] ( identifier[pkg] ) keyword[if] identifier[added] : identifier[__zypper__] ( identifier[root] = identifier[root] ). identifier[call] ( literal[string] ,* identifier[added] ) keyword[return] { literal[string] : identifier[len] ( identifier[added] ), literal[string] : identifier[added] }
def add_lock(packages, root=None, **kwargs): # pylint: disable=unused-argument '\n Add a package lock. Specify packages to lock by exact name.\n\n root\n operate on a different root directory.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt \'*\' pkg.add_lock <package name>\n salt \'*\' pkg.add_lock <package1>,<package2>,<package3>\n salt \'*\' pkg.add_lock pkgs=\'["foo", "bar"]\'\n ' salt.utils.versions.warn_until('Sodium', 'This function is deprecated. Please use hold() instead.') locks = list_locks(root) added = [] try: packages = list(__salt__['pkg_resource.parse_targets'](packages)[0].keys()) # depends on [control=['try'], data=[]] except MinionError as exc: raise CommandExecutionError(exc) # depends on [control=['except'], data=['exc']] for pkg in packages: if not locks.get(pkg): added.append(pkg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pkg']] if added: __zypper__(root=root).call('al', *added) # depends on [control=['if'], data=[]] return {'added': len(added), 'packages': added}
def atlas_find_missing_zonefile_availability( peer_table=None, con=None, path=None, missing_zonefile_info=None ):
    """
    Find the set of missing zonefiles, as well as their popularity amongst 
    our neighbors.

    Only consider zonefiles that are known by at least one peer; otherwise
    they're missing from our clique (and we'll re-sync our neighbors' inventories
    every so often to make sure we detect when zonefiles become available).

    Return a dict, structured as:
    {
        'zonefile hash': {
            'names': [names],
            'txid': first txid that set it,
            'indexes': [...],
            'block_heights': [...],
            'popularity': ...,
            'peers': [...],
            'tried_storage': True|False
        }
    }
    """
    # which zonefiles do we have?
    # Page through the zonefile table bit_count rows at a time.
    bit_offset = 0
    bit_count = 10000
    missing = []
    ret = {}

    if missing_zonefile_info is None:
        # not given the missing set up-front: scan the atlas DB for it
        while True:
            zfinfo = atlasdb_zonefile_find_missing( bit_offset, bit_count, con=con, path=path )
            if len(zfinfo) == 0:
                # no more pages of missing zonefiles
                break

            missing += zfinfo
            bit_offset += len(zfinfo)

        if len(missing) > 0:
            log.debug("Missing %s zonefiles" % len(missing))

    else:
        # caller already knows which zonefiles are missing
        missing = missing_zonefile_info

    if len(missing) == 0:
        # none!
        return ret

    with AtlasPeerTableLocked(peer_table) as ptbl:
        # do any other peers have this zonefile?
        for zfinfo in missing:
            popularity = 0
            # Each zonefile occupies one bit in a peer's inventory bitmap,
            # addressed by (inv_index - 1); bit_index counts down from the
            # most significant bit of the byte.
            # NOTE: Python 2 semantics -- '/' here is integer (floor) division.
            byte_index = (zfinfo['inv_index'] - 1) / 8
            bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
            peers = []

            # dict.has_key() is Python 2 only.
            # First time we see this hash: start an aggregate record.  A hash
            # can recur in 'missing' (one row per name), in which case the
            # names/indexes/block_heights lists accumulate below.
            if not ret.has_key(zfinfo['zonefile_hash']):
                ret[zfinfo['zonefile_hash']] = {
                    'names': [],
                    'txid': zfinfo['txid'],
                    'indexes': [],
                    'block_heights': [],
                    'popularity': 0,
                    'peers': [],
                    'tried_storage': False
                }

            for peer_hostport in ptbl.keys():
                peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )

                if len(peer_inv) <= byte_index:
                    # too new for this peer
                    continue

                if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
                    # this peer doesn't have it
                    continue

                if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
                    # count each peer at most once per zonefile hash
                    popularity += 1
                    peers.append( peer_hostport )

            ret[zfinfo['zonefile_hash']]['names'].append( zfinfo['name'] )
            ret[zfinfo['zonefile_hash']]['indexes'].append( zfinfo['inv_index']-1 )
            ret[zfinfo['zonefile_hash']]['block_heights'].append( zfinfo['block_height'] )
            ret[zfinfo['zonefile_hash']]['popularity'] += popularity
            ret[zfinfo['zonefile_hash']]['peers'] += peers
            ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']

    return ret
def function[atlas_find_missing_zonefile_availability, parameter[peer_table, con, path, missing_zonefile_info]]: constant[ Find the set of missing zonefiles, as well as their popularity amongst our neighbors. Only consider zonefiles that are known by at least one peer; otherwise they're missing from our clique (and we'll re-sync our neighborss' inventories every so often to make sure we detect when zonefiles become available). Return a dict, structured as: { 'zonefile hash': { 'names': [names], 'txid': first txid that set it, 'indexes': [...], 'popularity': ..., 'peers': [...], 'tried_storage': True|False } } ] variable[bit_offset] assign[=] constant[0] variable[bit_count] assign[=] constant[10000] variable[missing] assign[=] list[[]] variable[ret] assign[=] dictionary[[], []] if compare[name[missing_zonefile_info] is constant[None]] begin[:] while constant[True] begin[:] variable[zfinfo] assign[=] call[name[atlasdb_zonefile_find_missing], parameter[name[bit_offset], name[bit_count]]] if compare[call[name[len], parameter[name[zfinfo]]] equal[==] constant[0]] begin[:] break <ast.AugAssign object at 0x7da18bcc8e20> <ast.AugAssign object at 0x7da18bcc87f0> if compare[call[name[len], parameter[name[missing]]] greater[>] constant[0]] begin[:] call[name[log].debug, parameter[binary_operation[constant[Missing %s zonefiles] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[missing]]]]]] if compare[call[name[len], parameter[name[missing]]] equal[==] constant[0]] begin[:] return[name[ret]] with call[name[AtlasPeerTableLocked], parameter[name[peer_table]]] begin[:] for taget[name[zfinfo]] in starred[name[missing]] begin[:] variable[popularity] assign[=] constant[0] variable[byte_index] assign[=] binary_operation[binary_operation[call[name[zfinfo]][constant[inv_index]] - constant[1]] / constant[8]] variable[bit_index] assign[=] binary_operation[constant[7] - binary_operation[binary_operation[call[name[zfinfo]][constant[inv_index]] - constant[1]] <ast.Mod object 
at 0x7da2590d6920> constant[8]]] variable[peers] assign[=] list[[]] if <ast.UnaryOp object at 0x7da18bcc8fa0> begin[:] call[name[ret]][call[name[zfinfo]][constant[zonefile_hash]]] assign[=] dictionary[[<ast.Constant object at 0x7da18bccb8e0>, <ast.Constant object at 0x7da18bcc8dc0>, <ast.Constant object at 0x7da18bcc9b70>, <ast.Constant object at 0x7da18bcc8070>, <ast.Constant object at 0x7da18bccbd30>, <ast.Constant object at 0x7da18bcc9150>, <ast.Constant object at 0x7da18bcc9fc0>], [<ast.List object at 0x7da18bccbdf0>, <ast.Subscript object at 0x7da18bcc9c60>, <ast.List object at 0x7da18bccb160>, <ast.List object at 0x7da18bccbca0>, <ast.Constant object at 0x7da18bcc9ae0>, <ast.List object at 0x7da18bccbe80>, <ast.Constant object at 0x7da18bcca1d0>]] for taget[name[peer_hostport]] in starred[call[name[ptbl].keys, parameter[]]] begin[:] variable[peer_inv] assign[=] call[name[atlas_peer_get_zonefile_inventory], parameter[name[peer_hostport]]] if compare[call[name[len], parameter[name[peer_inv]]] less_or_equal[<=] name[byte_index]] begin[:] continue if compare[binary_operation[call[name[ord], parameter[call[name[peer_inv]][name[byte_index]]]] <ast.BitAnd object at 0x7da2590d6b60> binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[bit_index]]] equal[==] constant[0]] begin[:] continue if compare[name[peer_hostport] <ast.NotIn object at 0x7da2590d7190> call[call[name[ret]][call[name[zfinfo]][constant[zonefile_hash]]]][constant[peers]]] begin[:] <ast.AugAssign object at 0x7da1b1632260> call[name[peers].append, parameter[name[peer_hostport]]] call[call[call[name[ret]][call[name[zfinfo]][constant[zonefile_hash]]]][constant[names]].append, parameter[call[name[zfinfo]][constant[name]]]] call[call[call[name[ret]][call[name[zfinfo]][constant[zonefile_hash]]]][constant[indexes]].append, parameter[binary_operation[call[name[zfinfo]][constant[inv_index]] - constant[1]]]] 
call[call[call[name[ret]][call[name[zfinfo]][constant[zonefile_hash]]]][constant[block_heights]].append, parameter[call[name[zfinfo]][constant[block_height]]]] <ast.AugAssign object at 0x7da1b1723eb0> <ast.AugAssign object at 0x7da1b1721750> call[call[name[ret]][call[name[zfinfo]][constant[zonefile_hash]]]][constant[tried_storage]] assign[=] call[name[zfinfo]][constant[tried_storage]] return[name[ret]]
keyword[def] identifier[atlas_find_missing_zonefile_availability] ( identifier[peer_table] = keyword[None] , identifier[con] = keyword[None] , identifier[path] = keyword[None] , identifier[missing_zonefile_info] = keyword[None] ): literal[string] identifier[bit_offset] = literal[int] identifier[bit_count] = literal[int] identifier[missing] =[] identifier[ret] ={} keyword[if] identifier[missing_zonefile_info] keyword[is] keyword[None] : keyword[while] keyword[True] : identifier[zfinfo] = identifier[atlasdb_zonefile_find_missing] ( identifier[bit_offset] , identifier[bit_count] , identifier[con] = identifier[con] , identifier[path] = identifier[path] ) keyword[if] identifier[len] ( identifier[zfinfo] )== literal[int] : keyword[break] identifier[missing] += identifier[zfinfo] identifier[bit_offset] += identifier[len] ( identifier[zfinfo] ) keyword[if] identifier[len] ( identifier[missing] )> literal[int] : identifier[log] . identifier[debug] ( literal[string] % identifier[len] ( identifier[missing] )) keyword[else] : identifier[missing] = identifier[missing_zonefile_info] keyword[if] identifier[len] ( identifier[missing] )== literal[int] : keyword[return] identifier[ret] keyword[with] identifier[AtlasPeerTableLocked] ( identifier[peer_table] ) keyword[as] identifier[ptbl] : keyword[for] identifier[zfinfo] keyword[in] identifier[missing] : identifier[popularity] = literal[int] identifier[byte_index] =( identifier[zfinfo] [ literal[string] ]- literal[int] )/ literal[int] identifier[bit_index] = literal[int] -(( identifier[zfinfo] [ literal[string] ]- literal[int] )% literal[int] ) identifier[peers] =[] keyword[if] keyword[not] identifier[ret] . 
identifier[has_key] ( identifier[zfinfo] [ literal[string] ]): identifier[ret] [ identifier[zfinfo] [ literal[string] ]]={ literal[string] :[], literal[string] : identifier[zfinfo] [ literal[string] ], literal[string] :[], literal[string] :[], literal[string] : literal[int] , literal[string] :[], literal[string] : keyword[False] } keyword[for] identifier[peer_hostport] keyword[in] identifier[ptbl] . identifier[keys] (): identifier[peer_inv] = identifier[atlas_peer_get_zonefile_inventory] ( identifier[peer_hostport] , identifier[peer_table] = identifier[ptbl] ) keyword[if] identifier[len] ( identifier[peer_inv] )<= identifier[byte_index] : keyword[continue] keyword[if] ( identifier[ord] ( identifier[peer_inv] [ identifier[byte_index] ])&( literal[int] << identifier[bit_index] ))== literal[int] : keyword[continue] keyword[if] identifier[peer_hostport] keyword[not] keyword[in] identifier[ret] [ identifier[zfinfo] [ literal[string] ]][ literal[string] ]: identifier[popularity] += literal[int] identifier[peers] . identifier[append] ( identifier[peer_hostport] ) identifier[ret] [ identifier[zfinfo] [ literal[string] ]][ literal[string] ]. identifier[append] ( identifier[zfinfo] [ literal[string] ]) identifier[ret] [ identifier[zfinfo] [ literal[string] ]][ literal[string] ]. identifier[append] ( identifier[zfinfo] [ literal[string] ]- literal[int] ) identifier[ret] [ identifier[zfinfo] [ literal[string] ]][ literal[string] ]. identifier[append] ( identifier[zfinfo] [ literal[string] ]) identifier[ret] [ identifier[zfinfo] [ literal[string] ]][ literal[string] ]+= identifier[popularity] identifier[ret] [ identifier[zfinfo] [ literal[string] ]][ literal[string] ]+= identifier[peers] identifier[ret] [ identifier[zfinfo] [ literal[string] ]][ literal[string] ]= identifier[zfinfo] [ literal[string] ] keyword[return] identifier[ret]
def atlas_find_missing_zonefile_availability(peer_table=None, con=None, path=None, missing_zonefile_info=None): """ Find the set of missing zonefiles, as well as their popularity amongst our neighbors. Only consider zonefiles that are known by at least one peer; otherwise they're missing from our clique (and we'll re-sync our neighborss' inventories every so often to make sure we detect when zonefiles become available). Return a dict, structured as: { 'zonefile hash': { 'names': [names], 'txid': first txid that set it, 'indexes': [...], 'popularity': ..., 'peers': [...], 'tried_storage': True|False } } """ # which zonefiles do we have? bit_offset = 0 bit_count = 10000 missing = [] ret = {} if missing_zonefile_info is None: while True: zfinfo = atlasdb_zonefile_find_missing(bit_offset, bit_count, con=con, path=path) if len(zfinfo) == 0: break # depends on [control=['if'], data=[]] missing += zfinfo bit_offset += len(zfinfo) # depends on [control=['while'], data=[]] if len(missing) > 0: log.debug('Missing %s zonefiles' % len(missing)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: missing = missing_zonefile_info if len(missing) == 0: # none! return ret # depends on [control=['if'], data=[]] with AtlasPeerTableLocked(peer_table) as ptbl: # do any other peers have this zonefile? 
for zfinfo in missing: popularity = 0 byte_index = (zfinfo['inv_index'] - 1) / 8 bit_index = 7 - (zfinfo['inv_index'] - 1) % 8 peers = [] if not ret.has_key(zfinfo['zonefile_hash']): ret[zfinfo['zonefile_hash']] = {'names': [], 'txid': zfinfo['txid'], 'indexes': [], 'block_heights': [], 'popularity': 0, 'peers': [], 'tried_storage': False} # depends on [control=['if'], data=[]] for peer_hostport in ptbl.keys(): peer_inv = atlas_peer_get_zonefile_inventory(peer_hostport, peer_table=ptbl) if len(peer_inv) <= byte_index: # too new for this peer continue # depends on [control=['if'], data=[]] if ord(peer_inv[byte_index]) & 1 << bit_index == 0: # this peer doesn't have it continue # depends on [control=['if'], data=[]] if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']: popularity += 1 peers.append(peer_hostport) # depends on [control=['if'], data=['peer_hostport']] # depends on [control=['for'], data=['peer_hostport']] ret[zfinfo['zonefile_hash']]['names'].append(zfinfo['name']) ret[zfinfo['zonefile_hash']]['indexes'].append(zfinfo['inv_index'] - 1) ret[zfinfo['zonefile_hash']]['block_heights'].append(zfinfo['block_height']) ret[zfinfo['zonefile_hash']]['popularity'] += popularity ret[zfinfo['zonefile_hash']]['peers'] += peers ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage'] # depends on [control=['for'], data=['zfinfo']] # depends on [control=['with'], data=['ptbl']] return ret
def atlasdb_get_zonefiles_by_hash(zonefile_hash, block_height=None, con=None, path=None):
    """
    Find all instances of this zone file in the atlasdb.
    Optionally filter on block height

    Returns [{'name': ..., 'zonefile_hash': ..., 'txid': ..., 'inv_index': ..., 'block_height': ..., 'present': ..., 'tried_storage': ...}], in blockchain order
    Returns None if the zone file is not in the db, or if block_height is set, return None if the zone file is not at this block height.
    """
    # build the query incrementally, appending the optional height filter
    query_parts = ['SELECT * FROM zonefiles WHERE zonefile_hash = ?']
    params = [zonefile_hash]

    if block_height:
        query_parts.append('AND block_height = ?')
        params.append(block_height)

    query_parts.append('ORDER BY inv_index;')
    sql = ' '.join(query_parts)

    with AtlasDBOpen(con=con, path=path) as dbcon:
        cursor = dbcon.cursor()
        rows = atlasdb_query_execute(cursor, sql, tuple(params))

        # materialize each DB row into a plain dict
        matches = []
        for row in rows:
            entry = {}
            entry.update(row)
            matches.append(entry)

        if not matches:
            return None

        return matches
def function[atlasdb_get_zonefiles_by_hash, parameter[zonefile_hash, block_height, con, path]]: constant[ Find all instances of this zone file in the atlasdb. Optionally filter on block height Returns [{'name': ..., 'zonefile_hash': ..., 'txid': ..., 'inv_index': ..., 'block_height': ..., 'present': ..., 'tried_storage': ...}], in blockchain order Returns None if the zone file is not in the db, or if block_height is set, return None if the zone file is not at this block height. ] with call[name[AtlasDBOpen], parameter[]] begin[:] variable[sql] assign[=] constant[SELECT * FROM zonefiles WHERE zonefile_hash = ?] variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b1721f30>]] if name[block_height] begin[:] <ast.AugAssign object at 0x7da1b17219f0> <ast.AugAssign object at 0x7da1b1720f10> <ast.AugAssign object at 0x7da1b17222c0> variable[cur] assign[=] call[name[dbcon].cursor, parameter[]] variable[res] assign[=] call[name[atlasdb_query_execute], parameter[name[cur], name[sql], name[args]]] variable[ret] assign[=] list[[]] for taget[name[zfinfo]] in starred[name[res]] begin[:] variable[row] assign[=] dictionary[[], []] call[name[row].update, parameter[name[zfinfo]]] call[name[ret].append, parameter[name[row]]] if compare[call[name[len], parameter[name[ret]]] equal[==] constant[0]] begin[:] return[constant[None]] return[name[ret]]
keyword[def] identifier[atlasdb_get_zonefiles_by_hash] ( identifier[zonefile_hash] , identifier[block_height] = keyword[None] , identifier[con] = keyword[None] , identifier[path] = keyword[None] ): literal[string] keyword[with] identifier[AtlasDBOpen] ( identifier[con] = identifier[con] , identifier[path] = identifier[path] ) keyword[as] identifier[dbcon] : identifier[sql] = literal[string] identifier[args] =( identifier[zonefile_hash] ,) keyword[if] identifier[block_height] : identifier[sql] += literal[string] identifier[args] +=( identifier[block_height] ,) identifier[sql] += literal[string] identifier[cur] = identifier[dbcon] . identifier[cursor] () identifier[res] = identifier[atlasdb_query_execute] ( identifier[cur] , identifier[sql] , identifier[args] ) identifier[ret] =[] keyword[for] identifier[zfinfo] keyword[in] identifier[res] : identifier[row] ={} identifier[row] . identifier[update] ( identifier[zfinfo] ) identifier[ret] . identifier[append] ( identifier[row] ) keyword[if] identifier[len] ( identifier[ret] )== literal[int] : keyword[return] keyword[None] keyword[return] identifier[ret]
def atlasdb_get_zonefiles_by_hash(zonefile_hash, block_height=None, con=None, path=None): """ Find all instances of this zone file in the atlasdb. Optionally filter on block height Returns [{'name': ..., 'zonefile_hash': ..., 'txid': ..., 'inv_index': ..., 'block_height': ..., 'present': ..., 'tried_storage': ...}], in blockchain order Returns None if the zone file is not in the db, or if block_height is set, return None if the zone file is not at this block height. """ with AtlasDBOpen(con=con, path=path) as dbcon: sql = 'SELECT * FROM zonefiles WHERE zonefile_hash = ?' args = (zonefile_hash,) if block_height: sql += ' AND block_height = ?' args += (block_height,) # depends on [control=['if'], data=[]] sql += ' ORDER BY inv_index;' cur = dbcon.cursor() res = atlasdb_query_execute(cur, sql, args) ret = [] for zfinfo in res: row = {} row.update(zfinfo) ret.append(row) # depends on [control=['for'], data=['zfinfo']] if len(ret) == 0: return None # depends on [control=['if'], data=[]] return ret # depends on [control=['with'], data=['dbcon']]
def SetWriteBack(self, filename):
    """Sets the config file which will receive any modifications.

    The main config file can be made writable, but directing all Set()
    operations into a secondary location. This secondary location will
    receive any updates and will override the options for this file.

    Args:
      filename: A filename which will receive updates. The file is parsed
        first and merged into the raw data from this object.
    """
    try:
        secondary = self.LoadSecondaryConfig(filename)
        self.writeback = secondary
        self.MergeData(secondary.RawData(), self.writeback_data)
    except IOError as e:
        # This means that we probably aren't installed correctly.
        logging.error("Unable to read writeback file: %s", e)
        return
    except Exception as parse_error:  # pylint: disable=broad-except
        # Could be a yaml parse error or some malformed parameter.  Move
        # the writeback file aside so we start from a clean state on the
        # next run, then re-raise the original failure.
        if os.path.exists(filename):
            try:
                backup = filename + ".bak"
                os.rename(filename, backup)
                logging.warning("Broken writeback (%s) renamed to: %s",
                                parse_error, backup)
            except Exception as rename_error:  # pylint: disable=broad-except
                logging.error("Unable to rename broken writeback: %s",
                              rename_error)
        raise parse_error
    logging.debug("Configuration writeback is set to %s", filename)
def function[SetWriteBack, parameter[self, filename]]: constant[Sets the config file which will receive any modifications. The main config file can be made writable, but directing all Set() operations into a secondary location. This secondary location will receive any updates and will override the options for this file. Args: filename: A filename which will receive updates. The file is parsed first and merged into the raw data from this object. ] <ast.Try object at 0x7da1b1b5aad0> call[name[logging].debug, parameter[constant[Configuration writeback is set to %s], name[filename]]]
keyword[def] identifier[SetWriteBack] ( identifier[self] , identifier[filename] ): literal[string] keyword[try] : identifier[self] . identifier[writeback] = identifier[self] . identifier[LoadSecondaryConfig] ( identifier[filename] ) identifier[self] . identifier[MergeData] ( identifier[self] . identifier[writeback] . identifier[RawData] (), identifier[self] . identifier[writeback_data] ) keyword[except] identifier[IOError] keyword[as] identifier[e] : identifier[logging] . identifier[error] ( literal[string] , identifier[e] ) keyword[return] keyword[except] identifier[Exception] keyword[as] identifier[we] : keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ): keyword[try] : identifier[b] = identifier[filename] + literal[string] identifier[os] . identifier[rename] ( identifier[filename] , identifier[b] ) identifier[logging] . identifier[warning] ( literal[string] , identifier[we] , identifier[b] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logging] . identifier[error] ( literal[string] , identifier[e] ) keyword[raise] identifier[we] identifier[logging] . identifier[debug] ( literal[string] , identifier[filename] )
def SetWriteBack(self, filename): """Sets the config file which will receive any modifications. The main config file can be made writable, but directing all Set() operations into a secondary location. This secondary location will receive any updates and will override the options for this file. Args: filename: A filename which will receive updates. The file is parsed first and merged into the raw data from this object. """ try: self.writeback = self.LoadSecondaryConfig(filename) self.MergeData(self.writeback.RawData(), self.writeback_data) # depends on [control=['try'], data=[]] except IOError as e: # This means that we probably aren't installed correctly. logging.error('Unable to read writeback file: %s', e) return # depends on [control=['except'], data=['e']] except Exception as we: # pylint: disable=broad-except # Could be yaml parse error, could be some malformed parameter. Move the # writeback file so that we start in a clean state next run if os.path.exists(filename): try: b = filename + '.bak' os.rename(filename, b) logging.warning('Broken writeback (%s) renamed to: %s', we, b) # depends on [control=['try'], data=[]] except Exception as e: # pylint: disable=broad-except logging.error('Unable to rename broken writeback: %s', e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] raise we # depends on [control=['except'], data=['we']] logging.debug('Configuration writeback is set to %s', filename)
def alignPronunciations(pronI, pronA):
    '''
    Align the phones in two pronunciations.

    Returns the two pronunciations as equal-length lists of phones, with
    "''" entries inserted as padding so that the phones of the longest
    common subsequence line up at the same indices in both lists.
    '''
    # Work on lists of individual phones.
    pronI = list(pronI)
    pronA = list(pronA)

    # Find the longest common subsequence of the two pronunciations.
    sequence = _lcs(pronI, pronA)

    # Locate each common phone in both pronunciations.
    # NOTE(review): .index() searches *from* the previous match, not past
    # it, so repeated phones can resolve to the same index -- ambiguous
    # cases still TODO, as in the original.
    startA = 0
    startI = 0
    sequenceIndexListA = []
    sequenceIndexListI = []
    for phone in sequence:
        startA = pronA.index(phone, startA)
        startI = pronI.index(phone, startI)
        sequenceIndexListA.append(startA)
        sequenceIndexListI.append(startI)

    # A sentinel index on the tail of both lists makes the padding pass
    # below also equalize the total lengths.
    sequenceIndexListA.append(len(pronA))
    sequenceIndexListI.append(len(pronI))

    # Pad with "''" so sequential items share the same index and the two
    # pronunciations end up the same length.  The index lists are shifted
    # after each insertion, so iterate by position, re-reading each entry.
    for pos in range(len(sequenceIndexListA)):
        indexA = sequenceIndexListA[pos]
        indexI = sequenceIndexListI[pos]
        if indexA < indexI:
            # One slice assignment instead of repeated single inserts.
            pronA[indexA:indexA] = ["''"] * (indexI - indexA)
            sequenceIndexListA = [val + indexI - indexA
                                  for val in sequenceIndexListA]
        elif indexA > indexI:
            pronI[indexI:indexI] = ["''"] * (indexA - indexI)
            sequenceIndexListI = [val + indexA - indexI
                                  for val in sequenceIndexListI]

    return pronI, pronA
def function[alignPronunciations, parameter[pronI, pronA]]: constant[ Align the phones in two pronunciations ] variable[pronI] assign[=] <ast.ListComp object at 0x7da18f00f3d0> variable[pronA] assign[=] <ast.ListComp object at 0x7da18f00cca0> variable[pronITmp] assign[=] name[pronI] variable[pronATmp] assign[=] name[pronA] variable[sequence] assign[=] call[name[_lcs], parameter[name[pronITmp], name[pronATmp]]] variable[startA] assign[=] constant[0] variable[startI] assign[=] constant[0] variable[sequenceIndexListA] assign[=] list[[]] variable[sequenceIndexListI] assign[=] list[[]] for taget[name[phone]] in starred[name[sequence]] begin[:] variable[startA] assign[=] call[name[pronA].index, parameter[name[phone], name[startA]]] variable[startI] assign[=] call[name[pronI].index, parameter[name[phone], name[startI]]] call[name[sequenceIndexListA].append, parameter[name[startA]]] call[name[sequenceIndexListI].append, parameter[name[startI]]] call[name[sequenceIndexListA].append, parameter[call[name[len], parameter[name[pronA]]]]] call[name[sequenceIndexListI].append, parameter[call[name[len], parameter[name[pronI]]]]] for taget[name[x]] in starred[call[name[range], parameter[call[name[len], parameter[name[sequenceIndexListA]]]]]] begin[:] variable[indexA] assign[=] call[name[sequenceIndexListA]][name[x]] variable[indexI] assign[=] call[name[sequenceIndexListI]][name[x]] if compare[name[indexA] less[<] name[indexI]] begin[:] for taget[name[x]] in starred[call[name[range], parameter[binary_operation[name[indexI] - name[indexA]]]]] begin[:] call[name[pronA].insert, parameter[name[indexA], constant['']]] variable[sequenceIndexListA] assign[=] <ast.ListComp object at 0x7da18c4ccb50> return[tuple[[<ast.Name object at 0x7da18c4cfd30>, <ast.Name object at 0x7da18c4cf880>]]]
keyword[def] identifier[alignPronunciations] ( identifier[pronI] , identifier[pronA] ): literal[string] identifier[pronI] =[ identifier[char] keyword[for] identifier[char] keyword[in] identifier[pronI] ] identifier[pronA] =[ identifier[char] keyword[for] identifier[char] keyword[in] identifier[pronA] ] identifier[pronITmp] = identifier[pronI] identifier[pronATmp] = identifier[pronA] identifier[sequence] = identifier[_lcs] ( identifier[pronITmp] , identifier[pronATmp] ) identifier[startA] = literal[int] identifier[startI] = literal[int] identifier[sequenceIndexListA] =[] identifier[sequenceIndexListI] =[] keyword[for] identifier[phone] keyword[in] identifier[sequence] : identifier[startA] = identifier[pronA] . identifier[index] ( identifier[phone] , identifier[startA] ) identifier[startI] = identifier[pronI] . identifier[index] ( identifier[phone] , identifier[startI] ) identifier[sequenceIndexListA] . identifier[append] ( identifier[startA] ) identifier[sequenceIndexListI] . identifier[append] ( identifier[startI] ) identifier[sequenceIndexListA] . identifier[append] ( identifier[len] ( identifier[pronA] )) identifier[sequenceIndexListI] . identifier[append] ( identifier[len] ( identifier[pronI] )) keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[len] ( identifier[sequenceIndexListA] )): identifier[indexA] = identifier[sequenceIndexListA] [ identifier[x] ] identifier[indexI] = identifier[sequenceIndexListI] [ identifier[x] ] keyword[if] identifier[indexA] < identifier[indexI] : keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[indexI] - identifier[indexA] ): identifier[pronA] . 
identifier[insert] ( identifier[indexA] , literal[string] ) identifier[sequenceIndexListA] =[ identifier[val] + identifier[indexI] - identifier[indexA] keyword[for] identifier[val] keyword[in] identifier[sequenceIndexListA] ] keyword[elif] identifier[indexA] > identifier[indexI] : keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[indexA] - identifier[indexI] ): identifier[pronI] . identifier[insert] ( identifier[indexI] , literal[string] ) identifier[sequenceIndexListI] =[ identifier[val] + identifier[indexA] - identifier[indexI] keyword[for] identifier[val] keyword[in] identifier[sequenceIndexListI] ] keyword[return] identifier[pronI] , identifier[pronA]
def alignPronunciations(pronI, pronA): """ Align the phones in two pronunciations """ # First prep the two pronunctions pronI = [char for char in pronI] pronA = [char for char in pronA] # Remove any elements not in the other list (but maintain order) pronITmp = pronI pronATmp = pronA # Find the longest sequence sequence = _lcs(pronITmp, pronATmp) # Find the index of the sequence # TODO: investigate ambiguous cases startA = 0 startI = 0 sequenceIndexListA = [] sequenceIndexListI = [] for phone in sequence: startA = pronA.index(phone, startA) startI = pronI.index(phone, startI) sequenceIndexListA.append(startA) sequenceIndexListI.append(startI) # depends on [control=['for'], data=['phone']] # An index on the tail of both will be used to create output strings # of the same length sequenceIndexListA.append(len(pronA)) sequenceIndexListI.append(len(pronI)) # Fill in any blanks such that the sequential items have the same # index and the two strings are the same length for x in range(len(sequenceIndexListA)): indexA = sequenceIndexListA[x] indexI = sequenceIndexListI[x] if indexA < indexI: for x in range(indexI - indexA): pronA.insert(indexA, "''") # depends on [control=['for'], data=[]] sequenceIndexListA = [val + indexI - indexA for val in sequenceIndexListA] # depends on [control=['if'], data=['indexA', 'indexI']] elif indexA > indexI: for x in range(indexA - indexI): pronI.insert(indexI, "''") # depends on [control=['for'], data=[]] sequenceIndexListI = [val + indexA - indexI for val in sequenceIndexListI] # depends on [control=['if'], data=['indexA', 'indexI']] # depends on [control=['for'], data=['x']] return (pronI, pronA)
def setproctitle(text):
    """
    This is a wrapper for setproctitle.setproctitle(). The call
    sets 'text' as the new process title and returns the previous
    value.

    The module is commonly not installed. If missing, nothing is
    changed, and the call returns None.

    The module is described here:
    https://pypi.python.org/pypi/setproctitle
    """
    try:
        import setproctitle
    except Exception:
        # Module missing (or failed to load): leave the title untouched.
        return None
    else:  # pragma: no cover
        prev = setproctitle.getproctitle()
        setproctitle.setproctitle(text)
        return prev
def function[setproctitle, parameter[text]]: constant[ This is a wrapper for setproctitle.setproctitle(). The call sets 'text' as the new process title and returns the previous value. The module is commonly not installed. If missing, nothing is changed, and the call returns None. The module is described here: https://pypi.python.org/pypi/setproctitle ] <ast.Try object at 0x7da2054a6890>
keyword[def] identifier[setproctitle] ( identifier[text] ): literal[string] keyword[try] : keyword[import] identifier[setproctitle] keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[return] keyword[None] keyword[else] : identifier[prev] = identifier[setproctitle] . identifier[getproctitle] () identifier[setproctitle] . identifier[setproctitle] ( identifier[text] ) keyword[return] identifier[prev]
def setproctitle(text): """ This is a wrapper for setproctitle.setproctitle(). The call sets 'text' as the new process title and returns the previous value. The module is commonly not installed. If missing, nothing is changed, and the call returns None. The module is described here: https://pypi.python.org/pypi/setproctitle """ try: import setproctitle # depends on [control=['try'], data=[]] except Exception as e: return None # depends on [control=['except'], data=[]] else: # pragma: no cover prev = setproctitle.getproctitle() setproctitle.setproctitle(text) return prev
def extract_chm (archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a CHM archive.

    Builds and returns the command list: the extractor command, the
    absolute path of the archive, and the output directory.
    """
    archive_path = os.path.abspath(archive)
    return [cmd, archive_path, outdir]
def function[extract_chm, parameter[archive, compression, cmd, verbosity, interactive, outdir]]: constant[Extract a CHM archive.] return[list[[<ast.Name object at 0x7da1b07ad7e0>, <ast.Call object at 0x7da1b07af730>, <ast.Name object at 0x7da1b07aca30>]]]
keyword[def] identifier[extract_chm] ( identifier[archive] , identifier[compression] , identifier[cmd] , identifier[verbosity] , identifier[interactive] , identifier[outdir] ): literal[string] keyword[return] [ identifier[cmd] , identifier[os] . identifier[path] . identifier[abspath] ( identifier[archive] ), identifier[outdir] ]
def extract_chm(archive, compression, cmd, verbosity, interactive, outdir): """Extract a CHM archive.""" return [cmd, os.path.abspath(archive), outdir]
def rgb_to_hsl(r, g=None, b=None):
    """Convert the color from RGB coordinates to HSL.

    Parameters:
      :r: The Red component value [0...1], or a (r, g, b) list/tuple
      :g: The Green component value [0...1]
      :b: The Blue component value [0...1]

    Returns:
      The color as an (h, s, l) tuple in the range:
      h[0...360], s[0...1], l[0...1]

    >>> rgb_to_hsl(1, 0.5, 0)
    (30.0, 1.0, 0.5)
    """
    # Accept a single sequence carrying all three components.
    # (isinstance, not type(...)==..., so subclasses work too.)
    if isinstance(r, (list, tuple)):
        r, g, b = r

    minVal = min(r, g, b)   # min RGB value
    maxVal = max(r, g, b)   # max RGB value

    # Lightness is the midpoint of the two extremes.
    l = (maxVal + minVal) / 2.0
    if minVal == maxVal:
        return (0.0, 0.0, l)        # achromatic (gray)

    d = maxVal - minVal             # delta RGB value

    # Saturation formula depends on which half of the lightness range
    # we are in.
    if l < 0.5:
        s = d / (maxVal + minVal)
    else:
        s = d / (2.0 - maxVal - minVal)

    # Hue: offset from the dominant channel, scaled to degrees.
    dr, dg, db = [(maxVal-val) / d for val in (r, g, b)]
    if r == maxVal:
        h = db - dg
    elif g == maxVal:
        h = 2.0 + dr - db
    else:
        h = 4.0 + dg - dr
    h = (h*60.0) % 360.0
    return (h, s, l)
def function[rgb_to_hsl, parameter[r, g, b]]: constant[Convert the color from RGB coordinates to HSL. Parameters: :r: The Red component value [0...1] :g: The Green component value [0...1] :b: The Blue component value [0...1] Returns: The color as an (h, s, l) tuple in the range: h[0...360], s[0...1], l[0...1] >>> rgb_to_hsl(1, 0.5, 0) (30.0, 1.0, 0.5) ] if compare[call[name[type], parameter[name[r]]] in list[[<ast.Name object at 0x7da18fe93e20>, <ast.Name object at 0x7da18fe92f20>]]] begin[:] <ast.Tuple object at 0x7da18fe93d00> assign[=] name[r] variable[minVal] assign[=] call[name[min], parameter[name[r], name[g], name[b]]] variable[maxVal] assign[=] call[name[max], parameter[name[r], name[g], name[b]]] variable[l] assign[=] binary_operation[binary_operation[name[maxVal] + name[minVal]] / constant[2.0]] if compare[name[minVal] equal[==] name[maxVal]] begin[:] return[tuple[[<ast.Constant object at 0x7da18fe93040>, <ast.Constant object at 0x7da18fe91a50>, <ast.Name object at 0x7da18fe90340>]]] variable[d] assign[=] binary_operation[name[maxVal] - name[minVal]] if compare[name[l] less[<] constant[0.5]] begin[:] variable[s] assign[=] binary_operation[name[d] / binary_operation[name[maxVal] + name[minVal]]] <ast.Tuple object at 0x7da1b118d030> assign[=] <ast.ListComp object at 0x7da1b118cb20> if compare[name[r] equal[==] name[maxVal]] begin[:] variable[h] assign[=] binary_operation[name[db] - name[dg]] variable[h] assign[=] binary_operation[binary_operation[name[h] * constant[60.0]] <ast.Mod object at 0x7da2590d6920> constant[360.0]] return[tuple[[<ast.Name object at 0x7da1b118d180>, <ast.Name object at 0x7da1b118c340>, <ast.Name object at 0x7da1b118c7f0>]]]
keyword[def] identifier[rgb_to_hsl] ( identifier[r] , identifier[g] = keyword[None] , identifier[b] = keyword[None] ): literal[string] keyword[if] identifier[type] ( identifier[r] ) keyword[in] [ identifier[list] , identifier[tuple] ]: identifier[r] , identifier[g] , identifier[b] = identifier[r] identifier[minVal] = identifier[min] ( identifier[r] , identifier[g] , identifier[b] ) identifier[maxVal] = identifier[max] ( identifier[r] , identifier[g] , identifier[b] ) identifier[l] =( identifier[maxVal] + identifier[minVal] )/ literal[int] keyword[if] identifier[minVal] == identifier[maxVal] : keyword[return] ( literal[int] , literal[int] , identifier[l] ) identifier[d] = identifier[maxVal] - identifier[minVal] keyword[if] identifier[l] < literal[int] : identifier[s] = identifier[d] /( identifier[maxVal] + identifier[minVal] ) keyword[else] : identifier[s] = identifier[d] /( literal[int] - identifier[maxVal] - identifier[minVal] ) identifier[dr] , identifier[dg] , identifier[db] =[( identifier[maxVal] - identifier[val] )/ identifier[d] keyword[for] identifier[val] keyword[in] ( identifier[r] , identifier[g] , identifier[b] )] keyword[if] identifier[r] == identifier[maxVal] : identifier[h] = identifier[db] - identifier[dg] keyword[elif] identifier[g] == identifier[maxVal] : identifier[h] = literal[int] + identifier[dr] - identifier[db] keyword[else] : identifier[h] = literal[int] + identifier[dg] - identifier[dr] identifier[h] =( identifier[h] * literal[int] )% literal[int] keyword[return] ( identifier[h] , identifier[s] , identifier[l] )
def rgb_to_hsl(r, g=None, b=None): """Convert the color from RGB coordinates to HSL. Parameters: :r: The Red component value [0...1] :g: The Green component value [0...1] :b: The Blue component value [0...1] Returns: The color as an (h, s, l) tuple in the range: h[0...360], s[0...1], l[0...1] >>> rgb_to_hsl(1, 0.5, 0) (30.0, 1.0, 0.5) """ if type(r) in [list, tuple]: (r, g, b) = r # depends on [control=['if'], data=[]] minVal = min(r, g, b) # min RGB value maxVal = max(r, g, b) # max RGB value l = (maxVal + minVal) / 2.0 if minVal == maxVal: return (0.0, 0.0, l) # achromatic (gray) # depends on [control=['if'], data=[]] d = maxVal - minVal # delta RGB value if l < 0.5: s = d / (maxVal + minVal) # depends on [control=['if'], data=[]] else: s = d / (2.0 - maxVal - minVal) (dr, dg, db) = [(maxVal - val) / d for val in (r, g, b)] if r == maxVal: h = db - dg # depends on [control=['if'], data=[]] elif g == maxVal: h = 2.0 + dr - db # depends on [control=['if'], data=[]] else: h = 4.0 + dg - dr h = h * 60.0 % 360.0 return (h, s, l)
def ucs_manager_connect(self, ucsm_ip):
    """Connects to a UCS Manager.

    Returns the logged-in SDK handle, or None when no usable login
    credentials are configured for this UCSM.  Raises UcsmConnectFailed
    when the login attempt itself fails.
    """
    # Lazily import the UCSM SDK the first time it is needed.
    if not self.ucsmsdk:
        self.ucsmsdk = self._import_ucsmsdk()

    credentials = CONF.ml2_cisco_ucsm.ucsms.get(ucsm_ip)
    if (not credentials or not credentials.ucsm_username
            or not credentials.ucsm_password):
        LOG.error('UCS Manager network driver failed to get login '
                  'credentials for UCSM %s', ucsm_ip)
        return None

    session = self.ucsmsdk.handle(ucsm_ip,
                                  credentials.ucsm_username,
                                  credentials.ucsm_password)
    try:
        session.login()
    except Exception as e:
        # Raise a Neutron exception.  Include a description of
        # the original exception.
        raise cexc.UcsmConnectFailed(ucsm_ip=ucsm_ip, exc=e)
    return session
def function[ucs_manager_connect, parameter[self, ucsm_ip]]: constant[Connects to a UCS Manager.] if <ast.UnaryOp object at 0x7da1b1a5e0b0> begin[:] name[self].ucsmsdk assign[=] call[name[self]._import_ucsmsdk, parameter[]] variable[ucsm] assign[=] call[name[CONF].ml2_cisco_ucsm.ucsms.get, parameter[name[ucsm_ip]]] if <ast.BoolOp object at 0x7da1b1a5ef80> begin[:] call[name[LOG].error, parameter[constant[UCS Manager network driver failed to get login credentials for UCSM %s], name[ucsm_ip]]] return[constant[None]] variable[handle] assign[=] call[name[self].ucsmsdk.handle, parameter[name[ucsm_ip], name[ucsm].ucsm_username, name[ucsm].ucsm_password]] <ast.Try object at 0x7da1b1a5de70> return[name[handle]]
keyword[def] identifier[ucs_manager_connect] ( identifier[self] , identifier[ucsm_ip] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[ucsmsdk] : identifier[self] . identifier[ucsmsdk] = identifier[self] . identifier[_import_ucsmsdk] () identifier[ucsm] = identifier[CONF] . identifier[ml2_cisco_ucsm] . identifier[ucsms] . identifier[get] ( identifier[ucsm_ip] ) keyword[if] keyword[not] identifier[ucsm] keyword[or] keyword[not] identifier[ucsm] . identifier[ucsm_username] keyword[or] keyword[not] identifier[ucsm] . identifier[ucsm_password] : identifier[LOG] . identifier[error] ( literal[string] literal[string] , identifier[ucsm_ip] ) keyword[return] keyword[None] identifier[handle] = identifier[self] . identifier[ucsmsdk] . identifier[handle] ( identifier[ucsm_ip] , identifier[ucsm] . identifier[ucsm_username] , identifier[ucsm] . identifier[ucsm_password] ) keyword[try] : identifier[handle] . identifier[login] () keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[raise] identifier[cexc] . identifier[UcsmConnectFailed] ( identifier[ucsm_ip] = identifier[ucsm_ip] , identifier[exc] = identifier[e] ) keyword[return] identifier[handle]
def ucs_manager_connect(self, ucsm_ip): """Connects to a UCS Manager.""" if not self.ucsmsdk: self.ucsmsdk = self._import_ucsmsdk() # depends on [control=['if'], data=[]] ucsm = CONF.ml2_cisco_ucsm.ucsms.get(ucsm_ip) if not ucsm or not ucsm.ucsm_username or (not ucsm.ucsm_password): LOG.error('UCS Manager network driver failed to get login credentials for UCSM %s', ucsm_ip) return None # depends on [control=['if'], data=[]] handle = self.ucsmsdk.handle(ucsm_ip, ucsm.ucsm_username, ucsm.ucsm_password) try: handle.login() # depends on [control=['try'], data=[]] except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConnectFailed(ucsm_ip=ucsm_ip, exc=e) # depends on [control=['except'], data=['e']] return handle
def F(self, value):
    """State Transition matrix"""
    # Keep the cached inverse in sync with the matrix itself.
    matrix = value
    self._F = matrix
    self._F_inv = self.inv(matrix)
def function[F, parameter[self, value]]: constant[State Transition matrix] name[self]._F assign[=] name[value] name[self]._F_inv assign[=] call[name[self].inv, parameter[name[self]._F]]
keyword[def] identifier[F] ( identifier[self] , identifier[value] ): literal[string] identifier[self] . identifier[_F] = identifier[value] identifier[self] . identifier[_F_inv] = identifier[self] . identifier[inv] ( identifier[self] . identifier[_F] )
def F(self, value): """State Transition matrix""" self._F = value self._F_inv = self.inv(self._F)
def get_labels(self):
    """
    :calls: `GET /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label`
    """
    # Lazily paginated: pages are fetched from the API as iterated.
    labels_url = self.issue_url + "/labels"
    return github.PaginatedList.PaginatedList(
        github.Label.Label,
        self._requester,
        labels_url,
        None,
    )
def function[get_labels, parameter[self]]: constant[ :calls: `GET /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label` ] return[call[name[github].PaginatedList.PaginatedList, parameter[name[github].Label.Label, name[self]._requester, binary_operation[name[self].issue_url + constant[/labels]], constant[None]]]]
keyword[def] identifier[get_labels] ( identifier[self] ): literal[string] keyword[return] identifier[github] . identifier[PaginatedList] . identifier[PaginatedList] ( identifier[github] . identifier[Label] . identifier[Label] , identifier[self] . identifier[_requester] , identifier[self] . identifier[issue_url] + literal[string] , keyword[None] )
def get_labels(self): """ :calls: `GET /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Label.Label` """ return github.PaginatedList.PaginatedList(github.Label.Label, self._requester, self.issue_url + '/labels', None)
def init(plugindirs=None):
    """Initialize the plugin framework.

    Initializes the builtin translator plugins, any installed
    'pyang.plugin' entry points, and plugin modules found in the standard
    plugin/transform directories, PYANG_PLUGINPATH, and ``plugindirs``.

    Args:
      plugindirs: optional list of extra directories to scan for plugin
        modules.  (A None default replaces the former mutable default
        argument, which accumulated the builtin/env paths across calls.)
    """
    if plugindirs is None:
        plugindirs = []

    # initialize the builtin plugins
    from .translators import yang,yin,dsdl
    yang.pyang_plugin_init()
    yin.pyang_plugin_init()
    dsdl.pyang_plugin_init()

    # initialize installed plugins
    for ep in pkg_resources.iter_entry_points(group='pyang.plugin'):
        plugin_init = ep.load()
        plugin_init()

    # search for plugins in std directories (plugins directory first)
    basedir = os.path.split(sys.modules['pyang'].__file__)[0]
    plugindirs.insert(0, basedir + "/transforms")
    plugindirs.insert(0, basedir + "/plugins")

    # add paths from env
    pluginpath = os.getenv('PYANG_PLUGINPATH')
    if pluginpath is not None:
        plugindirs.extend(pluginpath.split(os.pathsep))

    syspath = sys.path
    for plugindir in plugindirs:
        # Make the plugin directory importable for this iteration.
        sys.path = [plugindir] + syspath
        try:
            fnames = os.listdir(plugindir)
        except OSError:
            continue

        # Collect candidate module names, skipping editor droppings and
        # package markers; de-duplicate .py/.pyc pairs.
        modnames = []
        for fname in fnames:
            if (fname.startswith(".#") or
                fname.startswith("__init__.py") or
                fname.endswith("_flymake.py") or
                fname.endswith("_flymake.pyc")):
                continue
            if fname.endswith(".py"):
                modname = fname[:-3]
            elif fname.endswith(".pyc"):
                modname = fname[:-4]
            else:
                continue
            if modname not in modnames:
                modnames.append(modname)

        for modname in modnames:
            pluginmod = __import__(modname)
            try:
                pluginmod.pyang_plugin_init()
            except AttributeError as s:
                print(pluginmod.__dict__)
                raise AttributeError(pluginmod.__file__ + ': ' + str(s))

    # Restore the original import path.
    sys.path = syspath
def function[init, parameter[plugindirs]]: constant[Initialize the plugin framework] from relative_module[translators] import module[yang], module[yin], module[dsdl] call[name[yang].pyang_plugin_init, parameter[]] call[name[yin].pyang_plugin_init, parameter[]] call[name[dsdl].pyang_plugin_init, parameter[]] for taget[name[ep]] in starred[call[name[pkg_resources].iter_entry_points, parameter[]]] begin[:] variable[plugin_init] assign[=] call[name[ep].load, parameter[]] call[name[plugin_init], parameter[]] variable[basedir] assign[=] call[call[name[os].path.split, parameter[call[name[sys].modules][constant[pyang]].__file__]]][constant[0]] call[name[plugindirs].insert, parameter[constant[0], binary_operation[name[basedir] + constant[/transforms]]]] call[name[plugindirs].insert, parameter[constant[0], binary_operation[name[basedir] + constant[/plugins]]]] variable[pluginpath] assign[=] call[name[os].getenv, parameter[constant[PYANG_PLUGINPATH]]] if compare[name[pluginpath] is_not constant[None]] begin[:] call[name[plugindirs].extend, parameter[call[name[pluginpath].split, parameter[name[os].pathsep]]]] variable[syspath] assign[=] name[sys].path for taget[name[plugindir]] in starred[name[plugindirs]] begin[:] name[sys].path assign[=] binary_operation[list[[<ast.Name object at 0x7da20cabfbb0>]] + name[syspath]] <ast.Try object at 0x7da20cabe9b0> variable[modnames] assign[=] list[[]] for taget[name[fname]] in starred[name[fnames]] begin[:] if <ast.BoolOp object at 0x7da20cabd930> begin[:] pass for taget[name[modname]] in starred[name[modnames]] begin[:] variable[pluginmod] assign[=] call[name[__import__], parameter[name[modname]]] <ast.Try object at 0x7da20c990340> name[sys].path assign[=] name[syspath]
keyword[def] identifier[init] ( identifier[plugindirs] =[]): literal[string] keyword[from] . identifier[translators] keyword[import] identifier[yang] , identifier[yin] , identifier[dsdl] identifier[yang] . identifier[pyang_plugin_init] () identifier[yin] . identifier[pyang_plugin_init] () identifier[dsdl] . identifier[pyang_plugin_init] () keyword[for] identifier[ep] keyword[in] identifier[pkg_resources] . identifier[iter_entry_points] ( identifier[group] = literal[string] ): identifier[plugin_init] = identifier[ep] . identifier[load] () identifier[plugin_init] () identifier[basedir] = identifier[os] . identifier[path] . identifier[split] ( identifier[sys] . identifier[modules] [ literal[string] ]. identifier[__file__] )[ literal[int] ] identifier[plugindirs] . identifier[insert] ( literal[int] , identifier[basedir] + literal[string] ) identifier[plugindirs] . identifier[insert] ( literal[int] , identifier[basedir] + literal[string] ) identifier[pluginpath] = identifier[os] . identifier[getenv] ( literal[string] ) keyword[if] identifier[pluginpath] keyword[is] keyword[not] keyword[None] : identifier[plugindirs] . identifier[extend] ( identifier[pluginpath] . identifier[split] ( identifier[os] . identifier[pathsep] )) identifier[syspath] = identifier[sys] . identifier[path] keyword[for] identifier[plugindir] keyword[in] identifier[plugindirs] : identifier[sys] . identifier[path] =[ identifier[plugindir] ]+ identifier[syspath] keyword[try] : identifier[fnames] = identifier[os] . identifier[listdir] ( identifier[plugindir] ) keyword[except] identifier[OSError] : keyword[continue] identifier[modnames] =[] keyword[for] identifier[fname] keyword[in] identifier[fnames] : keyword[if] ( identifier[fname] . identifier[startswith] ( literal[string] ) keyword[or] identifier[fname] . identifier[startswith] ( literal[string] ) keyword[or] identifier[fname] . identifier[endswith] ( literal[string] ) keyword[or] identifier[fname] . 
identifier[endswith] ( literal[string] )): keyword[pass] keyword[elif] identifier[fname] . identifier[endswith] ( literal[string] ): identifier[modname] = identifier[fname] [:- literal[int] ] keyword[if] identifier[modname] keyword[not] keyword[in] identifier[modnames] : identifier[modnames] . identifier[append] ( identifier[modname] ) keyword[elif] identifier[fname] . identifier[endswith] ( literal[string] ): identifier[modname] = identifier[fname] [:- literal[int] ] keyword[if] identifier[modname] keyword[not] keyword[in] identifier[modnames] : identifier[modnames] . identifier[append] ( identifier[modname] ) keyword[for] identifier[modname] keyword[in] identifier[modnames] : identifier[pluginmod] = identifier[__import__] ( identifier[modname] ) keyword[try] : identifier[pluginmod] . identifier[pyang_plugin_init] () keyword[except] identifier[AttributeError] keyword[as] identifier[s] : identifier[print] ( identifier[pluginmod] . identifier[__dict__] ) keyword[raise] identifier[AttributeError] ( identifier[pluginmod] . identifier[__file__] + literal[string] + identifier[str] ( identifier[s] )) identifier[sys] . identifier[path] = identifier[syspath]
def init(plugindirs=[]): """Initialize the plugin framework""" # initialize the builtin plugins from .translators import yang, yin, dsdl yang.pyang_plugin_init() yin.pyang_plugin_init() dsdl.pyang_plugin_init() # initialize installed plugins for ep in pkg_resources.iter_entry_points(group='pyang.plugin'): plugin_init = ep.load() plugin_init() # depends on [control=['for'], data=['ep']] # search for plugins in std directories (plugins directory first) basedir = os.path.split(sys.modules['pyang'].__file__)[0] plugindirs.insert(0, basedir + '/transforms') plugindirs.insert(0, basedir + '/plugins') # add paths from env pluginpath = os.getenv('PYANG_PLUGINPATH') if pluginpath is not None: plugindirs.extend(pluginpath.split(os.pathsep)) # depends on [control=['if'], data=['pluginpath']] syspath = sys.path for plugindir in plugindirs: sys.path = [plugindir] + syspath try: fnames = os.listdir(plugindir) # depends on [control=['try'], data=[]] except OSError: continue # depends on [control=['except'], data=[]] modnames = [] for fname in fnames: if fname.startswith('.#') or fname.startswith('__init__.py') or fname.endswith('_flymake.py') or fname.endswith('_flymake.pyc'): pass # depends on [control=['if'], data=[]] elif fname.endswith('.py'): modname = fname[:-3] if modname not in modnames: modnames.append(modname) # depends on [control=['if'], data=['modname', 'modnames']] # depends on [control=['if'], data=[]] elif fname.endswith('.pyc'): modname = fname[:-4] if modname not in modnames: modnames.append(modname) # depends on [control=['if'], data=['modname', 'modnames']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']] for modname in modnames: pluginmod = __import__(modname) try: pluginmod.pyang_plugin_init() # depends on [control=['try'], data=[]] except AttributeError as s: print(pluginmod.__dict__) raise AttributeError(pluginmod.__file__ + ': ' + str(s)) # depends on [control=['except'], data=['s']] # depends on [control=['for'], 
data=['modname']] sys.path = syspath # depends on [control=['for'], data=['plugindir']]
def set_palette_name(self, palette_name): """If the given palette matches an existing one, shows it in the combobox """ combo = self.get_widget('palette_name') found = False log.debug("wanting palette: %r", palette_name) for i in combo.get_model(): if i[0] == palette_name: combo.set_active_iter(i.iter) found = True break if not found: combo.set_active(self.custom_palette_index)
def function[set_palette_name, parameter[self, palette_name]]: constant[If the given palette matches an existing one, shows it in the combobox ] variable[combo] assign[=] call[name[self].get_widget, parameter[constant[palette_name]]] variable[found] assign[=] constant[False] call[name[log].debug, parameter[constant[wanting palette: %r], name[palette_name]]] for taget[name[i]] in starred[call[name[combo].get_model, parameter[]]] begin[:] if compare[call[name[i]][constant[0]] equal[==] name[palette_name]] begin[:] call[name[combo].set_active_iter, parameter[name[i].iter]] variable[found] assign[=] constant[True] break if <ast.UnaryOp object at 0x7da2041d9b10> begin[:] call[name[combo].set_active, parameter[name[self].custom_palette_index]]
keyword[def] identifier[set_palette_name] ( identifier[self] , identifier[palette_name] ): literal[string] identifier[combo] = identifier[self] . identifier[get_widget] ( literal[string] ) identifier[found] = keyword[False] identifier[log] . identifier[debug] ( literal[string] , identifier[palette_name] ) keyword[for] identifier[i] keyword[in] identifier[combo] . identifier[get_model] (): keyword[if] identifier[i] [ literal[int] ]== identifier[palette_name] : identifier[combo] . identifier[set_active_iter] ( identifier[i] . identifier[iter] ) identifier[found] = keyword[True] keyword[break] keyword[if] keyword[not] identifier[found] : identifier[combo] . identifier[set_active] ( identifier[self] . identifier[custom_palette_index] )
def set_palette_name(self, palette_name): """If the given palette matches an existing one, shows it in the combobox """ combo = self.get_widget('palette_name') found = False log.debug('wanting palette: %r', palette_name) for i in combo.get_model(): if i[0] == palette_name: combo.set_active_iter(i.iter) found = True break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] if not found: combo.set_active(self.custom_palette_index) # depends on [control=['if'], data=[]]
def set_permission(self, permission, value, parent=False, admin=False): """ Sets permission for personal information. Returns False silently if unable to set permission. Returns True if successful. """ try: if not getattr(self, 'parent_{}'.format(permission)) and not parent and not admin: return False level = 'parent' if parent else 'self' setattr(self, '{}_{}'.format(level, permission), value) # Set student permission to false if parent sets permission to false. if parent and not value: setattr(self, 'self_{}'.format(permission), False) self.save() return True except Exception as e: logger.error("Error occurred setting permission {} to {}: {}".format(permission, value, e)) return False
def function[set_permission, parameter[self, permission, value, parent, admin]]: constant[ Sets permission for personal information. Returns False silently if unable to set permission. Returns True if successful. ] <ast.Try object at 0x7da1b04bdf60>
keyword[def] identifier[set_permission] ( identifier[self] , identifier[permission] , identifier[value] , identifier[parent] = keyword[False] , identifier[admin] = keyword[False] ): literal[string] keyword[try] : keyword[if] keyword[not] identifier[getattr] ( identifier[self] , literal[string] . identifier[format] ( identifier[permission] )) keyword[and] keyword[not] identifier[parent] keyword[and] keyword[not] identifier[admin] : keyword[return] keyword[False] identifier[level] = literal[string] keyword[if] identifier[parent] keyword[else] literal[string] identifier[setattr] ( identifier[self] , literal[string] . identifier[format] ( identifier[level] , identifier[permission] ), identifier[value] ) keyword[if] identifier[parent] keyword[and] keyword[not] identifier[value] : identifier[setattr] ( identifier[self] , literal[string] . identifier[format] ( identifier[permission] ), keyword[False] ) identifier[self] . identifier[save] () keyword[return] keyword[True] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[permission] , identifier[value] , identifier[e] )) keyword[return] keyword[False]
def set_permission(self, permission, value, parent=False, admin=False): """ Sets permission for personal information. Returns False silently if unable to set permission. Returns True if successful. """ try: if not getattr(self, 'parent_{}'.format(permission)) and (not parent) and (not admin): return False # depends on [control=['if'], data=[]] level = 'parent' if parent else 'self' setattr(self, '{}_{}'.format(level, permission), value) # Set student permission to false if parent sets permission to false. if parent and (not value): setattr(self, 'self_{}'.format(permission), False) # depends on [control=['if'], data=[]] self.save() return True # depends on [control=['try'], data=[]] except Exception as e: logger.error('Error occurred setting permission {} to {}: {}'.format(permission, value, e)) return False # depends on [control=['except'], data=['e']]
def parseFromDelimitedString(obj, buf, offset=0): """ Stanford CoreNLP uses the Java "writeDelimitedTo" function, which writes the size (and offset) of the buffer before writing the object. This function handles parsing this message starting from offset 0. @returns how many bytes of @buf were consumed. """ size, pos = _DecodeVarint(buf, offset) obj.ParseFromString(buf[offset+pos:offset+pos+size]) return pos+size
def function[parseFromDelimitedString, parameter[obj, buf, offset]]: constant[ Stanford CoreNLP uses the Java "writeDelimitedTo" function, which writes the size (and offset) of the buffer before writing the object. This function handles parsing this message starting from offset 0. @returns how many bytes of @buf were consumed. ] <ast.Tuple object at 0x7da1b11ee740> assign[=] call[name[_DecodeVarint], parameter[name[buf], name[offset]]] call[name[obj].ParseFromString, parameter[call[name[buf]][<ast.Slice object at 0x7da1b10752a0>]]] return[binary_operation[name[pos] + name[size]]]
keyword[def] identifier[parseFromDelimitedString] ( identifier[obj] , identifier[buf] , identifier[offset] = literal[int] ): literal[string] identifier[size] , identifier[pos] = identifier[_DecodeVarint] ( identifier[buf] , identifier[offset] ) identifier[obj] . identifier[ParseFromString] ( identifier[buf] [ identifier[offset] + identifier[pos] : identifier[offset] + identifier[pos] + identifier[size] ]) keyword[return] identifier[pos] + identifier[size]
def parseFromDelimitedString(obj, buf, offset=0): """ Stanford CoreNLP uses the Java "writeDelimitedTo" function, which writes the size (and offset) of the buffer before writing the object. This function handles parsing this message starting from offset 0. @returns how many bytes of @buf were consumed. """ (size, pos) = _DecodeVarint(buf, offset) obj.ParseFromString(buf[offset + pos:offset + pos + size]) return pos + size
def _import_submodules(package_name): """ Import all submodules of a module, recursively Adapted from: http://stackoverflow.com/a/25083161 :param package_name: Package name :type package_name: str :rtype: dict[types.ModuleType] """ package = sys.modules[package_name] out = {} for loader, name, is_pkg in pkgutil.walk_packages(package.__path__): try: #module = importlib.import_module('{}.{}'.format(package_name, name)) module = importlib.import_module('.{}'.format(name), package=package_name) out[name] = module except: continue return out
def function[_import_submodules, parameter[package_name]]: constant[ Import all submodules of a module, recursively Adapted from: http://stackoverflow.com/a/25083161 :param package_name: Package name :type package_name: str :rtype: dict[types.ModuleType] ] variable[package] assign[=] call[name[sys].modules][name[package_name]] variable[out] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da207f01870>, <ast.Name object at 0x7da207f039a0>, <ast.Name object at 0x7da207f03940>]]] in starred[call[name[pkgutil].walk_packages, parameter[name[package].__path__]]] begin[:] <ast.Try object at 0x7da207f031c0> return[name[out]]
keyword[def] identifier[_import_submodules] ( identifier[package_name] ): literal[string] identifier[package] = identifier[sys] . identifier[modules] [ identifier[package_name] ] identifier[out] ={} keyword[for] identifier[loader] , identifier[name] , identifier[is_pkg] keyword[in] identifier[pkgutil] . identifier[walk_packages] ( identifier[package] . identifier[__path__] ): keyword[try] : identifier[module] = identifier[importlib] . identifier[import_module] ( literal[string] . identifier[format] ( identifier[name] ), identifier[package] = identifier[package_name] ) identifier[out] [ identifier[name] ]= identifier[module] keyword[except] : keyword[continue] keyword[return] identifier[out]
def _import_submodules(package_name): """ Import all submodules of a module, recursively Adapted from: http://stackoverflow.com/a/25083161 :param package_name: Package name :type package_name: str :rtype: dict[types.ModuleType] """ package = sys.modules[package_name] out = {} for (loader, name, is_pkg) in pkgutil.walk_packages(package.__path__): try: #module = importlib.import_module('{}.{}'.format(package_name, name)) module = importlib.import_module('.{}'.format(name), package=package_name) out[name] = module # depends on [control=['try'], data=[]] except: continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] return out
def export(self, last_checkpoint, output_dir): """Builds a prediction graph and xports the model. Args: last_checkpoint: Path to the latest checkpoint file from training. output_dir: Path to the folder to be used to output the model. """ logging.info('Exporting prediction graph to %s', output_dir) with tf.Session(graph=tf.Graph()) as sess: # Build and save prediction meta graph and trained variable values. inputs, outputs = self.build_prediction_graph() signature_def_map = { 'serving_default': signature_def_utils.predict_signature_def(inputs, outputs) } init_op = tf.global_variables_initializer() sess.run(init_op) self.restore_from_checkpoint(sess, self.inception_checkpoint_file, last_checkpoint) init_op_serving = control_flow_ops.group( variables.local_variables_initializer(), tf.tables_initializer()) builder = saved_model_builder.SavedModelBuilder(output_dir) builder.add_meta_graph_and_variables( sess, [tag_constants.SERVING], signature_def_map=signature_def_map, legacy_init_op=init_op_serving) builder.save(False)
def function[export, parameter[self, last_checkpoint, output_dir]]: constant[Builds a prediction graph and xports the model. Args: last_checkpoint: Path to the latest checkpoint file from training. output_dir: Path to the folder to be used to output the model. ] call[name[logging].info, parameter[constant[Exporting prediction graph to %s], name[output_dir]]] with call[name[tf].Session, parameter[]] begin[:] <ast.Tuple object at 0x7da18f00ee90> assign[=] call[name[self].build_prediction_graph, parameter[]] variable[signature_def_map] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e4b80>], [<ast.Call object at 0x7da20c6e7790>]] variable[init_op] assign[=] call[name[tf].global_variables_initializer, parameter[]] call[name[sess].run, parameter[name[init_op]]] call[name[self].restore_from_checkpoint, parameter[name[sess], name[self].inception_checkpoint_file, name[last_checkpoint]]] variable[init_op_serving] assign[=] call[name[control_flow_ops].group, parameter[call[name[variables].local_variables_initializer, parameter[]], call[name[tf].tables_initializer, parameter[]]]] variable[builder] assign[=] call[name[saved_model_builder].SavedModelBuilder, parameter[name[output_dir]]] call[name[builder].add_meta_graph_and_variables, parameter[name[sess], list[[<ast.Attribute object at 0x7da20c6e5c60>]]]] call[name[builder].save, parameter[constant[False]]]
keyword[def] identifier[export] ( identifier[self] , identifier[last_checkpoint] , identifier[output_dir] ): literal[string] identifier[logging] . identifier[info] ( literal[string] , identifier[output_dir] ) keyword[with] identifier[tf] . identifier[Session] ( identifier[graph] = identifier[tf] . identifier[Graph] ()) keyword[as] identifier[sess] : identifier[inputs] , identifier[outputs] = identifier[self] . identifier[build_prediction_graph] () identifier[signature_def_map] ={ literal[string] : identifier[signature_def_utils] . identifier[predict_signature_def] ( identifier[inputs] , identifier[outputs] ) } identifier[init_op] = identifier[tf] . identifier[global_variables_initializer] () identifier[sess] . identifier[run] ( identifier[init_op] ) identifier[self] . identifier[restore_from_checkpoint] ( identifier[sess] , identifier[self] . identifier[inception_checkpoint_file] , identifier[last_checkpoint] ) identifier[init_op_serving] = identifier[control_flow_ops] . identifier[group] ( identifier[variables] . identifier[local_variables_initializer] (), identifier[tf] . identifier[tables_initializer] ()) identifier[builder] = identifier[saved_model_builder] . identifier[SavedModelBuilder] ( identifier[output_dir] ) identifier[builder] . identifier[add_meta_graph_and_variables] ( identifier[sess] ,[ identifier[tag_constants] . identifier[SERVING] ], identifier[signature_def_map] = identifier[signature_def_map] , identifier[legacy_init_op] = identifier[init_op_serving] ) identifier[builder] . identifier[save] ( keyword[False] )
def export(self, last_checkpoint, output_dir): """Builds a prediction graph and xports the model. Args: last_checkpoint: Path to the latest checkpoint file from training. output_dir: Path to the folder to be used to output the model. """ logging.info('Exporting prediction graph to %s', output_dir) with tf.Session(graph=tf.Graph()) as sess: # Build and save prediction meta graph and trained variable values. (inputs, outputs) = self.build_prediction_graph() signature_def_map = {'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)} init_op = tf.global_variables_initializer() sess.run(init_op) self.restore_from_checkpoint(sess, self.inception_checkpoint_file, last_checkpoint) init_op_serving = control_flow_ops.group(variables.local_variables_initializer(), tf.tables_initializer()) builder = saved_model_builder.SavedModelBuilder(output_dir) builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING], signature_def_map=signature_def_map, legacy_init_op=init_op_serving) builder.save(False) # depends on [control=['with'], data=['sess']]
def review_args(self, obj, show_repr=False, heading='Arguments'): """ Reviews the given argument specification. Can review the meta-arguments (launch_args) or the arguments themselves. """ args = obj.args if isinstance(obj, Launcher) else obj print('\n%s\n' % self.summary_heading(heading)) args.summary() if show_repr: print("\n%s\n" % args) response = self.input_options(['y', 'N','quit'], '\nShow available argument specifier entries?', default='n') if response == 'quit': return False if response == 'y': args.show() print('') return True
def function[review_args, parameter[self, obj, show_repr, heading]]: constant[ Reviews the given argument specification. Can review the meta-arguments (launch_args) or the arguments themselves. ] variable[args] assign[=] <ast.IfExp object at 0x7da1afe0c1f0> call[name[print], parameter[binary_operation[constant[ %s ] <ast.Mod object at 0x7da2590d6920> call[name[self].summary_heading, parameter[name[heading]]]]]] call[name[args].summary, parameter[]] if name[show_repr] begin[:] call[name[print], parameter[binary_operation[constant[ %s ] <ast.Mod object at 0x7da2590d6920> name[args]]]] variable[response] assign[=] call[name[self].input_options, parameter[list[[<ast.Constant object at 0x7da1afe3aa70>, <ast.Constant object at 0x7da1afe3ae00>, <ast.Constant object at 0x7da1afe39ae0>]], constant[ Show available argument specifier entries?]]] if compare[name[response] equal[==] constant[quit]] begin[:] return[constant[False]] if compare[name[response] equal[==] constant[y]] begin[:] call[name[args].show, parameter[]] call[name[print], parameter[constant[]]] return[constant[True]]
keyword[def] identifier[review_args] ( identifier[self] , identifier[obj] , identifier[show_repr] = keyword[False] , identifier[heading] = literal[string] ): literal[string] identifier[args] = identifier[obj] . identifier[args] keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Launcher] ) keyword[else] identifier[obj] identifier[print] ( literal[string] % identifier[self] . identifier[summary_heading] ( identifier[heading] )) identifier[args] . identifier[summary] () keyword[if] identifier[show_repr] : identifier[print] ( literal[string] % identifier[args] ) identifier[response] = identifier[self] . identifier[input_options] ([ literal[string] , literal[string] , literal[string] ], literal[string] , identifier[default] = literal[string] ) keyword[if] identifier[response] == literal[string] : keyword[return] keyword[False] keyword[if] identifier[response] == literal[string] : identifier[args] . identifier[show] () identifier[print] ( literal[string] ) keyword[return] keyword[True]
def review_args(self, obj, show_repr=False, heading='Arguments'): """ Reviews the given argument specification. Can review the meta-arguments (launch_args) or the arguments themselves. """ args = obj.args if isinstance(obj, Launcher) else obj print('\n%s\n' % self.summary_heading(heading)) args.summary() if show_repr: print('\n%s\n' % args) # depends on [control=['if'], data=[]] response = self.input_options(['y', 'N', 'quit'], '\nShow available argument specifier entries?', default='n') if response == 'quit': return False # depends on [control=['if'], data=[]] if response == 'y': args.show() # depends on [control=['if'], data=[]] print('') return True
def voronoi_from_pixel_centers(pixel_centers): """Compute the Voronoi grid of the pixelization, using the pixel centers. Parameters ---------- pixel_centers : ndarray The (y,x) centre of every Voronoi pixel. """ return scipy.spatial.Voronoi(np.asarray([pixel_centers[:, 1], pixel_centers[:, 0]]).T, qhull_options='Qbb Qc Qx Qm')
def function[voronoi_from_pixel_centers, parameter[pixel_centers]]: constant[Compute the Voronoi grid of the pixelization, using the pixel centers. Parameters ---------- pixel_centers : ndarray The (y,x) centre of every Voronoi pixel. ] return[call[name[scipy].spatial.Voronoi, parameter[call[name[np].asarray, parameter[list[[<ast.Subscript object at 0x7da20c76d780>, <ast.Subscript object at 0x7da20c76e620>]]]].T]]]
keyword[def] identifier[voronoi_from_pixel_centers] ( identifier[pixel_centers] ): literal[string] keyword[return] identifier[scipy] . identifier[spatial] . identifier[Voronoi] ( identifier[np] . identifier[asarray] ([ identifier[pixel_centers] [:, literal[int] ], identifier[pixel_centers] [:, literal[int] ]]). identifier[T] , identifier[qhull_options] = literal[string] )
def voronoi_from_pixel_centers(pixel_centers): """Compute the Voronoi grid of the pixelization, using the pixel centers. Parameters ---------- pixel_centers : ndarray The (y,x) centre of every Voronoi pixel. """ return scipy.spatial.Voronoi(np.asarray([pixel_centers[:, 1], pixel_centers[:, 0]]).T, qhull_options='Qbb Qc Qx Qm')
def metadata(self): """Return extracted metadata.""" output = dict(self.defaults) output.update(self.extra_metadata) return output
def function[metadata, parameter[self]]: constant[Return extracted metadata.] variable[output] assign[=] call[name[dict], parameter[name[self].defaults]] call[name[output].update, parameter[name[self].extra_metadata]] return[name[output]]
keyword[def] identifier[metadata] ( identifier[self] ): literal[string] identifier[output] = identifier[dict] ( identifier[self] . identifier[defaults] ) identifier[output] . identifier[update] ( identifier[self] . identifier[extra_metadata] ) keyword[return] identifier[output]
def metadata(self): """Return extracted metadata.""" output = dict(self.defaults) output.update(self.extra_metadata) return output
def start_background_task(self, target, *args, **kwargs): """Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. This function returns an object compatible with the `Thread` class in the Python standard library. The `start()` method on this object is already called by this function. """ return self.eio.start_background_task(target, *args, **kwargs)
def function[start_background_task, parameter[self, target]]: constant[Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. This function returns an object compatible with the `Thread` class in the Python standard library. The `start()` method on this object is already called by this function. ] return[call[name[self].eio.start_background_task, parameter[name[target], <ast.Starred object at 0x7da1b21d5120>]]]
keyword[def] identifier[start_background_task] ( identifier[self] , identifier[target] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[eio] . identifier[start_background_task] ( identifier[target] ,* identifier[args] ,** identifier[kwargs] )
def start_background_task(self, target, *args, **kwargs): """Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. This function returns an object compatible with the `Thread` class in the Python standard library. The `start()` method on this object is already called by this function. """ return self.eio.start_background_task(target, *args, **kwargs)
def _transfers(reaction, delta, elements, result, epsilon): """Yield transfers obtained from result.""" left = set(c for c, _ in reaction.left) right = set(c for c, _ in reaction.right) for c1, c2 in product(left, right): items = {} for e in elements: v = result.get_value(delta[c1, c2, e]) nearest_int = round(v) if abs(v - nearest_int) < epsilon: v = int(nearest_int) if v >= epsilon: items[e] = v if len(items) > 0: yield (c1, c2), Formula(items)
def function[_transfers, parameter[reaction, delta, elements, result, epsilon]]: constant[Yield transfers obtained from result.] variable[left] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da20c6c44f0>]] variable[right] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da20c6c56f0>]] for taget[tuple[[<ast.Name object at 0x7da20c6c6f80>, <ast.Name object at 0x7da20c6c7610>]]] in starred[call[name[product], parameter[name[left], name[right]]]] begin[:] variable[items] assign[=] dictionary[[], []] for taget[name[e]] in starred[name[elements]] begin[:] variable[v] assign[=] call[name[result].get_value, parameter[call[name[delta]][tuple[[<ast.Name object at 0x7da20c6c5870>, <ast.Name object at 0x7da20c6c4c10>, <ast.Name object at 0x7da20c6c6a10>]]]]] variable[nearest_int] assign[=] call[name[round], parameter[name[v]]] if compare[call[name[abs], parameter[binary_operation[name[v] - name[nearest_int]]]] less[<] name[epsilon]] begin[:] variable[v] assign[=] call[name[int], parameter[name[nearest_int]]] if compare[name[v] greater_or_equal[>=] name[epsilon]] begin[:] call[name[items]][name[e]] assign[=] name[v] if compare[call[name[len], parameter[name[items]]] greater[>] constant[0]] begin[:] <ast.Yield object at 0x7da207f99150>
keyword[def] identifier[_transfers] ( identifier[reaction] , identifier[delta] , identifier[elements] , identifier[result] , identifier[epsilon] ): literal[string] identifier[left] = identifier[set] ( identifier[c] keyword[for] identifier[c] , identifier[_] keyword[in] identifier[reaction] . identifier[left] ) identifier[right] = identifier[set] ( identifier[c] keyword[for] identifier[c] , identifier[_] keyword[in] identifier[reaction] . identifier[right] ) keyword[for] identifier[c1] , identifier[c2] keyword[in] identifier[product] ( identifier[left] , identifier[right] ): identifier[items] ={} keyword[for] identifier[e] keyword[in] identifier[elements] : identifier[v] = identifier[result] . identifier[get_value] ( identifier[delta] [ identifier[c1] , identifier[c2] , identifier[e] ]) identifier[nearest_int] = identifier[round] ( identifier[v] ) keyword[if] identifier[abs] ( identifier[v] - identifier[nearest_int] )< identifier[epsilon] : identifier[v] = identifier[int] ( identifier[nearest_int] ) keyword[if] identifier[v] >= identifier[epsilon] : identifier[items] [ identifier[e] ]= identifier[v] keyword[if] identifier[len] ( identifier[items] )> literal[int] : keyword[yield] ( identifier[c1] , identifier[c2] ), identifier[Formula] ( identifier[items] )
def _transfers(reaction, delta, elements, result, epsilon): """Yield transfers obtained from result.""" left = set((c for (c, _) in reaction.left)) right = set((c for (c, _) in reaction.right)) for (c1, c2) in product(left, right): items = {} for e in elements: v = result.get_value(delta[c1, c2, e]) nearest_int = round(v) if abs(v - nearest_int) < epsilon: v = int(nearest_int) # depends on [control=['if'], data=[]] if v >= epsilon: items[e] = v # depends on [control=['if'], data=['v']] # depends on [control=['for'], data=['e']] if len(items) > 0: yield ((c1, c2), Formula(items)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def backward(A, pobs, T=None, beta_out=None): """Compute all backward coefficients. With scaling! Parameters ---------- A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i T : int, optional, default = None trajectory length. If not given, T = pobs.shape[0] will be used. beta_out : ndarray((T,N), dtype = float), optional, default = None containter for the beta result variables. If None, a new container will be created. Returns ------- beta : ndarray((T,N), dtype = float), optional, default = None beta[t,i] is the ith backward coefficient of time t. These can be used in many different algorithms related to HMMs. """ if __impl__ == __IMPL_PYTHON__: return ip.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype) elif __impl__ == __IMPL_C__: return ic.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype) else: raise RuntimeError('Nonexisting implementation selected: '+str(__impl__))
def function[backward, parameter[A, pobs, T, beta_out]]: constant[Compute all backward coefficients. With scaling! Parameters ---------- A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i T : int, optional, default = None trajectory length. If not given, T = pobs.shape[0] will be used. beta_out : ndarray((T,N), dtype = float), optional, default = None containter for the beta result variables. If None, a new container will be created. Returns ------- beta : ndarray((T,N), dtype = float), optional, default = None beta[t,i] is the ith backward coefficient of time t. These can be used in many different algorithms related to HMMs. ] if compare[name[__impl__] equal[==] name[__IMPL_PYTHON__]] begin[:] return[call[name[ip].backward, parameter[name[A], name[pobs]]]]
keyword[def] identifier[backward] ( identifier[A] , identifier[pobs] , identifier[T] = keyword[None] , identifier[beta_out] = keyword[None] ): literal[string] keyword[if] identifier[__impl__] == identifier[__IMPL_PYTHON__] : keyword[return] identifier[ip] . identifier[backward] ( identifier[A] , identifier[pobs] , identifier[T] = identifier[T] , identifier[beta_out] = identifier[beta_out] , identifier[dtype] = identifier[config] . identifier[dtype] ) keyword[elif] identifier[__impl__] == identifier[__IMPL_C__] : keyword[return] identifier[ic] . identifier[backward] ( identifier[A] , identifier[pobs] , identifier[T] = identifier[T] , identifier[beta_out] = identifier[beta_out] , identifier[dtype] = identifier[config] . identifier[dtype] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] + identifier[str] ( identifier[__impl__] ))
def backward(A, pobs, T=None, beta_out=None): """Compute all backward coefficients. With scaling! Parameters ---------- A : ndarray((N,N), dtype = float) transition matrix of the hidden states pobs : ndarray((T,N), dtype = float) pobs[t,i] is the observation probability for observation at time t given hidden state i T : int, optional, default = None trajectory length. If not given, T = pobs.shape[0] will be used. beta_out : ndarray((T,N), dtype = float), optional, default = None containter for the beta result variables. If None, a new container will be created. Returns ------- beta : ndarray((T,N), dtype = float), optional, default = None beta[t,i] is the ith backward coefficient of time t. These can be used in many different algorithms related to HMMs. """ if __impl__ == __IMPL_PYTHON__: return ip.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype) # depends on [control=['if'], data=[]] elif __impl__ == __IMPL_C__: return ic.backward(A, pobs, T=T, beta_out=beta_out, dtype=config.dtype) # depends on [control=['if'], data=[]] else: raise RuntimeError('Nonexisting implementation selected: ' + str(__impl__))
def defend_file_methods(coro):
    """
    Decorator for path-io coroutine methods that take a file object.

    The produced wrapper refuses to operate on a file wrapped by
    :py:class:`aioftp.AsyncPathIOContext`, raising :class:`ValueError`
    instead; any other file object is passed straight through to ``coro``.
    """
    @functools.wraps(coro)
    async def guarded(self, file, *args, **kwargs):
        # Wrapped file objects must go through the AsyncPathIOContext
        # interface, never through the native path-io methods directly.
        if isinstance(file, AsyncPathIOContext):
            raise ValueError("Native path io file methods can not be used "
                             "with wrapped file object")
        return await coro(self, file, *args, **kwargs)

    return guarded
def function[defend_file_methods, parameter[coro]]: constant[ Decorator. Raises exception when file methods called with wrapped by :py:class:`aioftp.AsyncPathIOContext` file object. ] <ast.AsyncFunctionDef object at 0x7da1b00b0220> return[name[wrapper]]
keyword[def] identifier[defend_file_methods] ( identifier[coro] ): literal[string] @ identifier[functools] . identifier[wraps] ( identifier[coro] ) keyword[async] keyword[def] identifier[wrapper] ( identifier[self] , identifier[file] ,* identifier[args] ,** identifier[kwargs] ): keyword[if] identifier[isinstance] ( identifier[file] , identifier[AsyncPathIOContext] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[return] keyword[await] identifier[coro] ( identifier[self] , identifier[file] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[wrapper]
def defend_file_methods(coro): """ Decorator. Raises exception when file methods called with wrapped by :py:class:`aioftp.AsyncPathIOContext` file object. """ @functools.wraps(coro) async def wrapper(self, file, *args, **kwargs): if isinstance(file, AsyncPathIOContext): raise ValueError('Native path io file methods can not be used with wrapped file object') # depends on [control=['if'], data=[]] return await coro(self, file, *args, **kwargs) return wrapper
def integrate_storage(self, timeseries, position, **kwargs):
    """
    Integrate a storage unit into the grid.

    All of the actual work happens inside
    :class:`~.grid.network.StorageControl`, which is instantiated here
    purely for its side effects; see that class for the accepted
    keyword arguments.
    """
    # The StorageControl constructor attaches the storage to this network;
    # the instance itself does not need to be kept around.
    StorageControl(
        edisgo=self,
        timeseries=timeseries,
        position=position,
        **kwargs)
def function[integrate_storage, parameter[self, timeseries, position]]: constant[ Integrates storage into grid. See :class:`~.grid.network.StorageControl` for more information. ] call[name[StorageControl], parameter[]]
keyword[def] identifier[integrate_storage] ( identifier[self] , identifier[timeseries] , identifier[position] ,** identifier[kwargs] ): literal[string] identifier[StorageControl] ( identifier[edisgo] = identifier[self] , identifier[timeseries] = identifier[timeseries] , identifier[position] = identifier[position] ,** identifier[kwargs] )
def integrate_storage(self, timeseries, position, **kwargs): """ Integrates storage into grid. See :class:`~.grid.network.StorageControl` for more information. """ StorageControl(edisgo=self, timeseries=timeseries, position=position, **kwargs)
def getDepartments(self):
    """Collect the departments of all analyses belonging to this
    Analysis Request, preserving first-seen order and skipping
    duplicates and empty values.
    """
    collected = []
    for analysis in self.getAnalyses(full_objects=True):
        dept = analysis.getDepartment()
        # Ignore empty/None departments and ones already collected.
        if not dept or dept in collected:
            continue
        collected.append(dept)
    return collected
def function[getDepartments, parameter[self]]: constant[Returns a list of the departments assigned to the Analyses from this Analysis Request ] variable[departments] assign[=] call[name[list], parameter[]] for taget[name[analysis]] in starred[call[name[self].getAnalyses, parameter[]]] begin[:] variable[department] assign[=] call[name[analysis].getDepartment, parameter[]] if <ast.BoolOp object at 0x7da2047e9270> begin[:] call[name[departments].append, parameter[name[department]]] return[name[departments]]
keyword[def] identifier[getDepartments] ( identifier[self] ): literal[string] identifier[departments] = identifier[list] () keyword[for] identifier[analysis] keyword[in] identifier[self] . identifier[getAnalyses] ( identifier[full_objects] = keyword[True] ): identifier[department] = identifier[analysis] . identifier[getDepartment] () keyword[if] identifier[department] keyword[and] identifier[department] keyword[not] keyword[in] identifier[departments] : identifier[departments] . identifier[append] ( identifier[department] ) keyword[return] identifier[departments]
def getDepartments(self): """Returns a list of the departments assigned to the Analyses from this Analysis Request """ departments = list() for analysis in self.getAnalyses(full_objects=True): department = analysis.getDepartment() if department and department not in departments: departments.append(department) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['analysis']] return departments
def lifted_pauli(pauli_sum: Union[PauliSum, PauliTerm], qubits: List[int]):
    """
    Build the dense matrix of a Pauli operator over the given qubits.

    The operator may be a single :class:`PauliTerm` or a
    :class:`PauliSum`; a bare term is promoted to a one-term sum first.
    The matrix is assembled term by term as a Kronecker product taken
    from right to left, so that qubit 0 sits on the right -- the
    ordering Quil and the QVM expect. Useful for generating the full
    Hamiltonian after a fermion-to-Pauli transformation.

    :param pauli_sum: Pauli representation of an operator.
    :param qubits: qubits in the order they are represented in the
        resulting matrix.
    :returns: the (2**n x 2**n) complex matrix of the operator.
    """
    if isinstance(pauli_sum, PauliTerm):
        pauli_sum = PauliSum([pauli_sum])

    dim = 2 ** len(qubits)
    total = np.zeros((dim, dim), dtype=np.complex128)
    for term in pauli_sum.terms:
        # Kronecker-multiply from the right so qubit 0 ends up rightmost.
        op = np.array([1])
        for qubit in qubits:
            op = np.kron(QUANTUM_GATES[term[qubit]], op)
        total += term.coefficient * op
    return total
def function[lifted_pauli, parameter[pauli_sum, qubits]]: constant[ Takes a PauliSum object along with a list of qubits and returns a matrix corresponding the tensor representation of the object. Useful for generating the full Hamiltonian after a particular fermion to pauli transformation. For example: Converting a PauliSum X0Y1 + Y1X0 into the matrix .. code-block:: python [[ 0.+0.j, 0.+0.j, 0.+0.j, 0.-2.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+2.j, 0.+0.j, 0.+0.j, 0.+0.j]] Developer note: Quil and the QVM like qubits to be ordered such that qubit 0 is on the right. Therefore, in ``qubit_adjacent_lifted_gate``, ``lifted_pauli``, and ``lifted_state_operator``, we build up the lifted matrix by performing the kronecker product from right to left. :param pauli_sum: Pauli representation of an operator :param qubits: list of qubits in the order they will be represented in the resultant matrix. :returns: matrix representation of the pauli_sum operator ] if call[name[isinstance], parameter[name[pauli_sum], name[PauliTerm]]] begin[:] variable[pauli_sum] assign[=] call[name[PauliSum], parameter[list[[<ast.Name object at 0x7da1b1bf91b0>]]]] variable[n_qubits] assign[=] call[name[len], parameter[name[qubits]]] variable[result_hilbert] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da1b1bfba00>, <ast.BinOp object at 0x7da1b1bf8d90>]]]] for taget[name[term]] in starred[name[pauli_sum].terms] begin[:] variable[term_hilbert] assign[=] call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b1bf87f0>]]]] for taget[name[qubit]] in starred[name[qubits]] begin[:] variable[term_hilbert] assign[=] call[name[np].kron, parameter[call[name[QUANTUM_GATES]][call[name[term]][name[qubit]]], name[term_hilbert]]] <ast.AugAssign object at 0x7da1b1bf8f10> return[name[result_hilbert]]
keyword[def] identifier[lifted_pauli] ( identifier[pauli_sum] : identifier[Union] [ identifier[PauliSum] , identifier[PauliTerm] ], identifier[qubits] : identifier[List] [ identifier[int] ]): literal[string] keyword[if] identifier[isinstance] ( identifier[pauli_sum] , identifier[PauliTerm] ): identifier[pauli_sum] = identifier[PauliSum] ([ identifier[pauli_sum] ]) identifier[n_qubits] = identifier[len] ( identifier[qubits] ) identifier[result_hilbert] = identifier[np] . identifier[zeros] (( literal[int] ** identifier[n_qubits] , literal[int] ** identifier[n_qubits] ), identifier[dtype] = identifier[np] . identifier[complex128] ) keyword[for] identifier[term] keyword[in] identifier[pauli_sum] . identifier[terms] : identifier[term_hilbert] = identifier[np] . identifier[array] ([ literal[int] ]) keyword[for] identifier[qubit] keyword[in] identifier[qubits] : identifier[term_hilbert] = identifier[np] . identifier[kron] ( identifier[QUANTUM_GATES] [ identifier[term] [ identifier[qubit] ]], identifier[term_hilbert] ) identifier[result_hilbert] += identifier[term_hilbert] * identifier[term] . identifier[coefficient] keyword[return] identifier[result_hilbert]
def lifted_pauli(pauli_sum: Union[PauliSum, PauliTerm], qubits: List[int]): """ Takes a PauliSum object along with a list of qubits and returns a matrix corresponding the tensor representation of the object. Useful for generating the full Hamiltonian after a particular fermion to pauli transformation. For example: Converting a PauliSum X0Y1 + Y1X0 into the matrix .. code-block:: python [[ 0.+0.j, 0.+0.j, 0.+0.j, 0.-2.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+2.j, 0.+0.j, 0.+0.j, 0.+0.j]] Developer note: Quil and the QVM like qubits to be ordered such that qubit 0 is on the right. Therefore, in ``qubit_adjacent_lifted_gate``, ``lifted_pauli``, and ``lifted_state_operator``, we build up the lifted matrix by performing the kronecker product from right to left. :param pauli_sum: Pauli representation of an operator :param qubits: list of qubits in the order they will be represented in the resultant matrix. :returns: matrix representation of the pauli_sum operator """ if isinstance(pauli_sum, PauliTerm): pauli_sum = PauliSum([pauli_sum]) # depends on [control=['if'], data=[]] n_qubits = len(qubits) result_hilbert = np.zeros((2 ** n_qubits, 2 ** n_qubits), dtype=np.complex128) # left kronecker product corresponds to the correct basis ordering for term in pauli_sum.terms: term_hilbert = np.array([1]) for qubit in qubits: term_hilbert = np.kron(QUANTUM_GATES[term[qubit]], term_hilbert) # depends on [control=['for'], data=['qubit']] result_hilbert += term_hilbert * term.coefficient # depends on [control=['for'], data=['term']] return result_hilbert
def main():
    """Smoke test, run from the command line."""
    model = load_model('../data/hmmdefs')
    hmm = model.hmms['r-We']
    # Dump each state of the chosen HMM along with its Gaussian means.
    for name in hmm.state_names:
        print(name)
        print(model.states[name].means_)
    print(model)
    # Load and print a second model to exercise the other file format.
    print(load_model('../data/prior.hmm1mixSI.rate32'))
def function[main, parameter[]]: constant[Test code called from commandline] variable[model] assign[=] call[name[load_model], parameter[constant[../data/hmmdefs]]] variable[hmm] assign[=] call[name[model].hmms][constant[r-We]] for taget[name[state_name]] in starred[name[hmm].state_names] begin[:] call[name[print], parameter[name[state_name]]] variable[state] assign[=] call[name[model].states][name[state_name]] call[name[print], parameter[name[state].means_]] call[name[print], parameter[name[model]]] variable[model2] assign[=] call[name[load_model], parameter[constant[../data/prior.hmm1mixSI.rate32]]] call[name[print], parameter[name[model2]]]
keyword[def] identifier[main] (): literal[string] identifier[model] = identifier[load_model] ( literal[string] ) identifier[hmm] = identifier[model] . identifier[hmms] [ literal[string] ] keyword[for] identifier[state_name] keyword[in] identifier[hmm] . identifier[state_names] : identifier[print] ( identifier[state_name] ) identifier[state] = identifier[model] . identifier[states] [ identifier[state_name] ] identifier[print] ( identifier[state] . identifier[means_] ) identifier[print] ( identifier[model] ) identifier[model2] = identifier[load_model] ( literal[string] ) identifier[print] ( identifier[model2] )
def main(): """Test code called from commandline""" model = load_model('../data/hmmdefs') hmm = model.hmms['r-We'] for state_name in hmm.state_names: print(state_name) state = model.states[state_name] print(state.means_) # depends on [control=['for'], data=['state_name']] print(model) model2 = load_model('../data/prior.hmm1mixSI.rate32') print(model2)
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data, station_geo_data):
    """Create one MV grid district together with its grid and station.

    Parameters
    ----------
    poly_id: int
        ID of the grid district according to the database table; reused
        as the ID of the created grid.
    subst_id: int
        ID of the station according to the database table.
    grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
        Polygon of the grid district.
    station_geo_data: :shapely:`Shapely Point object<points>`
        Point of the station.

    Returns
    -------
    MVGridDistrictDing0
        The newly created and registered MV grid district.
    """
    station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data)
    grid = MVGridDing0(network=self, id_db=poly_id, station=station)
    district = MVGridDistrictDing0(id_db=poly_id,
                                   mv_grid=grid,
                                   geo_data=grid_district_geo_data)

    # Wire up the back-references before registering the district.
    grid.grid_district = district
    station.grid = grid
    self.add_mv_grid_district(district)

    return district
def function[build_mv_grid_district, parameter[self, poly_id, subst_id, grid_district_geo_data, station_geo_data]]: constant[Initiates single MV grid_district including station and grid Parameters ---------- poly_id: int ID of grid_district according to database table. Also used as ID for created grid #TODO: check type subst_id: int ID of station according to database table #TODO: check type grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>` Polygon of grid district station_geo_data: :shapely:`Shapely Point object<points>` Point of station Returns ------- :shapely:`Shapely Polygon object<polygons>` Description of return #TODO: check ] variable[mv_station] assign[=] call[name[MVStationDing0], parameter[]] variable[mv_grid] assign[=] call[name[MVGridDing0], parameter[]] variable[mv_grid_district] assign[=] call[name[MVGridDistrictDing0], parameter[]] name[mv_grid].grid_district assign[=] name[mv_grid_district] name[mv_station].grid assign[=] name[mv_grid] call[name[self].add_mv_grid_district, parameter[name[mv_grid_district]]] return[name[mv_grid_district]]
keyword[def] identifier[build_mv_grid_district] ( identifier[self] , identifier[poly_id] , identifier[subst_id] , identifier[grid_district_geo_data] , identifier[station_geo_data] ): literal[string] identifier[mv_station] = identifier[MVStationDing0] ( identifier[id_db] = identifier[subst_id] , identifier[geo_data] = identifier[station_geo_data] ) identifier[mv_grid] = identifier[MVGridDing0] ( identifier[network] = identifier[self] , identifier[id_db] = identifier[poly_id] , identifier[station] = identifier[mv_station] ) identifier[mv_grid_district] = identifier[MVGridDistrictDing0] ( identifier[id_db] = identifier[poly_id] , identifier[mv_grid] = identifier[mv_grid] , identifier[geo_data] = identifier[grid_district_geo_data] ) identifier[mv_grid] . identifier[grid_district] = identifier[mv_grid_district] identifier[mv_station] . identifier[grid] = identifier[mv_grid] identifier[self] . identifier[add_mv_grid_district] ( identifier[mv_grid_district] ) keyword[return] identifier[mv_grid_district]
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data, station_geo_data): """Initiates single MV grid_district including station and grid Parameters ---------- poly_id: int ID of grid_district according to database table. Also used as ID for created grid #TODO: check type subst_id: int ID of station according to database table #TODO: check type grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>` Polygon of grid district station_geo_data: :shapely:`Shapely Point object<points>` Point of station Returns ------- :shapely:`Shapely Polygon object<polygons>` Description of return #TODO: check """ mv_station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data) mv_grid = MVGridDing0(network=self, id_db=poly_id, station=mv_station) mv_grid_district = MVGridDistrictDing0(id_db=poly_id, mv_grid=mv_grid, geo_data=grid_district_geo_data) mv_grid.grid_district = mv_grid_district mv_station.grid = mv_grid self.add_mv_grid_district(mv_grid_district) return mv_grid_district
def bio_write(self, buf):
    """
    Feed bytes into the read end of the memory BIO, if this Connection
    was created with one. The Connection can subsequently consume them
    (for example, in response to a call to :meth:`recv`).

    :param buf: The string to put into the memory BIO.
    :return: The number of bytes written
    """
    data = _text_to_bytes_and_warn("buf", buf)

    # No memory BIO means this Connection wraps a real socket.
    if self._into_ssl is None:
        raise TypeError("Connection sock was not None")

    written = _lib.BIO_write(self._into_ssl, data, len(data))
    if written <= 0:
        self._handle_bio_errors(self._into_ssl, written)
    return written
def function[bio_write, parameter[self, buf]]: constant[ If the Connection was created with a memory BIO, this method can be used to add bytes to the read end of that memory BIO. The Connection can then read the bytes (for example, in response to a call to :meth:`recv`). :param buf: The string to put into the memory BIO. :return: The number of bytes written ] variable[buf] assign[=] call[name[_text_to_bytes_and_warn], parameter[constant[buf], name[buf]]] if compare[name[self]._into_ssl is constant[None]] begin[:] <ast.Raise object at 0x7da1b024fbe0> variable[result] assign[=] call[name[_lib].BIO_write, parameter[name[self]._into_ssl, name[buf], call[name[len], parameter[name[buf]]]]] if compare[name[result] less_or_equal[<=] constant[0]] begin[:] call[name[self]._handle_bio_errors, parameter[name[self]._into_ssl, name[result]]] return[name[result]]
keyword[def] identifier[bio_write] ( identifier[self] , identifier[buf] ): literal[string] identifier[buf] = identifier[_text_to_bytes_and_warn] ( literal[string] , identifier[buf] ) keyword[if] identifier[self] . identifier[_into_ssl] keyword[is] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[result] = identifier[_lib] . identifier[BIO_write] ( identifier[self] . identifier[_into_ssl] , identifier[buf] , identifier[len] ( identifier[buf] )) keyword[if] identifier[result] <= literal[int] : identifier[self] . identifier[_handle_bio_errors] ( identifier[self] . identifier[_into_ssl] , identifier[result] ) keyword[return] identifier[result]
def bio_write(self, buf): """ If the Connection was created with a memory BIO, this method can be used to add bytes to the read end of that memory BIO. The Connection can then read the bytes (for example, in response to a call to :meth:`recv`). :param buf: The string to put into the memory BIO. :return: The number of bytes written """ buf = _text_to_bytes_and_warn('buf', buf) if self._into_ssl is None: raise TypeError('Connection sock was not None') # depends on [control=['if'], data=[]] result = _lib.BIO_write(self._into_ssl, buf, len(buf)) if result <= 0: self._handle_bio_errors(self._into_ssl, result) # depends on [control=['if'], data=['result']] return result
def ancestral(args):
    """
    %prog ancestral ancestral.txt assembly.fasta

    Karyotype evolution of pineapple. The figure is inspired by Amphioxus paper
    Figure 3 and Tetradon paper Figure 9.
    """
    # NOTE: the docstring doubles as the CLI usage text (it is passed to
    # OptionParser below), so its wording is part of runtime behavior.
    p = OptionParser(ancestral.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="8x7")

    if len(args) != 2:
        sys.exit(not p.print_help())

    regionsfile, sizesfile = args
    regions = RegionsFile(regionsfile)
    sizes = Sizes(sizesfile).mapping
    # Keep only linkage-group sequences ("LG*"); everything else is dropped.
    # (.iteritems() implies this module targets Python 2.)
    sizes = dict((k, v) for (k, v) in sizes.iteritems() if k[:2] == "LG")
    maxsize = max(sizes.values())
    # Scale factor so the longest chromosome spans half the figure height.
    ratio = .5 / maxsize

    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes((0, 0, 1, 1))

    from jcvi.graphics.base import set2
    a, b, c, d, e, f, g = set2[:7]
    # Re-order the 7-color palette; below, karyotype i is drawn with
    # set2[i] (upper panel) and set2[int(karyotype) - 1] (lower panel).
    set2 = (c, g, b, e, d, a, f)

    # Upper panel is the evolution of segments
    # All segments belong to one of seven karyotypes 1 to 7
    karyotypes = regions.karyotypes
    xgap = 1. / (1 + len(karyotypes))  # horizontal spacing between ancestors
    ygap = .05
    mgap = xgap / 4.5                  # spacing between the a/b/c/d leaves
    gwidth = mgap * .75                # width of each drawn segment bar
    tip = .02                          # small offset used for text labels
    coords = {}
    for i, k in enumerate(regions.karyotypes):
        x = (i + 1) * xgap
        y = .9
        root.text(x, y + tip, "Anc" + k, ha="center")
        root.plot((x, x), (y, y - ygap), "k-", lw=2)
        y -= 2 * ygap
        # Four descendant leaves (a-d), joined pairwise into a small tree.
        coords['a'] = (x - 1.5 * mgap , y)
        coords['b'] = (x - .5 * mgap , y)
        coords['c'] = (x + .5 * mgap , y)
        coords['d'] = (x + 1.5 * mgap , y)
        coords['ab'] = join_nodes_vertical(root, coords, 'a', 'b', y + ygap / 2)
        coords['cd'] = join_nodes_vertical(root, coords, 'c', 'd', y + ygap / 2)
        coords['abcd'] = join_nodes_vertical(root, coords, 'ab', 'cd', y + ygap)
        for n in 'abcd':
            nx, ny = coords[n]
            root.text(nx, ny - tip, n, ha="center")
            coords[n] = (nx, ny - ygap / 2)

        # Stack this karyotype's segments downward under their group leaf.
        kdata = regions.get_karyotype(k)
        for kd in kdata:
            g = kd.group
            gx, gy = coords[g]
            gsize = ratio * kd.span
            gy -= gsize
            p = Rectangle((gx - gwidth / 2, gy), gwidth, gsize, lw=0, color=set2[i])
            root.add_patch(p)
            root.text(gx, gy + gsize / 2, kd.chromosome, ha="center", va="center", color='w')
            coords[g] = (gx, gy - tip)  # next segment starts below this one

    # Bottom panel shows the location of segments on chromosomes
    # TODO: redundant code, similar to graphics.chromosome
    ystart = .54
    chr_number = len(sizes)
    xstart, xend = xgap - 2 * mgap, 1 - xgap + 2 * mgap
    xinterval = (xend - xstart - gwidth) / (chr_number - 1)
    chrpos = {}
    for a, (chr, clen) in enumerate(sorted(sizes.items())):
        chr = get_number(chr)
        xx = xstart + a * xinterval + gwidth / 2
        chrpos[chr] = xx
        root.text(xx, ystart + .01, chr, ha="center")
        Chromosome(root, xx, ystart, ystart - clen * ratio, width=gwidth)

    # Start painting
    for r in regions:
        xx = chrpos[r.chromosome]
        yystart = ystart - r.start * ratio
        yyend = ystart - r.end * ratio
        p = Rectangle((xx - gwidth / 2, yystart), gwidth, yyend - yystart, color=set2[int(r.karyotype) - 1], lw=0)
        root.add_patch(p)

    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()

    pf = "pineapple-karyotype"
    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def function[ancestral, parameter[args]]: constant[ %prog ancestral ancestral.txt assembly.fasta Karyotype evolution of pineapple. The figure is inspired by Amphioxus paper Figure 3 and Tetradon paper Figure 9. ] variable[p] assign[=] call[name[OptionParser], parameter[name[ancestral].__doc__]] <ast.Tuple object at 0x7da1b094df60> assign[=] call[name[p].set_image_options, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b094dae0>]] <ast.Tuple object at 0x7da1b094cb50> assign[=] name[args] variable[regions] assign[=] call[name[RegionsFile], parameter[name[regionsfile]]] variable[sizes] assign[=] call[name[Sizes], parameter[name[sizesfile]]].mapping variable[sizes] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b094d0f0>]] variable[maxsize] assign[=] call[name[max], parameter[call[name[sizes].values, parameter[]]]] variable[ratio] assign[=] binary_operation[constant[0.5] / name[maxsize]] variable[fig] assign[=] call[name[plt].figure, parameter[constant[1], tuple[[<ast.Attribute object at 0x7da1b094ddb0>, <ast.Attribute object at 0x7da1b094fe50>]]]] variable[root] assign[=] call[name[fig].add_axes, parameter[tuple[[<ast.Constant object at 0x7da1b094caf0>, <ast.Constant object at 0x7da1b094ee00>, <ast.Constant object at 0x7da1b094da80>, <ast.Constant object at 0x7da1b094c8b0>]]]] from relative_module[jcvi.graphics.base] import module[set2] <ast.Tuple object at 0x7da1b094d9f0> assign[=] call[name[set2]][<ast.Slice object at 0x7da1b094dfc0>] variable[set2] assign[=] tuple[[<ast.Name object at 0x7da1b094db40>, <ast.Name object at 0x7da1b094f8e0>, <ast.Name object at 0x7da1b094d060>, <ast.Name object at 0x7da1b094ce20>, <ast.Name object at 0x7da1b094df00>, <ast.Name object at 0x7da1b094d360>, <ast.Name object at 0x7da1b094dde0>]] variable[karyotypes] assign[=] name[regions].karyotypes variable[xgap] assign[=] 
binary_operation[constant[1.0] / binary_operation[constant[1] + call[name[len], parameter[name[karyotypes]]]]] variable[ygap] assign[=] constant[0.05] variable[mgap] assign[=] binary_operation[name[xgap] / constant[4.5]] variable[gwidth] assign[=] binary_operation[name[mgap] * constant[0.75]] variable[tip] assign[=] constant[0.02] variable[coords] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b094faf0>, <ast.Name object at 0x7da1b094da20>]]] in starred[call[name[enumerate], parameter[name[regions].karyotypes]]] begin[:] variable[x] assign[=] binary_operation[binary_operation[name[i] + constant[1]] * name[xgap]] variable[y] assign[=] constant[0.9] call[name[root].text, parameter[name[x], binary_operation[name[y] + name[tip]], binary_operation[constant[Anc] + name[k]]]] call[name[root].plot, parameter[tuple[[<ast.Name object at 0x7da1b094e0b0>, <ast.Name object at 0x7da1b094cc70>]], tuple[[<ast.Name object at 0x7da1b094fbb0>, <ast.BinOp object at 0x7da1b094c3a0>]], constant[k-]]] <ast.AugAssign object at 0x7da1b094dd80> call[name[coords]][constant[a]] assign[=] tuple[[<ast.BinOp object at 0x7da1b0831180>, <ast.Name object at 0x7da1b0830f40>]] call[name[coords]][constant[b]] assign[=] tuple[[<ast.BinOp object at 0x7da1b0831cc0>, <ast.Name object at 0x7da1b0831480>]] call[name[coords]][constant[c]] assign[=] tuple[[<ast.BinOp object at 0x7da1b0833ee0>, <ast.Name object at 0x7da1b0832080>]] call[name[coords]][constant[d]] assign[=] tuple[[<ast.BinOp object at 0x7da1b08310c0>, <ast.Name object at 0x7da1b0833100>]] call[name[coords]][constant[ab]] assign[=] call[name[join_nodes_vertical], parameter[name[root], name[coords], constant[a], constant[b], binary_operation[name[y] + binary_operation[name[ygap] / constant[2]]]]] call[name[coords]][constant[cd]] assign[=] call[name[join_nodes_vertical], parameter[name[root], name[coords], constant[c], constant[d], binary_operation[name[y] + binary_operation[name[ygap] / constant[2]]]]] 
call[name[coords]][constant[abcd]] assign[=] call[name[join_nodes_vertical], parameter[name[root], name[coords], constant[ab], constant[cd], binary_operation[name[y] + name[ygap]]]] for taget[name[n]] in starred[constant[abcd]] begin[:] <ast.Tuple object at 0x7da20c6e6350> assign[=] call[name[coords]][name[n]] call[name[root].text, parameter[name[nx], binary_operation[name[ny] - name[tip]], name[n]]] call[name[coords]][name[n]] assign[=] tuple[[<ast.Name object at 0x7da20c6e6f80>, <ast.BinOp object at 0x7da20c6e4b50>]] variable[kdata] assign[=] call[name[regions].get_karyotype, parameter[name[k]]] for taget[name[kd]] in starred[name[kdata]] begin[:] variable[g] assign[=] name[kd].group <ast.Tuple object at 0x7da20c6e63b0> assign[=] call[name[coords]][name[g]] variable[gsize] assign[=] binary_operation[name[ratio] * name[kd].span] <ast.AugAssign object at 0x7da20c6e5c30> variable[p] assign[=] call[name[Rectangle], parameter[tuple[[<ast.BinOp object at 0x7da20c6e7e80>, <ast.Name object at 0x7da20c6e7ee0>]], name[gwidth], name[gsize]]] call[name[root].add_patch, parameter[name[p]]] call[name[root].text, parameter[name[gx], binary_operation[name[gy] + binary_operation[name[gsize] / constant[2]]], name[kd].chromosome]] call[name[coords]][name[g]] assign[=] tuple[[<ast.Name object at 0x7da1b09e9b10>, <ast.BinOp object at 0x7da1b09ea740>]] variable[ystart] assign[=] constant[0.54] variable[chr_number] assign[=] call[name[len], parameter[name[sizes]]] <ast.Tuple object at 0x7da1b09e8dc0> assign[=] tuple[[<ast.BinOp object at 0x7da1b09eae90>, <ast.BinOp object at 0x7da1b09e9540>]] variable[xinterval] assign[=] binary_operation[binary_operation[binary_operation[name[xend] - name[xstart]] - name[gwidth]] / binary_operation[name[chr_number] - constant[1]]] variable[chrpos] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b09e8640>, <ast.Tuple object at 0x7da1b09e89a0>]]] in starred[call[name[enumerate], parameter[call[name[sorted], 
parameter[call[name[sizes].items, parameter[]]]]]]] begin[:] variable[chr] assign[=] call[name[get_number], parameter[name[chr]]] variable[xx] assign[=] binary_operation[binary_operation[name[xstart] + binary_operation[name[a] * name[xinterval]]] + binary_operation[name[gwidth] / constant[2]]] call[name[chrpos]][name[chr]] assign[=] name[xx] call[name[root].text, parameter[name[xx], binary_operation[name[ystart] + constant[0.01]], name[chr]]] call[name[Chromosome], parameter[name[root], name[xx], name[ystart], binary_operation[name[ystart] - binary_operation[name[clen] * name[ratio]]]]] for taget[name[r]] in starred[name[regions]] begin[:] variable[xx] assign[=] call[name[chrpos]][name[r].chromosome] variable[yystart] assign[=] binary_operation[name[ystart] - binary_operation[name[r].start * name[ratio]]] variable[yyend] assign[=] binary_operation[name[ystart] - binary_operation[name[r].end * name[ratio]]] variable[p] assign[=] call[name[Rectangle], parameter[tuple[[<ast.BinOp object at 0x7da1b0854df0>, <ast.Name object at 0x7da1b0854f40>]], name[gwidth], binary_operation[name[yyend] - name[yystart]]]] call[name[root].add_patch, parameter[name[p]]] call[name[root].set_xlim, parameter[constant[0], constant[1]]] call[name[root].set_ylim, parameter[constant[0], constant[1]]] call[name[root].set_axis_off, parameter[]] variable[pf] assign[=] constant[pineapple-karyotype] variable[image_name] assign[=] binary_operation[binary_operation[name[pf] + constant[.]] + name[iopts].format] call[name[savefig], parameter[name[image_name]]]
keyword[def] identifier[ancestral] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[ancestral] . identifier[__doc__] ) identifier[opts] , identifier[args] , identifier[iopts] = identifier[p] . identifier[set_image_options] ( identifier[args] , identifier[figsize] = literal[string] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[regionsfile] , identifier[sizesfile] = identifier[args] identifier[regions] = identifier[RegionsFile] ( identifier[regionsfile] ) identifier[sizes] = identifier[Sizes] ( identifier[sizesfile] ). identifier[mapping] identifier[sizes] = identifier[dict] (( identifier[k] , identifier[v] ) keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[sizes] . identifier[iteritems] () keyword[if] identifier[k] [: literal[int] ]== literal[string] ) identifier[maxsize] = identifier[max] ( identifier[sizes] . identifier[values] ()) identifier[ratio] = literal[int] / identifier[maxsize] identifier[fig] = identifier[plt] . identifier[figure] ( literal[int] ,( identifier[iopts] . identifier[w] , identifier[iopts] . identifier[h] )) identifier[root] = identifier[fig] . identifier[add_axes] (( literal[int] , literal[int] , literal[int] , literal[int] )) keyword[from] identifier[jcvi] . identifier[graphics] . identifier[base] keyword[import] identifier[set2] identifier[a] , identifier[b] , identifier[c] , identifier[d] , identifier[e] , identifier[f] , identifier[g] = identifier[set2] [: literal[int] ] identifier[set2] =( identifier[c] , identifier[g] , identifier[b] , identifier[e] , identifier[d] , identifier[a] , identifier[f] ) identifier[karyotypes] = identifier[regions] . 
identifier[karyotypes] identifier[xgap] = literal[int] /( literal[int] + identifier[len] ( identifier[karyotypes] )) identifier[ygap] = literal[int] identifier[mgap] = identifier[xgap] / literal[int] identifier[gwidth] = identifier[mgap] * literal[int] identifier[tip] = literal[int] identifier[coords] ={} keyword[for] identifier[i] , identifier[k] keyword[in] identifier[enumerate] ( identifier[regions] . identifier[karyotypes] ): identifier[x] =( identifier[i] + literal[int] )* identifier[xgap] identifier[y] = literal[int] identifier[root] . identifier[text] ( identifier[x] , identifier[y] + identifier[tip] , literal[string] + identifier[k] , identifier[ha] = literal[string] ) identifier[root] . identifier[plot] (( identifier[x] , identifier[x] ),( identifier[y] , identifier[y] - identifier[ygap] ), literal[string] , identifier[lw] = literal[int] ) identifier[y] -= literal[int] * identifier[ygap] identifier[coords] [ literal[string] ]=( identifier[x] - literal[int] * identifier[mgap] , identifier[y] ) identifier[coords] [ literal[string] ]=( identifier[x] - literal[int] * identifier[mgap] , identifier[y] ) identifier[coords] [ literal[string] ]=( identifier[x] + literal[int] * identifier[mgap] , identifier[y] ) identifier[coords] [ literal[string] ]=( identifier[x] + literal[int] * identifier[mgap] , identifier[y] ) identifier[coords] [ literal[string] ]= identifier[join_nodes_vertical] ( identifier[root] , identifier[coords] , literal[string] , literal[string] , identifier[y] + identifier[ygap] / literal[int] ) identifier[coords] [ literal[string] ]= identifier[join_nodes_vertical] ( identifier[root] , identifier[coords] , literal[string] , literal[string] , identifier[y] + identifier[ygap] / literal[int] ) identifier[coords] [ literal[string] ]= identifier[join_nodes_vertical] ( identifier[root] , identifier[coords] , literal[string] , literal[string] , identifier[y] + identifier[ygap] ) keyword[for] identifier[n] keyword[in] literal[string] : identifier[nx] , 
identifier[ny] = identifier[coords] [ identifier[n] ] identifier[root] . identifier[text] ( identifier[nx] , identifier[ny] - identifier[tip] , identifier[n] , identifier[ha] = literal[string] ) identifier[coords] [ identifier[n] ]=( identifier[nx] , identifier[ny] - identifier[ygap] / literal[int] ) identifier[kdata] = identifier[regions] . identifier[get_karyotype] ( identifier[k] ) keyword[for] identifier[kd] keyword[in] identifier[kdata] : identifier[g] = identifier[kd] . identifier[group] identifier[gx] , identifier[gy] = identifier[coords] [ identifier[g] ] identifier[gsize] = identifier[ratio] * identifier[kd] . identifier[span] identifier[gy] -= identifier[gsize] identifier[p] = identifier[Rectangle] (( identifier[gx] - identifier[gwidth] / literal[int] , identifier[gy] ), identifier[gwidth] , identifier[gsize] , identifier[lw] = literal[int] , identifier[color] = identifier[set2] [ identifier[i] ]) identifier[root] . identifier[add_patch] ( identifier[p] ) identifier[root] . identifier[text] ( identifier[gx] , identifier[gy] + identifier[gsize] / literal[int] , identifier[kd] . identifier[chromosome] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = literal[string] ) identifier[coords] [ identifier[g] ]=( identifier[gx] , identifier[gy] - identifier[tip] ) identifier[ystart] = literal[int] identifier[chr_number] = identifier[len] ( identifier[sizes] ) identifier[xstart] , identifier[xend] = identifier[xgap] - literal[int] * identifier[mgap] , literal[int] - identifier[xgap] + literal[int] * identifier[mgap] identifier[xinterval] =( identifier[xend] - identifier[xstart] - identifier[gwidth] )/( identifier[chr_number] - literal[int] ) identifier[chrpos] ={} keyword[for] identifier[a] ,( identifier[chr] , identifier[clen] ) keyword[in] identifier[enumerate] ( identifier[sorted] ( identifier[sizes] . 
identifier[items] ())): identifier[chr] = identifier[get_number] ( identifier[chr] ) identifier[xx] = identifier[xstart] + identifier[a] * identifier[xinterval] + identifier[gwidth] / literal[int] identifier[chrpos] [ identifier[chr] ]= identifier[xx] identifier[root] . identifier[text] ( identifier[xx] , identifier[ystart] + literal[int] , identifier[chr] , identifier[ha] = literal[string] ) identifier[Chromosome] ( identifier[root] , identifier[xx] , identifier[ystart] , identifier[ystart] - identifier[clen] * identifier[ratio] , identifier[width] = identifier[gwidth] ) keyword[for] identifier[r] keyword[in] identifier[regions] : identifier[xx] = identifier[chrpos] [ identifier[r] . identifier[chromosome] ] identifier[yystart] = identifier[ystart] - identifier[r] . identifier[start] * identifier[ratio] identifier[yyend] = identifier[ystart] - identifier[r] . identifier[end] * identifier[ratio] identifier[p] = identifier[Rectangle] (( identifier[xx] - identifier[gwidth] / literal[int] , identifier[yystart] ), identifier[gwidth] , identifier[yyend] - identifier[yystart] , identifier[color] = identifier[set2] [ identifier[int] ( identifier[r] . identifier[karyotype] )- literal[int] ], identifier[lw] = literal[int] ) identifier[root] . identifier[add_patch] ( identifier[p] ) identifier[root] . identifier[set_xlim] ( literal[int] , literal[int] ) identifier[root] . identifier[set_ylim] ( literal[int] , literal[int] ) identifier[root] . identifier[set_axis_off] () identifier[pf] = literal[string] identifier[image_name] = identifier[pf] + literal[string] + identifier[iopts] . identifier[format] identifier[savefig] ( identifier[image_name] , identifier[dpi] = identifier[iopts] . identifier[dpi] , identifier[iopts] = identifier[iopts] )
def ancestral(args): """ %prog ancestral ancestral.txt assembly.fasta Karyotype evolution of pineapple. The figure is inspired by Amphioxus paper Figure 3 and Tetradon paper Figure 9. """ p = OptionParser(ancestral.__doc__) (opts, args, iopts) = p.set_image_options(args, figsize='8x7') if len(args) != 2: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] (regionsfile, sizesfile) = args regions = RegionsFile(regionsfile) sizes = Sizes(sizesfile).mapping sizes = dict(((k, v) for (k, v) in sizes.iteritems() if k[:2] == 'LG')) maxsize = max(sizes.values()) ratio = 0.5 / maxsize fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes((0, 0, 1, 1)) from jcvi.graphics.base import set2 (a, b, c, d, e, f, g) = set2[:7] set2 = (c, g, b, e, d, a, f) # Upper panel is the evolution of segments # All segments belong to one of seven karyotypes 1 to 7 karyotypes = regions.karyotypes xgap = 1.0 / (1 + len(karyotypes)) ygap = 0.05 mgap = xgap / 4.5 gwidth = mgap * 0.75 tip = 0.02 coords = {} for (i, k) in enumerate(regions.karyotypes): x = (i + 1) * xgap y = 0.9 root.text(x, y + tip, 'Anc' + k, ha='center') root.plot((x, x), (y, y - ygap), 'k-', lw=2) y -= 2 * ygap coords['a'] = (x - 1.5 * mgap, y) coords['b'] = (x - 0.5 * mgap, y) coords['c'] = (x + 0.5 * mgap, y) coords['d'] = (x + 1.5 * mgap, y) coords['ab'] = join_nodes_vertical(root, coords, 'a', 'b', y + ygap / 2) coords['cd'] = join_nodes_vertical(root, coords, 'c', 'd', y + ygap / 2) coords['abcd'] = join_nodes_vertical(root, coords, 'ab', 'cd', y + ygap) for n in 'abcd': (nx, ny) = coords[n] root.text(nx, ny - tip, n, ha='center') coords[n] = (nx, ny - ygap / 2) # depends on [control=['for'], data=['n']] kdata = regions.get_karyotype(k) for kd in kdata: g = kd.group (gx, gy) = coords[g] gsize = ratio * kd.span gy -= gsize p = Rectangle((gx - gwidth / 2, gy), gwidth, gsize, lw=0, color=set2[i]) root.add_patch(p) root.text(gx, gy + gsize / 2, kd.chromosome, ha='center', va='center', color='w') coords[g] = 
(gx, gy - tip) # depends on [control=['for'], data=['kd']] # depends on [control=['for'], data=[]] # Bottom panel shows the location of segments on chromosomes # TODO: redundant code, similar to graphics.chromosome ystart = 0.54 chr_number = len(sizes) (xstart, xend) = (xgap - 2 * mgap, 1 - xgap + 2 * mgap) xinterval = (xend - xstart - gwidth) / (chr_number - 1) chrpos = {} for (a, (chr, clen)) in enumerate(sorted(sizes.items())): chr = get_number(chr) xx = xstart + a * xinterval + gwidth / 2 chrpos[chr] = xx root.text(xx, ystart + 0.01, chr, ha='center') Chromosome(root, xx, ystart, ystart - clen * ratio, width=gwidth) # depends on [control=['for'], data=[]] # Start painting for r in regions: xx = chrpos[r.chromosome] yystart = ystart - r.start * ratio yyend = ystart - r.end * ratio p = Rectangle((xx - gwidth / 2, yystart), gwidth, yyend - yystart, color=set2[int(r.karyotype) - 1], lw=0) root.add_patch(p) # depends on [control=['for'], data=['r']] root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = 'pineapple-karyotype' image_name = pf + '.' + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def to_dict(self, val=UNSET):
    """Recursively convert a dict2 object into plain built-in containers.

    Args:
        val: Value to convert. Defaults to ``self`` when omitted; the
            ``UNSET`` sentinel distinguishes "not given" from ``None``.

    Returns:
        An equivalent structure built only from ``dict`` and ``list``;
        scalar leaves are returned unchanged.
    """
    if val is UNSET:
        val = self
    if isinstance(val, (dict2, dict)):
        # Rebuild the mapping key by key, converting nested values.
        return {k: self.to_dict(v) for k, v in val.items()}
    if isinstance(val, list):
        # Convert each list element, preserving order.
        return [self.to_dict(item) for item in val]
    # Scalar leaf: nothing to convert.
    return val
def function[to_dict, parameter[self, val]]: constant[Creates dict object from dict2 object Args: val (:obj:`dict2`): Value to create from Returns: Equivalent dict object. ] if compare[name[val] is name[UNSET]] begin[:] variable[val] assign[=] name[self] if <ast.BoolOp object at 0x7da2044c1150> begin[:] variable[res] assign[=] call[name[dict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da2044c3d00>, <ast.Name object at 0x7da2044c34f0>]]] in starred[call[name[val].items, parameter[]]] begin[:] call[name[res]][name[k]] assign[=] call[name[self].to_dict, parameter[name[v]]] return[name[res]]
keyword[def] identifier[to_dict] ( identifier[self] , identifier[val] = identifier[UNSET] ): literal[string] keyword[if] identifier[val] keyword[is] identifier[UNSET] : identifier[val] = identifier[self] keyword[if] identifier[isinstance] ( identifier[val] , identifier[dict2] ) keyword[or] identifier[isinstance] ( identifier[val] , identifier[dict] ): identifier[res] = identifier[dict] () keyword[for] identifier[k] , identifier[v] keyword[in] identifier[val] . identifier[items] (): identifier[res] [ identifier[k] ]= identifier[self] . identifier[to_dict] ( identifier[v] ) keyword[return] identifier[res] keyword[elif] identifier[isinstance] ( identifier[val] , identifier[list] ): identifier[res] =[] keyword[for] identifier[item] keyword[in] identifier[val] : identifier[res] . identifier[append] ( identifier[self] . identifier[to_dict] ( identifier[item] )) keyword[return] identifier[res] keyword[else] : keyword[return] identifier[val]
def to_dict(self, val=UNSET): """Creates dict object from dict2 object Args: val (:obj:`dict2`): Value to create from Returns: Equivalent dict object. """ if val is UNSET: val = self # depends on [control=['if'], data=['val']] if isinstance(val, dict2) or isinstance(val, dict): res = dict() for (k, v) in val.items(): res[k] = self.to_dict(v) # depends on [control=['for'], data=[]] return res # depends on [control=['if'], data=[]] elif isinstance(val, list): res = [] for item in val: res.append(self.to_dict(item)) # depends on [control=['for'], data=['item']] return res # depends on [control=['if'], data=[]] else: return val
def _inject_constructor(self, cls, func, name, resolution_level, keep, trace):
    """
    Modifying Methods in Place - after the recipe 15.7 in the Python
    Cookbook by Ken Seehof. The original constructors may be restored
    later.
    """
    try:
        original_init = cls.__init__
    except AttributeError:
        # Class without an explicit __init__: track a no-op stand-in.
        def original_init(self, *_args, **_kwargs):
            pass
    self._observers[cls] = _ClassObserver(original_init, name,
                                          resolution_level, keep, trace)

    # The replacement constructor looks up the observer at call time.
    # A closure (instead of curried keyword arguments) avoids any name
    # clash with keyword arguments of the tracked class' constructor.
    def patched_init(*args, **kwds):
        return func(self._observers[cls], *args, **kwds)

    cls.__init__ = instancemethod(patched_init, None, cls)
def function[_inject_constructor, parameter[self, cls, func, name, resolution_level, keep, trace]]: constant[ Modifying Methods in Place - after the recipe 15.7 in the Python Cookbook by Ken Seehof. The original constructors may be restored later. ] <ast.Try object at 0x7da18f00ec50> call[name[self]._observers][name[cls]] assign[=] call[name[_ClassObserver], parameter[name[constructor], name[name], name[resolution_level], name[keep], name[trace]]] name[cls].__init__ assign[=] call[name[instancemethod], parameter[<ast.Lambda object at 0x7da1b0471db0>, constant[None], name[cls]]]
keyword[def] identifier[_inject_constructor] ( identifier[self] , identifier[cls] , identifier[func] , identifier[name] , identifier[resolution_level] , identifier[keep] , identifier[trace] ): literal[string] keyword[try] : identifier[constructor] = identifier[cls] . identifier[__init__] keyword[except] identifier[AttributeError] : keyword[def] identifier[constructor] ( identifier[self] ,* identifier[_args] ,** identifier[_kwargs] ): keyword[pass] identifier[self] . identifier[_observers] [ identifier[cls] ]= identifier[_ClassObserver] ( identifier[constructor] , identifier[name] , identifier[resolution_level] , identifier[keep] , identifier[trace] ) identifier[cls] . identifier[__init__] = identifier[instancemethod] ( keyword[lambda] * identifier[args] ,** identifier[kwds] : identifier[func] ( identifier[self] . identifier[_observers] [ identifier[cls] ],* identifier[args] ,** identifier[kwds] ), keyword[None] , identifier[cls] )
def _inject_constructor(self, cls, func, name, resolution_level, keep, trace): """ Modifying Methods in Place - after the recipe 15.7 in the Python Cookbook by Ken Seehof. The original constructors may be restored later. """ try: constructor = cls.__init__ # depends on [control=['try'], data=[]] except AttributeError: def constructor(self, *_args, **_kwargs): pass # depends on [control=['except'], data=[]] # Possible name clash between keyword arguments of the tracked class' # constructor and the curried arguments of the injected constructor. # Therefore, the additional argument has a 'magic' name to make it less # likely that an argument name clash occurs. self._observers[cls] = _ClassObserver(constructor, name, resolution_level, keep, trace) cls.__init__ = instancemethod(lambda *args, **kwds: func(self._observers[cls], *args, **kwds), None, cls)
def to_segwizard(segs, target, header=True, coltype=LIGOTimeGPS):
    """Write the given `SegmentList` to a file in SegWizard format.

    Parameters
    ----------
    segs : :class:`~gwpy.segments.SegmentList`
        The list of segments to write.

    target : `file`, `str`
        An open file, or file path, to which to write.

    header : `bool`, optional
        Print a column header into the file, default: `True`.

    coltype : `type`, optional
        The numerical type in which to cast times before printing.

    Notes
    -----
    This method is adapted from original code written by Kipp Cannon and
    distributed under GPLv3.
    """
    # Given a path: open it and recurse with the file object.
    if isinstance(target, string_types):
        with open(target, 'w') as fobj:
            return to_segwizard(segs, fobj, header=header, coltype=coltype)

    # Given an open file object: emit header then one row per segment.
    if header:
        print('# seg\tstart\tstop\tduration', file=target)
    for idx, segment in enumerate(segs):
        start = coltype(segment[0])
        end = coltype(segment[1])
        duration = float(end - start)
        row = '\t'.join(str(field) for field in (idx, start, end, duration))
        print(row, file=target)
def function[to_segwizard, parameter[segs, target, header, coltype]]: constant[Write the given `SegmentList` to a file in SegWizard format. Parameters ---------- segs : :class:`~gwpy.segments.SegmentList` The list of segments to write. target : `file`, `str` An open file, or file path, to which to write. header : `bool`, optional Print a column header into the file, default: `True`. coltype : `type`, optional The numerical type in which to cast times before printing. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3. ] if call[name[isinstance], parameter[name[target], name[string_types]]] begin[:] with call[name[open], parameter[name[target], constant[w]]] begin[:] return[call[name[to_segwizard], parameter[name[segs], name[fobj]]]] if name[header] begin[:] call[name[print], parameter[constant[# seg start stop duration]]] for taget[tuple[[<ast.Name object at 0x7da18f09d120>, <ast.Name object at 0x7da18f09ca30>]]] in starred[call[name[enumerate], parameter[name[segs]]]] begin[:] variable[a] assign[=] call[name[coltype], parameter[call[name[seg]][constant[0]]]] variable[b] assign[=] call[name[coltype], parameter[call[name[seg]][constant[1]]]] variable[c] assign[=] call[name[float], parameter[binary_operation[name[b] - name[a]]]] call[name[print], parameter[call[constant[ ].join, parameter[call[name[map], parameter[name[str], tuple[[<ast.Name object at 0x7da18f09eec0>, <ast.Name object at 0x7da18f09d6c0>, <ast.Name object at 0x7da18f09e830>, <ast.Name object at 0x7da18f09e020>]]]]]]]]
keyword[def] identifier[to_segwizard] ( identifier[segs] , identifier[target] , identifier[header] = keyword[True] , identifier[coltype] = identifier[LIGOTimeGPS] ): literal[string] keyword[if] identifier[isinstance] ( identifier[target] , identifier[string_types] ): keyword[with] identifier[open] ( identifier[target] , literal[string] ) keyword[as] identifier[fobj] : keyword[return] identifier[to_segwizard] ( identifier[segs] , identifier[fobj] , identifier[header] = identifier[header] , identifier[coltype] = identifier[coltype] ) keyword[if] identifier[header] : identifier[print] ( literal[string] , identifier[file] = identifier[target] ) keyword[for] identifier[i] , identifier[seg] keyword[in] identifier[enumerate] ( identifier[segs] ): identifier[a] = identifier[coltype] ( identifier[seg] [ literal[int] ]) identifier[b] = identifier[coltype] ( identifier[seg] [ literal[int] ]) identifier[c] = identifier[float] ( identifier[b] - identifier[a] ) identifier[print] ( literal[string] . identifier[join] ( identifier[map] ( identifier[str] ,( identifier[i] , identifier[a] , identifier[b] , identifier[c] ))), identifier[file] = identifier[target] , )
def to_segwizard(segs, target, header=True, coltype=LIGOTimeGPS): """Write the given `SegmentList` to a file in SegWizard format. Parameters ---------- segs : :class:`~gwpy.segments.SegmentList` The list of segments to write. target : `file`, `str` An open file, or file path, to which to write. header : `bool`, optional Print a column header into the file, default: `True`. coltype : `type`, optional The numerical type in which to cast times before printing. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3. """ # write file path if isinstance(target, string_types): with open(target, 'w') as fobj: return to_segwizard(segs, fobj, header=header, coltype=coltype) # depends on [control=['with'], data=['fobj']] # depends on [control=['if'], data=[]] # write file object if header: print('# seg\tstart\tstop\tduration', file=target) # depends on [control=['if'], data=[]] for (i, seg) in enumerate(segs): a = coltype(seg[0]) b = coltype(seg[1]) c = float(b - a) print('\t'.join(map(str, (i, a, b, c))), file=target) # depends on [control=['for'], data=[]]
def issues(self, from_date=DEFAULT_DATETIME, offset=None, max_issues=MAX_ISSUES):
    """Get the information of a list of issues.

    :param from_date: retrieve issues that were updated from that date;
        dates are converted to UTC
    :param offset: starting position for the search
    :param max_issues: maximum number of issues to return per query
    """
    endpoint = self.RISSUES + self.CJSON
    ts = datetime_to_utc(from_date).strftime("%Y-%m-%dT%H:%M:%SZ")

    # Redmine returns only open issues by default; setting 'status_id'
    # to '*' requests issues in every status.
    params = {
        self.PSTATUS_ID: '*',
        self.PSORT: self.PUPDATED_ON,
        self.PUPDATED_ON: '>=' + ts,
        self.PLIMIT: max_issues,
    }
    if offset is not None:
        params[self.POFFSET] = offset

    return self._call(endpoint, params)
def function[issues, parameter[self, from_date, offset, max_issues]]: constant[Get the information of a list of issues. :param from_date: retrieve issues that where updated from that date; dates are converted to UTC :param offset: starting position for the search :param max_issues: maximum number of issues to reteurn per query ] variable[resource] assign[=] binary_operation[name[self].RISSUES + name[self].CJSON] variable[ts] assign[=] call[name[datetime_to_utc], parameter[name[from_date]]] variable[ts] assign[=] call[name[ts].strftime, parameter[constant[%Y-%m-%dT%H:%M:%SZ]]] variable[params] assign[=] dictionary[[<ast.Attribute object at 0x7da1b02f0b50>, <ast.Attribute object at 0x7da1b02f00d0>, <ast.Attribute object at 0x7da1b02f0550>, <ast.Attribute object at 0x7da1b02f0970>], [<ast.Constant object at 0x7da1b02f3a30>, <ast.Attribute object at 0x7da1b02f1480>, <ast.BinOp object at 0x7da1b02f2dd0>, <ast.Name object at 0x7da1b02f2c20>]] if compare[name[offset] is_not constant[None]] begin[:] call[name[params]][name[self].POFFSET] assign[=] name[offset] variable[response] assign[=] call[name[self]._call, parameter[name[resource], name[params]]] return[name[response]]
keyword[def] identifier[issues] ( identifier[self] , identifier[from_date] = identifier[DEFAULT_DATETIME] , identifier[offset] = keyword[None] , identifier[max_issues] = identifier[MAX_ISSUES] ): literal[string] identifier[resource] = identifier[self] . identifier[RISSUES] + identifier[self] . identifier[CJSON] identifier[ts] = identifier[datetime_to_utc] ( identifier[from_date] ) identifier[ts] = identifier[ts] . identifier[strftime] ( literal[string] ) identifier[params] ={ identifier[self] . identifier[PSTATUS_ID] : literal[string] , identifier[self] . identifier[PSORT] : identifier[self] . identifier[PUPDATED_ON] , identifier[self] . identifier[PUPDATED_ON] : literal[string] + identifier[ts] , identifier[self] . identifier[PLIMIT] : identifier[max_issues] } keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] : identifier[params] [ identifier[self] . identifier[POFFSET] ]= identifier[offset] identifier[response] = identifier[self] . identifier[_call] ( identifier[resource] , identifier[params] ) keyword[return] identifier[response]
def issues(self, from_date=DEFAULT_DATETIME, offset=None, max_issues=MAX_ISSUES): """Get the information of a list of issues. :param from_date: retrieve issues that where updated from that date; dates are converted to UTC :param offset: starting position for the search :param max_issues: maximum number of issues to reteurn per query """ resource = self.RISSUES + self.CJSON ts = datetime_to_utc(from_date) ts = ts.strftime('%Y-%m-%dT%H:%M:%SZ') # By default, Redmine returns open issues only. # Parameter 'status_id' is set to get all the statuses. params = {self.PSTATUS_ID: '*', self.PSORT: self.PUPDATED_ON, self.PUPDATED_ON: '>=' + ts, self.PLIMIT: max_issues} if offset is not None: params[self.POFFSET] = offset # depends on [control=['if'], data=['offset']] response = self._call(resource, params) return response
def func_call(self, t):
    """
    For function calls e.g. TEXT (tostring([area],"%.2f"))
    """
    func, params = t
    call_name = func.value
    # Rewrite the token value as a parenthesised call expression.
    func.value = "(%s(%s))" % (call_name, params)
    return func
def function[func_call, parameter[self, t]]: constant[ For function calls e.g. TEXT (tostring([area],"%.2f")) ] <ast.Tuple object at 0x7da2041da830> assign[=] name[t] variable[func_name] assign[=] name[func].value name[func].value assign[=] call[constant[({}({}))].format, parameter[name[func_name], name[params]]] return[name[func]]
keyword[def] identifier[func_call] ( identifier[self] , identifier[t] ): literal[string] identifier[func] , identifier[params] = identifier[t] identifier[func_name] = identifier[func] . identifier[value] identifier[func] . identifier[value] = literal[string] . identifier[format] ( identifier[func_name] , identifier[params] ) keyword[return] identifier[func]
def func_call(self, t): """ For function calls e.g. TEXT (tostring([area],"%.2f")) """ (func, params) = t func_name = func.value func.value = '({}({}))'.format(func_name, params) return func
def ReadHuntOutputPluginLogEntries(self, hunt_id, output_plugin_id, offset,
                                   count, with_type=None):
  """Reads hunt output plugin log entries."""
  entries = []
  for flow_obj in self._GetHuntFlows(hunt_id):
    # Collect every matching log entry of each of the hunt's flows,
    # rewrapping them so the hunt id is carried along.
    flow_entries = self.ReadFlowOutputPluginLogEntries(
        flow_obj.client_id,
        flow_obj.flow_id,
        output_plugin_id,
        0,
        sys.maxsize,
        with_type=with_type)
    for entry in flow_entries:
      entries.append(
          rdf_flow_objects.FlowOutputPluginLogEntry(
              hunt_id=hunt_id,
              client_id=flow_obj.client_id,
              flow_id=flow_obj.flow_id,
              output_plugin_id=output_plugin_id,
              log_entry_type=entry.log_entry_type,
              timestamp=entry.timestamp,
              message=entry.message))
  # Order chronologically across all flows before applying pagination.
  entries.sort(key=lambda e: e.timestamp)
  return entries[offset:offset + count]
def function[ReadHuntOutputPluginLogEntries, parameter[self, hunt_id, output_plugin_id, offset, count, with_type]]: constant[Reads hunt output plugin log entries.] variable[all_entries] assign[=] list[[]] for taget[name[flow_obj]] in starred[call[name[self]._GetHuntFlows, parameter[name[hunt_id]]]] begin[:] for taget[name[entry]] in starred[call[name[self].ReadFlowOutputPluginLogEntries, parameter[name[flow_obj].client_id, name[flow_obj].flow_id, name[output_plugin_id], constant[0], name[sys].maxsize]]] begin[:] call[name[all_entries].append, parameter[call[name[rdf_flow_objects].FlowOutputPluginLogEntry, parameter[]]]] return[call[call[name[sorted], parameter[name[all_entries]]]][<ast.Slice object at 0x7da1b1b0fd90>]]
keyword[def] identifier[ReadHuntOutputPluginLogEntries] ( identifier[self] , identifier[hunt_id] , identifier[output_plugin_id] , identifier[offset] , identifier[count] , identifier[with_type] = keyword[None] ): literal[string] identifier[all_entries] =[] keyword[for] identifier[flow_obj] keyword[in] identifier[self] . identifier[_GetHuntFlows] ( identifier[hunt_id] ): keyword[for] identifier[entry] keyword[in] identifier[self] . identifier[ReadFlowOutputPluginLogEntries] ( identifier[flow_obj] . identifier[client_id] , identifier[flow_obj] . identifier[flow_id] , identifier[output_plugin_id] , literal[int] , identifier[sys] . identifier[maxsize] , identifier[with_type] = identifier[with_type] ): identifier[all_entries] . identifier[append] ( identifier[rdf_flow_objects] . identifier[FlowOutputPluginLogEntry] ( identifier[hunt_id] = identifier[hunt_id] , identifier[client_id] = identifier[flow_obj] . identifier[client_id] , identifier[flow_id] = identifier[flow_obj] . identifier[flow_id] , identifier[output_plugin_id] = identifier[output_plugin_id] , identifier[log_entry_type] = identifier[entry] . identifier[log_entry_type] , identifier[timestamp] = identifier[entry] . identifier[timestamp] , identifier[message] = identifier[entry] . identifier[message] )) keyword[return] identifier[sorted] ( identifier[all_entries] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[timestamp] )[ identifier[offset] : identifier[offset] + identifier[count] ]
def ReadHuntOutputPluginLogEntries(self, hunt_id, output_plugin_id, offset, count, with_type=None): """Reads hunt output plugin log entries.""" all_entries = [] for flow_obj in self._GetHuntFlows(hunt_id): for entry in self.ReadFlowOutputPluginLogEntries(flow_obj.client_id, flow_obj.flow_id, output_plugin_id, 0, sys.maxsize, with_type=with_type): all_entries.append(rdf_flow_objects.FlowOutputPluginLogEntry(hunt_id=hunt_id, client_id=flow_obj.client_id, flow_id=flow_obj.flow_id, output_plugin_id=output_plugin_id, log_entry_type=entry.log_entry_type, timestamp=entry.timestamp, message=entry.message)) # depends on [control=['for'], data=['entry']] # depends on [control=['for'], data=['flow_obj']] return sorted(all_entries, key=lambda x: x.timestamp)[offset:offset + count]
def set_node_attr(self, name, attr, value):
    '''
    API: set_node_attr(self, name, attr)
    Description:
    Sets attr attribute of node named name to value.
    Input:
    name: Name of node.
    attr: Attribute of node to set.
    Pre:
    Graph should have this node.
    Post:
    Node attribute will be updated.
    '''
    node = self.get_node(name)
    node.set_attr(attr, value)
def function[set_node_attr, parameter[self, name, attr, value]]: constant[ API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated. ] call[call[name[self].get_node, parameter[name[name]]].set_attr, parameter[name[attr], name[value]]]
keyword[def] identifier[set_node_attr] ( identifier[self] , identifier[name] , identifier[attr] , identifier[value] ): literal[string] identifier[self] . identifier[get_node] ( identifier[name] ). identifier[set_attr] ( identifier[attr] , identifier[value] )
def set_node_attr(self, name, attr, value): """ API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated. """ self.get_node(name).set_attr(attr, value)
def _add(self, codeobj):
    """Add a child (variable) to this object."""
    # Guard: this container only tracks CodeVariable children.
    assert isinstance(codeobj, CodeVariable)
    self.variables.append(codeobj)
def function[_add, parameter[self, codeobj]]: constant[Add a child (variable) to this object.] assert[call[name[isinstance], parameter[name[codeobj], name[CodeVariable]]]] call[name[self].variables.append, parameter[name[codeobj]]]
keyword[def] identifier[_add] ( identifier[self] , identifier[codeobj] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[codeobj] , identifier[CodeVariable] ) identifier[self] . identifier[variables] . identifier[append] ( identifier[codeobj] )
def _add(self, codeobj): """Add a child (variable) to this object.""" assert isinstance(codeobj, CodeVariable) self.variables.append(codeobj)
def export_obj(vertices, triangles, filename):
    """
    Exports a mesh in the (.obj) format.
    """
    # Vertex records first, then face records; OBJ faces use
    # 1-based vertex indices, hence the +1 shift.
    records = ["v {} {} {}\n".format(*vertex) for vertex in vertices]
    records += ["f {} {} {}\n".format(*(tri + 1)) for tri in triangles]
    with open(filename, 'w') as out:
        out.writelines(records)
def function[export_obj, parameter[vertices, triangles, filename]]: constant[ Exports a mesh in the (.obj) format. ] with call[name[open], parameter[name[filename], constant[w]]] begin[:] for taget[name[v]] in starred[name[vertices]] begin[:] call[name[fh].write, parameter[call[constant[v {} {} {} ].format, parameter[<ast.Starred object at 0x7da1b28729e0>]]]] for taget[name[f]] in starred[name[triangles]] begin[:] call[name[fh].write, parameter[call[constant[f {} {} {} ].format, parameter[<ast.Starred object at 0x7da1b28734f0>]]]]
keyword[def] identifier[export_obj] ( identifier[vertices] , identifier[triangles] , identifier[filename] ): literal[string] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fh] : keyword[for] identifier[v] keyword[in] identifier[vertices] : identifier[fh] . identifier[write] ( literal[string] . identifier[format] (* identifier[v] )) keyword[for] identifier[f] keyword[in] identifier[triangles] : identifier[fh] . identifier[write] ( literal[string] . identifier[format] (*( identifier[f] + literal[int] )))
def export_obj(vertices, triangles, filename): """ Exports a mesh in the (.obj) format. """ with open(filename, 'w') as fh: for v in vertices: fh.write('v {} {} {}\n'.format(*v)) # depends on [control=['for'], data=['v']] for f in triangles: fh.write('f {} {} {}\n'.format(*f + 1)) # depends on [control=['for'], data=['f']] # depends on [control=['with'], data=['fh']]
def restore(self, remotepath):
    '''
    Usage: restore <remotepath> - restore a file from the recycle bin
      remotepath - the remote path to restore
    '''
    pcs_path = get_pcs_path(remotepath)
    # listrecycle returns up to 1000 entries by default; more than
    # that sounds a bit crazy.
    pars = {'method': 'listrecycle'}
    self.pd("Searching for fs_id to restore")
    return self.__get(pcsurl + 'file', pars, self.__restore_search_act, pcs_path)
def function[restore, parameter[self, remotepath]]: constant[ Usage: restore <remotepath> - restore a file from the recycle bin remotepath - the remote path to restore ] variable[rpath] assign[=] call[name[get_pcs_path], parameter[name[remotepath]]] variable[pars] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d44a60>], [<ast.Constant object at 0x7da1b1d451b0>]] call[name[self].pd, parameter[constant[Searching for fs_id to restore]]] return[call[name[self].__get, parameter[binary_operation[name[pcsurl] + constant[file]], name[pars], name[self].__restore_search_act, name[rpath]]]]
keyword[def] identifier[restore] ( identifier[self] , identifier[remotepath] ): literal[string] identifier[rpath] = identifier[get_pcs_path] ( identifier[remotepath] ) identifier[pars] ={ literal[string] : literal[string] } identifier[self] . identifier[pd] ( literal[string] ) keyword[return] identifier[self] . identifier[__get] ( identifier[pcsurl] + literal[string] , identifier[pars] , identifier[self] . identifier[__restore_search_act] , identifier[rpath] )
def restore(self, remotepath): """ Usage: restore <remotepath> - restore a file from the recycle bin remotepath - the remote path to restore """ rpath = get_pcs_path(remotepath) # by default, only 1000 items, more than that sounds a bit crazy pars = {'method': 'listrecycle'} self.pd('Searching for fs_id to restore') return self.__get(pcsurl + 'file', pars, self.__restore_search_act, rpath)