code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def handle(self, *args, **options): """gets data from WordPress site""" # TODO: refactor these with .get if 'username' in options: self.username = options['username'] else: self.username = None if 'password' in options: self.password = options['password'] else: self.password = None self.xml_path = options.get('xml') self.url = options.get('url') try: blog_index = BlogIndexPage.objects.get( title__icontains=options['blog_index']) except BlogIndexPage.DoesNotExist: raise CommandError("Incorrect blog index title - have you created it?") if self.url == "just_testing": with open('test-data.json') as test_json: posts = json.load(test_json) elif self.xml_path: try: import lxml from blog.wp_xml_parser import XML_parser except ImportError as e: print("You must have lxml installed to run xml imports." " Run `pip install lxml`.") raise e self.xml_parser = XML_parser(self.xml_path) posts = self.xml_parser.get_posts_data() else: posts = self.get_posts_data(self.url) self.should_import_comments = options.get('import_comments') self.create_blog_pages(posts, blog_index)
def function[handle, parameter[self]]: constant[gets data from WordPress site] if compare[constant[username] in name[options]] begin[:] name[self].username assign[=] call[name[options]][constant[username]] if compare[constant[password] in name[options]] begin[:] name[self].password assign[=] call[name[options]][constant[password]] name[self].xml_path assign[=] call[name[options].get, parameter[constant[xml]]] name[self].url assign[=] call[name[options].get, parameter[constant[url]]] <ast.Try object at 0x7da1b19d8280> if compare[name[self].url equal[==] constant[just_testing]] begin[:] with call[name[open], parameter[constant[test-data.json]]] begin[:] variable[posts] assign[=] call[name[json].load, parameter[name[test_json]]] name[self].should_import_comments assign[=] call[name[options].get, parameter[constant[import_comments]]] call[name[self].create_blog_pages, parameter[name[posts], name[blog_index]]]
keyword[def] identifier[handle] ( identifier[self] ,* identifier[args] ,** identifier[options] ): literal[string] keyword[if] literal[string] keyword[in] identifier[options] : identifier[self] . identifier[username] = identifier[options] [ literal[string] ] keyword[else] : identifier[self] . identifier[username] = keyword[None] keyword[if] literal[string] keyword[in] identifier[options] : identifier[self] . identifier[password] = identifier[options] [ literal[string] ] keyword[else] : identifier[self] . identifier[password] = keyword[None] identifier[self] . identifier[xml_path] = identifier[options] . identifier[get] ( literal[string] ) identifier[self] . identifier[url] = identifier[options] . identifier[get] ( literal[string] ) keyword[try] : identifier[blog_index] = identifier[BlogIndexPage] . identifier[objects] . identifier[get] ( identifier[title__icontains] = identifier[options] [ literal[string] ]) keyword[except] identifier[BlogIndexPage] . identifier[DoesNotExist] : keyword[raise] identifier[CommandError] ( literal[string] ) keyword[if] identifier[self] . identifier[url] == literal[string] : keyword[with] identifier[open] ( literal[string] ) keyword[as] identifier[test_json] : identifier[posts] = identifier[json] . identifier[load] ( identifier[test_json] ) keyword[elif] identifier[self] . identifier[xml_path] : keyword[try] : keyword[import] identifier[lxml] keyword[from] identifier[blog] . identifier[wp_xml_parser] keyword[import] identifier[XML_parser] keyword[except] identifier[ImportError] keyword[as] identifier[e] : identifier[print] ( literal[string] literal[string] ) keyword[raise] identifier[e] identifier[self] . identifier[xml_parser] = identifier[XML_parser] ( identifier[self] . identifier[xml_path] ) identifier[posts] = identifier[self] . identifier[xml_parser] . identifier[get_posts_data] () keyword[else] : identifier[posts] = identifier[self] . identifier[get_posts_data] ( identifier[self] . identifier[url] ) identifier[self] . 
identifier[should_import_comments] = identifier[options] . identifier[get] ( literal[string] ) identifier[self] . identifier[create_blog_pages] ( identifier[posts] , identifier[blog_index] )
def handle(self, *args, **options): """gets data from WordPress site""" # TODO: refactor these with .get if 'username' in options: self.username = options['username'] # depends on [control=['if'], data=['options']] else: self.username = None if 'password' in options: self.password = options['password'] # depends on [control=['if'], data=['options']] else: self.password = None self.xml_path = options.get('xml') self.url = options.get('url') try: blog_index = BlogIndexPage.objects.get(title__icontains=options['blog_index']) # depends on [control=['try'], data=[]] except BlogIndexPage.DoesNotExist: raise CommandError('Incorrect blog index title - have you created it?') # depends on [control=['except'], data=[]] if self.url == 'just_testing': with open('test-data.json') as test_json: posts = json.load(test_json) # depends on [control=['with'], data=['test_json']] # depends on [control=['if'], data=[]] elif self.xml_path: try: import lxml from blog.wp_xml_parser import XML_parser # depends on [control=['try'], data=[]] except ImportError as e: print('You must have lxml installed to run xml imports. Run `pip install lxml`.') raise e # depends on [control=['except'], data=['e']] self.xml_parser = XML_parser(self.xml_path) posts = self.xml_parser.get_posts_data() # depends on [control=['if'], data=[]] else: posts = self.get_posts_data(self.url) self.should_import_comments = options.get('import_comments') self.create_blog_pages(posts, blog_index)
def get_free_shipping_by_id(cls, free_shipping_id, **kwargs): """Find FreeShipping Return single instance of FreeShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_by_id(free_shipping_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to return (required) :return: FreeShipping If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) else: (data) = cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) return data
def function[get_free_shipping_by_id, parameter[cls, free_shipping_id]]: constant[Find FreeShipping Return single instance of FreeShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_by_id(free_shipping_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to return (required) :return: FreeShipping If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async]]] begin[:] return[call[name[cls]._get_free_shipping_by_id_with_http_info, parameter[name[free_shipping_id]]]]
keyword[def] identifier[get_free_shipping_by_id] ( identifier[cls] , identifier[free_shipping_id] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[cls] . identifier[_get_free_shipping_by_id_with_http_info] ( identifier[free_shipping_id] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[cls] . identifier[_get_free_shipping_by_id_with_http_info] ( identifier[free_shipping_id] ,** identifier[kwargs] ) keyword[return] identifier[data]
def get_free_shipping_by_id(cls, free_shipping_id, **kwargs): """Find FreeShipping Return single instance of FreeShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_by_id(free_shipping_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to return (required) :return: FreeShipping If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) # depends on [control=['if'], data=[]] else: data = cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) return data
def experiment_property(prop): """Get a property of the experiment by name.""" exp = experiment(session) p = getattr(exp, prop) return success_response(field=prop, data=p, request_type=prop)
def function[experiment_property, parameter[prop]]: constant[Get a property of the experiment by name.] variable[exp] assign[=] call[name[experiment], parameter[name[session]]] variable[p] assign[=] call[name[getattr], parameter[name[exp], name[prop]]] return[call[name[success_response], parameter[]]]
keyword[def] identifier[experiment_property] ( identifier[prop] ): literal[string] identifier[exp] = identifier[experiment] ( identifier[session] ) identifier[p] = identifier[getattr] ( identifier[exp] , identifier[prop] ) keyword[return] identifier[success_response] ( identifier[field] = identifier[prop] , identifier[data] = identifier[p] , identifier[request_type] = identifier[prop] )
def experiment_property(prop): """Get a property of the experiment by name.""" exp = experiment(session) p = getattr(exp, prop) return success_response(field=prop, data=p, request_type=prop)
def connect_by_mapping(self, si, vm, mapping, default_network, reserved_networks, logger, promiscuous_mode): """ gets the mapping to the vnics and connects it to the vm :param default_network: :param si: ServiceInstance :param vm: vim.VirtualMachine :param mapping: [VmNetworkMapping] :param reserved_networks: :param logger: :param promiscuous_mode <str> 'True' or 'False' turn on/off promiscuous mode for the port group """ request_mapping = [] logger.debug( 'about to map to the vm: {0}, the following networks'.format(vm.name if vm.name else vm.config.uuid)) for network_map in mapping: network = self.dv_port_group_creator.get_or_create_network(si, vm, network_map.dv_port_name, network_map.dv_switch_name, network_map.dv_switch_path, network_map.vlan_id, network_map.vlan_spec, logger, promiscuous_mode) request_mapping.append(ConnectRequest(network_map.vnic_name, network)) logger.debug(str(request_mapping)) return self.virtual_machine_port_group_configurer.connect_vnic_to_networks(vm, request_mapping, default_network, reserved_networks, logger)
def function[connect_by_mapping, parameter[self, si, vm, mapping, default_network, reserved_networks, logger, promiscuous_mode]]: constant[ gets the mapping to the vnics and connects it to the vm :param default_network: :param si: ServiceInstance :param vm: vim.VirtualMachine :param mapping: [VmNetworkMapping] :param reserved_networks: :param logger: :param promiscuous_mode <str> 'True' or 'False' turn on/off promiscuous mode for the port group ] variable[request_mapping] assign[=] list[[]] call[name[logger].debug, parameter[call[constant[about to map to the vm: {0}, the following networks].format, parameter[<ast.IfExp object at 0x7da18eb551e0>]]]] for taget[name[network_map]] in starred[name[mapping]] begin[:] variable[network] assign[=] call[name[self].dv_port_group_creator.get_or_create_network, parameter[name[si], name[vm], name[network_map].dv_port_name, name[network_map].dv_switch_name, name[network_map].dv_switch_path, name[network_map].vlan_id, name[network_map].vlan_spec, name[logger], name[promiscuous_mode]]] call[name[request_mapping].append, parameter[call[name[ConnectRequest], parameter[name[network_map].vnic_name, name[network]]]]] call[name[logger].debug, parameter[call[name[str], parameter[name[request_mapping]]]]] return[call[name[self].virtual_machine_port_group_configurer.connect_vnic_to_networks, parameter[name[vm], name[request_mapping], name[default_network], name[reserved_networks], name[logger]]]]
keyword[def] identifier[connect_by_mapping] ( identifier[self] , identifier[si] , identifier[vm] , identifier[mapping] , identifier[default_network] , identifier[reserved_networks] , identifier[logger] , identifier[promiscuous_mode] ): literal[string] identifier[request_mapping] =[] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[vm] . identifier[name] keyword[if] identifier[vm] . identifier[name] keyword[else] identifier[vm] . identifier[config] . identifier[uuid] )) keyword[for] identifier[network_map] keyword[in] identifier[mapping] : identifier[network] = identifier[self] . identifier[dv_port_group_creator] . identifier[get_or_create_network] ( identifier[si] , identifier[vm] , identifier[network_map] . identifier[dv_port_name] , identifier[network_map] . identifier[dv_switch_name] , identifier[network_map] . identifier[dv_switch_path] , identifier[network_map] . identifier[vlan_id] , identifier[network_map] . identifier[vlan_spec] , identifier[logger] , identifier[promiscuous_mode] ) identifier[request_mapping] . identifier[append] ( identifier[ConnectRequest] ( identifier[network_map] . identifier[vnic_name] , identifier[network] )) identifier[logger] . identifier[debug] ( identifier[str] ( identifier[request_mapping] )) keyword[return] identifier[self] . identifier[virtual_machine_port_group_configurer] . identifier[connect_vnic_to_networks] ( identifier[vm] , identifier[request_mapping] , identifier[default_network] , identifier[reserved_networks] , identifier[logger] )
def connect_by_mapping(self, si, vm, mapping, default_network, reserved_networks, logger, promiscuous_mode): """ gets the mapping to the vnics and connects it to the vm :param default_network: :param si: ServiceInstance :param vm: vim.VirtualMachine :param mapping: [VmNetworkMapping] :param reserved_networks: :param logger: :param promiscuous_mode <str> 'True' or 'False' turn on/off promiscuous mode for the port group """ request_mapping = [] logger.debug('about to map to the vm: {0}, the following networks'.format(vm.name if vm.name else vm.config.uuid)) for network_map in mapping: network = self.dv_port_group_creator.get_or_create_network(si, vm, network_map.dv_port_name, network_map.dv_switch_name, network_map.dv_switch_path, network_map.vlan_id, network_map.vlan_spec, logger, promiscuous_mode) request_mapping.append(ConnectRequest(network_map.vnic_name, network)) # depends on [control=['for'], data=['network_map']] logger.debug(str(request_mapping)) return self.virtual_machine_port_group_configurer.connect_vnic_to_networks(vm, request_mapping, default_network, reserved_networks, logger)
def _remove_end_gaps(sequence): '''Removes double-stranded gaps from ends of the sequence. :returns: The current sequence with terminal double-strand gaps ('-') removed. :rtype: coral.DNA ''' # Count terminal blank sequences def count_end_gaps(seq): gap = coral.DNA('-') count = 0 for base in seq: if base == gap: count += 1 else: break return count top_left = count_end_gaps(sequence.top) top_right = count_end_gaps(reversed(sequence.top)) bottom_left = count_end_gaps(reversed(sequence.bottom)) bottom_right = count_end_gaps(sequence.bottom) # Trim sequence left_index = min(top_left, bottom_left) right_index = len(sequence) - min(top_right, bottom_right) return sequence[left_index:right_index]
def function[_remove_end_gaps, parameter[sequence]]: constant[Removes double-stranded gaps from ends of the sequence. :returns: The current sequence with terminal double-strand gaps ('-') removed. :rtype: coral.DNA ] def function[count_end_gaps, parameter[seq]]: variable[gap] assign[=] call[name[coral].DNA, parameter[constant[-]]] variable[count] assign[=] constant[0] for taget[name[base]] in starred[name[seq]] begin[:] if compare[name[base] equal[==] name[gap]] begin[:] <ast.AugAssign object at 0x7da1b057ae60> return[name[count]] variable[top_left] assign[=] call[name[count_end_gaps], parameter[name[sequence].top]] variable[top_right] assign[=] call[name[count_end_gaps], parameter[call[name[reversed], parameter[name[sequence].top]]]] variable[bottom_left] assign[=] call[name[count_end_gaps], parameter[call[name[reversed], parameter[name[sequence].bottom]]]] variable[bottom_right] assign[=] call[name[count_end_gaps], parameter[name[sequence].bottom]] variable[left_index] assign[=] call[name[min], parameter[name[top_left], name[bottom_left]]] variable[right_index] assign[=] binary_operation[call[name[len], parameter[name[sequence]]] - call[name[min], parameter[name[top_right], name[bottom_right]]]] return[call[name[sequence]][<ast.Slice object at 0x7da18c4cd810>]]
keyword[def] identifier[_remove_end_gaps] ( identifier[sequence] ): literal[string] keyword[def] identifier[count_end_gaps] ( identifier[seq] ): identifier[gap] = identifier[coral] . identifier[DNA] ( literal[string] ) identifier[count] = literal[int] keyword[for] identifier[base] keyword[in] identifier[seq] : keyword[if] identifier[base] == identifier[gap] : identifier[count] += literal[int] keyword[else] : keyword[break] keyword[return] identifier[count] identifier[top_left] = identifier[count_end_gaps] ( identifier[sequence] . identifier[top] ) identifier[top_right] = identifier[count_end_gaps] ( identifier[reversed] ( identifier[sequence] . identifier[top] )) identifier[bottom_left] = identifier[count_end_gaps] ( identifier[reversed] ( identifier[sequence] . identifier[bottom] )) identifier[bottom_right] = identifier[count_end_gaps] ( identifier[sequence] . identifier[bottom] ) identifier[left_index] = identifier[min] ( identifier[top_left] , identifier[bottom_left] ) identifier[right_index] = identifier[len] ( identifier[sequence] )- identifier[min] ( identifier[top_right] , identifier[bottom_right] ) keyword[return] identifier[sequence] [ identifier[left_index] : identifier[right_index] ]
def _remove_end_gaps(sequence): """Removes double-stranded gaps from ends of the sequence. :returns: The current sequence with terminal double-strand gaps ('-') removed. :rtype: coral.DNA """ # Count terminal blank sequences def count_end_gaps(seq): gap = coral.DNA('-') count = 0 for base in seq: if base == gap: count += 1 # depends on [control=['if'], data=[]] else: break # depends on [control=['for'], data=['base']] return count top_left = count_end_gaps(sequence.top) top_right = count_end_gaps(reversed(sequence.top)) bottom_left = count_end_gaps(reversed(sequence.bottom)) bottom_right = count_end_gaps(sequence.bottom) # Trim sequence left_index = min(top_left, bottom_left) right_index = len(sequence) - min(top_right, bottom_right) return sequence[left_index:right_index]
def add_group_members(self, members): """Add a new group member to the groups list :param members: member name :type members: str :return: None """ if not isinstance(members, list): members = [members] if not getattr(self, 'group_members', None): self.group_members = members else: self.group_members.extend(members)
def function[add_group_members, parameter[self, members]]: constant[Add a new group member to the groups list :param members: member name :type members: str :return: None ] if <ast.UnaryOp object at 0x7da18f58c820> begin[:] variable[members] assign[=] list[[<ast.Name object at 0x7da18f58e050>]] if <ast.UnaryOp object at 0x7da18f58d4b0> begin[:] name[self].group_members assign[=] name[members]
keyword[def] identifier[add_group_members] ( identifier[self] , identifier[members] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[members] , identifier[list] ): identifier[members] =[ identifier[members] ] keyword[if] keyword[not] identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ): identifier[self] . identifier[group_members] = identifier[members] keyword[else] : identifier[self] . identifier[group_members] . identifier[extend] ( identifier[members] )
def add_group_members(self, members): """Add a new group member to the groups list :param members: member name :type members: str :return: None """ if not isinstance(members, list): members = [members] # depends on [control=['if'], data=[]] if not getattr(self, 'group_members', None): self.group_members = members # depends on [control=['if'], data=[]] else: self.group_members.extend(members)
def create_push_handlers(): """Create a handler for upload per package format.""" # pylint: disable=fixme # HACK: hacky territory - Dynamically generate a handler for each of the # package formats, until we have slightly more clever 'guess type' # handling. :-) handlers = create_push_handlers.handlers = {} context = create_push_handlers.context = get_package_formats() for key, parameters in six.iteritems(context): kwargs = parameters.copy() # Remove standard arguments kwargs.pop("package_file") if "distribution" in parameters: has_distribution_param = True kwargs.pop("distribution") else: has_distribution_param = False has_additional_params = len(kwargs) > 0 help_text = """ Push/upload a new %(type)s package upstream. """ % { "type": key.capitalize() } if has_additional_params: help_text += """ PACKAGE_FILE: The main file to create the package from. """ else: help_text += """ PACKAGE_FILE: Any number of files to create packages from. Each file will result in a separate package. """ if has_distribution_param: target_metavar = "OWNER/REPO/DISTRO/RELEASE" target_callback = validators.validate_owner_repo_distro help_text += """ OWNER/REPO/DISTRO/RELEASE: Specify the OWNER namespace (i.e. user or org), the REPO name where the package file will be uploaded to, and the DISTRO and RELEASE the package is for. All separated by a slash. Example: 'your-org/awesome-repo/ubuntu/xenial'. """ else: target_metavar = "OWNER/REPO" target_callback = validators.validate_owner_repo help_text += """ OWNER/REPO: Specify the OWNER namespace (i.e. user or org), and the REPO name where the package file will be uploaded to. All separated by a slash. Example: 'your-org/awesome-repo'. 
""" @push.command(name=key, help=help_text) @decorators.common_api_auth_options @decorators.common_cli_config_options @decorators.common_cli_output_options @decorators.common_package_action_options @decorators.initialise_api @click.argument("owner_repo", metavar=target_metavar, callback=target_callback) @click.argument( "package_file", nargs=1 if has_additional_params else -1, type=ExpandPath( dir_okay=False, exists=True, writable=False, resolve_path=True ), ) @click.option( "-n", "--dry-run", default=False, is_flag=True, help="Execute in dry run mode (don't upload anything.)", ) @click.option( "-n", "--dry-run", default=False, is_flag=True, help="Execute in dry run mode (don't upload anything.)", ) @click.pass_context def push_handler(ctx, *args, **kwargs): """Handle upload for a specific package format.""" parameters = context.get(ctx.info_name) kwargs["package_type"] = ctx.info_name owner_repo = kwargs.pop("owner_repo") if "distribution" in parameters: kwargs["distribution"] = "/".join(owner_repo[2:]) owner_repo = owner_repo[0:2] kwargs["owner_repo"] = owner_repo package_files = kwargs.pop("package_file") if not isinstance(package_files, tuple): package_files = (package_files,) for package_file in package_files: kwargs["package_file"] = package_file try: click.echo() upload_files_and_create_package(ctx, *args, **kwargs) except ApiException: click.secho("Skipping error and moving on.", fg="yellow") click.echo() # Add any additional arguments for k, info in six.iteritems(kwargs): option_kwargs = {} option_name_fmt = "--%(key)s" if k.endswith("_file"): # Treat parameters that end with _file as uploadable filepaths. 
option_kwargs["type"] = ExpandPath( dir_okay=False, exists=True, writable=False, resolve_path=True ) elif info["type"] == "bool": option_name_fmt = "--%(key)s/--no-%(key)s" option_kwargs["is_flag"] = True else: option_kwargs["type"] = str option_name = option_name_fmt % {"key": k.replace("_", "-")} decorator = click.option( option_name, required=info["required"], help=info["help"], **option_kwargs ) push_handler = decorator(push_handler) handlers[key] = push_handler
def function[create_push_handlers, parameter[]]: constant[Create a handler for upload per package format.] variable[handlers] assign[=] dictionary[[], []] variable[context] assign[=] call[name[get_package_formats], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b197c130>, <ast.Name object at 0x7da1b197c160>]]] in starred[call[name[six].iteritems, parameter[name[context]]]] begin[:] variable[kwargs] assign[=] call[name[parameters].copy, parameter[]] call[name[kwargs].pop, parameter[constant[package_file]]] if compare[constant[distribution] in name[parameters]] begin[:] variable[has_distribution_param] assign[=] constant[True] call[name[kwargs].pop, parameter[constant[distribution]]] variable[has_additional_params] assign[=] compare[call[name[len], parameter[name[kwargs]]] greater[>] constant[0]] variable[help_text] assign[=] binary_operation[constant[ Push/upload a new %(type)s package upstream. ] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b197f100>], [<ast.Call object at 0x7da1b197f0d0>]]] if name[has_additional_params] begin[:] <ast.AugAssign object at 0x7da1b197efe0> if name[has_distribution_param] begin[:] variable[target_metavar] assign[=] constant[OWNER/REPO/DISTRO/RELEASE] variable[target_callback] assign[=] name[validators].validate_owner_repo_distro <ast.AugAssign object at 0x7da1b197f700> def function[push_handler, parameter[ctx]]: constant[Handle upload for a specific package format.] 
variable[parameters] assign[=] call[name[context].get, parameter[name[ctx].info_name]] call[name[kwargs]][constant[package_type]] assign[=] name[ctx].info_name variable[owner_repo] assign[=] call[name[kwargs].pop, parameter[constant[owner_repo]]] if compare[constant[distribution] in name[parameters]] begin[:] call[name[kwargs]][constant[distribution]] assign[=] call[constant[/].join, parameter[call[name[owner_repo]][<ast.Slice object at 0x7da1b197ea10>]]] variable[owner_repo] assign[=] call[name[owner_repo]][<ast.Slice object at 0x7da1b197e8f0>] call[name[kwargs]][constant[owner_repo]] assign[=] name[owner_repo] variable[package_files] assign[=] call[name[kwargs].pop, parameter[constant[package_file]]] if <ast.UnaryOp object at 0x7da1b197e620> begin[:] variable[package_files] assign[=] tuple[[<ast.Name object at 0x7da1b197e4a0>]] for taget[name[package_file]] in starred[name[package_files]] begin[:] call[name[kwargs]][constant[package_file]] assign[=] name[package_file] <ast.Try object at 0x7da1b197e2f0> call[name[click].echo, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b1930220>, <ast.Name object at 0x7da1b1933a90>]]] in starred[call[name[six].iteritems, parameter[name[kwargs]]]] begin[:] variable[option_kwargs] assign[=] dictionary[[], []] variable[option_name_fmt] assign[=] constant[--%(key)s] if call[name[k].endswith, parameter[constant[_file]]] begin[:] call[name[option_kwargs]][constant[type]] assign[=] call[name[ExpandPath], parameter[]] variable[option_name] assign[=] binary_operation[name[option_name_fmt] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b19332b0>], [<ast.Call object at 0x7da1b19323e0>]]] variable[decorator] assign[=] call[name[click].option, parameter[name[option_name]]] variable[push_handler] assign[=] call[name[decorator], parameter[name[push_handler]]] call[name[handlers]][name[key]] assign[=] name[push_handler]
keyword[def] identifier[create_push_handlers] (): literal[string] identifier[handlers] = identifier[create_push_handlers] . identifier[handlers] ={} identifier[context] = identifier[create_push_handlers] . identifier[context] = identifier[get_package_formats] () keyword[for] identifier[key] , identifier[parameters] keyword[in] identifier[six] . identifier[iteritems] ( identifier[context] ): identifier[kwargs] = identifier[parameters] . identifier[copy] () identifier[kwargs] . identifier[pop] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[parameters] : identifier[has_distribution_param] = keyword[True] identifier[kwargs] . identifier[pop] ( literal[string] ) keyword[else] : identifier[has_distribution_param] = keyword[False] identifier[has_additional_params] = identifier[len] ( identifier[kwargs] )> literal[int] identifier[help_text] = literal[string] %{ literal[string] : identifier[key] . identifier[capitalize] () } keyword[if] identifier[has_additional_params] : identifier[help_text] += literal[string] keyword[else] : identifier[help_text] += literal[string] keyword[if] identifier[has_distribution_param] : identifier[target_metavar] = literal[string] identifier[target_callback] = identifier[validators] . identifier[validate_owner_repo_distro] identifier[help_text] += literal[string] keyword[else] : identifier[target_metavar] = literal[string] identifier[target_callback] = identifier[validators] . identifier[validate_owner_repo] identifier[help_text] += literal[string] @ identifier[push] . identifier[command] ( identifier[name] = identifier[key] , identifier[help] = identifier[help_text] ) @ identifier[decorators] . identifier[common_api_auth_options] @ identifier[decorators] . identifier[common_cli_config_options] @ identifier[decorators] . identifier[common_cli_output_options] @ identifier[decorators] . identifier[common_package_action_options] @ identifier[decorators] . identifier[initialise_api] @ identifier[click] . 
identifier[argument] ( literal[string] , identifier[metavar] = identifier[target_metavar] , identifier[callback] = identifier[target_callback] ) @ identifier[click] . identifier[argument] ( literal[string] , identifier[nargs] = literal[int] keyword[if] identifier[has_additional_params] keyword[else] - literal[int] , identifier[type] = identifier[ExpandPath] ( identifier[dir_okay] = keyword[False] , identifier[exists] = keyword[True] , identifier[writable] = keyword[False] , identifier[resolve_path] = keyword[True] ), ) @ identifier[click] . identifier[option] ( literal[string] , literal[string] , identifier[default] = keyword[False] , identifier[is_flag] = keyword[True] , identifier[help] = literal[string] , ) @ identifier[click] . identifier[option] ( literal[string] , literal[string] , identifier[default] = keyword[False] , identifier[is_flag] = keyword[True] , identifier[help] = literal[string] , ) @ identifier[click] . identifier[pass_context] keyword[def] identifier[push_handler] ( identifier[ctx] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[parameters] = identifier[context] . identifier[get] ( identifier[ctx] . identifier[info_name] ) identifier[kwargs] [ literal[string] ]= identifier[ctx] . identifier[info_name] identifier[owner_repo] = identifier[kwargs] . identifier[pop] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[parameters] : identifier[kwargs] [ literal[string] ]= literal[string] . identifier[join] ( identifier[owner_repo] [ literal[int] :]) identifier[owner_repo] = identifier[owner_repo] [ literal[int] : literal[int] ] identifier[kwargs] [ literal[string] ]= identifier[owner_repo] identifier[package_files] = identifier[kwargs] . 
identifier[pop] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[package_files] , identifier[tuple] ): identifier[package_files] =( identifier[package_files] ,) keyword[for] identifier[package_file] keyword[in] identifier[package_files] : identifier[kwargs] [ literal[string] ]= identifier[package_file] keyword[try] : identifier[click] . identifier[echo] () identifier[upload_files_and_create_package] ( identifier[ctx] ,* identifier[args] ,** identifier[kwargs] ) keyword[except] identifier[ApiException] : identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] ) identifier[click] . identifier[echo] () keyword[for] identifier[k] , identifier[info] keyword[in] identifier[six] . identifier[iteritems] ( identifier[kwargs] ): identifier[option_kwargs] ={} identifier[option_name_fmt] = literal[string] keyword[if] identifier[k] . identifier[endswith] ( literal[string] ): identifier[option_kwargs] [ literal[string] ]= identifier[ExpandPath] ( identifier[dir_okay] = keyword[False] , identifier[exists] = keyword[True] , identifier[writable] = keyword[False] , identifier[resolve_path] = keyword[True] ) keyword[elif] identifier[info] [ literal[string] ]== literal[string] : identifier[option_name_fmt] = literal[string] identifier[option_kwargs] [ literal[string] ]= keyword[True] keyword[else] : identifier[option_kwargs] [ literal[string] ]= identifier[str] identifier[option_name] = identifier[option_name_fmt] %{ literal[string] : identifier[k] . identifier[replace] ( literal[string] , literal[string] )} identifier[decorator] = identifier[click] . identifier[option] ( identifier[option_name] , identifier[required] = identifier[info] [ literal[string] ], identifier[help] = identifier[info] [ literal[string] ], ** identifier[option_kwargs] ) identifier[push_handler] = identifier[decorator] ( identifier[push_handler] ) identifier[handlers] [ identifier[key] ]= identifier[push_handler]
def create_push_handlers(): """Create a handler for upload per package format.""" # pylint: disable=fixme # HACK: hacky territory - Dynamically generate a handler for each of the # package formats, until we have slightly more clever 'guess type' # handling. :-) handlers = create_push_handlers.handlers = {} context = create_push_handlers.context = get_package_formats() for (key, parameters) in six.iteritems(context): kwargs = parameters.copy() # Remove standard arguments kwargs.pop('package_file') if 'distribution' in parameters: has_distribution_param = True kwargs.pop('distribution') # depends on [control=['if'], data=[]] else: has_distribution_param = False has_additional_params = len(kwargs) > 0 help_text = '\n Push/upload a new %(type)s package upstream.\n ' % {'type': key.capitalize()} if has_additional_params: help_text += '\n\n PACKAGE_FILE: The main file to create the package from.\n ' # depends on [control=['if'], data=[]] else: help_text += '\n\n PACKAGE_FILE: Any number of files to create packages from. Each\n file will result in a separate package.\n ' if has_distribution_param: target_metavar = 'OWNER/REPO/DISTRO/RELEASE' target_callback = validators.validate_owner_repo_distro help_text += "\n\n OWNER/REPO/DISTRO/RELEASE: Specify the OWNER namespace (i.e.\n user or org), the REPO name where the package file will be uploaded\n to, and the DISTRO and RELEASE the package is for. All separated by\n a slash.\n\n Example: 'your-org/awesome-repo/ubuntu/xenial'.\n " # depends on [control=['if'], data=[]] else: target_metavar = 'OWNER/REPO' target_callback = validators.validate_owner_repo help_text += "\n\n OWNER/REPO: Specify the OWNER namespace (i.e. user or org), and the\n REPO name where the package file will be uploaded to. 
All separated\n by a slash.\n\n Example: 'your-org/awesome-repo'.\n " @push.command(name=key, help=help_text) @decorators.common_api_auth_options @decorators.common_cli_config_options @decorators.common_cli_output_options @decorators.common_package_action_options @decorators.initialise_api @click.argument('owner_repo', metavar=target_metavar, callback=target_callback) @click.argument('package_file', nargs=1 if has_additional_params else -1, type=ExpandPath(dir_okay=False, exists=True, writable=False, resolve_path=True)) @click.option('-n', '--dry-run', default=False, is_flag=True, help="Execute in dry run mode (don't upload anything.)") @click.option('-n', '--dry-run', default=False, is_flag=True, help="Execute in dry run mode (don't upload anything.)") @click.pass_context def push_handler(ctx, *args, **kwargs): """Handle upload for a specific package format.""" parameters = context.get(ctx.info_name) kwargs['package_type'] = ctx.info_name owner_repo = kwargs.pop('owner_repo') if 'distribution' in parameters: kwargs['distribution'] = '/'.join(owner_repo[2:]) owner_repo = owner_repo[0:2] # depends on [control=['if'], data=[]] kwargs['owner_repo'] = owner_repo package_files = kwargs.pop('package_file') if not isinstance(package_files, tuple): package_files = (package_files,) # depends on [control=['if'], data=[]] for package_file in package_files: kwargs['package_file'] = package_file try: click.echo() upload_files_and_create_package(ctx, *args, **kwargs) # depends on [control=['try'], data=[]] except ApiException: click.secho('Skipping error and moving on.', fg='yellow') # depends on [control=['except'], data=[]] click.echo() # depends on [control=['for'], data=['package_file']] # Add any additional arguments for (k, info) in six.iteritems(kwargs): option_kwargs = {} option_name_fmt = '--%(key)s' if k.endswith('_file'): # Treat parameters that end with _file as uploadable filepaths. 
option_kwargs['type'] = ExpandPath(dir_okay=False, exists=True, writable=False, resolve_path=True) # depends on [control=['if'], data=[]] elif info['type'] == 'bool': option_name_fmt = '--%(key)s/--no-%(key)s' option_kwargs['is_flag'] = True # depends on [control=['if'], data=[]] else: option_kwargs['type'] = str option_name = option_name_fmt % {'key': k.replace('_', '-')} decorator = click.option(option_name, required=info['required'], help=info['help'], **option_kwargs) push_handler = decorator(push_handler) # depends on [control=['for'], data=[]] handlers[key] = push_handler # depends on [control=['for'], data=[]]
def project_exists(response: 'environ.Response', path: str) -> bool: """ Determines whether or not a project exists at the specified path :param response: :param path: :return: """ if os.path.exists(path): return True response.fail( code='PROJECT_NOT_FOUND', message='The project path does not exist', path=path ).console( """ [ERROR]: Unable to open project. The specified path does not exist: {path} """.format(path=path) ) return False
def function[project_exists, parameter[response, path]]: constant[ Determines whether or not a project exists at the specified path :param response: :param path: :return: ] if call[name[os].path.exists, parameter[name[path]]] begin[:] return[constant[True]] call[call[name[response].fail, parameter[]].console, parameter[call[constant[ [ERROR]: Unable to open project. The specified path does not exist: {path} ].format, parameter[]]]] return[constant[False]]
keyword[def] identifier[project_exists] ( identifier[response] : literal[string] , identifier[path] : identifier[str] )-> identifier[bool] : literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[return] keyword[True] identifier[response] . identifier[fail] ( identifier[code] = literal[string] , identifier[message] = literal[string] , identifier[path] = identifier[path] ). identifier[console] ( literal[string] . identifier[format] ( identifier[path] = identifier[path] ) ) keyword[return] keyword[False]
def project_exists(response: 'environ.Response', path: str) -> bool: """ Determines whether or not a project exists at the specified path :param response: :param path: :return: """ if os.path.exists(path): return True # depends on [control=['if'], data=[]] response.fail(code='PROJECT_NOT_FOUND', message='The project path does not exist', path=path).console('\n [ERROR]: Unable to open project. The specified path does not exist:\n\n {path}\n '.format(path=path)) return False
def info(self, page, version=None): """Returns informations of *page*. Informations of the last version is returned if *version* is not set. """ return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPageInfo', page))
def function[info, parameter[self, page, version]]: constant[Returns informations of *page*. Informations of the last version is returned if *version* is not set. ] return[<ast.IfExp object at 0x7da20c76dde0>]
keyword[def] identifier[info] ( identifier[self] , identifier[page] , identifier[version] = keyword[None] ): literal[string] keyword[return] ( identifier[self] . identifier[_dokuwiki] . identifier[send] ( literal[string] , identifier[page] , identifier[version] ) keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[_dokuwiki] . identifier[send] ( literal[string] , identifier[page] ))
def info(self, page, version=None): """Returns informations of *page*. Informations of the last version is returned if *version* is not set. """ return self._dokuwiki.send('wiki.getPageInfoVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPageInfo', page)
def handle_no_start_state(self): """Handles the situation, when no start state exists during execution The method waits, until a transition is created. It then checks again for an existing start state and waits again, if this is not the case. It returns the None state if the the state machine was stopped. """ start_state = self.get_start_state(set_final_outcome=True) while not start_state: # depending on the execution mode pause execution execution_signal = state_machine_execution_engine.handle_execution_mode(self) if execution_signal is StateMachineExecutionStatus.STOPPED: # this will be caught at the end of the run method return None self._transitions_cv.acquire() self._transitions_cv.wait(3.0) self._transitions_cv.release() start_state = self.get_start_state(set_final_outcome=True) return start_state
def function[handle_no_start_state, parameter[self]]: constant[Handles the situation, when no start state exists during execution The method waits, until a transition is created. It then checks again for an existing start state and waits again, if this is not the case. It returns the None state if the the state machine was stopped. ] variable[start_state] assign[=] call[name[self].get_start_state, parameter[]] while <ast.UnaryOp object at 0x7da1b1c7dc90> begin[:] variable[execution_signal] assign[=] call[name[state_machine_execution_engine].handle_execution_mode, parameter[name[self]]] if compare[name[execution_signal] is name[StateMachineExecutionStatus].STOPPED] begin[:] return[constant[None]] call[name[self]._transitions_cv.acquire, parameter[]] call[name[self]._transitions_cv.wait, parameter[constant[3.0]]] call[name[self]._transitions_cv.release, parameter[]] variable[start_state] assign[=] call[name[self].get_start_state, parameter[]] return[name[start_state]]
keyword[def] identifier[handle_no_start_state] ( identifier[self] ): literal[string] identifier[start_state] = identifier[self] . identifier[get_start_state] ( identifier[set_final_outcome] = keyword[True] ) keyword[while] keyword[not] identifier[start_state] : identifier[execution_signal] = identifier[state_machine_execution_engine] . identifier[handle_execution_mode] ( identifier[self] ) keyword[if] identifier[execution_signal] keyword[is] identifier[StateMachineExecutionStatus] . identifier[STOPPED] : keyword[return] keyword[None] identifier[self] . identifier[_transitions_cv] . identifier[acquire] () identifier[self] . identifier[_transitions_cv] . identifier[wait] ( literal[int] ) identifier[self] . identifier[_transitions_cv] . identifier[release] () identifier[start_state] = identifier[self] . identifier[get_start_state] ( identifier[set_final_outcome] = keyword[True] ) keyword[return] identifier[start_state]
def handle_no_start_state(self): """Handles the situation, when no start state exists during execution The method waits, until a transition is created. It then checks again for an existing start state and waits again, if this is not the case. It returns the None state if the the state machine was stopped. """ start_state = self.get_start_state(set_final_outcome=True) while not start_state: # depending on the execution mode pause execution execution_signal = state_machine_execution_engine.handle_execution_mode(self) if execution_signal is StateMachineExecutionStatus.STOPPED: # this will be caught at the end of the run method return None # depends on [control=['if'], data=[]] self._transitions_cv.acquire() self._transitions_cv.wait(3.0) self._transitions_cv.release() start_state = self.get_start_state(set_final_outcome=True) # depends on [control=['while'], data=[]] return start_state
def try_pop(d, key, default): """ >>> d = {"a": "b", "c": "d", "e": "f"} >>> try_pop(d, "g", "default") 'default' >>> d {'a': 'b', 'c': 'd', 'e': 'f'} >>> try_pop(d, "c", "default") 'd' >>> d {'a': 'b', 'e': 'f'} """ value = d.get(key, default) if key in d: d.pop(key) return value
def function[try_pop, parameter[d, key, default]]: constant[ >>> d = {"a": "b", "c": "d", "e": "f"} >>> try_pop(d, "g", "default") 'default' >>> d {'a': 'b', 'c': 'd', 'e': 'f'} >>> try_pop(d, "c", "default") 'd' >>> d {'a': 'b', 'e': 'f'} ] variable[value] assign[=] call[name[d].get, parameter[name[key], name[default]]] if compare[name[key] in name[d]] begin[:] call[name[d].pop, parameter[name[key]]] return[name[value]]
keyword[def] identifier[try_pop] ( identifier[d] , identifier[key] , identifier[default] ): literal[string] identifier[value] = identifier[d] . identifier[get] ( identifier[key] , identifier[default] ) keyword[if] identifier[key] keyword[in] identifier[d] : identifier[d] . identifier[pop] ( identifier[key] ) keyword[return] identifier[value]
def try_pop(d, key, default): """ >>> d = {"a": "b", "c": "d", "e": "f"} >>> try_pop(d, "g", "default") 'default' >>> d {'a': 'b', 'c': 'd', 'e': 'f'} >>> try_pop(d, "c", "default") 'd' >>> d {'a': 'b', 'e': 'f'} """ value = d.get(key, default) if key in d: d.pop(key) # depends on [control=['if'], data=['key', 'd']] return value
def make_linked_folder(sym_path): """ Create a folder in the ~/.autolens directory and create a sym link to it at the provided path. If both folders already exist then nothing is changed. If the source folder exists but the destination folder does not then the source folder is removed and replaced so as to conform to the behaviour that the user would expect should they delete the sym linked folder. Parameters ---------- sym_path: str The path where multinest output is apparently saved Returns ------- actual_path: str The path where multinest output is actually saved """ source_path = path_for(sym_path) if os.path.exists(source_path) and not os.path.exists(sym_path): logger.debug("Source {} exists but target {} does not. Removing source.".format(source_path, sym_path)) shutil.rmtree(source_path) try: logger.debug("Making source {}".format(source_path)) os.mkdir(source_path) logger.debug("Success") except FileExistsError as e: logger.info("Source already existed") logger.debug(e) try: logger.debug("Making linking from source {} to sym {}".format(source_path, sym_path)) os.symlink(source_path, sym_path) logger.debug("Success") except FileExistsError as e: logger.debug("Sym already existed") logger.debug(e) return source_path
def function[make_linked_folder, parameter[sym_path]]: constant[ Create a folder in the ~/.autolens directory and create a sym link to it at the provided path. If both folders already exist then nothing is changed. If the source folder exists but the destination folder does not then the source folder is removed and replaced so as to conform to the behaviour that the user would expect should they delete the sym linked folder. Parameters ---------- sym_path: str The path where multinest output is apparently saved Returns ------- actual_path: str The path where multinest output is actually saved ] variable[source_path] assign[=] call[name[path_for], parameter[name[sym_path]]] if <ast.BoolOp object at 0x7da18eb54a60> begin[:] call[name[logger].debug, parameter[call[constant[Source {} exists but target {} does not. Removing source.].format, parameter[name[source_path], name[sym_path]]]]] call[name[shutil].rmtree, parameter[name[source_path]]] <ast.Try object at 0x7da18fe916c0> <ast.Try object at 0x7da18fe931c0> return[name[source_path]]
keyword[def] identifier[make_linked_folder] ( identifier[sym_path] ): literal[string] identifier[source_path] = identifier[path_for] ( identifier[sym_path] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[source_path] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[sym_path] ): identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[source_path] , identifier[sym_path] )) identifier[shutil] . identifier[rmtree] ( identifier[source_path] ) keyword[try] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[source_path] )) identifier[os] . identifier[mkdir] ( identifier[source_path] ) identifier[logger] . identifier[debug] ( literal[string] ) keyword[except] identifier[FileExistsError] keyword[as] identifier[e] : identifier[logger] . identifier[info] ( literal[string] ) identifier[logger] . identifier[debug] ( identifier[e] ) keyword[try] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[source_path] , identifier[sym_path] )) identifier[os] . identifier[symlink] ( identifier[source_path] , identifier[sym_path] ) identifier[logger] . identifier[debug] ( literal[string] ) keyword[except] identifier[FileExistsError] keyword[as] identifier[e] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[logger] . identifier[debug] ( identifier[e] ) keyword[return] identifier[source_path]
def make_linked_folder(sym_path): """ Create a folder in the ~/.autolens directory and create a sym link to it at the provided path. If both folders already exist then nothing is changed. If the source folder exists but the destination folder does not then the source folder is removed and replaced so as to conform to the behaviour that the user would expect should they delete the sym linked folder. Parameters ---------- sym_path: str The path where multinest output is apparently saved Returns ------- actual_path: str The path where multinest output is actually saved """ source_path = path_for(sym_path) if os.path.exists(source_path) and (not os.path.exists(sym_path)): logger.debug('Source {} exists but target {} does not. Removing source.'.format(source_path, sym_path)) shutil.rmtree(source_path) # depends on [control=['if'], data=[]] try: logger.debug('Making source {}'.format(source_path)) os.mkdir(source_path) logger.debug('Success') # depends on [control=['try'], data=[]] except FileExistsError as e: logger.info('Source already existed') logger.debug(e) # depends on [control=['except'], data=['e']] try: logger.debug('Making linking from source {} to sym {}'.format(source_path, sym_path)) os.symlink(source_path, sym_path) logger.debug('Success') # depends on [control=['try'], data=[]] except FileExistsError as e: logger.debug('Sym already existed') logger.debug(e) # depends on [control=['except'], data=['e']] return source_path
def select_idle_worker(action, action_space, select_worker): """Select an idle worker.""" del action_space action.action_ui.select_idle_worker.type = select_worker
def function[select_idle_worker, parameter[action, action_space, select_worker]]: constant[Select an idle worker.] <ast.Delete object at 0x7da18f00cc40> name[action].action_ui.select_idle_worker.type assign[=] name[select_worker]
keyword[def] identifier[select_idle_worker] ( identifier[action] , identifier[action_space] , identifier[select_worker] ): literal[string] keyword[del] identifier[action_space] identifier[action] . identifier[action_ui] . identifier[select_idle_worker] . identifier[type] = identifier[select_worker]
def select_idle_worker(action, action_space, select_worker): """Select an idle worker.""" del action_space action.action_ui.select_idle_worker.type = select_worker
def aslist(generator): 'Function decorator to transform a generator into a list' def wrapper(*args, **kwargs): return list(generator(*args, **kwargs)) return wrapper
def function[aslist, parameter[generator]]: constant[Function decorator to transform a generator into a list] def function[wrapper, parameter[]]: return[call[name[list], parameter[call[name[generator], parameter[<ast.Starred object at 0x7da1b0fc1870>]]]]] return[name[wrapper]]
keyword[def] identifier[aslist] ( identifier[generator] ): literal[string] keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ): keyword[return] identifier[list] ( identifier[generator] (* identifier[args] ,** identifier[kwargs] )) keyword[return] identifier[wrapper]
def aslist(generator): """Function decorator to transform a generator into a list""" def wrapper(*args, **kwargs): return list(generator(*args, **kwargs)) return wrapper
def escape_windows_cmd_string(s): """Returns a string that is usable by the Windows cmd.exe. The escaping is based on details here and emperical testing: http://www.robvanderwoude.com/escapechars.php """ for c in '()%!^<>&|"': s = s.replace(c, '^' + c) s = s.replace('/?', '/.') return s
def function[escape_windows_cmd_string, parameter[s]]: constant[Returns a string that is usable by the Windows cmd.exe. The escaping is based on details here and emperical testing: http://www.robvanderwoude.com/escapechars.php ] for taget[name[c]] in starred[constant[()%!^<>&|"]] begin[:] variable[s] assign[=] call[name[s].replace, parameter[name[c], binary_operation[constant[^] + name[c]]]] variable[s] assign[=] call[name[s].replace, parameter[constant[/?], constant[/.]]] return[name[s]]
keyword[def] identifier[escape_windows_cmd_string] ( identifier[s] ): literal[string] keyword[for] identifier[c] keyword[in] literal[string] : identifier[s] = identifier[s] . identifier[replace] ( identifier[c] , literal[string] + identifier[c] ) identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] ) keyword[return] identifier[s]
def escape_windows_cmd_string(s): """Returns a string that is usable by the Windows cmd.exe. The escaping is based on details here and emperical testing: http://www.robvanderwoude.com/escapechars.php """ for c in '()%!^<>&|"': s = s.replace(c, '^' + c) # depends on [control=['for'], data=['c']] s = s.replace('/?', '/.') return s
def sort(ctx): """Sort the variants of a vcf file""" head = ctx.parent.head vcf_handle = ctx.parent.handle outfile = ctx.parent.outfile silent = ctx.parent.silent print_headers(head, outfile=outfile, silent=silent) for line in sort_variants(vcf_handle): print_variant(variant_line=line, outfile=outfile, silent=silent)
def function[sort, parameter[ctx]]: constant[Sort the variants of a vcf file] variable[head] assign[=] name[ctx].parent.head variable[vcf_handle] assign[=] name[ctx].parent.handle variable[outfile] assign[=] name[ctx].parent.outfile variable[silent] assign[=] name[ctx].parent.silent call[name[print_headers], parameter[name[head]]] for taget[name[line]] in starred[call[name[sort_variants], parameter[name[vcf_handle]]]] begin[:] call[name[print_variant], parameter[]]
keyword[def] identifier[sort] ( identifier[ctx] ): literal[string] identifier[head] = identifier[ctx] . identifier[parent] . identifier[head] identifier[vcf_handle] = identifier[ctx] . identifier[parent] . identifier[handle] identifier[outfile] = identifier[ctx] . identifier[parent] . identifier[outfile] identifier[silent] = identifier[ctx] . identifier[parent] . identifier[silent] identifier[print_headers] ( identifier[head] , identifier[outfile] = identifier[outfile] , identifier[silent] = identifier[silent] ) keyword[for] identifier[line] keyword[in] identifier[sort_variants] ( identifier[vcf_handle] ): identifier[print_variant] ( identifier[variant_line] = identifier[line] , identifier[outfile] = identifier[outfile] , identifier[silent] = identifier[silent] )
def sort(ctx): """Sort the variants of a vcf file""" head = ctx.parent.head vcf_handle = ctx.parent.handle outfile = ctx.parent.outfile silent = ctx.parent.silent print_headers(head, outfile=outfile, silent=silent) for line in sort_variants(vcf_handle): print_variant(variant_line=line, outfile=outfile, silent=silent) # depends on [control=['for'], data=['line']]
def kpoints(self): """ Generate gamma center k-points mesh grid for GW calc, which is requested by GW calculation. """ return Kpoints.automatic_density_by_vol(self.structure, self.reciprocal_density, force_gamma=True)
def function[kpoints, parameter[self]]: constant[ Generate gamma center k-points mesh grid for GW calc, which is requested by GW calculation. ] return[call[name[Kpoints].automatic_density_by_vol, parameter[name[self].structure, name[self].reciprocal_density]]]
keyword[def] identifier[kpoints] ( identifier[self] ): literal[string] keyword[return] identifier[Kpoints] . identifier[automatic_density_by_vol] ( identifier[self] . identifier[structure] , identifier[self] . identifier[reciprocal_density] , identifier[force_gamma] = keyword[True] )
def kpoints(self): """ Generate gamma center k-points mesh grid for GW calc, which is requested by GW calculation. """ return Kpoints.automatic_density_by_vol(self.structure, self.reciprocal_density, force_gamma=True)
def _theorem6p3(): """See Theorem 6.3 in paper. Prunes (s,b) when (s,a) is explored, b (almost) simplicial in (s,a), and a (almost) simplicial in (s,b) """ pruning_set3 = set() def _prune3(x, as_list, b): for a in as_list: key = (tuple(x), a, b) # (s,a,b) with (s,a) explored if key in pruning_set3: return True return False def _explored3(x, a, as_list): for b in as_list: prunable = (tuple(x), a, b) # (s,a,b) with (s,a) explored pruning_set3.add(prunable) return _prune3, _explored3
def function[_theorem6p3, parameter[]]: constant[See Theorem 6.3 in paper. Prunes (s,b) when (s,a) is explored, b (almost) simplicial in (s,a), and a (almost) simplicial in (s,b) ] variable[pruning_set3] assign[=] call[name[set], parameter[]] def function[_prune3, parameter[x, as_list, b]]: for taget[name[a]] in starred[name[as_list]] begin[:] variable[key] assign[=] tuple[[<ast.Call object at 0x7da1b0892980>, <ast.Name object at 0x7da1b08905e0>, <ast.Name object at 0x7da1b08906a0>]] if compare[name[key] in name[pruning_set3]] begin[:] return[constant[True]] return[constant[False]] def function[_explored3, parameter[x, a, as_list]]: for taget[name[b]] in starred[name[as_list]] begin[:] variable[prunable] assign[=] tuple[[<ast.Call object at 0x7da1b08f9de0>, <ast.Name object at 0x7da1b08fa8c0>, <ast.Name object at 0x7da1b08fa9e0>]] call[name[pruning_set3].add, parameter[name[prunable]]] return[tuple[[<ast.Name object at 0x7da1b08fae00>, <ast.Name object at 0x7da1b08fb5b0>]]]
keyword[def] identifier[_theorem6p3] (): literal[string] identifier[pruning_set3] = identifier[set] () keyword[def] identifier[_prune3] ( identifier[x] , identifier[as_list] , identifier[b] ): keyword[for] identifier[a] keyword[in] identifier[as_list] : identifier[key] =( identifier[tuple] ( identifier[x] ), identifier[a] , identifier[b] ) keyword[if] identifier[key] keyword[in] identifier[pruning_set3] : keyword[return] keyword[True] keyword[return] keyword[False] keyword[def] identifier[_explored3] ( identifier[x] , identifier[a] , identifier[as_list] ): keyword[for] identifier[b] keyword[in] identifier[as_list] : identifier[prunable] =( identifier[tuple] ( identifier[x] ), identifier[a] , identifier[b] ) identifier[pruning_set3] . identifier[add] ( identifier[prunable] ) keyword[return] identifier[_prune3] , identifier[_explored3]
def _theorem6p3(): """See Theorem 6.3 in paper. Prunes (s,b) when (s,a) is explored, b (almost) simplicial in (s,a), and a (almost) simplicial in (s,b) """ pruning_set3 = set() def _prune3(x, as_list, b): for a in as_list: key = (tuple(x), a, b) # (s,a,b) with (s,a) explored if key in pruning_set3: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']] return False def _explored3(x, a, as_list): for b in as_list: prunable = (tuple(x), a, b) # (s,a,b) with (s,a) explored pruning_set3.add(prunable) # depends on [control=['for'], data=['b']] return (_prune3, _explored3)
async def exec_loop(stdout, stderr, kernel, mode, code, *, opts=None, vprint_done=print_done, is_multi=False): ''' Fully streamed asynchronous version of the execute loop. ''' async with kernel.stream_execute(code, mode=mode, opts=opts) as stream: async for result in stream: if result.type == aiohttp.WSMsgType.TEXT: result = json.loads(result.data) else: # future extension continue for rec in result.get('console', []): if rec[0] == 'stdout': print(rec[1], end='', file=stdout) elif rec[0] == 'stderr': print(rec[1], end='', file=stderr) else: print('----- output record (type: {0}) -----'.format(rec[0]), file=stdout) print(rec[1], file=stdout) print('----- end of record -----', file=stdout) stdout.flush() files = result.get('files', []) if files: print('--- generated files ---', file=stdout) for item in files: print('{0}: {1}'.format(item['name'], item['url']), file=stdout) print('--- end of generated files ---', file=stdout) if result['status'] == 'clean-finished': exitCode = result.get('exitCode') msg = 'Clean finished. (exit code = {0})'.format(exitCode) if is_multi: print(msg, file=stderr) vprint_done(msg) elif result['status'] == 'build-finished': exitCode = result.get('exitCode') msg = 'Build finished. (exit code = {0})'.format(exitCode) if is_multi: print(msg, file=stderr) vprint_done(msg) elif result['status'] == 'finished': exitCode = result.get('exitCode') msg = 'Execution finished. (exit code = {0})'.format(exitCode) if is_multi: print(msg, file=stderr) vprint_done(msg) break elif result['status'] == 'waiting-input': if result['options'].get('is_password', False): code = getpass.getpass() else: code = input() await stream.send_str(code) elif result['status'] == 'continued': pass
<ast.AsyncFunctionDef object at 0x7da1b0ff21d0>
keyword[async] keyword[def] identifier[exec_loop] ( identifier[stdout] , identifier[stderr] , identifier[kernel] , identifier[mode] , identifier[code] ,*, identifier[opts] = keyword[None] , identifier[vprint_done] = identifier[print_done] , identifier[is_multi] = keyword[False] ): literal[string] keyword[async] keyword[with] identifier[kernel] . identifier[stream_execute] ( identifier[code] , identifier[mode] = identifier[mode] , identifier[opts] = identifier[opts] ) keyword[as] identifier[stream] : keyword[async] keyword[for] identifier[result] keyword[in] identifier[stream] : keyword[if] identifier[result] . identifier[type] == identifier[aiohttp] . identifier[WSMsgType] . identifier[TEXT] : identifier[result] = identifier[json] . identifier[loads] ( identifier[result] . identifier[data] ) keyword[else] : keyword[continue] keyword[for] identifier[rec] keyword[in] identifier[result] . identifier[get] ( literal[string] ,[]): keyword[if] identifier[rec] [ literal[int] ]== literal[string] : identifier[print] ( identifier[rec] [ literal[int] ], identifier[end] = literal[string] , identifier[file] = identifier[stdout] ) keyword[elif] identifier[rec] [ literal[int] ]== literal[string] : identifier[print] ( identifier[rec] [ literal[int] ], identifier[end] = literal[string] , identifier[file] = identifier[stderr] ) keyword[else] : identifier[print] ( literal[string] . identifier[format] ( identifier[rec] [ literal[int] ]), identifier[file] = identifier[stdout] ) identifier[print] ( identifier[rec] [ literal[int] ], identifier[file] = identifier[stdout] ) identifier[print] ( literal[string] , identifier[file] = identifier[stdout] ) identifier[stdout] . identifier[flush] () identifier[files] = identifier[result] . identifier[get] ( literal[string] ,[]) keyword[if] identifier[files] : identifier[print] ( literal[string] , identifier[file] = identifier[stdout] ) keyword[for] identifier[item] keyword[in] identifier[files] : identifier[print] ( literal[string] . 
identifier[format] ( identifier[item] [ literal[string] ], identifier[item] [ literal[string] ]), identifier[file] = identifier[stdout] ) identifier[print] ( literal[string] , identifier[file] = identifier[stdout] ) keyword[if] identifier[result] [ literal[string] ]== literal[string] : identifier[exitCode] = identifier[result] . identifier[get] ( literal[string] ) identifier[msg] = literal[string] . identifier[format] ( identifier[exitCode] ) keyword[if] identifier[is_multi] : identifier[print] ( identifier[msg] , identifier[file] = identifier[stderr] ) identifier[vprint_done] ( identifier[msg] ) keyword[elif] identifier[result] [ literal[string] ]== literal[string] : identifier[exitCode] = identifier[result] . identifier[get] ( literal[string] ) identifier[msg] = literal[string] . identifier[format] ( identifier[exitCode] ) keyword[if] identifier[is_multi] : identifier[print] ( identifier[msg] , identifier[file] = identifier[stderr] ) identifier[vprint_done] ( identifier[msg] ) keyword[elif] identifier[result] [ literal[string] ]== literal[string] : identifier[exitCode] = identifier[result] . identifier[get] ( literal[string] ) identifier[msg] = literal[string] . identifier[format] ( identifier[exitCode] ) keyword[if] identifier[is_multi] : identifier[print] ( identifier[msg] , identifier[file] = identifier[stderr] ) identifier[vprint_done] ( identifier[msg] ) keyword[break] keyword[elif] identifier[result] [ literal[string] ]== literal[string] : keyword[if] identifier[result] [ literal[string] ]. identifier[get] ( literal[string] , keyword[False] ): identifier[code] = identifier[getpass] . identifier[getpass] () keyword[else] : identifier[code] = identifier[input] () keyword[await] identifier[stream] . identifier[send_str] ( identifier[code] ) keyword[elif] identifier[result] [ literal[string] ]== literal[string] : keyword[pass]
async def exec_loop(stdout, stderr, kernel, mode, code, *, opts=None, vprint_done=print_done, is_multi=False): """ Fully streamed asynchronous version of the execute loop. """ async with kernel.stream_execute(code, mode=mode, opts=opts) as stream: async for result in stream: if result.type == aiohttp.WSMsgType.TEXT: result = json.loads(result.data) # depends on [control=['if'], data=[]] else: # future extension continue for rec in result.get('console', []): if rec[0] == 'stdout': print(rec[1], end='', file=stdout) # depends on [control=['if'], data=[]] elif rec[0] == 'stderr': print(rec[1], end='', file=stderr) # depends on [control=['if'], data=[]] else: print('----- output record (type: {0}) -----'.format(rec[0]), file=stdout) print(rec[1], file=stdout) print('----- end of record -----', file=stdout) # depends on [control=['for'], data=['rec']] stdout.flush() files = result.get('files', []) if files: print('--- generated files ---', file=stdout) for item in files: print('{0}: {1}'.format(item['name'], item['url']), file=stdout) # depends on [control=['for'], data=['item']] print('--- end of generated files ---', file=stdout) # depends on [control=['if'], data=[]] if result['status'] == 'clean-finished': exitCode = result.get('exitCode') msg = 'Clean finished. (exit code = {0})'.format(exitCode) if is_multi: print(msg, file=stderr) # depends on [control=['if'], data=[]] vprint_done(msg) # depends on [control=['if'], data=[]] elif result['status'] == 'build-finished': exitCode = result.get('exitCode') msg = 'Build finished. (exit code = {0})'.format(exitCode) if is_multi: print(msg, file=stderr) # depends on [control=['if'], data=[]] vprint_done(msg) # depends on [control=['if'], data=[]] elif result['status'] == 'finished': exitCode = result.get('exitCode') msg = 'Execution finished. 
(exit code = {0})'.format(exitCode) if is_multi: print(msg, file=stderr) # depends on [control=['if'], data=[]] vprint_done(msg) break # depends on [control=['if'], data=[]] elif result['status'] == 'waiting-input': if result['options'].get('is_password', False): code = getpass.getpass() # depends on [control=['if'], data=[]] else: code = input() await stream.send_str(code) # depends on [control=['if'], data=[]] elif result['status'] == 'continued': pass # depends on [control=['if'], data=[]]
def _find_address_values_in_chain(self, base_contexts, addresses_to_find):
    """Breadth first search through the chain of contexts searching for
    the bytes values at the addresses in addresses_to_find.

    Args:
        base_contexts (list of str): The context ids to start with.
        addresses_to_find (list of str): Addresses to find values in the
            chain of contexts.

    Returns:
        tuple: (address_values, reads) where address_values is a list of
            (address, value) pairs found along the chain (value is None
            for deleted addresses) and reads is the list of addresses
            still not found when the chain was exhausted.
    """
    contexts_in_chain = deque()
    contexts_in_chain.extend(base_contexts)
    reads = list(addresses_to_find)
    address_values = []

    context_ids_already_searched = []
    context_ids_already_searched.extend(base_contexts)

    # There are two loop exit conditions, either all the addresses that
    # are being searched for have been found, or we run out of contexts
    # in the chain of contexts.
    while reads:
        try:
            current_c_id = contexts_in_chain.popleft()
        except IndexError:
            # There aren't any more contexts known about.
            break
        current_context = self._contexts[current_c_id]

        # First, check for addresses that have been deleted.
        # NOTE(review): get_if_deleted appears to return a parallel list
        # with None placeholders for non-deleted addresses — confirm.
        deleted_addresses = current_context.get_if_deleted(reads)
        for address in deleted_addresses:
            if address is not None:
                # A deleted address is reported with value None.
                address_values.append((address, None))
        # Set difference drops the deleted addresses from further search;
        # note this also discards any ordering/duplicates in reads.
        reads = list(set(reads) - set(deleted_addresses))

        # Second, check for addresses that have been set in the context,
        # and remove those addresses from being asked about again. Here
        # any value of None means the address hasn't been set.
        values = current_context.get_if_set(reads)
        addresses_not_found = []
        for address, value in zip(reads, values):
            if value is not None:
                address_values.append((address, value))
            else:
                addresses_not_found.append(address)
        reads = addresses_not_found

        # Next check for addresses that might be in a context
        # because they were inputs.
        addresses_in_inputs = [address for address in reads if address in current_context]
        values = current_context.get_if_not_set(addresses_in_inputs)
        address_values.extend(list(zip(addresses_in_inputs, values)))
        # NOTE(review): list.remove is O(n) per call; fine for small read
        # sets, worth revisiting if address lists grow large.
        for add in addresses_in_inputs:
            reads.remove(add)

        # Finally, widen the breadth-first frontier with this context's
        # own base contexts, avoiding revisits.
        for c_id in current_context.base_contexts:
            if c_id not in context_ids_already_searched:
                contexts_in_chain.append(c_id)
                context_ids_already_searched.append(c_id)

    return address_values, reads
def function[_find_address_values_in_chain, parameter[self, base_contexts, addresses_to_find]]: constant[Breadth first search through the chain of contexts searching for the bytes values at the addresses in addresses_to_find. Args: base_contexts (list of str): The context ids to start with. addresses_to_find (list of str): Addresses to find values in the chain of contexts. Returns: tuple of found address_values and still not found addresses ] variable[contexts_in_chain] assign[=] call[name[deque], parameter[]] call[name[contexts_in_chain].extend, parameter[name[base_contexts]]] variable[reads] assign[=] call[name[list], parameter[name[addresses_to_find]]] variable[address_values] assign[=] list[[]] variable[context_ids_already_searched] assign[=] list[[]] call[name[context_ids_already_searched].extend, parameter[name[base_contexts]]] while name[reads] begin[:] <ast.Try object at 0x7da20c7cbf70> variable[current_context] assign[=] call[name[self]._contexts][name[current_c_id]] variable[deleted_addresses] assign[=] call[name[current_context].get_if_deleted, parameter[name[reads]]] for taget[name[address]] in starred[name[deleted_addresses]] begin[:] if compare[name[address] is_not constant[None]] begin[:] call[name[address_values].append, parameter[tuple[[<ast.Name object at 0x7da20c7cb520>, <ast.Constant object at 0x7da20c7c8be0>]]]] variable[reads] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[reads]]] - call[name[set], parameter[name[deleted_addresses]]]]]] variable[values] assign[=] call[name[current_context].get_if_set, parameter[name[reads]]] variable[addresses_not_found] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20c6c4520>, <ast.Name object at 0x7da20c6c77c0>]]] in starred[call[name[zip], parameter[name[reads], name[values]]]] begin[:] if compare[name[value] is_not constant[None]] begin[:] call[name[address_values].append, parameter[tuple[[<ast.Name object at 0x7da20c6c6e30>, <ast.Name object at 
0x7da20c6c66e0>]]]] variable[reads] assign[=] name[addresses_not_found] variable[addresses_in_inputs] assign[=] <ast.ListComp object at 0x7da18f00d960> variable[values] assign[=] call[name[current_context].get_if_not_set, parameter[name[addresses_in_inputs]]] call[name[address_values].extend, parameter[call[name[list], parameter[call[name[zip], parameter[name[addresses_in_inputs], name[values]]]]]]] for taget[name[add]] in starred[name[addresses_in_inputs]] begin[:] call[name[reads].remove, parameter[name[add]]] for taget[name[c_id]] in starred[name[current_context].base_contexts] begin[:] if compare[name[c_id] <ast.NotIn object at 0x7da2590d7190> name[context_ids_already_searched]] begin[:] call[name[contexts_in_chain].append, parameter[name[c_id]]] call[name[context_ids_already_searched].append, parameter[name[c_id]]] return[tuple[[<ast.Name object at 0x7da18f00fe20>, <ast.Name object at 0x7da18f00d900>]]]
keyword[def] identifier[_find_address_values_in_chain] ( identifier[self] , identifier[base_contexts] , identifier[addresses_to_find] ): literal[string] identifier[contexts_in_chain] = identifier[deque] () identifier[contexts_in_chain] . identifier[extend] ( identifier[base_contexts] ) identifier[reads] = identifier[list] ( identifier[addresses_to_find] ) identifier[address_values] =[] identifier[context_ids_already_searched] =[] identifier[context_ids_already_searched] . identifier[extend] ( identifier[base_contexts] ) keyword[while] identifier[reads] : keyword[try] : identifier[current_c_id] = identifier[contexts_in_chain] . identifier[popleft] () keyword[except] identifier[IndexError] : keyword[break] identifier[current_context] = identifier[self] . identifier[_contexts] [ identifier[current_c_id] ] identifier[deleted_addresses] = identifier[current_context] . identifier[get_if_deleted] ( identifier[reads] ) keyword[for] identifier[address] keyword[in] identifier[deleted_addresses] : keyword[if] identifier[address] keyword[is] keyword[not] keyword[None] : identifier[address_values] . identifier[append] (( identifier[address] , keyword[None] )) identifier[reads] = identifier[list] ( identifier[set] ( identifier[reads] )- identifier[set] ( identifier[deleted_addresses] )) identifier[values] = identifier[current_context] . identifier[get_if_set] ( identifier[reads] ) identifier[addresses_not_found] =[] keyword[for] identifier[address] , identifier[value] keyword[in] identifier[zip] ( identifier[reads] , identifier[values] ): keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : identifier[address_values] . identifier[append] (( identifier[address] , identifier[value] )) keyword[else] : identifier[addresses_not_found] . 
identifier[append] ( identifier[address] ) identifier[reads] = identifier[addresses_not_found] identifier[addresses_in_inputs] =[ identifier[address] keyword[for] identifier[address] keyword[in] identifier[reads] keyword[if] identifier[address] keyword[in] identifier[current_context] ] identifier[values] = identifier[current_context] . identifier[get_if_not_set] ( identifier[addresses_in_inputs] ) identifier[address_values] . identifier[extend] ( identifier[list] ( identifier[zip] ( identifier[addresses_in_inputs] , identifier[values] ))) keyword[for] identifier[add] keyword[in] identifier[addresses_in_inputs] : identifier[reads] . identifier[remove] ( identifier[add] ) keyword[for] identifier[c_id] keyword[in] identifier[current_context] . identifier[base_contexts] : keyword[if] identifier[c_id] keyword[not] keyword[in] identifier[context_ids_already_searched] : identifier[contexts_in_chain] . identifier[append] ( identifier[c_id] ) identifier[context_ids_already_searched] . identifier[append] ( identifier[c_id] ) keyword[return] identifier[address_values] , identifier[reads]
def _find_address_values_in_chain(self, base_contexts, addresses_to_find): """Breadth first search through the chain of contexts searching for the bytes values at the addresses in addresses_to_find. Args: base_contexts (list of str): The context ids to start with. addresses_to_find (list of str): Addresses to find values in the chain of contexts. Returns: tuple of found address_values and still not found addresses """ contexts_in_chain = deque() contexts_in_chain.extend(base_contexts) reads = list(addresses_to_find) address_values = [] context_ids_already_searched = [] context_ids_already_searched.extend(base_contexts) # There are two loop exit conditions, either all the addresses that # are being searched for have been found, or we run out of contexts # in the chain of contexts. while reads: try: current_c_id = contexts_in_chain.popleft() # depends on [control=['try'], data=[]] except IndexError: # There aren't any more contexts known about. break # depends on [control=['except'], data=[]] current_context = self._contexts[current_c_id] # First, check for addresses that have been deleted. deleted_addresses = current_context.get_if_deleted(reads) for address in deleted_addresses: if address is not None: address_values.append((address, None)) # depends on [control=['if'], data=['address']] # depends on [control=['for'], data=['address']] reads = list(set(reads) - set(deleted_addresses)) # Second, check for addresses that have been set in the context, # and remove those addresses from being asked about again. Here # any value of None means the address hasn't been set. 
values = current_context.get_if_set(reads) addresses_not_found = [] for (address, value) in zip(reads, values): if value is not None: address_values.append((address, value)) # depends on [control=['if'], data=['value']] else: addresses_not_found.append(address) # depends on [control=['for'], data=[]] reads = addresses_not_found # Next check for addresses that might be in a context # because they were inputs. addresses_in_inputs = [address for address in reads if address in current_context] values = current_context.get_if_not_set(addresses_in_inputs) address_values.extend(list(zip(addresses_in_inputs, values))) for add in addresses_in_inputs: reads.remove(add) # depends on [control=['for'], data=['add']] for c_id in current_context.base_contexts: if c_id not in context_ids_already_searched: contexts_in_chain.append(c_id) context_ids_already_searched.append(c_id) # depends on [control=['if'], data=['c_id', 'context_ids_already_searched']] # depends on [control=['for'], data=['c_id']] # depends on [control=['while'], data=[]] return (address_values, reads)
def collapsesum(data_frame, by = None, var = None): ''' Pour une variable, fonction qui calcule la moyenne pondérée au sein de chaque groupe. ''' assert by is not None assert var is not None grouped = data_frame.groupby([by]) return grouped.apply(lambda x: weighted_sum(groupe = x, var =var))
def function[collapsesum, parameter[data_frame, by, var]]: constant[ Pour une variable, fonction qui calcule la moyenne pondérée au sein de chaque groupe. ] assert[compare[name[by] is_not constant[None]]] assert[compare[name[var] is_not constant[None]]] variable[grouped] assign[=] call[name[data_frame].groupby, parameter[list[[<ast.Name object at 0x7da1b0a0b370>]]]] return[call[name[grouped].apply, parameter[<ast.Lambda object at 0x7da1b0a0b460>]]]
keyword[def] identifier[collapsesum] ( identifier[data_frame] , identifier[by] = keyword[None] , identifier[var] = keyword[None] ): literal[string] keyword[assert] identifier[by] keyword[is] keyword[not] keyword[None] keyword[assert] identifier[var] keyword[is] keyword[not] keyword[None] identifier[grouped] = identifier[data_frame] . identifier[groupby] ([ identifier[by] ]) keyword[return] identifier[grouped] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[weighted_sum] ( identifier[groupe] = identifier[x] , identifier[var] = identifier[var] ))
def collapsesum(data_frame, by=None, var=None): """ Pour une variable, fonction qui calcule la moyenne pondérée au sein de chaque groupe. """ assert by is not None assert var is not None grouped = data_frame.groupby([by]) return grouped.apply(lambda x: weighted_sum(groupe=x, var=var))
def save_form_data(self, instance, data): """ Handle data sent from MultiValueField forms that set ppoi values. `instance`: The model instance that is being altered via a form `data`: The data sent from the form to this field which can be either: * `None`: This is unset data from an optional field * A two-position tuple: (image_form_data, ppoi_data) * `image_form-data` options: * `None` the file for this field is unchanged * `False` unassign the file form the field * `ppoi_data` data structure: * `%(x_coordinate)sx%(y_coordinate)s': The ppoi data to assign to the unchanged file """ to_assign = data if data and isinstance(data, tuple): # This value is coming from a MultiValueField if data[0] is None: # This means the file hasn't changed but we need to # update the ppoi current_field = getattr(instance, self.name) if data[1]: current_field.ppoi = data[1] to_assign = current_field elif data[0] is False: # This means the 'Clear' checkbox was checked so we # need to empty the field to_assign = '' else: # This means there is a new upload so we need to unpack # the tuple and assign the first position to the field # attribute to_assign = data[0] super(VersatileImageField, self).save_form_data(instance, to_assign)
def function[save_form_data, parameter[self, instance, data]]: constant[ Handle data sent from MultiValueField forms that set ppoi values. `instance`: The model instance that is being altered via a form `data`: The data sent from the form to this field which can be either: * `None`: This is unset data from an optional field * A two-position tuple: (image_form_data, ppoi_data) * `image_form-data` options: * `None` the file for this field is unchanged * `False` unassign the file form the field * `ppoi_data` data structure: * `%(x_coordinate)sx%(y_coordinate)s': The ppoi data to assign to the unchanged file ] variable[to_assign] assign[=] name[data] if <ast.BoolOp object at 0x7da1b05cb070> begin[:] if compare[call[name[data]][constant[0]] is constant[None]] begin[:] variable[current_field] assign[=] call[name[getattr], parameter[name[instance], name[self].name]] if call[name[data]][constant[1]] begin[:] name[current_field].ppoi assign[=] call[name[data]][constant[1]] variable[to_assign] assign[=] name[current_field] call[call[name[super], parameter[name[VersatileImageField], name[self]]].save_form_data, parameter[name[instance], name[to_assign]]]
keyword[def] identifier[save_form_data] ( identifier[self] , identifier[instance] , identifier[data] ): literal[string] identifier[to_assign] = identifier[data] keyword[if] identifier[data] keyword[and] identifier[isinstance] ( identifier[data] , identifier[tuple] ): keyword[if] identifier[data] [ literal[int] ] keyword[is] keyword[None] : identifier[current_field] = identifier[getattr] ( identifier[instance] , identifier[self] . identifier[name] ) keyword[if] identifier[data] [ literal[int] ]: identifier[current_field] . identifier[ppoi] = identifier[data] [ literal[int] ] identifier[to_assign] = identifier[current_field] keyword[elif] identifier[data] [ literal[int] ] keyword[is] keyword[False] : identifier[to_assign] = literal[string] keyword[else] : identifier[to_assign] = identifier[data] [ literal[int] ] identifier[super] ( identifier[VersatileImageField] , identifier[self] ). identifier[save_form_data] ( identifier[instance] , identifier[to_assign] )
def save_form_data(self, instance, data): """ Handle data sent from MultiValueField forms that set ppoi values. `instance`: The model instance that is being altered via a form `data`: The data sent from the form to this field which can be either: * `None`: This is unset data from an optional field * A two-position tuple: (image_form_data, ppoi_data) * `image_form-data` options: * `None` the file for this field is unchanged * `False` unassign the file form the field * `ppoi_data` data structure: * `%(x_coordinate)sx%(y_coordinate)s': The ppoi data to assign to the unchanged file """ to_assign = data if data and isinstance(data, tuple): # This value is coming from a MultiValueField if data[0] is None: # This means the file hasn't changed but we need to # update the ppoi current_field = getattr(instance, self.name) if data[1]: current_field.ppoi = data[1] # depends on [control=['if'], data=[]] to_assign = current_field # depends on [control=['if'], data=[]] elif data[0] is False: # This means the 'Clear' checkbox was checked so we # need to empty the field to_assign = '' # depends on [control=['if'], data=[]] else: # This means there is a new upload so we need to unpack # the tuple and assign the first position to the field # attribute to_assign = data[0] # depends on [control=['if'], data=[]] super(VersatileImageField, self).save_form_data(instance, to_assign)
def getBytesForString(self, u): """ Returns the corresponding utf-8 encoded string for a given unicode object. If there is no string, one is encoded. @since: 0.6 """ h = hash(u) s = self._unicodes.get(h, None) if s is not None: return s s = self._unicodes[h] = u.encode('utf-8') return s
def function[getBytesForString, parameter[self, u]]: constant[ Returns the corresponding utf-8 encoded string for a given unicode object. If there is no string, one is encoded. @since: 0.6 ] variable[h] assign[=] call[name[hash], parameter[name[u]]] variable[s] assign[=] call[name[self]._unicodes.get, parameter[name[h], constant[None]]] if compare[name[s] is_not constant[None]] begin[:] return[name[s]] variable[s] assign[=] call[name[u].encode, parameter[constant[utf-8]]] return[name[s]]
keyword[def] identifier[getBytesForString] ( identifier[self] , identifier[u] ): literal[string] identifier[h] = identifier[hash] ( identifier[u] ) identifier[s] = identifier[self] . identifier[_unicodes] . identifier[get] ( identifier[h] , keyword[None] ) keyword[if] identifier[s] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[s] identifier[s] = identifier[self] . identifier[_unicodes] [ identifier[h] ]= identifier[u] . identifier[encode] ( literal[string] ) keyword[return] identifier[s]
def getBytesForString(self, u): """ Returns the corresponding utf-8 encoded string for a given unicode object. If there is no string, one is encoded. @since: 0.6 """ h = hash(u) s = self._unicodes.get(h, None) if s is not None: return s # depends on [control=['if'], data=['s']] s = self._unicodes[h] = u.encode('utf-8') return s
def get_seconds_description(self): """Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description """ return self.get_segment_description( self._expression_parts[0], _("every second"), lambda s: s, lambda s: _("every {0} seconds").format(s), lambda s: _("seconds {0} through {1} past the minute"), lambda s: _("at {0} seconds past the minute") )
def function[get_seconds_description, parameter[self]]: constant[Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description ] return[call[name[self].get_segment_description, parameter[call[name[self]._expression_parts][constant[0]], call[name[_], parameter[constant[every second]]], <ast.Lambda object at 0x7da1b04674c0>, <ast.Lambda object at 0x7da1b04640a0>, <ast.Lambda object at 0x7da1b04f5e40>, <ast.Lambda object at 0x7da1b04f4d60>]]]
keyword[def] identifier[get_seconds_description] ( identifier[self] ): literal[string] keyword[return] identifier[self] . identifier[get_segment_description] ( identifier[self] . identifier[_expression_parts] [ literal[int] ], identifier[_] ( literal[string] ), keyword[lambda] identifier[s] : identifier[s] , keyword[lambda] identifier[s] : identifier[_] ( literal[string] ). identifier[format] ( identifier[s] ), keyword[lambda] identifier[s] : identifier[_] ( literal[string] ), keyword[lambda] identifier[s] : identifier[_] ( literal[string] ) )
def get_seconds_description(self): """Generates a description for only the SECONDS portion of the expression Returns: The SECONDS description """ return self.get_segment_description(self._expression_parts[0], _('every second'), lambda s: s, lambda s: _('every {0} seconds').format(s), lambda s: _('seconds {0} through {1} past the minute'), lambda s: _('at {0} seconds past the minute'))
def parse(self, string, lexer=None): """ Interpret a string. :param string: The input to lex and parse. :param lexer: The lexer to use. A new one will be created if not provided. :return: An object representation of the input. """ if not lexer: lexer = Lexer().lexer return self.parser.parse(string, lexer=lexer)
def function[parse, parameter[self, string, lexer]]: constant[ Interpret a string. :param string: The input to lex and parse. :param lexer: The lexer to use. A new one will be created if not provided. :return: An object representation of the input. ] if <ast.UnaryOp object at 0x7da1b1307bb0> begin[:] variable[lexer] assign[=] call[name[Lexer], parameter[]].lexer return[call[name[self].parser.parse, parameter[name[string]]]]
keyword[def] identifier[parse] ( identifier[self] , identifier[string] , identifier[lexer] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[lexer] : identifier[lexer] = identifier[Lexer] (). identifier[lexer] keyword[return] identifier[self] . identifier[parser] . identifier[parse] ( identifier[string] , identifier[lexer] = identifier[lexer] )
def parse(self, string, lexer=None): """ Interpret a string. :param string: The input to lex and parse. :param lexer: The lexer to use. A new one will be created if not provided. :return: An object representation of the input. """ if not lexer: lexer = Lexer().lexer # depends on [control=['if'], data=[]] return self.parser.parse(string, lexer=lexer)
def sigma_clip(arrin, niter=4, nsig=4, extra={}, verbose=False): """ NAME: sigma_clip() PURPOSE: Calculate the mean/stdev of an array with sigma clipping. Iterate niter times, removing elements that are outside nsig, and recalculating mean/stdev. CALLING SEQUENCE: mean,stdev = sigma_clip(arr, niter=4, nsig=4, extra={}) INPUTS: arr: A numpy array or a sequence that can be converted. OPTIONAL INPUTS: niter: number of iterations, defaults to 4 nsig: number of sigma, defaults to 4 OUTPUTS: mean,stdev: A tuple containing mean and standard deviation. OPTIONAL OUTPUTS extra={}: Dictionary containing the array of used indices in extra['index'] REVISION HISTORY: Converted from IDL: 2006-10-23. Erin Sheldon, NYU """ arr = numpy.array(arrin, ndmin=1, copy=False) index = numpy.arange( arr.size ) for i in numpy.arange(niter): m = arr[index].mean() s = arr[index].std() if verbose: sys.stdout.write('iter %s\tnuse: %s\tmean %s\tstdev %s\n' % \ (i+1, index.size,m,s)) clip = nsig*s w, = numpy.where( (numpy.abs(arr[index]) - m) < clip ) if w.size == 0: sys.stderr.write("nsig too small. Everything clipped on iteration %d" % i+1) return m,s index = index[w] # Calculate final stats amean = arr[index].mean() asig = arr[index].std() extra['index'] = index return amean, asig
def function[sigma_clip, parameter[arrin, niter, nsig, extra, verbose]]: constant[ NAME: sigma_clip() PURPOSE: Calculate the mean/stdev of an array with sigma clipping. Iterate niter times, removing elements that are outside nsig, and recalculating mean/stdev. CALLING SEQUENCE: mean,stdev = sigma_clip(arr, niter=4, nsig=4, extra={}) INPUTS: arr: A numpy array or a sequence that can be converted. OPTIONAL INPUTS: niter: number of iterations, defaults to 4 nsig: number of sigma, defaults to 4 OUTPUTS: mean,stdev: A tuple containing mean and standard deviation. OPTIONAL OUTPUTS extra={}: Dictionary containing the array of used indices in extra['index'] REVISION HISTORY: Converted from IDL: 2006-10-23. Erin Sheldon, NYU ] variable[arr] assign[=] call[name[numpy].array, parameter[name[arrin]]] variable[index] assign[=] call[name[numpy].arange, parameter[name[arr].size]] for taget[name[i]] in starred[call[name[numpy].arange, parameter[name[niter]]]] begin[:] variable[m] assign[=] call[call[name[arr]][name[index]].mean, parameter[]] variable[s] assign[=] call[call[name[arr]][name[index]].std, parameter[]] if name[verbose] begin[:] call[name[sys].stdout.write, parameter[binary_operation[constant[iter %s nuse: %s mean %s stdev %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b09bd6f0>, <ast.Attribute object at 0x7da1b09bd750>, <ast.Name object at 0x7da1b09bd7b0>, <ast.Name object at 0x7da1b09be6b0>]]]]] variable[clip] assign[=] binary_operation[name[nsig] * name[s]] <ast.Tuple object at 0x7da1b09befb0> assign[=] call[name[numpy].where, parameter[compare[binary_operation[call[name[numpy].abs, parameter[call[name[arr]][name[index]]]] - name[m]] less[<] name[clip]]]] if compare[name[w].size equal[==] constant[0]] begin[:] call[name[sys].stderr.write, parameter[binary_operation[binary_operation[constant[nsig too small. 
Everything clipped on iteration %d] <ast.Mod object at 0x7da2590d6920> name[i]] + constant[1]]]] return[tuple[[<ast.Name object at 0x7da1b09bfc40>, <ast.Name object at 0x7da1b09be800>]]] variable[index] assign[=] call[name[index]][name[w]] variable[amean] assign[=] call[call[name[arr]][name[index]].mean, parameter[]] variable[asig] assign[=] call[call[name[arr]][name[index]].std, parameter[]] call[name[extra]][constant[index]] assign[=] name[index] return[tuple[[<ast.Name object at 0x7da1b09be980>, <ast.Name object at 0x7da1b09bf4f0>]]]
keyword[def] identifier[sigma_clip] ( identifier[arrin] , identifier[niter] = literal[int] , identifier[nsig] = literal[int] , identifier[extra] ={}, identifier[verbose] = keyword[False] ): literal[string] identifier[arr] = identifier[numpy] . identifier[array] ( identifier[arrin] , identifier[ndmin] = literal[int] , identifier[copy] = keyword[False] ) identifier[index] = identifier[numpy] . identifier[arange] ( identifier[arr] . identifier[size] ) keyword[for] identifier[i] keyword[in] identifier[numpy] . identifier[arange] ( identifier[niter] ): identifier[m] = identifier[arr] [ identifier[index] ]. identifier[mean] () identifier[s] = identifier[arr] [ identifier[index] ]. identifier[std] () keyword[if] identifier[verbose] : identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] %( identifier[i] + literal[int] , identifier[index] . identifier[size] , identifier[m] , identifier[s] )) identifier[clip] = identifier[nsig] * identifier[s] identifier[w] ,= identifier[numpy] . identifier[where] (( identifier[numpy] . identifier[abs] ( identifier[arr] [ identifier[index] ])- identifier[m] )< identifier[clip] ) keyword[if] identifier[w] . identifier[size] == literal[int] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[i] + literal[int] ) keyword[return] identifier[m] , identifier[s] identifier[index] = identifier[index] [ identifier[w] ] identifier[amean] = identifier[arr] [ identifier[index] ]. identifier[mean] () identifier[asig] = identifier[arr] [ identifier[index] ]. identifier[std] () identifier[extra] [ literal[string] ]= identifier[index] keyword[return] identifier[amean] , identifier[asig]
def sigma_clip(arrin, niter=4, nsig=4, extra={}, verbose=False): """ NAME: sigma_clip() PURPOSE: Calculate the mean/stdev of an array with sigma clipping. Iterate niter times, removing elements that are outside nsig, and recalculating mean/stdev. CALLING SEQUENCE: mean,stdev = sigma_clip(arr, niter=4, nsig=4, extra={}) INPUTS: arr: A numpy array or a sequence that can be converted. OPTIONAL INPUTS: niter: number of iterations, defaults to 4 nsig: number of sigma, defaults to 4 OUTPUTS: mean,stdev: A tuple containing mean and standard deviation. OPTIONAL OUTPUTS extra={}: Dictionary containing the array of used indices in extra['index'] REVISION HISTORY: Converted from IDL: 2006-10-23. Erin Sheldon, NYU """ arr = numpy.array(arrin, ndmin=1, copy=False) index = numpy.arange(arr.size) for i in numpy.arange(niter): m = arr[index].mean() s = arr[index].std() if verbose: sys.stdout.write('iter %s\tnuse: %s\tmean %s\tstdev %s\n' % (i + 1, index.size, m, s)) # depends on [control=['if'], data=[]] clip = nsig * s (w,) = numpy.where(numpy.abs(arr[index]) - m < clip) if w.size == 0: sys.stderr.write('nsig too small. Everything clipped on iteration %d' % i + 1) return (m, s) # depends on [control=['if'], data=[]] index = index[w] # depends on [control=['for'], data=['i']] # Calculate final stats amean = arr[index].mean() asig = arr[index].std() extra['index'] = index return (amean, asig)
def read_from_hdx(identifier, configuration=None): # type: (str, Optional[Configuration]) -> Optional['ResourceView'] """Reads the resource view given by identifier from HDX and returns ResourceView object Args: identifier (str): Identifier of resource view configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[ResourceView]: ResourceView object if successful read, None if not """ resourceview = ResourceView(configuration=configuration) result = resourceview._load_from_hdx('resource view', identifier) if result: return resourceview return None
def function[read_from_hdx, parameter[identifier, configuration]]: constant[Reads the resource view given by identifier from HDX and returns ResourceView object Args: identifier (str): Identifier of resource view configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration. Returns: Optional[ResourceView]: ResourceView object if successful read, None if not ] variable[resourceview] assign[=] call[name[ResourceView], parameter[]] variable[result] assign[=] call[name[resourceview]._load_from_hdx, parameter[constant[resource view], name[identifier]]] if name[result] begin[:] return[name[resourceview]] return[constant[None]]
keyword[def] identifier[read_from_hdx] ( identifier[identifier] , identifier[configuration] = keyword[None] ): literal[string] identifier[resourceview] = identifier[ResourceView] ( identifier[configuration] = identifier[configuration] ) identifier[result] = identifier[resourceview] . identifier[_load_from_hdx] ( literal[string] , identifier[identifier] ) keyword[if] identifier[result] : keyword[return] identifier[resourceview] keyword[return] keyword[None]
def read_from_hdx(identifier, configuration=None): # type: (str, Optional[Configuration]) -> Optional['ResourceView'] 'Reads the resource view given by identifier from HDX and returns ResourceView object\n\n Args:\n identifier (str): Identifier of resource view\n configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n\n Returns:\n Optional[ResourceView]: ResourceView object if successful read, None if not\n ' resourceview = ResourceView(configuration=configuration) result = resourceview._load_from_hdx('resource view', identifier) if result: return resourceview # depends on [control=['if'], data=[]] return None
def zavg(self, name, score_start, score_end): """ Returns the average of elements of the sorted set stored at the specified key which have scores in the range [score_start,score_end]. .. note:: The range is [``score_start``, ``score_end``] :param string name: the zset name :param int score_start: The minimum score related to keys(included), empty string ``''`` means -inf :param int score_end: The maximum score(included) related to keys, empty string ``''`` means +inf :return: the average of keys in specified range :rtype: int >>> ssdb.zavg('zset_1', 20, 70) 38 >>> ssdb.zavg('zset_1', 0, 100) 35 >>> ssdb.zavg('zset_1', 2, 3) 0 """ score_start = get_integer_or_emptystring('score_start', score_start) score_end = get_integer_or_emptystring('score_end', score_end) return self.execute_command('zavg', name, score_start, score_end)
def function[zavg, parameter[self, name, score_start, score_end]]: constant[ Returns the average of elements of the sorted set stored at the specified key which have scores in the range [score_start,score_end]. .. note:: The range is [``score_start``, ``score_end``] :param string name: the zset name :param int score_start: The minimum score related to keys(included), empty string ``''`` means -inf :param int score_end: The maximum score(included) related to keys, empty string ``''`` means +inf :return: the average of keys in specified range :rtype: int >>> ssdb.zavg('zset_1', 20, 70) 38 >>> ssdb.zavg('zset_1', 0, 100) 35 >>> ssdb.zavg('zset_1', 2, 3) 0 ] variable[score_start] assign[=] call[name[get_integer_or_emptystring], parameter[constant[score_start], name[score_start]]] variable[score_end] assign[=] call[name[get_integer_or_emptystring], parameter[constant[score_end], name[score_end]]] return[call[name[self].execute_command, parameter[constant[zavg], name[name], name[score_start], name[score_end]]]]
keyword[def] identifier[zavg] ( identifier[self] , identifier[name] , identifier[score_start] , identifier[score_end] ): literal[string] identifier[score_start] = identifier[get_integer_or_emptystring] ( literal[string] , identifier[score_start] ) identifier[score_end] = identifier[get_integer_or_emptystring] ( literal[string] , identifier[score_end] ) keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[name] , identifier[score_start] , identifier[score_end] )
def zavg(self, name, score_start, score_end): """ Returns the average of elements of the sorted set stored at the specified key which have scores in the range [score_start,score_end]. .. note:: The range is [``score_start``, ``score_end``] :param string name: the zset name :param int score_start: The minimum score related to keys(included), empty string ``''`` means -inf :param int score_end: The maximum score(included) related to keys, empty string ``''`` means +inf :return: the average of keys in specified range :rtype: int >>> ssdb.zavg('zset_1', 20, 70) 38 >>> ssdb.zavg('zset_1', 0, 100) 35 >>> ssdb.zavg('zset_1', 2, 3) 0 """ score_start = get_integer_or_emptystring('score_start', score_start) score_end = get_integer_or_emptystring('score_end', score_end) return self.execute_command('zavg', name, score_start, score_end)
def restore_defaults(self): """ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults()
def function[restore_defaults, parameter[self]]: constant[ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. ] for taget[name[key]] in starred[name[self].default_values] begin[:] call[name[self].restore_default, parameter[name[key]]] for taget[name[section]] in starred[name[self].sections] begin[:] call[call[name[self]][name[section]].restore_defaults, parameter[]]
keyword[def] identifier[restore_defaults] ( identifier[self] ): literal[string] keyword[for] identifier[key] keyword[in] identifier[self] . identifier[default_values] : identifier[self] . identifier[restore_default] ( identifier[key] ) keyword[for] identifier[section] keyword[in] identifier[self] . identifier[sections] : identifier[self] [ identifier[section] ]. identifier[restore_defaults] ()
def restore_defaults(self): """ Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) # depends on [control=['for'], data=['key']] for section in self.sections: self[section].restore_defaults() # depends on [control=['for'], data=['section']]
async def field(self, elem=None, elem_type=None, params=None): """ Archive field :param elem: :param elem_type: :param params: :return: """ elem_type = elem_type if elem_type else elem.__class__ fvalue = None src = elem if issubclass(elem_type, x.UVarintType): fvalue = await self.uvarint(x.get_elem(src)) elif issubclass(elem_type, x.IntType): fvalue = await self.uint(elem=x.get_elem(src), elem_type=elem_type, params=params) elif issubclass(elem_type, x.BlobType): fvalue = await self.blob(elem=x.get_elem(src), elem_type=elem_type, params=params) elif issubclass(elem_type, x.UnicodeType): fvalue = await self.unicode_type(x.get_elem(src)) elif issubclass(elem_type, x.VariantType): fvalue = await self.variant(elem=x.get_elem(src), elem_type=elem_type, params=params) elif issubclass(elem_type, x.ContainerType): # container ~ simple list fvalue = await self.container(container=x.get_elem(src), container_type=elem_type, params=params) elif issubclass(elem_type, x.TupleType): # tuple ~ simple list fvalue = await self.tuple(elem=x.get_elem(src), elem_type=elem_type, params=params) elif issubclass(elem_type, x.MessageType): fvalue = await self.message(x.get_elem(src), msg_type=elem_type) else: raise TypeError return fvalue if self.writing else x.set_elem(elem, fvalue)
<ast.AsyncFunctionDef object at 0x7da1b24504f0>
keyword[async] keyword[def] identifier[field] ( identifier[self] , identifier[elem] = keyword[None] , identifier[elem_type] = keyword[None] , identifier[params] = keyword[None] ): literal[string] identifier[elem_type] = identifier[elem_type] keyword[if] identifier[elem_type] keyword[else] identifier[elem] . identifier[__class__] identifier[fvalue] = keyword[None] identifier[src] = identifier[elem] keyword[if] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[UVarintType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[uvarint] ( identifier[x] . identifier[get_elem] ( identifier[src] )) keyword[elif] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[IntType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[uint] ( identifier[elem] = identifier[x] . identifier[get_elem] ( identifier[src] ), identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] ) keyword[elif] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[BlobType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[blob] ( identifier[elem] = identifier[x] . identifier[get_elem] ( identifier[src] ), identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] ) keyword[elif] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[UnicodeType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[unicode_type] ( identifier[x] . identifier[get_elem] ( identifier[src] )) keyword[elif] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[VariantType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[variant] ( identifier[elem] = identifier[x] . 
identifier[get_elem] ( identifier[src] ), identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] ) keyword[elif] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[ContainerType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[container] ( identifier[container] = identifier[x] . identifier[get_elem] ( identifier[src] ), identifier[container_type] = identifier[elem_type] , identifier[params] = identifier[params] ) keyword[elif] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[TupleType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[tuple] ( identifier[elem] = identifier[x] . identifier[get_elem] ( identifier[src] ), identifier[elem_type] = identifier[elem_type] , identifier[params] = identifier[params] ) keyword[elif] identifier[issubclass] ( identifier[elem_type] , identifier[x] . identifier[MessageType] ): identifier[fvalue] = keyword[await] identifier[self] . identifier[message] ( identifier[x] . identifier[get_elem] ( identifier[src] ), identifier[msg_type] = identifier[elem_type] ) keyword[else] : keyword[raise] identifier[TypeError] keyword[return] identifier[fvalue] keyword[if] identifier[self] . identifier[writing] keyword[else] identifier[x] . identifier[set_elem] ( identifier[elem] , identifier[fvalue] )
async def field(self, elem=None, elem_type=None, params=None): """ Archive field :param elem: :param elem_type: :param params: :return: """ elem_type = elem_type if elem_type else elem.__class__ fvalue = None src = elem if issubclass(elem_type, x.UVarintType): fvalue = await self.uvarint(x.get_elem(src)) # depends on [control=['if'], data=[]] elif issubclass(elem_type, x.IntType): fvalue = await self.uint(elem=x.get_elem(src), elem_type=elem_type, params=params) # depends on [control=['if'], data=[]] elif issubclass(elem_type, x.BlobType): fvalue = await self.blob(elem=x.get_elem(src), elem_type=elem_type, params=params) # depends on [control=['if'], data=[]] elif issubclass(elem_type, x.UnicodeType): fvalue = await self.unicode_type(x.get_elem(src)) # depends on [control=['if'], data=[]] elif issubclass(elem_type, x.VariantType): fvalue = await self.variant(elem=x.get_elem(src), elem_type=elem_type, params=params) # depends on [control=['if'], data=[]] elif issubclass(elem_type, x.ContainerType): # container ~ simple list fvalue = await self.container(container=x.get_elem(src), container_type=elem_type, params=params) # depends on [control=['if'], data=[]] elif issubclass(elem_type, x.TupleType): # tuple ~ simple list fvalue = await self.tuple(elem=x.get_elem(src), elem_type=elem_type, params=params) # depends on [control=['if'], data=[]] elif issubclass(elem_type, x.MessageType): fvalue = await self.message(x.get_elem(src), msg_type=elem_type) # depends on [control=['if'], data=[]] else: raise TypeError return fvalue if self.writing else x.set_elem(elem, fvalue)
def fetch_open_data(cls, flag, start, end, **kwargs): """Fetch Open Data timeline segments into a flag. flag : `str` the name of the flag to query start : `int`, `str` the GPS start time (or parseable date string) to query end : `int`, `str` the GPS end time (or parseable date string) to query verbose : `bool`, optional show verbose download progress, default: `False` timeout : `int`, optional timeout for download (seconds) host : `str`, optional URL of LOSC host, default: ``'losc.ligo.org'`` Returns ------- flag : `DataQualityFlag` a new flag with `active` segments filled from Open Data Examples -------- >>> from gwpy.segments import DataQualityFlag >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010', ... 'Jan 2 2010')) <DataQualityFlag('H1:DATA', known=[[946339215 ... 946425615)], active=[[946340946 ... 946351800) [946356479 ... 946360620) [946362652 ... 946369150) [946372854 ... 946382630) [946395595 ... 946396751) [946400173 ... 946404977) [946412312 ... 946413577) [946415770 ... 946422986)], description=None)> """ start = to_gps(start).gpsSeconds end = to_gps(end).gpsSeconds known = [(start, end)] active = timeline.get_segments(flag, start, end, **kwargs) return cls(flag.replace('_', ':', 1), known=known, active=active, label=flag)
def function[fetch_open_data, parameter[cls, flag, start, end]]: constant[Fetch Open Data timeline segments into a flag. flag : `str` the name of the flag to query start : `int`, `str` the GPS start time (or parseable date string) to query end : `int`, `str` the GPS end time (or parseable date string) to query verbose : `bool`, optional show verbose download progress, default: `False` timeout : `int`, optional timeout for download (seconds) host : `str`, optional URL of LOSC host, default: ``'losc.ligo.org'`` Returns ------- flag : `DataQualityFlag` a new flag with `active` segments filled from Open Data Examples -------- >>> from gwpy.segments import DataQualityFlag >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010', ... 'Jan 2 2010')) <DataQualityFlag('H1:DATA', known=[[946339215 ... 946425615)], active=[[946340946 ... 946351800) [946356479 ... 946360620) [946362652 ... 946369150) [946372854 ... 946382630) [946395595 ... 946396751) [946400173 ... 946404977) [946412312 ... 946413577) [946415770 ... 946422986)], description=None)> ] variable[start] assign[=] call[name[to_gps], parameter[name[start]]].gpsSeconds variable[end] assign[=] call[name[to_gps], parameter[name[end]]].gpsSeconds variable[known] assign[=] list[[<ast.Tuple object at 0x7da2043478e0>]] variable[active] assign[=] call[name[timeline].get_segments, parameter[name[flag], name[start], name[end]]] return[call[name[cls], parameter[call[name[flag].replace, parameter[constant[_], constant[:], constant[1]]]]]]
keyword[def] identifier[fetch_open_data] ( identifier[cls] , identifier[flag] , identifier[start] , identifier[end] ,** identifier[kwargs] ): literal[string] identifier[start] = identifier[to_gps] ( identifier[start] ). identifier[gpsSeconds] identifier[end] = identifier[to_gps] ( identifier[end] ). identifier[gpsSeconds] identifier[known] =[( identifier[start] , identifier[end] )] identifier[active] = identifier[timeline] . identifier[get_segments] ( identifier[flag] , identifier[start] , identifier[end] ,** identifier[kwargs] ) keyword[return] identifier[cls] ( identifier[flag] . identifier[replace] ( literal[string] , literal[string] , literal[int] ), identifier[known] = identifier[known] , identifier[active] = identifier[active] , identifier[label] = identifier[flag] )
def fetch_open_data(cls, flag, start, end, **kwargs): """Fetch Open Data timeline segments into a flag. flag : `str` the name of the flag to query start : `int`, `str` the GPS start time (or parseable date string) to query end : `int`, `str` the GPS end time (or parseable date string) to query verbose : `bool`, optional show verbose download progress, default: `False` timeout : `int`, optional timeout for download (seconds) host : `str`, optional URL of LOSC host, default: ``'losc.ligo.org'`` Returns ------- flag : `DataQualityFlag` a new flag with `active` segments filled from Open Data Examples -------- >>> from gwpy.segments import DataQualityFlag >>> print(DataQualityFlag.fetch_open_data('H1_DATA', 'Jan 1 2010', ... 'Jan 2 2010')) <DataQualityFlag('H1:DATA', known=[[946339215 ... 946425615)], active=[[946340946 ... 946351800) [946356479 ... 946360620) [946362652 ... 946369150) [946372854 ... 946382630) [946395595 ... 946396751) [946400173 ... 946404977) [946412312 ... 946413577) [946415770 ... 946422986)], description=None)> """ start = to_gps(start).gpsSeconds end = to_gps(end).gpsSeconds known = [(start, end)] active = timeline.get_segments(flag, start, end, **kwargs) return cls(flag.replace('_', ':', 1), known=known, active=active, label=flag)
def make_class(clsname, func, attrs): """Turn a funcs list element into a class object.""" clsdict = {"__set__": create_setter(func, attrs)} if len(attrs) > 0: clsdict["__init__"] = create_init(attrs) clsobj = type(str(clsname), (Descriptor, ), clsdict) clsobj.__doc__ = docstrings.get(clsname) return clsobj
def function[make_class, parameter[clsname, func, attrs]]: constant[Turn a funcs list element into a class object.] variable[clsdict] assign[=] dictionary[[<ast.Constant object at 0x7da1b021d8d0>], [<ast.Call object at 0x7da1b021d960>]] if compare[call[name[len], parameter[name[attrs]]] greater[>] constant[0]] begin[:] call[name[clsdict]][constant[__init__]] assign[=] call[name[create_init], parameter[name[attrs]]] variable[clsobj] assign[=] call[name[type], parameter[call[name[str], parameter[name[clsname]]], tuple[[<ast.Name object at 0x7da1b021fc40>]], name[clsdict]]] name[clsobj].__doc__ assign[=] call[name[docstrings].get, parameter[name[clsname]]] return[name[clsobj]]
keyword[def] identifier[make_class] ( identifier[clsname] , identifier[func] , identifier[attrs] ): literal[string] identifier[clsdict] ={ literal[string] : identifier[create_setter] ( identifier[func] , identifier[attrs] )} keyword[if] identifier[len] ( identifier[attrs] )> literal[int] : identifier[clsdict] [ literal[string] ]= identifier[create_init] ( identifier[attrs] ) identifier[clsobj] = identifier[type] ( identifier[str] ( identifier[clsname] ),( identifier[Descriptor] ,), identifier[clsdict] ) identifier[clsobj] . identifier[__doc__] = identifier[docstrings] . identifier[get] ( identifier[clsname] ) keyword[return] identifier[clsobj]
def make_class(clsname, func, attrs): """Turn a funcs list element into a class object.""" clsdict = {'__set__': create_setter(func, attrs)} if len(attrs) > 0: clsdict['__init__'] = create_init(attrs) # depends on [control=['if'], data=[]] clsobj = type(str(clsname), (Descriptor,), clsdict) clsobj.__doc__ = docstrings.get(clsname) return clsobj
def quick_layout_save(self): """Save layout dialog""" get = CONF.get set_ = CONF.set names = get('quick_layouts', 'names') order = get('quick_layouts', 'order') active = get('quick_layouts', 'active') dlg = self.dialog_layout_save(self, names) if dlg.exec_(): name = dlg.combo_box.currentText() if name in names: answer = QMessageBox.warning(self, _("Warning"), _("Layout <b>%s</b> will be \ overwritten. Do you want to \ continue?") % name, QMessageBox.Yes | QMessageBox.No) index = order.index(name) else: answer = True if None in names: index = names.index(None) names[index] = name else: index = len(names) names.append(name) order.append(name) # Always make active a new layout even if it overwrites an inactive # layout if name not in active: active.append(name) if answer: self.save_current_window_settings('layout_{}/'.format(index), section='quick_layouts') set_('quick_layouts', 'names', names) set_('quick_layouts', 'order', order) set_('quick_layouts', 'active', active) self.quick_layout_set_menu()
def function[quick_layout_save, parameter[self]]: constant[Save layout dialog] variable[get] assign[=] name[CONF].get variable[set_] assign[=] name[CONF].set variable[names] assign[=] call[name[get], parameter[constant[quick_layouts], constant[names]]] variable[order] assign[=] call[name[get], parameter[constant[quick_layouts], constant[order]]] variable[active] assign[=] call[name[get], parameter[constant[quick_layouts], constant[active]]] variable[dlg] assign[=] call[name[self].dialog_layout_save, parameter[name[self], name[names]]] if call[name[dlg].exec_, parameter[]] begin[:] variable[name] assign[=] call[name[dlg].combo_box.currentText, parameter[]] if compare[name[name] in name[names]] begin[:] variable[answer] assign[=] call[name[QMessageBox].warning, parameter[name[self], call[name[_], parameter[constant[Warning]]], binary_operation[call[name[_], parameter[constant[Layout <b>%s</b> will be overwritten. Do you want to continue?]]] <ast.Mod object at 0x7da2590d6920> name[name]], binary_operation[name[QMessageBox].Yes <ast.BitOr object at 0x7da2590d6aa0> name[QMessageBox].No]]] variable[index] assign[=] call[name[order].index, parameter[name[name]]] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[active]] begin[:] call[name[active].append, parameter[name[name]]] if name[answer] begin[:] call[name[self].save_current_window_settings, parameter[call[constant[layout_{}/].format, parameter[name[index]]]]] call[name[set_], parameter[constant[quick_layouts], constant[names], name[names]]] call[name[set_], parameter[constant[quick_layouts], constant[order], name[order]]] call[name[set_], parameter[constant[quick_layouts], constant[active], name[active]]] call[name[self].quick_layout_set_menu, parameter[]]
keyword[def] identifier[quick_layout_save] ( identifier[self] ): literal[string] identifier[get] = identifier[CONF] . identifier[get] identifier[set_] = identifier[CONF] . identifier[set] identifier[names] = identifier[get] ( literal[string] , literal[string] ) identifier[order] = identifier[get] ( literal[string] , literal[string] ) identifier[active] = identifier[get] ( literal[string] , literal[string] ) identifier[dlg] = identifier[self] . identifier[dialog_layout_save] ( identifier[self] , identifier[names] ) keyword[if] identifier[dlg] . identifier[exec_] (): identifier[name] = identifier[dlg] . identifier[combo_box] . identifier[currentText] () keyword[if] identifier[name] keyword[in] identifier[names] : identifier[answer] = identifier[QMessageBox] . identifier[warning] ( identifier[self] , identifier[_] ( literal[string] ), identifier[_] ( literal[string] )% identifier[name] , identifier[QMessageBox] . identifier[Yes] | identifier[QMessageBox] . identifier[No] ) identifier[index] = identifier[order] . identifier[index] ( identifier[name] ) keyword[else] : identifier[answer] = keyword[True] keyword[if] keyword[None] keyword[in] identifier[names] : identifier[index] = identifier[names] . identifier[index] ( keyword[None] ) identifier[names] [ identifier[index] ]= identifier[name] keyword[else] : identifier[index] = identifier[len] ( identifier[names] ) identifier[names] . identifier[append] ( identifier[name] ) identifier[order] . identifier[append] ( identifier[name] ) keyword[if] identifier[name] keyword[not] keyword[in] identifier[active] : identifier[active] . identifier[append] ( identifier[name] ) keyword[if] identifier[answer] : identifier[self] . identifier[save_current_window_settings] ( literal[string] . 
identifier[format] ( identifier[index] ), identifier[section] = literal[string] ) identifier[set_] ( literal[string] , literal[string] , identifier[names] ) identifier[set_] ( literal[string] , literal[string] , identifier[order] ) identifier[set_] ( literal[string] , literal[string] , identifier[active] ) identifier[self] . identifier[quick_layout_set_menu] ()
def quick_layout_save(self): """Save layout dialog""" get = CONF.get set_ = CONF.set names = get('quick_layouts', 'names') order = get('quick_layouts', 'order') active = get('quick_layouts', 'active') dlg = self.dialog_layout_save(self, names) if dlg.exec_(): name = dlg.combo_box.currentText() if name in names: answer = QMessageBox.warning(self, _('Warning'), _('Layout <b>%s</b> will be overwritten. Do you want to continue?') % name, QMessageBox.Yes | QMessageBox.No) index = order.index(name) # depends on [control=['if'], data=['name']] else: answer = True if None in names: index = names.index(None) names[index] = name # depends on [control=['if'], data=['names']] else: index = len(names) names.append(name) order.append(name) # Always make active a new layout even if it overwrites an inactive # layout if name not in active: active.append(name) # depends on [control=['if'], data=['name', 'active']] if answer: self.save_current_window_settings('layout_{}/'.format(index), section='quick_layouts') set_('quick_layouts', 'names', names) set_('quick_layouts', 'order', order) set_('quick_layouts', 'active', active) self.quick_layout_set_menu() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def remove_mixin(target, name, mixedin=None, replace=True): """Remove a mixin with name (and reference) from targetand returns the replaced one or None. :param mixedin: a mixedin value or the last defined mixedin if is None (by default). :param bool replace: If True (default), the removed mixedin replaces the current mixin. """ try: result = getattr(target, name) except AttributeError: raise Mixin.MixInError( "No mixin {0} exists in {1}".format(name, target) ) mixedins_by_name = Mixin.get_mixedins_by_name(target) mixedins = mixedins_by_name.get(name) if mixedins: if mixedin is None: mixedin = mixedins[-1] mixedins = mixedins[:-2] else: try: index = mixedins.index(mixedin) except ValueError: raise Mixin.MixInError( "Mixedin {0} with name {1} does not exist \ in target {2}" .format(mixedin, name, target) ) mixedins = mixedins[0:index] + mixedins[index + 1:] if len(mixedins) == 0: # force to replace/delete the mixin even if replace is False # in order to stay in a consistent state if mixedin != Mixin.__NEW_CONTENT_KEY__: setattr(target, name, mixedin) else: delattr(target, name) del mixedins_by_name[name] else: if replace: setattr(target, name, mixedin) mixedins_by_name[name] = mixedins else: # shouldn't be raised except if removing has been done # manually raise Mixin.MixInError( "No mixin {0} exists in {1}".format(name, target)) # clean mixedins if no one exists if len(mixedins_by_name) == 0: delattr(target, Mixin.__MIXEDIN_KEY__) return result
def function[remove_mixin, parameter[target, name, mixedin, replace]]: constant[Remove a mixin with name (and reference) from targetand returns the replaced one or None. :param mixedin: a mixedin value or the last defined mixedin if is None (by default). :param bool replace: If True (default), the removed mixedin replaces the current mixin. ] <ast.Try object at 0x7da18eb55de0> variable[mixedins_by_name] assign[=] call[name[Mixin].get_mixedins_by_name, parameter[name[target]]] variable[mixedins] assign[=] call[name[mixedins_by_name].get, parameter[name[name]]] if name[mixedins] begin[:] if compare[name[mixedin] is constant[None]] begin[:] variable[mixedin] assign[=] call[name[mixedins]][<ast.UnaryOp object at 0x7da2054a5c60>] variable[mixedins] assign[=] call[name[mixedins]][<ast.Slice object at 0x7da2054a50f0>] if compare[call[name[len], parameter[name[mixedins]]] equal[==] constant[0]] begin[:] if compare[name[mixedin] not_equal[!=] name[Mixin].__NEW_CONTENT_KEY__] begin[:] call[name[setattr], parameter[name[target], name[name], name[mixedin]]] <ast.Delete object at 0x7da2054a7580> if compare[call[name[len], parameter[name[mixedins_by_name]]] equal[==] constant[0]] begin[:] call[name[delattr], parameter[name[target], name[Mixin].__MIXEDIN_KEY__]] return[name[result]]
keyword[def] identifier[remove_mixin] ( identifier[target] , identifier[name] , identifier[mixedin] = keyword[None] , identifier[replace] = keyword[True] ): literal[string] keyword[try] : identifier[result] = identifier[getattr] ( identifier[target] , identifier[name] ) keyword[except] identifier[AttributeError] : keyword[raise] identifier[Mixin] . identifier[MixInError] ( literal[string] . identifier[format] ( identifier[name] , identifier[target] ) ) identifier[mixedins_by_name] = identifier[Mixin] . identifier[get_mixedins_by_name] ( identifier[target] ) identifier[mixedins] = identifier[mixedins_by_name] . identifier[get] ( identifier[name] ) keyword[if] identifier[mixedins] : keyword[if] identifier[mixedin] keyword[is] keyword[None] : identifier[mixedin] = identifier[mixedins] [- literal[int] ] identifier[mixedins] = identifier[mixedins] [:- literal[int] ] keyword[else] : keyword[try] : identifier[index] = identifier[mixedins] . identifier[index] ( identifier[mixedin] ) keyword[except] identifier[ValueError] : keyword[raise] identifier[Mixin] . identifier[MixInError] ( literal[string] . identifier[format] ( identifier[mixedin] , identifier[name] , identifier[target] ) ) identifier[mixedins] = identifier[mixedins] [ literal[int] : identifier[index] ]+ identifier[mixedins] [ identifier[index] + literal[int] :] keyword[if] identifier[len] ( identifier[mixedins] )== literal[int] : keyword[if] identifier[mixedin] != identifier[Mixin] . identifier[__NEW_CONTENT_KEY__] : identifier[setattr] ( identifier[target] , identifier[name] , identifier[mixedin] ) keyword[else] : identifier[delattr] ( identifier[target] , identifier[name] ) keyword[del] identifier[mixedins_by_name] [ identifier[name] ] keyword[else] : keyword[if] identifier[replace] : identifier[setattr] ( identifier[target] , identifier[name] , identifier[mixedin] ) identifier[mixedins_by_name] [ identifier[name] ]= identifier[mixedins] keyword[else] : keyword[raise] identifier[Mixin] . 
identifier[MixInError] ( literal[string] . identifier[format] ( identifier[name] , identifier[target] )) keyword[if] identifier[len] ( identifier[mixedins_by_name] )== literal[int] : identifier[delattr] ( identifier[target] , identifier[Mixin] . identifier[__MIXEDIN_KEY__] ) keyword[return] identifier[result]
def remove_mixin(target, name, mixedin=None, replace=True): """Remove a mixin with name (and reference) from targetand returns the replaced one or None. :param mixedin: a mixedin value or the last defined mixedin if is None (by default). :param bool replace: If True (default), the removed mixedin replaces the current mixin. """ try: result = getattr(target, name) # depends on [control=['try'], data=[]] except AttributeError: raise Mixin.MixInError('No mixin {0} exists in {1}'.format(name, target)) # depends on [control=['except'], data=[]] mixedins_by_name = Mixin.get_mixedins_by_name(target) mixedins = mixedins_by_name.get(name) if mixedins: if mixedin is None: mixedin = mixedins[-1] mixedins = mixedins[:-2] # depends on [control=['if'], data=['mixedin']] else: try: index = mixedins.index(mixedin) # depends on [control=['try'], data=[]] except ValueError: raise Mixin.MixInError('Mixedin {0} with name {1} does not exist in target {2}'.format(mixedin, name, target)) # depends on [control=['except'], data=[]] mixedins = mixedins[0:index] + mixedins[index + 1:] if len(mixedins) == 0: # force to replace/delete the mixin even if replace is False # in order to stay in a consistent state if mixedin != Mixin.__NEW_CONTENT_KEY__: setattr(target, name, mixedin) # depends on [control=['if'], data=['mixedin']] else: delattr(target, name) del mixedins_by_name[name] # depends on [control=['if'], data=[]] else: if replace: setattr(target, name, mixedin) # depends on [control=['if'], data=[]] mixedins_by_name[name] = mixedins # depends on [control=['if'], data=[]] else: # shouldn't be raised except if removing has been done # manually raise Mixin.MixInError('No mixin {0} exists in {1}'.format(name, target)) # clean mixedins if no one exists if len(mixedins_by_name) == 0: delattr(target, Mixin.__MIXEDIN_KEY__) # depends on [control=['if'], data=[]] return result
def encode(self, text): """ 对需要加密的明文进行填充补位 @param text: 需要进行填充补位操作的明文 @return: 补齐明文字符串 """ text_length = len(text) # 计算需要填充的位数 amount_to_pad = self.block_size - (text_length % self.block_size) if amount_to_pad == 0: amount_to_pad = self.block_size # 获得补位所用的字符 pad = chr(amount_to_pad) return text + pad * amount_to_pad
def function[encode, parameter[self, text]]: constant[ 对需要加密的明文进行填充补位 @param text: 需要进行填充补位操作的明文 @return: 补齐明文字符串 ] variable[text_length] assign[=] call[name[len], parameter[name[text]]] variable[amount_to_pad] assign[=] binary_operation[name[self].block_size - binary_operation[name[text_length] <ast.Mod object at 0x7da2590d6920> name[self].block_size]] if compare[name[amount_to_pad] equal[==] constant[0]] begin[:] variable[amount_to_pad] assign[=] name[self].block_size variable[pad] assign[=] call[name[chr], parameter[name[amount_to_pad]]] return[binary_operation[name[text] + binary_operation[name[pad] * name[amount_to_pad]]]]
keyword[def] identifier[encode] ( identifier[self] , identifier[text] ): literal[string] identifier[text_length] = identifier[len] ( identifier[text] ) identifier[amount_to_pad] = identifier[self] . identifier[block_size] -( identifier[text_length] % identifier[self] . identifier[block_size] ) keyword[if] identifier[amount_to_pad] == literal[int] : identifier[amount_to_pad] = identifier[self] . identifier[block_size] identifier[pad] = identifier[chr] ( identifier[amount_to_pad] ) keyword[return] identifier[text] + identifier[pad] * identifier[amount_to_pad]
def encode(self, text): """ 对需要加密的明文进行填充补位 @param text: 需要进行填充补位操作的明文 @return: 补齐明文字符串 """ text_length = len(text) # 计算需要填充的位数 amount_to_pad = self.block_size - text_length % self.block_size if amount_to_pad == 0: amount_to_pad = self.block_size # depends on [control=['if'], data=['amount_to_pad']] # 获得补位所用的字符 pad = chr(amount_to_pad) return text + pad * amount_to_pad
def append_inflation_op(self, source=None): """Append a :class:`Inflation <stellar_base.operation.Inflation>` operation to the list of operations. :param str source: The source address that is running the inflation operation. :return: This builder instance. """ op = operation.Inflation(source) return self.append_op(op)
def function[append_inflation_op, parameter[self, source]]: constant[Append a :class:`Inflation <stellar_base.operation.Inflation>` operation to the list of operations. :param str source: The source address that is running the inflation operation. :return: This builder instance. ] variable[op] assign[=] call[name[operation].Inflation, parameter[name[source]]] return[call[name[self].append_op, parameter[name[op]]]]
keyword[def] identifier[append_inflation_op] ( identifier[self] , identifier[source] = keyword[None] ): literal[string] identifier[op] = identifier[operation] . identifier[Inflation] ( identifier[source] ) keyword[return] identifier[self] . identifier[append_op] ( identifier[op] )
def append_inflation_op(self, source=None): """Append a :class:`Inflation <stellar_base.operation.Inflation>` operation to the list of operations. :param str source: The source address that is running the inflation operation. :return: This builder instance. """ op = operation.Inflation(source) return self.append_op(op)
def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None): """ Visual evolution of a morphological snake. Parameters ---------- msnake : MorphGAC or MorphACWE instance The morphological snake solver. fig: object, optional Handles to actual figure. levelset : array-like, optional If given, the levelset of the solver is initialized to this. If not given, the evolution will use the levelset already set in msnake. num_iters : int, optional The number of iterations. background : array-like, optional If given, background will be shown behind the contours instead of msnake.data. """ if levelset is not None: msnake.levelset = levelset # Prepare the visual environment. if fig is None: fig = plt.figure() fig.clf() ax1 = fig.add_subplot(1, 2, 1) if background is None: ax1.imshow(msnake.data, cmap=plt.cm.gray) else: ax1.imshow(background, cmap=plt.cm.gray) ax1.contour(msnake.levelset, [0.5], colors='r') ax2 = fig.add_subplot(1, 2, 2) ax_u = ax2.imshow(msnake.levelset) plt.pause(0.001) # Iterate. for _ in range(num_iters): # Evolve. msnake.step() # Update figure. del ax1.collections[0] ax1.contour(msnake.levelset, [0.5], colors='r') ax_u.set_data(msnake.levelset) fig.canvas.draw() #plt.pause(0.001) # Return the last levelset. return msnake.levelset
def function[evolve_visual, parameter[msnake, fig, levelset, num_iters, background]]: constant[ Visual evolution of a morphological snake. Parameters ---------- msnake : MorphGAC or MorphACWE instance The morphological snake solver. fig: object, optional Handles to actual figure. levelset : array-like, optional If given, the levelset of the solver is initialized to this. If not given, the evolution will use the levelset already set in msnake. num_iters : int, optional The number of iterations. background : array-like, optional If given, background will be shown behind the contours instead of msnake.data. ] if compare[name[levelset] is_not constant[None]] begin[:] name[msnake].levelset assign[=] name[levelset] if compare[name[fig] is constant[None]] begin[:] variable[fig] assign[=] call[name[plt].figure, parameter[]] call[name[fig].clf, parameter[]] variable[ax1] assign[=] call[name[fig].add_subplot, parameter[constant[1], constant[2], constant[1]]] if compare[name[background] is constant[None]] begin[:] call[name[ax1].imshow, parameter[name[msnake].data]] call[name[ax1].contour, parameter[name[msnake].levelset, list[[<ast.Constant object at 0x7da20c6a85b0>]]]] variable[ax2] assign[=] call[name[fig].add_subplot, parameter[constant[1], constant[2], constant[2]]] variable[ax_u] assign[=] call[name[ax2].imshow, parameter[name[msnake].levelset]] call[name[plt].pause, parameter[constant[0.001]]] for taget[name[_]] in starred[call[name[range], parameter[name[num_iters]]]] begin[:] call[name[msnake].step, parameter[]] <ast.Delete object at 0x7da1b206ba60> call[name[ax1].contour, parameter[name[msnake].levelset, list[[<ast.Constant object at 0x7da1b206b220>]]]] call[name[ax_u].set_data, parameter[name[msnake].levelset]] call[name[fig].canvas.draw, parameter[]] return[name[msnake].levelset]
keyword[def] identifier[evolve_visual] ( identifier[msnake] , identifier[fig] = keyword[None] , identifier[levelset] = keyword[None] , identifier[num_iters] = literal[int] , identifier[background] = keyword[None] ): literal[string] keyword[if] identifier[levelset] keyword[is] keyword[not] keyword[None] : identifier[msnake] . identifier[levelset] = identifier[levelset] keyword[if] identifier[fig] keyword[is] keyword[None] : identifier[fig] = identifier[plt] . identifier[figure] () identifier[fig] . identifier[clf] () identifier[ax1] = identifier[fig] . identifier[add_subplot] ( literal[int] , literal[int] , literal[int] ) keyword[if] identifier[background] keyword[is] keyword[None] : identifier[ax1] . identifier[imshow] ( identifier[msnake] . identifier[data] , identifier[cmap] = identifier[plt] . identifier[cm] . identifier[gray] ) keyword[else] : identifier[ax1] . identifier[imshow] ( identifier[background] , identifier[cmap] = identifier[plt] . identifier[cm] . identifier[gray] ) identifier[ax1] . identifier[contour] ( identifier[msnake] . identifier[levelset] ,[ literal[int] ], identifier[colors] = literal[string] ) identifier[ax2] = identifier[fig] . identifier[add_subplot] ( literal[int] , literal[int] , literal[int] ) identifier[ax_u] = identifier[ax2] . identifier[imshow] ( identifier[msnake] . identifier[levelset] ) identifier[plt] . identifier[pause] ( literal[int] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[num_iters] ): identifier[msnake] . identifier[step] () keyword[del] identifier[ax1] . identifier[collections] [ literal[int] ] identifier[ax1] . identifier[contour] ( identifier[msnake] . identifier[levelset] ,[ literal[int] ], identifier[colors] = literal[string] ) identifier[ax_u] . identifier[set_data] ( identifier[msnake] . identifier[levelset] ) identifier[fig] . identifier[canvas] . identifier[draw] () keyword[return] identifier[msnake] . identifier[levelset]
def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None): """ Visual evolution of a morphological snake. Parameters ---------- msnake : MorphGAC or MorphACWE instance The morphological snake solver. fig: object, optional Handles to actual figure. levelset : array-like, optional If given, the levelset of the solver is initialized to this. If not given, the evolution will use the levelset already set in msnake. num_iters : int, optional The number of iterations. background : array-like, optional If given, background will be shown behind the contours instead of msnake.data. """ if levelset is not None: msnake.levelset = levelset # depends on [control=['if'], data=['levelset']] # Prepare the visual environment. if fig is None: fig = plt.figure() # depends on [control=['if'], data=['fig']] fig.clf() ax1 = fig.add_subplot(1, 2, 1) if background is None: ax1.imshow(msnake.data, cmap=plt.cm.gray) # depends on [control=['if'], data=[]] else: ax1.imshow(background, cmap=plt.cm.gray) ax1.contour(msnake.levelset, [0.5], colors='r') ax2 = fig.add_subplot(1, 2, 2) ax_u = ax2.imshow(msnake.levelset) plt.pause(0.001) # Iterate. for _ in range(num_iters): # Evolve. msnake.step() # Update figure. del ax1.collections[0] ax1.contour(msnake.levelset, [0.5], colors='r') ax_u.set_data(msnake.levelset) fig.canvas.draw() # depends on [control=['for'], data=[]] #plt.pause(0.001) # Return the last levelset. return msnake.levelset
def expect_constructor(target): """ Set an expectation on a ``ClassDouble`` constructor :param ClassDouble target: The ClassDouble to set the expectation on. :return: an ``Expectation`` for the __new__ method. :raise: ``ConstructorDoubleError`` if target is not a ClassDouble. """ if not isinstance(target, ClassDouble): raise ConstructorDoubleError( 'Cannot allow_constructor of {} since it is not a ClassDouble.'.format(target), ) return expect(target)._doubles__new__
def function[expect_constructor, parameter[target]]: constant[ Set an expectation on a ``ClassDouble`` constructor :param ClassDouble target: The ClassDouble to set the expectation on. :return: an ``Expectation`` for the __new__ method. :raise: ``ConstructorDoubleError`` if target is not a ClassDouble. ] if <ast.UnaryOp object at 0x7da20c6e6620> begin[:] <ast.Raise object at 0x7da20c6e7ca0> return[call[name[expect], parameter[name[target]]]._doubles__new__]
keyword[def] identifier[expect_constructor] ( identifier[target] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[target] , identifier[ClassDouble] ): keyword[raise] identifier[ConstructorDoubleError] ( literal[string] . identifier[format] ( identifier[target] ), ) keyword[return] identifier[expect] ( identifier[target] ). identifier[_doubles__new__]
def expect_constructor(target): """ Set an expectation on a ``ClassDouble`` constructor :param ClassDouble target: The ClassDouble to set the expectation on. :return: an ``Expectation`` for the __new__ method. :raise: ``ConstructorDoubleError`` if target is not a ClassDouble. """ if not isinstance(target, ClassDouble): raise ConstructorDoubleError('Cannot allow_constructor of {} since it is not a ClassDouble.'.format(target)) # depends on [control=['if'], data=[]] return expect(target)._doubles__new__
def run(self): """ Construct the document id from the date and the url. """ document = {} document['_id'] = hashlib.sha1('%s:%s' % ( self.date, self.url)).hexdigest() with self.input().open() as handle: document['content'] = handle.read().decode('utf-8', 'ignore') document['url'] = self.url document['date'] = unicode(self.date) with self.output().open('w') as output: output.write(json.dumps(document))
def function[run, parameter[self]]: constant[ Construct the document id from the date and the url. ] variable[document] assign[=] dictionary[[], []] call[name[document]][constant[_id]] assign[=] call[call[name[hashlib].sha1, parameter[binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0a335b0>, <ast.Attribute object at 0x7da1b0a30820>]]]]].hexdigest, parameter[]] with call[call[name[self].input, parameter[]].open, parameter[]] begin[:] call[name[document]][constant[content]] assign[=] call[call[name[handle].read, parameter[]].decode, parameter[constant[utf-8], constant[ignore]]] call[name[document]][constant[url]] assign[=] name[self].url call[name[document]][constant[date]] assign[=] call[name[unicode], parameter[name[self].date]] with call[call[name[self].output, parameter[]].open, parameter[constant[w]]] begin[:] call[name[output].write, parameter[call[name[json].dumps, parameter[name[document]]]]]
keyword[def] identifier[run] ( identifier[self] ): literal[string] identifier[document] ={} identifier[document] [ literal[string] ]= identifier[hashlib] . identifier[sha1] ( literal[string] %( identifier[self] . identifier[date] , identifier[self] . identifier[url] )). identifier[hexdigest] () keyword[with] identifier[self] . identifier[input] (). identifier[open] () keyword[as] identifier[handle] : identifier[document] [ literal[string] ]= identifier[handle] . identifier[read] (). identifier[decode] ( literal[string] , literal[string] ) identifier[document] [ literal[string] ]= identifier[self] . identifier[url] identifier[document] [ literal[string] ]= identifier[unicode] ( identifier[self] . identifier[date] ) keyword[with] identifier[self] . identifier[output] (). identifier[open] ( literal[string] ) keyword[as] identifier[output] : identifier[output] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[document] ))
def run(self): """ Construct the document id from the date and the url. """ document = {} document['_id'] = hashlib.sha1('%s:%s' % (self.date, self.url)).hexdigest() with self.input().open() as handle: document['content'] = handle.read().decode('utf-8', 'ignore') # depends on [control=['with'], data=['handle']] document['url'] = self.url document['date'] = unicode(self.date) with self.output().open('w') as output: output.write(json.dumps(document)) # depends on [control=['with'], data=['output']]
def spam(self, msg, *args, **kw): """Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(SPAM): self._log(SPAM, msg, args, **kw)
def function[spam, parameter[self, msg]]: constant[Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`.] if call[name[self].isEnabledFor, parameter[name[SPAM]]] begin[:] call[name[self]._log, parameter[name[SPAM], name[msg], name[args]]]
keyword[def] identifier[spam] ( identifier[self] , identifier[msg] ,* identifier[args] ,** identifier[kw] ): literal[string] keyword[if] identifier[self] . identifier[isEnabledFor] ( identifier[SPAM] ): identifier[self] . identifier[_log] ( identifier[SPAM] , identifier[msg] , identifier[args] ,** identifier[kw] )
def spam(self, msg, *args, **kw): """Log a message with level :data:`SPAM`. The arguments are interpreted as for :func:`logging.debug()`.""" if self.isEnabledFor(SPAM): self._log(SPAM, msg, args, **kw) # depends on [control=['if'], data=[]]
def cheby_op(G, c, signal, **kwargs): r""" Chebyshev polynomial of graph Laplacian applied to vector. Parameters ---------- G : Graph c : ndarray or list of ndarrays Chebyshev coefficients for a Filter or a Filterbank signal : ndarray Signal to filter Returns ------- r : ndarray Result of the filtering """ # Handle if we do not have a list of filters but only a simple filter in cheby_coeff. if not isinstance(c, np.ndarray): c = np.array(c) c = np.atleast_2d(c) Nscales, M = c.shape if M < 2: raise TypeError("The coefficients have an invalid shape") # thanks to that, we can also have 1d signal. try: Nv = np.shape(signal)[1] r = np.zeros((G.N * Nscales, Nv)) except IndexError: r = np.zeros((G.N * Nscales)) a_arange = [0, G.lmax] a1 = float(a_arange[1] - a_arange[0]) / 2. a2 = float(a_arange[1] + a_arange[0]) / 2. twf_old = signal twf_cur = (G.L.dot(signal) - a2 * signal) / a1 tmpN = np.arange(G.N, dtype=int) for i in range(Nscales): r[tmpN + G.N*i] = 0.5 * c[i, 0] * twf_old + c[i, 1] * twf_cur factor = 2/a1 * (G.L - a2 * sparse.eye(G.N)) for k in range(2, M): twf_new = factor.dot(twf_cur) - twf_old for i in range(Nscales): r[tmpN + G.N*i] += c[i, k] * twf_new twf_old = twf_cur twf_cur = twf_new return r
def function[cheby_op, parameter[G, c, signal]]: constant[ Chebyshev polynomial of graph Laplacian applied to vector. Parameters ---------- G : Graph c : ndarray or list of ndarrays Chebyshev coefficients for a Filter or a Filterbank signal : ndarray Signal to filter Returns ------- r : ndarray Result of the filtering ] if <ast.UnaryOp object at 0x7da1b06d16f0> begin[:] variable[c] assign[=] call[name[np].array, parameter[name[c]]] variable[c] assign[=] call[name[np].atleast_2d, parameter[name[c]]] <ast.Tuple object at 0x7da1b06d2560> assign[=] name[c].shape if compare[name[M] less[<] constant[2]] begin[:] <ast.Raise object at 0x7da2046212d0> <ast.Try object at 0x7da204623550> variable[a_arange] assign[=] list[[<ast.Constant object at 0x7da18f7237f0>, <ast.Attribute object at 0x7da18f720eb0>]] variable[a1] assign[=] binary_operation[call[name[float], parameter[binary_operation[call[name[a_arange]][constant[1]] - call[name[a_arange]][constant[0]]]]] / constant[2.0]] variable[a2] assign[=] binary_operation[call[name[float], parameter[binary_operation[call[name[a_arange]][constant[1]] + call[name[a_arange]][constant[0]]]]] / constant[2.0]] variable[twf_old] assign[=] name[signal] variable[twf_cur] assign[=] binary_operation[binary_operation[call[name[G].L.dot, parameter[name[signal]]] - binary_operation[name[a2] * name[signal]]] / name[a1]] variable[tmpN] assign[=] call[name[np].arange, parameter[name[G].N]] for taget[name[i]] in starred[call[name[range], parameter[name[Nscales]]]] begin[:] call[name[r]][binary_operation[name[tmpN] + binary_operation[name[G].N * name[i]]]] assign[=] binary_operation[binary_operation[binary_operation[constant[0.5] * call[name[c]][tuple[[<ast.Name object at 0x7da2046236d0>, <ast.Constant object at 0x7da204621a80>]]]] * name[twf_old]] + binary_operation[call[name[c]][tuple[[<ast.Name object at 0x7da204622620>, <ast.Constant object at 0x7da204621240>]]] * name[twf_cur]]] variable[factor] assign[=] 
binary_operation[binary_operation[constant[2] / name[a1]] * binary_operation[name[G].L - binary_operation[name[a2] * call[name[sparse].eye, parameter[name[G].N]]]]] for taget[name[k]] in starred[call[name[range], parameter[constant[2], name[M]]]] begin[:] variable[twf_new] assign[=] binary_operation[call[name[factor].dot, parameter[name[twf_cur]]] - name[twf_old]] for taget[name[i]] in starred[call[name[range], parameter[name[Nscales]]]] begin[:] <ast.AugAssign object at 0x7da2046216c0> variable[twf_old] assign[=] name[twf_cur] variable[twf_cur] assign[=] name[twf_new] return[name[r]]
keyword[def] identifier[cheby_op] ( identifier[G] , identifier[c] , identifier[signal] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[c] , identifier[np] . identifier[ndarray] ): identifier[c] = identifier[np] . identifier[array] ( identifier[c] ) identifier[c] = identifier[np] . identifier[atleast_2d] ( identifier[c] ) identifier[Nscales] , identifier[M] = identifier[c] . identifier[shape] keyword[if] identifier[M] < literal[int] : keyword[raise] identifier[TypeError] ( literal[string] ) keyword[try] : identifier[Nv] = identifier[np] . identifier[shape] ( identifier[signal] )[ literal[int] ] identifier[r] = identifier[np] . identifier[zeros] (( identifier[G] . identifier[N] * identifier[Nscales] , identifier[Nv] )) keyword[except] identifier[IndexError] : identifier[r] = identifier[np] . identifier[zeros] (( identifier[G] . identifier[N] * identifier[Nscales] )) identifier[a_arange] =[ literal[int] , identifier[G] . identifier[lmax] ] identifier[a1] = identifier[float] ( identifier[a_arange] [ literal[int] ]- identifier[a_arange] [ literal[int] ])/ literal[int] identifier[a2] = identifier[float] ( identifier[a_arange] [ literal[int] ]+ identifier[a_arange] [ literal[int] ])/ literal[int] identifier[twf_old] = identifier[signal] identifier[twf_cur] =( identifier[G] . identifier[L] . identifier[dot] ( identifier[signal] )- identifier[a2] * identifier[signal] )/ identifier[a1] identifier[tmpN] = identifier[np] . identifier[arange] ( identifier[G] . identifier[N] , identifier[dtype] = identifier[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[Nscales] ): identifier[r] [ identifier[tmpN] + identifier[G] . identifier[N] * identifier[i] ]= literal[int] * identifier[c] [ identifier[i] , literal[int] ]* identifier[twf_old] + identifier[c] [ identifier[i] , literal[int] ]* identifier[twf_cur] identifier[factor] = literal[int] / identifier[a1] *( identifier[G] . 
identifier[L] - identifier[a2] * identifier[sparse] . identifier[eye] ( identifier[G] . identifier[N] )) keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[M] ): identifier[twf_new] = identifier[factor] . identifier[dot] ( identifier[twf_cur] )- identifier[twf_old] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[Nscales] ): identifier[r] [ identifier[tmpN] + identifier[G] . identifier[N] * identifier[i] ]+= identifier[c] [ identifier[i] , identifier[k] ]* identifier[twf_new] identifier[twf_old] = identifier[twf_cur] identifier[twf_cur] = identifier[twf_new] keyword[return] identifier[r]
def cheby_op(G, c, signal, **kwargs): """ Chebyshev polynomial of graph Laplacian applied to vector. Parameters ---------- G : Graph c : ndarray or list of ndarrays Chebyshev coefficients for a Filter or a Filterbank signal : ndarray Signal to filter Returns ------- r : ndarray Result of the filtering """ # Handle if we do not have a list of filters but only a simple filter in cheby_coeff. if not isinstance(c, np.ndarray): c = np.array(c) # depends on [control=['if'], data=[]] c = np.atleast_2d(c) (Nscales, M) = c.shape if M < 2: raise TypeError('The coefficients have an invalid shape') # depends on [control=['if'], data=[]] # thanks to that, we can also have 1d signal. try: Nv = np.shape(signal)[1] r = np.zeros((G.N * Nscales, Nv)) # depends on [control=['try'], data=[]] except IndexError: r = np.zeros(G.N * Nscales) # depends on [control=['except'], data=[]] a_arange = [0, G.lmax] a1 = float(a_arange[1] - a_arange[0]) / 2.0 a2 = float(a_arange[1] + a_arange[0]) / 2.0 twf_old = signal twf_cur = (G.L.dot(signal) - a2 * signal) / a1 tmpN = np.arange(G.N, dtype=int) for i in range(Nscales): r[tmpN + G.N * i] = 0.5 * c[i, 0] * twf_old + c[i, 1] * twf_cur # depends on [control=['for'], data=['i']] factor = 2 / a1 * (G.L - a2 * sparse.eye(G.N)) for k in range(2, M): twf_new = factor.dot(twf_cur) - twf_old for i in range(Nscales): r[tmpN + G.N * i] += c[i, k] * twf_new # depends on [control=['for'], data=['i']] twf_old = twf_cur twf_cur = twf_new # depends on [control=['for'], data=['k']] return r
def set_autostart(vm_, state='on', **kwargs): ''' Set the autostart flag on a VM so that the VM will start with the host system on reboot. :param vm_: domain name :param state: 'on' to auto start the pool, anything else to mark the pool not to be started when the host boots :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt "*" virt.set_autostart <domain> <on | off> ''' conn = __get_conn(**kwargs) dom = _get_domain(conn, vm_) # return False if state is set to something other then on or off ret = False if state == 'on': ret = dom.setAutostart(1) == 0 elif state == 'off': ret = dom.setAutostart(0) == 0 conn.close() return ret
def function[set_autostart, parameter[vm_, state]]: constant[ Set the autostart flag on a VM so that the VM will start with the host system on reboot. :param vm_: domain name :param state: 'on' to auto start the pool, anything else to mark the pool not to be started when the host boots :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt "*" virt.set_autostart <domain> <on | off> ] variable[conn] assign[=] call[name[__get_conn], parameter[]] variable[dom] assign[=] call[name[_get_domain], parameter[name[conn], name[vm_]]] variable[ret] assign[=] constant[False] if compare[name[state] equal[==] constant[on]] begin[:] variable[ret] assign[=] compare[call[name[dom].setAutostart, parameter[constant[1]]] equal[==] constant[0]] call[name[conn].close, parameter[]] return[name[ret]]
keyword[def] identifier[set_autostart] ( identifier[vm_] , identifier[state] = literal[string] ,** identifier[kwargs] ): literal[string] identifier[conn] = identifier[__get_conn] (** identifier[kwargs] ) identifier[dom] = identifier[_get_domain] ( identifier[conn] , identifier[vm_] ) identifier[ret] = keyword[False] keyword[if] identifier[state] == literal[string] : identifier[ret] = identifier[dom] . identifier[setAutostart] ( literal[int] )== literal[int] keyword[elif] identifier[state] == literal[string] : identifier[ret] = identifier[dom] . identifier[setAutostart] ( literal[int] )== literal[int] identifier[conn] . identifier[close] () keyword[return] identifier[ret]
def set_autostart(vm_, state='on', **kwargs): """ Set the autostart flag on a VM so that the VM will start with the host system on reboot. :param vm_: domain name :param state: 'on' to auto start the pool, anything else to mark the pool not to be started when the host boots :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt "*" virt.set_autostart <domain> <on | off> """ conn = __get_conn(**kwargs) dom = _get_domain(conn, vm_) # return False if state is set to something other then on or off ret = False if state == 'on': ret = dom.setAutostart(1) == 0 # depends on [control=['if'], data=[]] elif state == 'off': ret = dom.setAutostart(0) == 0 # depends on [control=['if'], data=[]] conn.close() return ret
def walk_preorder(self): """Depth-first preorder walk over the cursor and its descendants. Yields cursors. """ yield self for child in self.get_children(): for descendant in child.walk_preorder(): yield descendant
def function[walk_preorder, parameter[self]]: constant[Depth-first preorder walk over the cursor and its descendants. Yields cursors. ] <ast.Yield object at 0x7da1b2345a20> for taget[name[child]] in starred[call[name[self].get_children, parameter[]]] begin[:] for taget[name[descendant]] in starred[call[name[child].walk_preorder, parameter[]]] begin[:] <ast.Yield object at 0x7da1b2345ba0>
keyword[def] identifier[walk_preorder] ( identifier[self] ): literal[string] keyword[yield] identifier[self] keyword[for] identifier[child] keyword[in] identifier[self] . identifier[get_children] (): keyword[for] identifier[descendant] keyword[in] identifier[child] . identifier[walk_preorder] (): keyword[yield] identifier[descendant]
def walk_preorder(self): """Depth-first preorder walk over the cursor and its descendants. Yields cursors. """ yield self for child in self.get_children(): for descendant in child.walk_preorder(): yield descendant # depends on [control=['for'], data=['descendant']] # depends on [control=['for'], data=['child']]
def __update_clusters(self): """! @brief Calculate distance (in line with specified metric) to each point from the each cluster. Nearest points are captured by according clusters and as a result clusters are updated. @return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data. """ clusters = [[] for _ in range(len(self.__centers))] dataset_differences = self.__calculate_dataset_difference(len(clusters)) optimum_indexes = numpy.argmin(dataset_differences, axis=0) for index_point in range(len(optimum_indexes)): index_cluster = optimum_indexes[index_point] clusters[index_cluster].append(index_point) clusters = [cluster for cluster in clusters if len(cluster) > 0] return clusters
def function[__update_clusters, parameter[self]]: constant[! @brief Calculate distance (in line with specified metric) to each point from the each cluster. Nearest points are captured by according clusters and as a result clusters are updated. @return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data. ] variable[clusters] assign[=] <ast.ListComp object at 0x7da1b01b0b80> variable[dataset_differences] assign[=] call[name[self].__calculate_dataset_difference, parameter[call[name[len], parameter[name[clusters]]]]] variable[optimum_indexes] assign[=] call[name[numpy].argmin, parameter[name[dataset_differences]]] for taget[name[index_point]] in starred[call[name[range], parameter[call[name[len], parameter[name[optimum_indexes]]]]]] begin[:] variable[index_cluster] assign[=] call[name[optimum_indexes]][name[index_point]] call[call[name[clusters]][name[index_cluster]].append, parameter[name[index_point]]] variable[clusters] assign[=] <ast.ListComp object at 0x7da1b01b2080> return[name[clusters]]
keyword[def] identifier[__update_clusters] ( identifier[self] ): literal[string] identifier[clusters] =[[] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[__centers] ))] identifier[dataset_differences] = identifier[self] . identifier[__calculate_dataset_difference] ( identifier[len] ( identifier[clusters] )) identifier[optimum_indexes] = identifier[numpy] . identifier[argmin] ( identifier[dataset_differences] , identifier[axis] = literal[int] ) keyword[for] identifier[index_point] keyword[in] identifier[range] ( identifier[len] ( identifier[optimum_indexes] )): identifier[index_cluster] = identifier[optimum_indexes] [ identifier[index_point] ] identifier[clusters] [ identifier[index_cluster] ]. identifier[append] ( identifier[index_point] ) identifier[clusters] =[ identifier[cluster] keyword[for] identifier[cluster] keyword[in] identifier[clusters] keyword[if] identifier[len] ( identifier[cluster] )> literal[int] ] keyword[return] identifier[clusters]
def __update_clusters(self): """! @brief Calculate distance (in line with specified metric) to each point from the each cluster. Nearest points are captured by according clusters and as a result clusters are updated. @return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data. """ clusters = [[] for _ in range(len(self.__centers))] dataset_differences = self.__calculate_dataset_difference(len(clusters)) optimum_indexes = numpy.argmin(dataset_differences, axis=0) for index_point in range(len(optimum_indexes)): index_cluster = optimum_indexes[index_point] clusters[index_cluster].append(index_point) # depends on [control=['for'], data=['index_point']] clusters = [cluster for cluster in clusters if len(cluster) > 0] return clusters
def extract_filename_from_url(log, url): """ *get the filename from a URL.* *Will return 'untitled.html', if no filename is found.* **Key Arguments:** - ``url`` -- the url to extract filename from Returns: - ``filename`` -- the filename **Usage:** .. code-block:: python from fundamentals.download import extract_filename_from_url name = extract_filename_from_url( log=log, url="https://en.wikipedia.org/wiki/Docstring" ) print name # OUT: Docstring.html """ ## > IMPORTS ## import re # EXTRACT THE FILENAME FROM THE URL try: log.debug("extracting filename from url " + url) reEoURL = re.compile('([\w\.]*)$') filename = reEoURL.findall(url)[0] # log.debug(filename) if(len(filename) == 0): filename = 'untitled.html' if not (re.search('\.', filename)): filename = filename + '.html' except Exception as e: filename = None # print url log.warning("could not extracting filename from url : " + str(e) + "\n") return filename
def function[extract_filename_from_url, parameter[log, url]]: constant[ *get the filename from a URL.* *Will return 'untitled.html', if no filename is found.* **Key Arguments:** - ``url`` -- the url to extract filename from Returns: - ``filename`` -- the filename **Usage:** .. code-block:: python from fundamentals.download import extract_filename_from_url name = extract_filename_from_url( log=log, url="https://en.wikipedia.org/wiki/Docstring" ) print name # OUT: Docstring.html ] import module[re] <ast.Try object at 0x7da1b28fd4b0> return[name[filename]]
keyword[def] identifier[extract_filename_from_url] ( identifier[log] , identifier[url] ): literal[string] keyword[import] identifier[re] keyword[try] : identifier[log] . identifier[debug] ( literal[string] + identifier[url] ) identifier[reEoURL] = identifier[re] . identifier[compile] ( literal[string] ) identifier[filename] = identifier[reEoURL] . identifier[findall] ( identifier[url] )[ literal[int] ] keyword[if] ( identifier[len] ( identifier[filename] )== literal[int] ): identifier[filename] = literal[string] keyword[if] keyword[not] ( identifier[re] . identifier[search] ( literal[string] , identifier[filename] )): identifier[filename] = identifier[filename] + literal[string] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[filename] = keyword[None] identifier[log] . identifier[warning] ( literal[string] + identifier[str] ( identifier[e] )+ literal[string] ) keyword[return] identifier[filename]
def extract_filename_from_url(log, url): """ *get the filename from a URL.* *Will return 'untitled.html', if no filename is found.* **Key Arguments:** - ``url`` -- the url to extract filename from Returns: - ``filename`` -- the filename **Usage:** .. code-block:: python from fundamentals.download import extract_filename_from_url name = extract_filename_from_url( log=log, url="https://en.wikipedia.org/wiki/Docstring" ) print name # OUT: Docstring.html """ ## > IMPORTS ## import re # EXTRACT THE FILENAME FROM THE URL try: log.debug('extracting filename from url ' + url) reEoURL = re.compile('([\\w\\.]*)$') filename = reEoURL.findall(url)[0] # log.debug(filename) if len(filename) == 0: filename = 'untitled.html' # depends on [control=['if'], data=[]] if not re.search('\\.', filename): filename = filename + '.html' # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception as e: filename = None # print url log.warning('could not extracting filename from url : ' + str(e) + '\n') # depends on [control=['except'], data=['e']] return filename
def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=None, region=None, key=None, keyid=None, profile=None): ''' Given an event source mapping ID or an event source ARN and FunctionName, delete the event source mapping Returns {deleted: true} if the mapping was deleted and returns {deleted: false} if the mapping was not deleted. CLI Example: .. code-block:: bash salt myminion boto_lambda.delete_event_source_mapping 260c423d-e8b5-4443-8d6a-5e91b9ecd0fa ''' ids = _get_ids(UUID, EventSourceArn=EventSourceArn, FunctionName=FunctionName) try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) for id in ids: conn.delete_event_source_mapping(UUID=id) return {'deleted': True} except ClientError as e: return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
def function[delete_event_source_mapping, parameter[UUID, EventSourceArn, FunctionName, region, key, keyid, profile]]: constant[ Given an event source mapping ID or an event source ARN and FunctionName, delete the event source mapping Returns {deleted: true} if the mapping was deleted and returns {deleted: false} if the mapping was not deleted. CLI Example: .. code-block:: bash salt myminion boto_lambda.delete_event_source_mapping 260c423d-e8b5-4443-8d6a-5e91b9ecd0fa ] variable[ids] assign[=] call[name[_get_ids], parameter[name[UUID]]] <ast.Try object at 0x7da18ede4fd0>
keyword[def] identifier[delete_event_source_mapping] ( identifier[UUID] = keyword[None] , identifier[EventSourceArn] = keyword[None] , identifier[FunctionName] = keyword[None] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] identifier[ids] = identifier[_get_ids] ( identifier[UUID] , identifier[EventSourceArn] = identifier[EventSourceArn] , identifier[FunctionName] = identifier[FunctionName] ) keyword[try] : identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[for] identifier[id] keyword[in] identifier[ids] : identifier[conn] . identifier[delete_event_source_mapping] ( identifier[UUID] = identifier[id] ) keyword[return] { literal[string] : keyword[True] } keyword[except] identifier[ClientError] keyword[as] identifier[e] : keyword[return] { literal[string] : keyword[False] , literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )}
def delete_event_source_mapping(UUID=None, EventSourceArn=None, FunctionName=None, region=None, key=None, keyid=None, profile=None): """ Given an event source mapping ID or an event source ARN and FunctionName, delete the event source mapping Returns {deleted: true} if the mapping was deleted and returns {deleted: false} if the mapping was not deleted. CLI Example: .. code-block:: bash salt myminion boto_lambda.delete_event_source_mapping 260c423d-e8b5-4443-8d6a-5e91b9ecd0fa """ ids = _get_ids(UUID, EventSourceArn=EventSourceArn, FunctionName=FunctionName) try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) for id in ids: conn.delete_event_source_mapping(UUID=id) # depends on [control=['for'], data=['id']] return {'deleted': True} # depends on [control=['try'], data=[]] except ClientError as e: return {'deleted': False, 'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']]
def print_ast(f): """ :param f: :type f: file :return: """ for linenum,indent,value in iter_lines(f): print("{0}{1}|{2}".format(str(linenum).rjust(3), ' ' * indent, value))
def function[print_ast, parameter[f]]: constant[ :param f: :type f: file :return: ] for taget[tuple[[<ast.Name object at 0x7da1b141a3e0>, <ast.Name object at 0x7da1b14198a0>, <ast.Name object at 0x7da1b1419300>]]] in starred[call[name[iter_lines], parameter[name[f]]]] begin[:] call[name[print], parameter[call[constant[{0}{1}|{2}].format, parameter[call[call[name[str], parameter[name[linenum]]].rjust, parameter[constant[3]]], binary_operation[constant[ ] * name[indent]], name[value]]]]]
keyword[def] identifier[print_ast] ( identifier[f] ): literal[string] keyword[for] identifier[linenum] , identifier[indent] , identifier[value] keyword[in] identifier[iter_lines] ( identifier[f] ): identifier[print] ( literal[string] . identifier[format] ( identifier[str] ( identifier[linenum] ). identifier[rjust] ( literal[int] ), literal[string] * identifier[indent] , identifier[value] ))
def print_ast(f): """ :param f: :type f: file :return: """ for (linenum, indent, value) in iter_lines(f): print('{0}{1}|{2}'.format(str(linenum).rjust(3), ' ' * indent, value)) # depends on [control=['for'], data=[]]
def _transform_to_time_spent(records, split_interval, sections): """ Each call that crosses a boundary of the sections in the week-matrix is split. These new records contain the amount of time (in record.call_duration) spent talking in that specific section. """ t_records = [] week_nr = records[0].datetime.isocalendar()[1] # contrary to the rest of the binning process, this is done with second # precision for r in filter(lambda rec: rec.interaction == 'call' and rec.call_duration > 0, records): t_left = r.call_duration t_to_next_section = _seconds_to_section_split(r, sections) t_spent_total = 0 while (t_left > 0): t_spent = min(t_to_next_section, t_left) dt_new = r.datetime + dt.timedelta(seconds=t_spent_total) if dt_new.isocalendar()[1] > week_nr: dt_new -= dt.timedelta(days=7) t_records.append( Record('call', r.direction, None, dt_new, t_spent, None)) t_left -= t_spent t_spent_total += t_spent t_to_next_section = split_interval * 60 return sorted(t_records, key=lambda r: _find_weektime(r.datetime))
def function[_transform_to_time_spent, parameter[records, split_interval, sections]]: constant[ Each call that crosses a boundary of the sections in the week-matrix is split. These new records contain the amount of time (in record.call_duration) spent talking in that specific section. ] variable[t_records] assign[=] list[[]] variable[week_nr] assign[=] call[call[call[name[records]][constant[0]].datetime.isocalendar, parameter[]]][constant[1]] for taget[name[r]] in starred[call[name[filter], parameter[<ast.Lambda object at 0x7da1b0d0c580>, name[records]]]] begin[:] variable[t_left] assign[=] name[r].call_duration variable[t_to_next_section] assign[=] call[name[_seconds_to_section_split], parameter[name[r], name[sections]]] variable[t_spent_total] assign[=] constant[0] while compare[name[t_left] greater[>] constant[0]] begin[:] variable[t_spent] assign[=] call[name[min], parameter[name[t_to_next_section], name[t_left]]] variable[dt_new] assign[=] binary_operation[name[r].datetime + call[name[dt].timedelta, parameter[]]] if compare[call[call[name[dt_new].isocalendar, parameter[]]][constant[1]] greater[>] name[week_nr]] begin[:] <ast.AugAssign object at 0x7da1b0d05b10> call[name[t_records].append, parameter[call[name[Record], parameter[constant[call], name[r].direction, constant[None], name[dt_new], name[t_spent], constant[None]]]]] <ast.AugAssign object at 0x7da1b0d062f0> <ast.AugAssign object at 0x7da1b0d06560> variable[t_to_next_section] assign[=] binary_operation[name[split_interval] * constant[60]] return[call[name[sorted], parameter[name[t_records]]]]
keyword[def] identifier[_transform_to_time_spent] ( identifier[records] , identifier[split_interval] , identifier[sections] ): literal[string] identifier[t_records] =[] identifier[week_nr] = identifier[records] [ literal[int] ]. identifier[datetime] . identifier[isocalendar] ()[ literal[int] ] keyword[for] identifier[r] keyword[in] identifier[filter] ( keyword[lambda] identifier[rec] : identifier[rec] . identifier[interaction] == literal[string] keyword[and] identifier[rec] . identifier[call_duration] > literal[int] , identifier[records] ): identifier[t_left] = identifier[r] . identifier[call_duration] identifier[t_to_next_section] = identifier[_seconds_to_section_split] ( identifier[r] , identifier[sections] ) identifier[t_spent_total] = literal[int] keyword[while] ( identifier[t_left] > literal[int] ): identifier[t_spent] = identifier[min] ( identifier[t_to_next_section] , identifier[t_left] ) identifier[dt_new] = identifier[r] . identifier[datetime] + identifier[dt] . identifier[timedelta] ( identifier[seconds] = identifier[t_spent_total] ) keyword[if] identifier[dt_new] . identifier[isocalendar] ()[ literal[int] ]> identifier[week_nr] : identifier[dt_new] -= identifier[dt] . identifier[timedelta] ( identifier[days] = literal[int] ) identifier[t_records] . identifier[append] ( identifier[Record] ( literal[string] , identifier[r] . identifier[direction] , keyword[None] , identifier[dt_new] , identifier[t_spent] , keyword[None] )) identifier[t_left] -= identifier[t_spent] identifier[t_spent_total] += identifier[t_spent] identifier[t_to_next_section] = identifier[split_interval] * literal[int] keyword[return] identifier[sorted] ( identifier[t_records] , identifier[key] = keyword[lambda] identifier[r] : identifier[_find_weektime] ( identifier[r] . identifier[datetime] ))
def _transform_to_time_spent(records, split_interval, sections): """ Each call that crosses a boundary of the sections in the week-matrix is split. These new records contain the amount of time (in record.call_duration) spent talking in that specific section. """ t_records = [] week_nr = records[0].datetime.isocalendar()[1] # contrary to the rest of the binning process, this is done with second # precision for r in filter(lambda rec: rec.interaction == 'call' and rec.call_duration > 0, records): t_left = r.call_duration t_to_next_section = _seconds_to_section_split(r, sections) t_spent_total = 0 while t_left > 0: t_spent = min(t_to_next_section, t_left) dt_new = r.datetime + dt.timedelta(seconds=t_spent_total) if dt_new.isocalendar()[1] > week_nr: dt_new -= dt.timedelta(days=7) # depends on [control=['if'], data=[]] t_records.append(Record('call', r.direction, None, dt_new, t_spent, None)) t_left -= t_spent t_spent_total += t_spent t_to_next_section = split_interval * 60 # depends on [control=['while'], data=['t_left']] # depends on [control=['for'], data=['r']] return sorted(t_records, key=lambda r: _find_weektime(r.datetime))
def verify_request(self, uri, http_method='GET', body=None, headers=None, scopes=None): """Validate client, code etc, return body + headers""" request = Request(uri, http_method, body, headers) request.token_type = self.find_token_type(request) request.scopes = scopes token_type_handler = self.tokens.get(request.token_type, self.default_token_type_handler) log.debug('Dispatching token_type %s request to %r.', request.token_type, token_type_handler) return token_type_handler.validate_request(request), request
def function[verify_request, parameter[self, uri, http_method, body, headers, scopes]]: constant[Validate client, code etc, return body + headers] variable[request] assign[=] call[name[Request], parameter[name[uri], name[http_method], name[body], name[headers]]] name[request].token_type assign[=] call[name[self].find_token_type, parameter[name[request]]] name[request].scopes assign[=] name[scopes] variable[token_type_handler] assign[=] call[name[self].tokens.get, parameter[name[request].token_type, name[self].default_token_type_handler]] call[name[log].debug, parameter[constant[Dispatching token_type %s request to %r.], name[request].token_type, name[token_type_handler]]] return[tuple[[<ast.Call object at 0x7da1b179a410>, <ast.Name object at 0x7da1b179b580>]]]
keyword[def] identifier[verify_request] ( identifier[self] , identifier[uri] , identifier[http_method] = literal[string] , identifier[body] = keyword[None] , identifier[headers] = keyword[None] , identifier[scopes] = keyword[None] ): literal[string] identifier[request] = identifier[Request] ( identifier[uri] , identifier[http_method] , identifier[body] , identifier[headers] ) identifier[request] . identifier[token_type] = identifier[self] . identifier[find_token_type] ( identifier[request] ) identifier[request] . identifier[scopes] = identifier[scopes] identifier[token_type_handler] = identifier[self] . identifier[tokens] . identifier[get] ( identifier[request] . identifier[token_type] , identifier[self] . identifier[default_token_type_handler] ) identifier[log] . identifier[debug] ( literal[string] , identifier[request] . identifier[token_type] , identifier[token_type_handler] ) keyword[return] identifier[token_type_handler] . identifier[validate_request] ( identifier[request] ), identifier[request]
def verify_request(self, uri, http_method='GET', body=None, headers=None, scopes=None): """Validate client, code etc, return body + headers""" request = Request(uri, http_method, body, headers) request.token_type = self.find_token_type(request) request.scopes = scopes token_type_handler = self.tokens.get(request.token_type, self.default_token_type_handler) log.debug('Dispatching token_type %s request to %r.', request.token_type, token_type_handler) return (token_type_handler.validate_request(request), request)
def __add_created_thread(self, event): """ Private method to automatically add new thread objects from debug events. @type event: L{Event} @param event: Event object. """ dwThreadId = event.get_tid() hThread = event.get_thread_handle() ## if not self.has_thread(dwThreadId): # XXX this would trigger a scan if not self._has_thread_id(dwThreadId): aThread = Thread(dwThreadId, hThread, self) teb_ptr = event.get_teb() # remember the TEB pointer if teb_ptr: aThread._teb_ptr = teb_ptr self._add_thread(aThread)
def function[__add_created_thread, parameter[self, event]]: constant[ Private method to automatically add new thread objects from debug events. @type event: L{Event} @param event: Event object. ] variable[dwThreadId] assign[=] call[name[event].get_tid, parameter[]] variable[hThread] assign[=] call[name[event].get_thread_handle, parameter[]] if <ast.UnaryOp object at 0x7da20c6aaaa0> begin[:] variable[aThread] assign[=] call[name[Thread], parameter[name[dwThreadId], name[hThread], name[self]]] variable[teb_ptr] assign[=] call[name[event].get_teb, parameter[]] if name[teb_ptr] begin[:] name[aThread]._teb_ptr assign[=] name[teb_ptr] call[name[self]._add_thread, parameter[name[aThread]]]
keyword[def] identifier[__add_created_thread] ( identifier[self] , identifier[event] ): literal[string] identifier[dwThreadId] = identifier[event] . identifier[get_tid] () identifier[hThread] = identifier[event] . identifier[get_thread_handle] () keyword[if] keyword[not] identifier[self] . identifier[_has_thread_id] ( identifier[dwThreadId] ): identifier[aThread] = identifier[Thread] ( identifier[dwThreadId] , identifier[hThread] , identifier[self] ) identifier[teb_ptr] = identifier[event] . identifier[get_teb] () keyword[if] identifier[teb_ptr] : identifier[aThread] . identifier[_teb_ptr] = identifier[teb_ptr] identifier[self] . identifier[_add_thread] ( identifier[aThread] )
def __add_created_thread(self, event): """ Private method to automatically add new thread objects from debug events. @type event: L{Event} @param event: Event object. """ dwThreadId = event.get_tid() hThread = event.get_thread_handle() ## if not self.has_thread(dwThreadId): # XXX this would trigger a scan if not self._has_thread_id(dwThreadId): aThread = Thread(dwThreadId, hThread, self) teb_ptr = event.get_teb() # remember the TEB pointer if teb_ptr: aThread._teb_ptr = teb_ptr # depends on [control=['if'], data=[]] self._add_thread(aThread) # depends on [control=['if'], data=[]]
def signature(name: str) -> Optional[Tuple]: """ Return the file or URL signature for name :param name: :return: """ return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None
def function[signature, parameter[name]]: constant[ Return the file or URL signature for name :param name: :return: ] return[<ast.IfExp object at 0x7da18c4cc910>]
keyword[def] identifier[signature] ( identifier[name] : identifier[str] )-> identifier[Optional] [ identifier[Tuple] ]: literal[string] keyword[return] identifier[url_signature] ( identifier[name] ) keyword[if] identifier[is_url] ( identifier[name] ) keyword[else] identifier[file_signature] ( identifier[name] ) keyword[if] identifier[is_file] ( identifier[name] ) keyword[else] keyword[None]
def signature(name: str) -> Optional[Tuple]: """ Return the file or URL signature for name :param name: :return: """ return url_signature(name) if is_url(name) else file_signature(name) if is_file(name) else None
def exe(self): """Get the current executor Returns ------- exe : mxnet.executor.Executor """ return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
def function[exe, parameter[self]]: constant[Get the current executor Returns ------- exe : mxnet.executor.Executor ] return[call[call[call[name[self]._buckets][name[self].curr_bucket_key]][constant[exe]]][call[name[tuple], parameter[call[name[self].data_shapes.items, parameter[]]]]]]
keyword[def] identifier[exe] ( identifier[self] ): literal[string] keyword[return] identifier[self] . identifier[_buckets] [ identifier[self] . identifier[curr_bucket_key] ][ literal[string] ][ identifier[tuple] ( identifier[self] . identifier[data_shapes] . identifier[items] ())]
def exe(self): """Get the current executor Returns ------- exe : mxnet.executor.Executor """ return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
def insert_history_item(self, parent, history_item, description, dummy=False): """Enters a single history item into the tree store :param Gtk.TreeItem parent: Parent tree item :param HistoryItem history_item: History item to be inserted :param str description: A description to be added to the entry :param None dummy: Whether this is just a dummy entry (wrapper for concurrency items) :return: Inserted tree item :rtype: Gtk.TreeItem """ if not history_item.state_reference: logger.error("This must never happen! Current history_item is {}".format(history_item)) return None content = None if global_gui_config.get_config_value("SHOW_PATH_NAMES_IN_EXECUTION_HISTORY", False): content = (history_item.state_reference.name + " - " + history_item.state_reference.get_path() + " - " + description, None if dummy else history_item, None if dummy else self.TOOL_TIP_TEXT) else: content = (history_item.state_reference.name + " - " + description, None if dummy else history_item, None if dummy else self.TOOL_TIP_TEXT) tree_item = self.history_tree_store.insert_before( parent, None, content) return tree_item
def function[insert_history_item, parameter[self, parent, history_item, description, dummy]]: constant[Enters a single history item into the tree store :param Gtk.TreeItem parent: Parent tree item :param HistoryItem history_item: History item to be inserted :param str description: A description to be added to the entry :param None dummy: Whether this is just a dummy entry (wrapper for concurrency items) :return: Inserted tree item :rtype: Gtk.TreeItem ] if <ast.UnaryOp object at 0x7da1b1aba6b0> begin[:] call[name[logger].error, parameter[call[constant[This must never happen! Current history_item is {}].format, parameter[name[history_item]]]]] return[constant[None]] variable[content] assign[=] constant[None] if call[name[global_gui_config].get_config_value, parameter[constant[SHOW_PATH_NAMES_IN_EXECUTION_HISTORY], constant[False]]] begin[:] variable[content] assign[=] tuple[[<ast.BinOp object at 0x7da1b1c7f520>, <ast.IfExp object at 0x7da1b1c7eb90>, <ast.IfExp object at 0x7da1b1c7e590>]] variable[tree_item] assign[=] call[name[self].history_tree_store.insert_before, parameter[name[parent], constant[None], name[content]]] return[name[tree_item]]
keyword[def] identifier[insert_history_item] ( identifier[self] , identifier[parent] , identifier[history_item] , identifier[description] , identifier[dummy] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[history_item] . identifier[state_reference] : identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[history_item] )) keyword[return] keyword[None] identifier[content] = keyword[None] keyword[if] identifier[global_gui_config] . identifier[get_config_value] ( literal[string] , keyword[False] ): identifier[content] =( identifier[history_item] . identifier[state_reference] . identifier[name] + literal[string] + identifier[history_item] . identifier[state_reference] . identifier[get_path] ()+ literal[string] + identifier[description] , keyword[None] keyword[if] identifier[dummy] keyword[else] identifier[history_item] , keyword[None] keyword[if] identifier[dummy] keyword[else] identifier[self] . identifier[TOOL_TIP_TEXT] ) keyword[else] : identifier[content] =( identifier[history_item] . identifier[state_reference] . identifier[name] + literal[string] + identifier[description] , keyword[None] keyword[if] identifier[dummy] keyword[else] identifier[history_item] , keyword[None] keyword[if] identifier[dummy] keyword[else] identifier[self] . identifier[TOOL_TIP_TEXT] ) identifier[tree_item] = identifier[self] . identifier[history_tree_store] . identifier[insert_before] ( identifier[parent] , keyword[None] , identifier[content] ) keyword[return] identifier[tree_item]
def insert_history_item(self, parent, history_item, description, dummy=False): """Enters a single history item into the tree store :param Gtk.TreeItem parent: Parent tree item :param HistoryItem history_item: History item to be inserted :param str description: A description to be added to the entry :param None dummy: Whether this is just a dummy entry (wrapper for concurrency items) :return: Inserted tree item :rtype: Gtk.TreeItem """ if not history_item.state_reference: logger.error('This must never happen! Current history_item is {}'.format(history_item)) return None # depends on [control=['if'], data=[]] content = None if global_gui_config.get_config_value('SHOW_PATH_NAMES_IN_EXECUTION_HISTORY', False): content = (history_item.state_reference.name + ' - ' + history_item.state_reference.get_path() + ' - ' + description, None if dummy else history_item, None if dummy else self.TOOL_TIP_TEXT) # depends on [control=['if'], data=[]] else: content = (history_item.state_reference.name + ' - ' + description, None if dummy else history_item, None if dummy else self.TOOL_TIP_TEXT) tree_item = self.history_tree_store.insert_before(parent, None, content) return tree_item
def run_subcommand(netgear, args): """Runs the subcommand configured in args on the netgear session""" subcommand = args.subcommand if subcommand == "block_device" or subcommand == "allow_device": return netgear.allow_block_device(args.mac_addr, BLOCK if subcommand == "block_device" else ALLOW) if subcommand == "attached_devices": if args.verbose: return netgear.get_attached_devices_2() else: return netgear.get_attached_devices() if subcommand == 'traffic_meter': return netgear.get_traffic_meter() if subcommand == 'login': return netgear.login() print("Unknown subcommand")
def function[run_subcommand, parameter[netgear, args]]: constant[Runs the subcommand configured in args on the netgear session] variable[subcommand] assign[=] name[args].subcommand if <ast.BoolOp object at 0x7da1b016e320> begin[:] return[call[name[netgear].allow_block_device, parameter[name[args].mac_addr, <ast.IfExp object at 0x7da1b016fac0>]]] if compare[name[subcommand] equal[==] constant[attached_devices]] begin[:] if name[args].verbose begin[:] return[call[name[netgear].get_attached_devices_2, parameter[]]] if compare[name[subcommand] equal[==] constant[traffic_meter]] begin[:] return[call[name[netgear].get_traffic_meter, parameter[]]] if compare[name[subcommand] equal[==] constant[login]] begin[:] return[call[name[netgear].login, parameter[]]] call[name[print], parameter[constant[Unknown subcommand]]]
keyword[def] identifier[run_subcommand] ( identifier[netgear] , identifier[args] ): literal[string] identifier[subcommand] = identifier[args] . identifier[subcommand] keyword[if] identifier[subcommand] == literal[string] keyword[or] identifier[subcommand] == literal[string] : keyword[return] identifier[netgear] . identifier[allow_block_device] ( identifier[args] . identifier[mac_addr] , identifier[BLOCK] keyword[if] identifier[subcommand] == literal[string] keyword[else] identifier[ALLOW] ) keyword[if] identifier[subcommand] == literal[string] : keyword[if] identifier[args] . identifier[verbose] : keyword[return] identifier[netgear] . identifier[get_attached_devices_2] () keyword[else] : keyword[return] identifier[netgear] . identifier[get_attached_devices] () keyword[if] identifier[subcommand] == literal[string] : keyword[return] identifier[netgear] . identifier[get_traffic_meter] () keyword[if] identifier[subcommand] == literal[string] : keyword[return] identifier[netgear] . identifier[login] () identifier[print] ( literal[string] )
def run_subcommand(netgear, args): """Runs the subcommand configured in args on the netgear session""" subcommand = args.subcommand if subcommand == 'block_device' or subcommand == 'allow_device': return netgear.allow_block_device(args.mac_addr, BLOCK if subcommand == 'block_device' else ALLOW) # depends on [control=['if'], data=[]] if subcommand == 'attached_devices': if args.verbose: return netgear.get_attached_devices_2() # depends on [control=['if'], data=[]] else: return netgear.get_attached_devices() # depends on [control=['if'], data=[]] if subcommand == 'traffic_meter': return netgear.get_traffic_meter() # depends on [control=['if'], data=[]] if subcommand == 'login': return netgear.login() # depends on [control=['if'], data=[]] print('Unknown subcommand')
def get_axis_value_discrete(self, axis): """Return the axis value in discrete steps for a given axis event. How a value translates into a discrete step depends on the source. If the source is :attr:`~libinput.constant.PointerAxisSource.WHEEL`, the discrete value correspond to the number of physical mouse wheel clicks. If the source is :attr:`~libinput.constant.PointerAxisSource.CONTINUOUS` or :attr:`~libinput.constant.PointerAxisSource.FINGER`, the discrete value is always 0. Args: axis (~libinput.constant.PointerAxis): The axis who's value to get. Returns: float: The discrete value for the given event. Raises: AttributeError """ if self.type != EventType.POINTER_AXIS: raise AttributeError(_wrong_meth.format(self.type)) return self._libinput.libinput_event_pointer_get_axis_value_discrete( self._handle, axis)
def function[get_axis_value_discrete, parameter[self, axis]]: constant[Return the axis value in discrete steps for a given axis event. How a value translates into a discrete step depends on the source. If the source is :attr:`~libinput.constant.PointerAxisSource.WHEEL`, the discrete value correspond to the number of physical mouse wheel clicks. If the source is :attr:`~libinput.constant.PointerAxisSource.CONTINUOUS` or :attr:`~libinput.constant.PointerAxisSource.FINGER`, the discrete value is always 0. Args: axis (~libinput.constant.PointerAxis): The axis who's value to get. Returns: float: The discrete value for the given event. Raises: AttributeError ] if compare[name[self].type not_equal[!=] name[EventType].POINTER_AXIS] begin[:] <ast.Raise object at 0x7da20e9b2860> return[call[name[self]._libinput.libinput_event_pointer_get_axis_value_discrete, parameter[name[self]._handle, name[axis]]]]
keyword[def] identifier[get_axis_value_discrete] ( identifier[self] , identifier[axis] ): literal[string] keyword[if] identifier[self] . identifier[type] != identifier[EventType] . identifier[POINTER_AXIS] : keyword[raise] identifier[AttributeError] ( identifier[_wrong_meth] . identifier[format] ( identifier[self] . identifier[type] )) keyword[return] identifier[self] . identifier[_libinput] . identifier[libinput_event_pointer_get_axis_value_discrete] ( identifier[self] . identifier[_handle] , identifier[axis] )
def get_axis_value_discrete(self, axis): """Return the axis value in discrete steps for a given axis event. How a value translates into a discrete step depends on the source. If the source is :attr:`~libinput.constant.PointerAxisSource.WHEEL`, the discrete value correspond to the number of physical mouse wheel clicks. If the source is :attr:`~libinput.constant.PointerAxisSource.CONTINUOUS` or :attr:`~libinput.constant.PointerAxisSource.FINGER`, the discrete value is always 0. Args: axis (~libinput.constant.PointerAxis): The axis who's value to get. Returns: float: The discrete value for the given event. Raises: AttributeError """ if self.type != EventType.POINTER_AXIS: raise AttributeError(_wrong_meth.format(self.type)) # depends on [control=['if'], data=[]] return self._libinput.libinput_event_pointer_get_axis_value_discrete(self._handle, axis)
def spec_trace(traces, cmap=None, wlen=0.4, log=False, trc='k', tralpha=0.9, size=(10, 13), fig=None, **kwargs): """ Plots seismic data with spectrogram behind. Takes a stream or list of traces and plots the trace with the spectra beneath it. :type traces: list :param traces: Traces to be plotted, can be a single :class:`obspy.core.stream.Stream`, or a list of :class:`obspy.core.trace.Trace`. :type cmap: str :param cmap: `Matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_. :type wlen: float :param wlen: Window length for fft in seconds :type log: bool :param log: Use a log frequency scale :type trc: str :param trc: Color for the trace. :type tralpha: float :param tralpha: Opacity level for the seismogram, from transparent (0.0) \ to opaque (1.0). :type size: tuple :param size: Plot size, tuple of floats, inches :type fig: matplotlib.figure.Figure :param fig: Figure to plot onto, defaults to self generating. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.plotting import spec_trace >>> st = read() >>> spec_trace(st, trc='white') # doctest: +SKIP .. 
plot:: from obspy import read from eqcorrscan.utils.plotting import spec_trace st = read() spec_trace(st, trc='white') """ import matplotlib.pyplot as plt if isinstance(traces, Stream): traces.sort(['station', 'channel']) if not fig: fig = plt.figure() for i, tr in enumerate(traces): if i == 0: ax = fig.add_subplot(len(traces), 1, i + 1) else: ax = fig.add_subplot(len(traces), 1, i + 1, sharex=ax) ax1, ax2 = _spec_trace(tr, cmap=cmap, wlen=wlen, log=log, trc=trc, tralpha=tralpha, axes=ax) ax.set_yticks([]) if i < len(traces) - 1: plt.setp(ax1.get_xticklabels(), visible=False) if isinstance(traces, list): ax.text(0.005, 0.85, "{0}::{1}".format(tr.id, tr.stats.starttime), bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) elif isinstance(traces, Stream): ax.text(0.005, 0.85, tr.id, bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) ax.text(0.005, 0.02, str(np.max(tr.data).round(1)), bbox=dict(facecolor='white', alpha=0.95), transform=ax2.transAxes) ax.set_xlabel('Time (s)') fig.subplots_adjust(hspace=0) fig.set_size_inches(w=size[0], h=size[1], forward=True) fig.text(0.04, 0.5, 'Frequency (Hz)', va='center', rotation='vertical') fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def function[spec_trace, parameter[traces, cmap, wlen, log, trc, tralpha, size, fig]]: constant[ Plots seismic data with spectrogram behind. Takes a stream or list of traces and plots the trace with the spectra beneath it. :type traces: list :param traces: Traces to be plotted, can be a single :class:`obspy.core.stream.Stream`, or a list of :class:`obspy.core.trace.Trace`. :type cmap: str :param cmap: `Matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_. :type wlen: float :param wlen: Window length for fft in seconds :type log: bool :param log: Use a log frequency scale :type trc: str :param trc: Color for the trace. :type tralpha: float :param tralpha: Opacity level for the seismogram, from transparent (0.0) to opaque (1.0). :type size: tuple :param size: Plot size, tuple of floats, inches :type fig: matplotlib.figure.Figure :param fig: Figure to plot onto, defaults to self generating. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.plotting import spec_trace >>> st = read() >>> spec_trace(st, trc='white') # doctest: +SKIP .. 
plot:: from obspy import read from eqcorrscan.utils.plotting import spec_trace st = read() spec_trace(st, trc='white') ] import module[matplotlib.pyplot] as alias[plt] if call[name[isinstance], parameter[name[traces], name[Stream]]] begin[:] call[name[traces].sort, parameter[list[[<ast.Constant object at 0x7da20c76c1f0>, <ast.Constant object at 0x7da20c76d5a0>]]]] if <ast.UnaryOp object at 0x7da20c76c3d0> begin[:] variable[fig] assign[=] call[name[plt].figure, parameter[]] for taget[tuple[[<ast.Name object at 0x7da20c76f2b0>, <ast.Name object at 0x7da20c76f550>]]] in starred[call[name[enumerate], parameter[name[traces]]]] begin[:] if compare[name[i] equal[==] constant[0]] begin[:] variable[ax] assign[=] call[name[fig].add_subplot, parameter[call[name[len], parameter[name[traces]]], constant[1], binary_operation[name[i] + constant[1]]]] <ast.Tuple object at 0x7da20c76ee00> assign[=] call[name[_spec_trace], parameter[name[tr]]] call[name[ax].set_yticks, parameter[list[[]]]] if compare[name[i] less[<] binary_operation[call[name[len], parameter[name[traces]]] - constant[1]]] begin[:] call[name[plt].setp, parameter[call[name[ax1].get_xticklabels, parameter[]]]] if call[name[isinstance], parameter[name[traces], name[list]]] begin[:] call[name[ax].text, parameter[constant[0.005], constant[0.85], call[constant[{0}::{1}].format, parameter[name[tr].id, name[tr].stats.starttime]]]] call[name[ax].text, parameter[constant[0.005], constant[0.02], call[name[str], parameter[call[call[name[np].max, parameter[name[tr].data]].round, parameter[constant[1]]]]]]] call[name[ax].set_xlabel, parameter[constant[Time (s)]]] call[name[fig].subplots_adjust, parameter[]] call[name[fig].set_size_inches, parameter[]] call[name[fig].text, parameter[constant[0.04], constant[0.5], constant[Frequency (Hz)]]] variable[fig] assign[=] call[name[_finalise_figure], parameter[]] return[name[fig]]
keyword[def] identifier[spec_trace] ( identifier[traces] , identifier[cmap] = keyword[None] , identifier[wlen] = literal[int] , identifier[log] = keyword[False] , identifier[trc] = literal[string] , identifier[tralpha] = literal[int] , identifier[size] =( literal[int] , literal[int] ), identifier[fig] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt] keyword[if] identifier[isinstance] ( identifier[traces] , identifier[Stream] ): identifier[traces] . identifier[sort] ([ literal[string] , literal[string] ]) keyword[if] keyword[not] identifier[fig] : identifier[fig] = identifier[plt] . identifier[figure] () keyword[for] identifier[i] , identifier[tr] keyword[in] identifier[enumerate] ( identifier[traces] ): keyword[if] identifier[i] == literal[int] : identifier[ax] = identifier[fig] . identifier[add_subplot] ( identifier[len] ( identifier[traces] ), literal[int] , identifier[i] + literal[int] ) keyword[else] : identifier[ax] = identifier[fig] . identifier[add_subplot] ( identifier[len] ( identifier[traces] ), literal[int] , identifier[i] + literal[int] , identifier[sharex] = identifier[ax] ) identifier[ax1] , identifier[ax2] = identifier[_spec_trace] ( identifier[tr] , identifier[cmap] = identifier[cmap] , identifier[wlen] = identifier[wlen] , identifier[log] = identifier[log] , identifier[trc] = identifier[trc] , identifier[tralpha] = identifier[tralpha] , identifier[axes] = identifier[ax] ) identifier[ax] . identifier[set_yticks] ([]) keyword[if] identifier[i] < identifier[len] ( identifier[traces] )- literal[int] : identifier[plt] . identifier[setp] ( identifier[ax1] . identifier[get_xticklabels] (), identifier[visible] = keyword[False] ) keyword[if] identifier[isinstance] ( identifier[traces] , identifier[list] ): identifier[ax] . identifier[text] ( literal[int] , literal[int] , literal[string] . identifier[format] ( identifier[tr] . identifier[id] , identifier[tr] . 
identifier[stats] . identifier[starttime] ), identifier[bbox] = identifier[dict] ( identifier[facecolor] = literal[string] , identifier[alpha] = literal[int] ), identifier[transform] = identifier[ax2] . identifier[transAxes] ) keyword[elif] identifier[isinstance] ( identifier[traces] , identifier[Stream] ): identifier[ax] . identifier[text] ( literal[int] , literal[int] , identifier[tr] . identifier[id] , identifier[bbox] = identifier[dict] ( identifier[facecolor] = literal[string] , identifier[alpha] = literal[int] ), identifier[transform] = identifier[ax2] . identifier[transAxes] ) identifier[ax] . identifier[text] ( literal[int] , literal[int] , identifier[str] ( identifier[np] . identifier[max] ( identifier[tr] . identifier[data] ). identifier[round] ( literal[int] )), identifier[bbox] = identifier[dict] ( identifier[facecolor] = literal[string] , identifier[alpha] = literal[int] ), identifier[transform] = identifier[ax2] . identifier[transAxes] ) identifier[ax] . identifier[set_xlabel] ( literal[string] ) identifier[fig] . identifier[subplots_adjust] ( identifier[hspace] = literal[int] ) identifier[fig] . identifier[set_size_inches] ( identifier[w] = identifier[size] [ literal[int] ], identifier[h] = identifier[size] [ literal[int] ], identifier[forward] = keyword[True] ) identifier[fig] . identifier[text] ( literal[int] , literal[int] , literal[string] , identifier[va] = literal[string] , identifier[rotation] = literal[string] ) identifier[fig] = identifier[_finalise_figure] ( identifier[fig] = identifier[fig] ,** identifier[kwargs] ) keyword[return] identifier[fig]
def spec_trace(traces, cmap=None, wlen=0.4, log=False, trc='k', tralpha=0.9, size=(10, 13), fig=None, **kwargs): """ Plots seismic data with spectrogram behind. Takes a stream or list of traces and plots the trace with the spectra beneath it. :type traces: list :param traces: Traces to be plotted, can be a single :class:`obspy.core.stream.Stream`, or a list of :class:`obspy.core.trace.Trace`. :type cmap: str :param cmap: `Matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_. :type wlen: float :param wlen: Window length for fft in seconds :type log: bool :param log: Use a log frequency scale :type trc: str :param trc: Color for the trace. :type tralpha: float :param tralpha: Opacity level for the seismogram, from transparent (0.0) to opaque (1.0). :type size: tuple :param size: Plot size, tuple of floats, inches :type fig: matplotlib.figure.Figure :param fig: Figure to plot onto, defaults to self generating. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.plotting import spec_trace >>> st = read() >>> spec_trace(st, trc='white') # doctest: +SKIP .. 
plot:: from obspy import read from eqcorrscan.utils.plotting import spec_trace st = read() spec_trace(st, trc='white') """ import matplotlib.pyplot as plt if isinstance(traces, Stream): traces.sort(['station', 'channel']) # depends on [control=['if'], data=[]] if not fig: fig = plt.figure() # depends on [control=['if'], data=[]] for (i, tr) in enumerate(traces): if i == 0: ax = fig.add_subplot(len(traces), 1, i + 1) # depends on [control=['if'], data=['i']] else: ax = fig.add_subplot(len(traces), 1, i + 1, sharex=ax) (ax1, ax2) = _spec_trace(tr, cmap=cmap, wlen=wlen, log=log, trc=trc, tralpha=tralpha, axes=ax) ax.set_yticks([]) if i < len(traces) - 1: plt.setp(ax1.get_xticklabels(), visible=False) # depends on [control=['if'], data=[]] if isinstance(traces, list): ax.text(0.005, 0.85, '{0}::{1}'.format(tr.id, tr.stats.starttime), bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) # depends on [control=['if'], data=[]] elif isinstance(traces, Stream): ax.text(0.005, 0.85, tr.id, bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) # depends on [control=['if'], data=[]] ax.text(0.005, 0.02, str(np.max(tr.data).round(1)), bbox=dict(facecolor='white', alpha=0.95), transform=ax2.transAxes) # depends on [control=['for'], data=[]] ax.set_xlabel('Time (s)') fig.subplots_adjust(hspace=0) fig.set_size_inches(w=size[0], h=size[1], forward=True) fig.text(0.04, 0.5, 'Frequency (Hz)', va='center', rotation='vertical') fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
def _ParseExtensionsString(self, extensions_string): """Parses the extensions string. Args: extensions_string (str): comma separated extensions to filter. """ if not extensions_string: return extensions_string = extensions_string.lower() extensions = [ extension.strip() for extension in extensions_string.split(',')] file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions) self._filter_collection.AddFilter(file_entry_filter)
def function[_ParseExtensionsString, parameter[self, extensions_string]]: constant[Parses the extensions string. Args: extensions_string (str): comma separated extensions to filter. ] if <ast.UnaryOp object at 0x7da20c993940> begin[:] return[None] variable[extensions_string] assign[=] call[name[extensions_string].lower, parameter[]] variable[extensions] assign[=] <ast.ListComp object at 0x7da18fe90490> variable[file_entry_filter] assign[=] call[name[file_entry_filters].ExtensionsFileEntryFilter, parameter[name[extensions]]] call[name[self]._filter_collection.AddFilter, parameter[name[file_entry_filter]]]
keyword[def] identifier[_ParseExtensionsString] ( identifier[self] , identifier[extensions_string] ): literal[string] keyword[if] keyword[not] identifier[extensions_string] : keyword[return] identifier[extensions_string] = identifier[extensions_string] . identifier[lower] () identifier[extensions] =[ identifier[extension] . identifier[strip] () keyword[for] identifier[extension] keyword[in] identifier[extensions_string] . identifier[split] ( literal[string] )] identifier[file_entry_filter] = identifier[file_entry_filters] . identifier[ExtensionsFileEntryFilter] ( identifier[extensions] ) identifier[self] . identifier[_filter_collection] . identifier[AddFilter] ( identifier[file_entry_filter] )
def _ParseExtensionsString(self, extensions_string): """Parses the extensions string. Args: extensions_string (str): comma separated extensions to filter. """ if not extensions_string: return # depends on [control=['if'], data=[]] extensions_string = extensions_string.lower() extensions = [extension.strip() for extension in extensions_string.split(',')] file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions) self._filter_collection.AddFilter(file_entry_filter)
def element_should_not_exist(self, json_string, expr): """ Check that one or more elements, matching [ http://jsonselect.org/ | JSONSelect] expression, don't exist. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONSelect expression;\n *Raises:*\n JsonValidatorError """ value = self.select_elements(json_string, expr) if value is not None: raise JsonValidatorError('Elements %s exist but should not' % expr)
def function[element_should_not_exist, parameter[self, json_string, expr]]: constant[ Check that one or more elements, matching [ http://jsonselect.org/ | JSONSelect] expression, don't exist. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:* _json_string_ - JSON string; _expr_ - JSONSelect expression; *Raises:* JsonValidatorError ] variable[value] assign[=] call[name[self].select_elements, parameter[name[json_string], name[expr]]] if compare[name[value] is_not constant[None]] begin[:] <ast.Raise object at 0x7da1b10e49d0>
keyword[def] identifier[element_should_not_exist] ( identifier[self] , identifier[json_string] , identifier[expr] ): literal[string] identifier[value] = identifier[self] . identifier[select_elements] ( identifier[json_string] , identifier[expr] ) keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[JsonValidatorError] ( literal[string] % identifier[expr] )
def element_should_not_exist(self, json_string, expr): """ Check that one or more elements, matching [ http://jsonselect.org/ | JSONSelect] expression, don't exist. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:* _json_string_ - JSON string; _expr_ - JSONSelect expression; *Raises:* JsonValidatorError """ value = self.select_elements(json_string, expr) if value is not None: raise JsonValidatorError('Elements %s exist but should not' % expr) # depends on [control=['if'], data=[]]
def pid(kp=0., ki=0., kd=0., smooth=0.1): r'''Create a callable that implements a PID controller. A PID controller returns a control signal :math:`u(t)` given a history of error measurements :math:`e(0) \dots e(t)`, using proportional (P), integral (I), and derivative (D) terms, according to: .. math:: u(t) = kp * e(t) + ki * \int_{s=0}^t e(s) ds + kd * \frac{de(s)}{ds}(t) The proportional term is just the current error, the integral term is the sum of all error measurements, and the derivative term is the instantaneous derivative of the error measurement. Parameters ---------- kp : float The weight associated with the proportional term of the PID controller. ki : float The weight associated with the integral term of the PID controller. kd : float The weight associated with the derivative term of the PID controller. smooth : float in [0, 1] Derivative values will be smoothed with this exponential average. A value of 1 never incorporates new derivative information, a value of 0.5 uses the mean of the historic and new information, and a value of 0 discards historic information (i.e., the derivative in this case will be unsmoothed). The default is 0.1. Returns ------- controller : callable (float, float) -> float Returns a function that accepts an error measurement and a delta-time value since the previous measurement, and returns a control signal. ''' state = dict(p=0, i=0, d=0) def control(error, dt=1): state['d'] = smooth * state['d'] + (1 - smooth) * (error - state['p']) / dt state['i'] += error * dt state['p'] = error return kp * state['p'] + ki * state['i'] + kd * state['d'] return control
def function[pid, parameter[kp, ki, kd, smooth]]: constant[Create a callable that implements a PID controller. A PID controller returns a control signal :math:`u(t)` given a history of error measurements :math:`e(0) \dots e(t)`, using proportional (P), integral (I), and derivative (D) terms, according to: .. math:: u(t) = kp * e(t) + ki * \int_{s=0}^t e(s) ds + kd * \frac{de(s)}{ds}(t) The proportional term is just the current error, the integral term is the sum of all error measurements, and the derivative term is the instantaneous derivative of the error measurement. Parameters ---------- kp : float The weight associated with the proportional term of the PID controller. ki : float The weight associated with the integral term of the PID controller. kd : float The weight associated with the derivative term of the PID controller. smooth : float in [0, 1] Derivative values will be smoothed with this exponential average. A value of 1 never incorporates new derivative information, a value of 0.5 uses the mean of the historic and new information, and a value of 0 discards historic information (i.e., the derivative in this case will be unsmoothed). The default is 0.1. Returns ------- controller : callable (float, float) -> float Returns a function that accepts an error measurement and a delta-time value since the previous measurement, and returns a control signal. 
] variable[state] assign[=] call[name[dict], parameter[]] def function[control, parameter[error, dt]]: call[name[state]][constant[d]] assign[=] binary_operation[binary_operation[name[smooth] * call[name[state]][constant[d]]] + binary_operation[binary_operation[binary_operation[constant[1] - name[smooth]] * binary_operation[name[error] - call[name[state]][constant[p]]]] / name[dt]]] <ast.AugAssign object at 0x7da1afef9810> call[name[state]][constant[p]] assign[=] name[error] return[binary_operation[binary_operation[binary_operation[name[kp] * call[name[state]][constant[p]]] + binary_operation[name[ki] * call[name[state]][constant[i]]]] + binary_operation[name[kd] * call[name[state]][constant[d]]]]] return[name[control]]
keyword[def] identifier[pid] ( identifier[kp] = literal[int] , identifier[ki] = literal[int] , identifier[kd] = literal[int] , identifier[smooth] = literal[int] ): literal[string] identifier[state] = identifier[dict] ( identifier[p] = literal[int] , identifier[i] = literal[int] , identifier[d] = literal[int] ) keyword[def] identifier[control] ( identifier[error] , identifier[dt] = literal[int] ): identifier[state] [ literal[string] ]= identifier[smooth] * identifier[state] [ literal[string] ]+( literal[int] - identifier[smooth] )*( identifier[error] - identifier[state] [ literal[string] ])/ identifier[dt] identifier[state] [ literal[string] ]+= identifier[error] * identifier[dt] identifier[state] [ literal[string] ]= identifier[error] keyword[return] identifier[kp] * identifier[state] [ literal[string] ]+ identifier[ki] * identifier[state] [ literal[string] ]+ identifier[kd] * identifier[state] [ literal[string] ] keyword[return] identifier[control]
def pid(kp=0.0, ki=0.0, kd=0.0, smooth=0.1): """Create a callable that implements a PID controller. A PID controller returns a control signal :math:`u(t)` given a history of error measurements :math:`e(0) \\dots e(t)`, using proportional (P), integral (I), and derivative (D) terms, according to: .. math:: u(t) = kp * e(t) + ki * \\int_{s=0}^t e(s) ds + kd * \\frac{de(s)}{ds}(t) The proportional term is just the current error, the integral term is the sum of all error measurements, and the derivative term is the instantaneous derivative of the error measurement. Parameters ---------- kp : float The weight associated with the proportional term of the PID controller. ki : float The weight associated with the integral term of the PID controller. kd : float The weight associated with the derivative term of the PID controller. smooth : float in [0, 1] Derivative values will be smoothed with this exponential average. A value of 1 never incorporates new derivative information, a value of 0.5 uses the mean of the historic and new information, and a value of 0 discards historic information (i.e., the derivative in this case will be unsmoothed). The default is 0.1. Returns ------- controller : callable (float, float) -> float Returns a function that accepts an error measurement and a delta-time value since the previous measurement, and returns a control signal. """ state = dict(p=0, i=0, d=0) def control(error, dt=1): state['d'] = smooth * state['d'] + (1 - smooth) * (error - state['p']) / dt state['i'] += error * dt state['p'] = error return kp * state['p'] + ki * state['i'] + kd * state['d'] return control
def VR(VR=None, description=None): """ Value Representation (VR) <-> Description lookup. :param VR: Takes the VR and returns its description :param description: Take the description of a VR and returns the VR """ value_repr = { "AE": "Application Entity", "AS": "Age String", "AT": "Attribute Tag", "CS": "Code String", "DA": "Date", "DS": "Decimal String", "DT": "Date/Time", "FL": "Floating Point Single (4 bytes)", "FD": "Floating Point Double (8 bytes)", "IS": "Integer String", "LO": "Long String", "LT": "Long Text", "OB": "Other Byte", "OF": "Other Float", "OW": "Other Word", "PN": "Person Name", "SH": "Short String", "SL": "Signed Long", "SQ": "Sequence of Items", "SS": "Signed Short", "ST": "Short Text", "TM": "Time", "UI": "Unique Identifier", "UL": "Unsigned Long", "UN": "Unknown", "US": "Unsigned Short", "UT": "Unlimited Text" } assert VR or description, "Either VR or description required to map VR" if VR is not None: VR = VR.upper() if VR in value_repr: return value_repr[VR] for key, value in value_repr.iteritems(): if description == value: return key return None
def function[VR, parameter[VR, description]]: constant[ Value Representation (VR) <-> Description lookup. :param VR: Takes the VR and returns its description :param description: Take the description of a VR and returns the VR ] variable[value_repr] assign[=] dictionary[[<ast.Constant object at 0x7da18f7220b0>, <ast.Constant object at 0x7da18f723880>, <ast.Constant object at 0x7da18f720be0>, <ast.Constant object at 0x7da18f720730>, <ast.Constant object at 0x7da18f723f40>, <ast.Constant object at 0x7da18f7209a0>, <ast.Constant object at 0x7da18f721390>, <ast.Constant object at 0x7da18f7207c0>, <ast.Constant object at 0x7da18f722860>, <ast.Constant object at 0x7da18f720b80>, <ast.Constant object at 0x7da18f723a30>, <ast.Constant object at 0x7da18f721180>, <ast.Constant object at 0x7da18f721c30>, <ast.Constant object at 0x7da18f722ad0>, <ast.Constant object at 0x7da18f7216f0>, <ast.Constant object at 0x7da18f720250>, <ast.Constant object at 0x7da18f7222f0>, <ast.Constant object at 0x7da18f723b80>, <ast.Constant object at 0x7da18f7233d0>, <ast.Constant object at 0x7da18f721600>, <ast.Constant object at 0x7da18f721690>, <ast.Constant object at 0x7da18f813730>, <ast.Constant object at 0x7da18f811c90>, <ast.Constant object at 0x7da18f813280>, <ast.Constant object at 0x7da18f813130>, <ast.Constant object at 0x7da18f8107c0>, <ast.Constant object at 0x7da18f811000>], [<ast.Constant object at 0x7da18f810700>, <ast.Constant object at 0x7da18f813370>, <ast.Constant object at 0x7da18f813f70>, <ast.Constant object at 0x7da18f810100>, <ast.Constant object at 0x7da18f811840>, <ast.Constant object at 0x7da18f812ce0>, <ast.Constant object at 0x7da18f8121d0>, <ast.Constant object at 0x7da18f811420>, <ast.Constant object at 0x7da18f812e60>, <ast.Constant object at 0x7da18f811660>, <ast.Constant object at 0x7da18f811e70>, <ast.Constant object at 0x7da18f813520>, <ast.Constant object at 0x7da18f8106d0>, <ast.Constant object at 0x7da18f813b80>, <ast.Constant object at 0x7da18f812fe0>, 
<ast.Constant object at 0x7da18f8127d0>, <ast.Constant object at 0x7da18f813d30>, <ast.Constant object at 0x7da18f812e90>, <ast.Constant object at 0x7da18f812fb0>, <ast.Constant object at 0x7da18f813670>, <ast.Constant object at 0x7da18f812170>, <ast.Constant object at 0x7da18f8125f0>, <ast.Constant object at 0x7da18f8101f0>, <ast.Constant object at 0x7da18f811360>, <ast.Constant object at 0x7da18f813100>, <ast.Constant object at 0x7da18f810790>, <ast.Constant object at 0x7da18f811960>]] assert[<ast.BoolOp object at 0x7da18f810c40>] if compare[name[VR] is_not constant[None]] begin[:] variable[VR] assign[=] call[name[VR].upper, parameter[]] if compare[name[VR] in name[value_repr]] begin[:] return[call[name[value_repr]][name[VR]]] for taget[tuple[[<ast.Name object at 0x7da18f812920>, <ast.Name object at 0x7da18f810a60>]]] in starred[call[name[value_repr].iteritems, parameter[]]] begin[:] if compare[name[description] equal[==] name[value]] begin[:] return[name[key]] return[constant[None]]
keyword[def] identifier[VR] ( identifier[VR] = keyword[None] , identifier[description] = keyword[None] ): literal[string] identifier[value_repr] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } keyword[assert] identifier[VR] keyword[or] identifier[description] , literal[string] keyword[if] identifier[VR] keyword[is] keyword[not] keyword[None] : identifier[VR] = identifier[VR] . identifier[upper] () keyword[if] identifier[VR] keyword[in] identifier[value_repr] : keyword[return] identifier[value_repr] [ identifier[VR] ] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[value_repr] . identifier[iteritems] (): keyword[if] identifier[description] == identifier[value] : keyword[return] identifier[key] keyword[return] keyword[None]
def VR(VR=None, description=None): """ Value Representation (VR) <-> Description lookup. :param VR: Takes the VR and returns its description :param description: Take the description of a VR and returns the VR """ value_repr = {'AE': 'Application Entity', 'AS': 'Age String', 'AT': 'Attribute Tag', 'CS': 'Code String', 'DA': 'Date', 'DS': 'Decimal String', 'DT': 'Date/Time', 'FL': 'Floating Point Single (4 bytes)', 'FD': 'Floating Point Double (8 bytes)', 'IS': 'Integer String', 'LO': 'Long String', 'LT': 'Long Text', 'OB': 'Other Byte', 'OF': 'Other Float', 'OW': 'Other Word', 'PN': 'Person Name', 'SH': 'Short String', 'SL': 'Signed Long', 'SQ': 'Sequence of Items', 'SS': 'Signed Short', 'ST': 'Short Text', 'TM': 'Time', 'UI': 'Unique Identifier', 'UL': 'Unsigned Long', 'UN': 'Unknown', 'US': 'Unsigned Short', 'UT': 'Unlimited Text'} assert VR or description, 'Either VR or description required to map VR' if VR is not None: VR = VR.upper() if VR in value_repr: return value_repr[VR] # depends on [control=['if'], data=['VR', 'value_repr']] # depends on [control=['if'], data=['VR']] for (key, value) in value_repr.iteritems(): if description == value: return key # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return None
def _input_as_lines(self, data): """Write sequence of lines to temp file, return filename data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: '\n' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file """ self._input_filename = self.getTmpFilename( self.WorkingDir, suffix='.fasta') with open(self._input_filename, 'w') as f: # Use lazy iteration instead of list comprehension to # prevent reading entire file into memory for line in data: f.write(str(line).strip('\n')) f.write('\n') return self._input_filename
def function[_input_as_lines, parameter[self, data]]: constant[Write sequence of lines to temp file, return filename data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: ' ' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file ] name[self]._input_filename assign[=] call[name[self].getTmpFilename, parameter[name[self].WorkingDir]] with call[name[open], parameter[name[self]._input_filename, constant[w]]] begin[:] for taget[name[line]] in starred[name[data]] begin[:] call[name[f].write, parameter[call[call[name[str], parameter[name[line]]].strip, parameter[constant[ ]]]]] call[name[f].write, parameter[constant[ ]]] return[name[self]._input_filename]
keyword[def] identifier[_input_as_lines] ( identifier[self] , identifier[data] ): literal[string] identifier[self] . identifier[_input_filename] = identifier[self] . identifier[getTmpFilename] ( identifier[self] . identifier[WorkingDir] , identifier[suffix] = literal[string] ) keyword[with] identifier[open] ( identifier[self] . identifier[_input_filename] , literal[string] ) keyword[as] identifier[f] : keyword[for] identifier[line] keyword[in] identifier[data] : identifier[f] . identifier[write] ( identifier[str] ( identifier[line] ). identifier[strip] ( literal[string] )) identifier[f] . identifier[write] ( literal[string] ) keyword[return] identifier[self] . identifier[_input_filename]
def _input_as_lines(self, data): """Write sequence of lines to temp file, return filename data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: ' ' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file """ self._input_filename = self.getTmpFilename(self.WorkingDir, suffix='.fasta') with open(self._input_filename, 'w') as f: # Use lazy iteration instead of list comprehension to # prevent reading entire file into memory for line in data: f.write(str(line).strip('\n')) f.write('\n') # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] return self._input_filename
def run_node(cls, node, inputs, device='CPU'): # pylint: disable=arguments-differ """Running individual node inference on mxnet engine and return the result to onnx test infrastructure. Parameters ---------- node : onnx node object loaded onnx node (individual layer) inputs : numpy array input to run a node on device : 'CPU' device to run a node on Returns ------- params : numpy array result obtained after running the operator """ graph = GraphProto() sym, _ = graph.from_onnx(MXNetBackend.make_graph(node, inputs)) data_names = [i for i in sym.get_internals().list_inputs()] data_shapes = [] reduce_op_types = set(['ReduceMin', 'ReduceMax', 'ReduceMean', 'ReduceProd', 'ReduceSum', 'Slice', 'Pad', 'Squeeze', 'Upsample', 'Reshape', 'Conv', 'ConvTranspose']) # Adding extra dimension of batch_size 1 if the batch_size is different for multiple inputs. for idx, input_name in enumerate(data_names): batch_size = 1 if len(inputs[idx].shape) < 4 and len(inputs) > 1 and \ len(set(x.shape[0] for x in inputs)) != 1: tuples = ((batch_size,), inputs[idx].shape) new_shape = sum(tuples, ()) data_shapes.append((input_name, new_shape)) else: data_shapes.append((input_name, inputs[idx].shape)) # create module, passing cpu context if device == 'CPU': ctx = mx.cpu() else: raise NotImplementedError("Only CPU context is supported for now") # create a module mod = mx.mod.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None) mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None) # initializing parameters for calculating result of each individual node mod.init_params() data_forward = [] for idx, input_name in enumerate(data_names): # slice and pad operator tests needs 1 less dimension in forward pass # otherwise it will throw an error. 
# for squeeze operator, need to retain shape of input as provided val = inputs[idx] if node.op_type in reduce_op_types: data_forward.append(mx.nd.array(val)) else: data_forward.append(mx.nd.array([val])) mod.forward(mx.io.DataBatch(data_forward)) result = mod.get_outputs()[0].asnumpy() if node.op_type in reduce_op_types: return [result] return result
def function[run_node, parameter[cls, node, inputs, device]]: constant[Running individual node inference on mxnet engine and return the result to onnx test infrastructure. Parameters ---------- node : onnx node object loaded onnx node (individual layer) inputs : numpy array input to run a node on device : 'CPU' device to run a node on Returns ------- params : numpy array result obtained after running the operator ] variable[graph] assign[=] call[name[GraphProto], parameter[]] <ast.Tuple object at 0x7da1b23477c0> assign[=] call[name[graph].from_onnx, parameter[call[name[MXNetBackend].make_graph, parameter[name[node], name[inputs]]]]] variable[data_names] assign[=] <ast.ListComp object at 0x7da1b23456f0> variable[data_shapes] assign[=] list[[]] variable[reduce_op_types] assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da1b23466e0>, <ast.Constant object at 0x7da1b2345f30>, <ast.Constant object at 0x7da1b2344d00>, <ast.Constant object at 0x7da1b2345e10>, <ast.Constant object at 0x7da1b2346f50>, <ast.Constant object at 0x7da1b2344be0>, <ast.Constant object at 0x7da1b23453c0>, <ast.Constant object at 0x7da1b23479d0>, <ast.Constant object at 0x7da1b2344130>, <ast.Constant object at 0x7da1b2347160>, <ast.Constant object at 0x7da1b2346500>, <ast.Constant object at 0x7da1b2346e00>]]]] for taget[tuple[[<ast.Name object at 0x7da1b2346d40>, <ast.Name object at 0x7da1b2344850>]]] in starred[call[name[enumerate], parameter[name[data_names]]]] begin[:] variable[batch_size] assign[=] constant[1] if <ast.BoolOp object at 0x7da1b23454e0> begin[:] variable[tuples] assign[=] tuple[[<ast.Tuple object at 0x7da1b23471c0>, <ast.Attribute object at 0x7da1b2347b50>]] variable[new_shape] assign[=] call[name[sum], parameter[name[tuples], tuple[[]]]] call[name[data_shapes].append, parameter[tuple[[<ast.Name object at 0x7da1b2347100>, <ast.Name object at 0x7da1b2347be0>]]]] if compare[name[device] equal[==] constant[CPU]] begin[:] variable[ctx] assign[=] call[name[mx].cpu, 
parameter[]] variable[mod] assign[=] call[name[mx].mod.Module, parameter[]] call[name[mod].bind, parameter[]] call[name[mod].init_params, parameter[]] variable[data_forward] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b2344a60>, <ast.Name object at 0x7da1b2344970>]]] in starred[call[name[enumerate], parameter[name[data_names]]]] begin[:] variable[val] assign[=] call[name[inputs]][name[idx]] if compare[name[node].op_type in name[reduce_op_types]] begin[:] call[name[data_forward].append, parameter[call[name[mx].nd.array, parameter[name[val]]]]] call[name[mod].forward, parameter[call[name[mx].io.DataBatch, parameter[name[data_forward]]]]] variable[result] assign[=] call[call[call[name[mod].get_outputs, parameter[]]][constant[0]].asnumpy, parameter[]] if compare[name[node].op_type in name[reduce_op_types]] begin[:] return[list[[<ast.Name object at 0x7da1b2344670>]]] return[name[result]]
keyword[def] identifier[run_node] ( identifier[cls] , identifier[node] , identifier[inputs] , identifier[device] = literal[string] ): literal[string] identifier[graph] = identifier[GraphProto] () identifier[sym] , identifier[_] = identifier[graph] . identifier[from_onnx] ( identifier[MXNetBackend] . identifier[make_graph] ( identifier[node] , identifier[inputs] )) identifier[data_names] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[sym] . identifier[get_internals] (). identifier[list_inputs] ()] identifier[data_shapes] =[] identifier[reduce_op_types] = identifier[set] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[for] identifier[idx] , identifier[input_name] keyword[in] identifier[enumerate] ( identifier[data_names] ): identifier[batch_size] = literal[int] keyword[if] identifier[len] ( identifier[inputs] [ identifier[idx] ]. identifier[shape] )< literal[int] keyword[and] identifier[len] ( identifier[inputs] )> literal[int] keyword[and] identifier[len] ( identifier[set] ( identifier[x] . identifier[shape] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[inputs] ))!= literal[int] : identifier[tuples] =(( identifier[batch_size] ,), identifier[inputs] [ identifier[idx] ]. identifier[shape] ) identifier[new_shape] = identifier[sum] ( identifier[tuples] ,()) identifier[data_shapes] . identifier[append] (( identifier[input_name] , identifier[new_shape] )) keyword[else] : identifier[data_shapes] . identifier[append] (( identifier[input_name] , identifier[inputs] [ identifier[idx] ]. identifier[shape] )) keyword[if] identifier[device] == literal[string] : identifier[ctx] = identifier[mx] . identifier[cpu] () keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) identifier[mod] = identifier[mx] . identifier[mod] . 
identifier[Module] ( identifier[symbol] = identifier[sym] , identifier[data_names] = identifier[data_names] , identifier[context] = identifier[ctx] , identifier[label_names] = keyword[None] ) identifier[mod] . identifier[bind] ( identifier[for_training] = keyword[False] , identifier[data_shapes] = identifier[data_shapes] , identifier[label_shapes] = keyword[None] ) identifier[mod] . identifier[init_params] () identifier[data_forward] =[] keyword[for] identifier[idx] , identifier[input_name] keyword[in] identifier[enumerate] ( identifier[data_names] ): identifier[val] = identifier[inputs] [ identifier[idx] ] keyword[if] identifier[node] . identifier[op_type] keyword[in] identifier[reduce_op_types] : identifier[data_forward] . identifier[append] ( identifier[mx] . identifier[nd] . identifier[array] ( identifier[val] )) keyword[else] : identifier[data_forward] . identifier[append] ( identifier[mx] . identifier[nd] . identifier[array] ([ identifier[val] ])) identifier[mod] . identifier[forward] ( identifier[mx] . identifier[io] . identifier[DataBatch] ( identifier[data_forward] )) identifier[result] = identifier[mod] . identifier[get_outputs] ()[ literal[int] ]. identifier[asnumpy] () keyword[if] identifier[node] . identifier[op_type] keyword[in] identifier[reduce_op_types] : keyword[return] [ identifier[result] ] keyword[return] identifier[result]
def run_node(cls, node, inputs, device='CPU'): # pylint: disable=arguments-differ "Running individual node inference on mxnet engine and\n return the result to onnx test infrastructure.\n\n Parameters\n ----------\n node : onnx node object\n loaded onnx node (individual layer)\n inputs : numpy array\n input to run a node on\n device : 'CPU'\n device to run a node on\n\n Returns\n -------\n params : numpy array\n result obtained after running the operator\n " graph = GraphProto() (sym, _) = graph.from_onnx(MXNetBackend.make_graph(node, inputs)) data_names = [i for i in sym.get_internals().list_inputs()] data_shapes = [] reduce_op_types = set(['ReduceMin', 'ReduceMax', 'ReduceMean', 'ReduceProd', 'ReduceSum', 'Slice', 'Pad', 'Squeeze', 'Upsample', 'Reshape', 'Conv', 'ConvTranspose']) # Adding extra dimension of batch_size 1 if the batch_size is different for multiple inputs. for (idx, input_name) in enumerate(data_names): batch_size = 1 if len(inputs[idx].shape) < 4 and len(inputs) > 1 and (len(set((x.shape[0] for x in inputs))) != 1): tuples = ((batch_size,), inputs[idx].shape) new_shape = sum(tuples, ()) data_shapes.append((input_name, new_shape)) # depends on [control=['if'], data=[]] else: data_shapes.append((input_name, inputs[idx].shape)) # depends on [control=['for'], data=[]] # create module, passing cpu context if device == 'CPU': ctx = mx.cpu() # depends on [control=['if'], data=[]] else: raise NotImplementedError('Only CPU context is supported for now') # create a module mod = mx.mod.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None) mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None) # initializing parameters for calculating result of each individual node mod.init_params() data_forward = [] for (idx, input_name) in enumerate(data_names): # slice and pad operator tests needs 1 less dimension in forward pass # otherwise it will throw an error. 
# for squeeze operator, need to retain shape of input as provided val = inputs[idx] if node.op_type in reduce_op_types: data_forward.append(mx.nd.array(val)) # depends on [control=['if'], data=[]] else: data_forward.append(mx.nd.array([val])) # depends on [control=['for'], data=[]] mod.forward(mx.io.DataBatch(data_forward)) result = mod.get_outputs()[0].asnumpy() if node.op_type in reduce_op_types: return [result] # depends on [control=['if'], data=[]] return result
def get_messages(self): """ Returns list of Message objects which represents messages being transported. """ cs = self.data["comments"]["data"] res = [] for c in cs: res.append(Message(c,self)) return res
def function[get_messages, parameter[self]]: constant[ Returns list of Message objects which represents messages being transported. ] variable[cs] assign[=] call[call[name[self].data][constant[comments]]][constant[data]] variable[res] assign[=] list[[]] for taget[name[c]] in starred[name[cs]] begin[:] call[name[res].append, parameter[call[name[Message], parameter[name[c], name[self]]]]] return[name[res]]
keyword[def] identifier[get_messages] ( identifier[self] ): literal[string] identifier[cs] = identifier[self] . identifier[data] [ literal[string] ][ literal[string] ] identifier[res] =[] keyword[for] identifier[c] keyword[in] identifier[cs] : identifier[res] . identifier[append] ( identifier[Message] ( identifier[c] , identifier[self] )) keyword[return] identifier[res]
def get_messages(self): """ Returns list of Message objects which represents messages being transported. """ cs = self.data['comments']['data'] res = [] for c in cs: res.append(Message(c, self)) # depends on [control=['for'], data=['c']] return res
def len_gt(name, value): ''' Only succeed if length of the given register location is greater than the given value. USAGE: .. code-block:: yaml foo: check.len_gt: - value: 42 run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: foo ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if name not in __reg__: ret['result'] = False ret['comment'] = 'Value {0} not in register'.format(name) return ret if len(__reg__[name]['val']) > value: ret['result'] = True return ret
def function[len_gt, parameter[name, value]]: constant[ Only succeed if length of the given register location is greater than the given value. USAGE: .. code-block:: yaml foo: check.len_gt: - value: 42 run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: foo ] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344910>, <ast.Constant object at 0x7da1b2346f80>, <ast.Constant object at 0x7da1b2346cb0>, <ast.Constant object at 0x7da1b2347eb0>], [<ast.Name object at 0x7da1b2347220>, <ast.Constant object at 0x7da1b2344be0>, <ast.Constant object at 0x7da1b2345e10>, <ast.Dict object at 0x7da1b2345f90>]] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[__reg__]] begin[:] call[name[ret]][constant[result]] assign[=] constant[False] call[name[ret]][constant[comment]] assign[=] call[constant[Value {0} not in register].format, parameter[name[name]]] return[name[ret]] if compare[call[name[len], parameter[call[call[name[__reg__]][name[name]]][constant[val]]]] greater[>] name[value]] begin[:] call[name[ret]][constant[result]] assign[=] constant[True] return[name[ret]]
keyword[def] identifier[len_gt] ( identifier[name] , identifier[value] ): literal[string] identifier[ret] ={ literal[string] : identifier[name] , literal[string] : keyword[False] , literal[string] : literal[string] , literal[string] :{}} keyword[if] identifier[name] keyword[not] keyword[in] identifier[__reg__] : identifier[ret] [ literal[string] ]= keyword[False] identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] ) keyword[return] identifier[ret] keyword[if] identifier[len] ( identifier[__reg__] [ identifier[name] ][ literal[string] ])> identifier[value] : identifier[ret] [ literal[string] ]= keyword[True] keyword[return] identifier[ret]
def len_gt(name, value): """ Only succeed if length of the given register location is greater than the given value. USAGE: .. code-block:: yaml foo: check.len_gt: - value: 42 run_remote_ex: local.cmd: - tgt: '*' - func: test.ping - require: - check: foo """ ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if name not in __reg__: ret['result'] = False ret['comment'] = 'Value {0} not in register'.format(name) return ret # depends on [control=['if'], data=['name']] if len(__reg__[name]['val']) > value: ret['result'] = True # depends on [control=['if'], data=[]] return ret
def create_cli(create_app=None): """Create CLI for ``inveniomanage`` command. :param create_app: Flask application factory. :returns: Click command group. .. versionadded: 1.0.0 """ def create_cli_app(info): """Application factory for CLI app. Internal function for creating the CLI. When invoked via ``inveniomanage`` FLASK_APP must be set. """ if create_app is None: # Fallback to normal Flask behavior info.create_app = None app = info.load_app() else: app = create_app(debug=get_debug_flag()) return app @click.group(cls=FlaskGroup, create_app=create_cli_app) def cli(**params): """Command Line Interface for Invenio.""" pass return cli
def function[create_cli, parameter[create_app]]: constant[Create CLI for ``inveniomanage`` command. :param create_app: Flask application factory. :returns: Click command group. .. versionadded: 1.0.0 ] def function[create_cli_app, parameter[info]]: constant[Application factory for CLI app. Internal function for creating the CLI. When invoked via ``inveniomanage`` FLASK_APP must be set. ] if compare[name[create_app] is constant[None]] begin[:] name[info].create_app assign[=] constant[None] variable[app] assign[=] call[name[info].load_app, parameter[]] return[name[app]] def function[cli, parameter[]]: constant[Command Line Interface for Invenio.] pass return[name[cli]]
keyword[def] identifier[create_cli] ( identifier[create_app] = keyword[None] ): literal[string] keyword[def] identifier[create_cli_app] ( identifier[info] ): literal[string] keyword[if] identifier[create_app] keyword[is] keyword[None] : identifier[info] . identifier[create_app] = keyword[None] identifier[app] = identifier[info] . identifier[load_app] () keyword[else] : identifier[app] = identifier[create_app] ( identifier[debug] = identifier[get_debug_flag] ()) keyword[return] identifier[app] @ identifier[click] . identifier[group] ( identifier[cls] = identifier[FlaskGroup] , identifier[create_app] = identifier[create_cli_app] ) keyword[def] identifier[cli] (** identifier[params] ): literal[string] keyword[pass] keyword[return] identifier[cli]
def create_cli(create_app=None): """Create CLI for ``inveniomanage`` command. :param create_app: Flask application factory. :returns: Click command group. .. versionadded: 1.0.0 """ def create_cli_app(info): """Application factory for CLI app. Internal function for creating the CLI. When invoked via ``inveniomanage`` FLASK_APP must be set. """ if create_app is None: # Fallback to normal Flask behavior info.create_app = None app = info.load_app() # depends on [control=['if'], data=[]] else: app = create_app(debug=get_debug_flag()) return app @click.group(cls=FlaskGroup, create_app=create_cli_app) def cli(**params): """Command Line Interface for Invenio.""" pass return cli
def get_offset(self, x, y): """ Computes how far away and at what angle a coordinate is located. Distance is returned in feet, angle is returned in degrees :returns: distance,angle offset of the given x,y coordinate .. versionadded:: 2018.1.7 """ with self._lock: dx = self.x - x dy = self.y - y distance = math.hypot(dx, dy) angle = math.atan2(dy, dx) return distance, math.degrees(angle)
def function[get_offset, parameter[self, x, y]]: constant[ Computes how far away and at what angle a coordinate is located. Distance is returned in feet, angle is returned in degrees :returns: distance,angle offset of the given x,y coordinate .. versionadded:: 2018.1.7 ] with name[self]._lock begin[:] variable[dx] assign[=] binary_operation[name[self].x - name[x]] variable[dy] assign[=] binary_operation[name[self].y - name[y]] variable[distance] assign[=] call[name[math].hypot, parameter[name[dx], name[dy]]] variable[angle] assign[=] call[name[math].atan2, parameter[name[dy], name[dx]]] return[tuple[[<ast.Name object at 0x7da1b1800a90>, <ast.Call object at 0x7da1b18026e0>]]]
keyword[def] identifier[get_offset] ( identifier[self] , identifier[x] , identifier[y] ): literal[string] keyword[with] identifier[self] . identifier[_lock] : identifier[dx] = identifier[self] . identifier[x] - identifier[x] identifier[dy] = identifier[self] . identifier[y] - identifier[y] identifier[distance] = identifier[math] . identifier[hypot] ( identifier[dx] , identifier[dy] ) identifier[angle] = identifier[math] . identifier[atan2] ( identifier[dy] , identifier[dx] ) keyword[return] identifier[distance] , identifier[math] . identifier[degrees] ( identifier[angle] )
def get_offset(self, x, y): """ Computes how far away and at what angle a coordinate is located. Distance is returned in feet, angle is returned in degrees :returns: distance,angle offset of the given x,y coordinate .. versionadded:: 2018.1.7 """ with self._lock: dx = self.x - x dy = self.y - y # depends on [control=['with'], data=[]] distance = math.hypot(dx, dy) angle = math.atan2(dy, dx) return (distance, math.degrees(angle))
def _setup_logger(self, logging_level: int, log_to_console: bool): """Sets up the internal logger Args: logging_level: what logging level to use log_to_console: whether or not to log to the console """ self.logger = logging.getLogger('discord') self.logger.handlers = [] self.logger.setLevel(logging_level) formatter = logging.Formatter(style='{', fmt='{asctime} [{levelname}] {message}', datefmt='%Y-%m-%d %H:%M:%S') file_handler = logging.FileHandler('pycord.log') file_handler.setFormatter(formatter) file_handler.setLevel(logging_level) self.logger.addHandler(file_handler) if log_to_console: stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(formatter) stream_handler.setLevel(logging_level) self.logger.addHandler(stream_handler)
def function[_setup_logger, parameter[self, logging_level, log_to_console]]: constant[Sets up the internal logger Args: logging_level: what logging level to use log_to_console: whether or not to log to the console ] name[self].logger assign[=] call[name[logging].getLogger, parameter[constant[discord]]] name[self].logger.handlers assign[=] list[[]] call[name[self].logger.setLevel, parameter[name[logging_level]]] variable[formatter] assign[=] call[name[logging].Formatter, parameter[]] variable[file_handler] assign[=] call[name[logging].FileHandler, parameter[constant[pycord.log]]] call[name[file_handler].setFormatter, parameter[name[formatter]]] call[name[file_handler].setLevel, parameter[name[logging_level]]] call[name[self].logger.addHandler, parameter[name[file_handler]]] if name[log_to_console] begin[:] variable[stream_handler] assign[=] call[name[logging].StreamHandler, parameter[name[sys].stdout]] call[name[stream_handler].setFormatter, parameter[name[formatter]]] call[name[stream_handler].setLevel, parameter[name[logging_level]]] call[name[self].logger.addHandler, parameter[name[stream_handler]]]
keyword[def] identifier[_setup_logger] ( identifier[self] , identifier[logging_level] : identifier[int] , identifier[log_to_console] : identifier[bool] ): literal[string] identifier[self] . identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] ) identifier[self] . identifier[logger] . identifier[handlers] =[] identifier[self] . identifier[logger] . identifier[setLevel] ( identifier[logging_level] ) identifier[formatter] = identifier[logging] . identifier[Formatter] ( identifier[style] = literal[string] , identifier[fmt] = literal[string] , identifier[datefmt] = literal[string] ) identifier[file_handler] = identifier[logging] . identifier[FileHandler] ( literal[string] ) identifier[file_handler] . identifier[setFormatter] ( identifier[formatter] ) identifier[file_handler] . identifier[setLevel] ( identifier[logging_level] ) identifier[self] . identifier[logger] . identifier[addHandler] ( identifier[file_handler] ) keyword[if] identifier[log_to_console] : identifier[stream_handler] = identifier[logging] . identifier[StreamHandler] ( identifier[sys] . identifier[stdout] ) identifier[stream_handler] . identifier[setFormatter] ( identifier[formatter] ) identifier[stream_handler] . identifier[setLevel] ( identifier[logging_level] ) identifier[self] . identifier[logger] . identifier[addHandler] ( identifier[stream_handler] )
def _setup_logger(self, logging_level: int, log_to_console: bool): """Sets up the internal logger Args: logging_level: what logging level to use log_to_console: whether or not to log to the console """ self.logger = logging.getLogger('discord') self.logger.handlers = [] self.logger.setLevel(logging_level) formatter = logging.Formatter(style='{', fmt='{asctime} [{levelname}] {message}', datefmt='%Y-%m-%d %H:%M:%S') file_handler = logging.FileHandler('pycord.log') file_handler.setFormatter(formatter) file_handler.setLevel(logging_level) self.logger.addHandler(file_handler) if log_to_console: stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(formatter) stream_handler.setLevel(logging_level) self.logger.addHandler(stream_handler) # depends on [control=['if'], data=[]]
def _validate_positional_arguments(args): """ To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055. Assuming that unknown commands are positional arguments immediately led by words that only appear at the end of the commands Slight modification of https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373 Args: args: The arguments that the user inputs in the terminal. Returns: Rudimentary parsed arguments. """ nouns = [] for arg in args: if not arg.startswith('-') or not arg.startswith('{{'): nouns.append(arg) else: break while nouns: search = ' '.join(nouns) # Since the command name may be immediately followed by a positional arg, strip those off if not next((x for x in azext_alias.cached_reserved_commands if x.endswith(search)), False): del nouns[-1] else: return raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(' '.join(args)))
def function[_validate_positional_arguments, parameter[args]]: constant[ To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055. Assuming that unknown commands are positional arguments immediately led by words that only appear at the end of the commands Slight modification of https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373 Args: args: The arguments that the user inputs in the terminal. Returns: Rudimentary parsed arguments. ] variable[nouns] assign[=] list[[]] for taget[name[arg]] in starred[name[args]] begin[:] if <ast.BoolOp object at 0x7da18dc066b0> begin[:] call[name[nouns].append, parameter[name[arg]]] while name[nouns] begin[:] variable[search] assign[=] call[constant[ ].join, parameter[name[nouns]]] if <ast.UnaryOp object at 0x7da18dc07e20> begin[:] <ast.Delete object at 0x7da18dc056f0> <ast.Raise object at 0x7da18dc06800>
keyword[def] identifier[_validate_positional_arguments] ( identifier[args] ): literal[string] identifier[nouns] =[] keyword[for] identifier[arg] keyword[in] identifier[args] : keyword[if] keyword[not] identifier[arg] . identifier[startswith] ( literal[string] ) keyword[or] keyword[not] identifier[arg] . identifier[startswith] ( literal[string] ): identifier[nouns] . identifier[append] ( identifier[arg] ) keyword[else] : keyword[break] keyword[while] identifier[nouns] : identifier[search] = literal[string] . identifier[join] ( identifier[nouns] ) keyword[if] keyword[not] identifier[next] (( identifier[x] keyword[for] identifier[x] keyword[in] identifier[azext_alias] . identifier[cached_reserved_commands] keyword[if] identifier[x] . identifier[endswith] ( identifier[search] )), keyword[False] ): keyword[del] identifier[nouns] [- literal[int] ] keyword[else] : keyword[return] keyword[raise] identifier[CLIError] ( identifier[INVALID_ALIAS_COMMAND_ERROR] . identifier[format] ( literal[string] . identifier[join] ( identifier[args] )))
def _validate_positional_arguments(args): """ To validate the positional argument feature - https://github.com/Azure/azure-cli/pull/6055. Assuming that unknown commands are positional arguments immediately led by words that only appear at the end of the commands Slight modification of https://github.com/Azure/azure-cli/blob/dev/src/azure-cli-core/azure/cli/core/commands/__init__.py#L356-L373 Args: args: The arguments that the user inputs in the terminal. Returns: Rudimentary parsed arguments. """ nouns = [] for arg in args: if not arg.startswith('-') or not arg.startswith('{{'): nouns.append(arg) # depends on [control=['if'], data=[]] else: break # depends on [control=['for'], data=['arg']] while nouns: search = ' '.join(nouns) # Since the command name may be immediately followed by a positional arg, strip those off if not next((x for x in azext_alias.cached_reserved_commands if x.endswith(search)), False): del nouns[-1] # depends on [control=['if'], data=[]] else: return # depends on [control=['while'], data=[]] raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(' '.join(args)))
def _load(abspath, serializer_type, loader_func=None, decompress=True, verbose=False, **kwargs): """load object from file. :param abspath: The file path you want load from. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param loader_func: A loader function that takes binary as input, return an object. :type loader_func: callable function :param decompress: default ``False``. If True, then decompress binary. :type decompress: bool :param verbose: default True, help-message-display trigger. :type verbose: boolean """ _check_serializer_type(serializer_type) if not inspect.isfunction(loader_func): raise TypeError("loader_func has to be a function take binary as input " "and return an object!") prt_console("\nLoad from '%s' ..." % abspath, verbose) if not os.path.exists(abspath): raise ValueError("'%s' doesn't exist." % abspath) st = time.clock() with open(abspath, "rb") as f: b = f.read() if decompress: b = zlib.decompress(b) if serializer_type is "str": obj = loader_func(b.decode("utf-8"), **kwargs) else: obj = loader_func(b, **kwargs) elapsed = time.clock() - st prt_console(" Complete! Elapse %.6f sec." % elapsed, verbose) return obj
def function[_load, parameter[abspath, serializer_type, loader_func, decompress, verbose]]: constant[load object from file. :param abspath: The file path you want load from. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param loader_func: A loader function that takes binary as input, return an object. :type loader_func: callable function :param decompress: default ``False``. If True, then decompress binary. :type decompress: bool :param verbose: default True, help-message-display trigger. :type verbose: boolean ] call[name[_check_serializer_type], parameter[name[serializer_type]]] if <ast.UnaryOp object at 0x7da1b23b1cc0> begin[:] <ast.Raise object at 0x7da1b23b1c30> call[name[prt_console], parameter[binary_operation[constant[ Load from '%s' ...] <ast.Mod object at 0x7da2590d6920> name[abspath]], name[verbose]]] if <ast.UnaryOp object at 0x7da1b23b0280> begin[:] <ast.Raise object at 0x7da1b23b21a0> variable[st] assign[=] call[name[time].clock, parameter[]] with call[name[open], parameter[name[abspath], constant[rb]]] begin[:] variable[b] assign[=] call[name[f].read, parameter[]] if name[decompress] begin[:] variable[b] assign[=] call[name[zlib].decompress, parameter[name[b]]] if compare[name[serializer_type] is constant[str]] begin[:] variable[obj] assign[=] call[name[loader_func], parameter[call[name[b].decode, parameter[constant[utf-8]]]]] variable[elapsed] assign[=] binary_operation[call[name[time].clock, parameter[]] - name[st]] call[name[prt_console], parameter[binary_operation[constant[ Complete! Elapse %.6f sec.] <ast.Mod object at 0x7da2590d6920> name[elapsed]], name[verbose]]] return[name[obj]]
keyword[def] identifier[_load] ( identifier[abspath] , identifier[serializer_type] , identifier[loader_func] = keyword[None] , identifier[decompress] = keyword[True] , identifier[verbose] = keyword[False] , ** identifier[kwargs] ): literal[string] identifier[_check_serializer_type] ( identifier[serializer_type] ) keyword[if] keyword[not] identifier[inspect] . identifier[isfunction] ( identifier[loader_func] ): keyword[raise] identifier[TypeError] ( literal[string] literal[string] ) identifier[prt_console] ( literal[string] % identifier[abspath] , identifier[verbose] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[abspath] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[abspath] ) identifier[st] = identifier[time] . identifier[clock] () keyword[with] identifier[open] ( identifier[abspath] , literal[string] ) keyword[as] identifier[f] : identifier[b] = identifier[f] . identifier[read] () keyword[if] identifier[decompress] : identifier[b] = identifier[zlib] . identifier[decompress] ( identifier[b] ) keyword[if] identifier[serializer_type] keyword[is] literal[string] : identifier[obj] = identifier[loader_func] ( identifier[b] . identifier[decode] ( literal[string] ),** identifier[kwargs] ) keyword[else] : identifier[obj] = identifier[loader_func] ( identifier[b] ,** identifier[kwargs] ) identifier[elapsed] = identifier[time] . identifier[clock] ()- identifier[st] identifier[prt_console] ( literal[string] % identifier[elapsed] , identifier[verbose] ) keyword[return] identifier[obj]
def _load(abspath, serializer_type, loader_func=None, decompress=True, verbose=False, **kwargs): """load object from file. :param abspath: The file path you want load from. :type abspath: str :param serializer_type: 'binary' or 'str'. :type serializer_type: str :param loader_func: A loader function that takes binary as input, return an object. :type loader_func: callable function :param decompress: default ``False``. If True, then decompress binary. :type decompress: bool :param verbose: default True, help-message-display trigger. :type verbose: boolean """ _check_serializer_type(serializer_type) if not inspect.isfunction(loader_func): raise TypeError('loader_func has to be a function take binary as input and return an object!') # depends on [control=['if'], data=[]] prt_console("\nLoad from '%s' ..." % abspath, verbose) if not os.path.exists(abspath): raise ValueError("'%s' doesn't exist." % abspath) # depends on [control=['if'], data=[]] st = time.clock() with open(abspath, 'rb') as f: b = f.read() if decompress: b = zlib.decompress(b) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']] if serializer_type is 'str': obj = loader_func(b.decode('utf-8'), **kwargs) # depends on [control=['if'], data=[]] else: obj = loader_func(b, **kwargs) elapsed = time.clock() - st prt_console(' Complete! Elapse %.6f sec.' % elapsed, verbose) return obj
def secret_write(backend,entry): """ Write a secret """ path,value=entry.split('=') if value.startswith('@'): with open(value[1:]) as vfile: value = vfile.read() click.echo(click.style('%s - Writing secret' % get_datetime(), fg='green')) check_and_print( DKCloudCommandRunner.secret_write(backend.dki,path,value))
def function[secret_write, parameter[backend, entry]]: constant[ Write a secret ] <ast.Tuple object at 0x7da20c6c5ff0> assign[=] call[name[entry].split, parameter[constant[=]]] if call[name[value].startswith, parameter[constant[@]]] begin[:] with call[name[open], parameter[call[name[value]][<ast.Slice object at 0x7da20c6c6200>]]] begin[:] variable[value] assign[=] call[name[vfile].read, parameter[]] call[name[click].echo, parameter[call[name[click].style, parameter[binary_operation[constant[%s - Writing secret] <ast.Mod object at 0x7da2590d6920> call[name[get_datetime], parameter[]]]]]]] call[name[check_and_print], parameter[call[name[DKCloudCommandRunner].secret_write, parameter[name[backend].dki, name[path], name[value]]]]]
keyword[def] identifier[secret_write] ( identifier[backend] , identifier[entry] ): literal[string] identifier[path] , identifier[value] = identifier[entry] . identifier[split] ( literal[string] ) keyword[if] identifier[value] . identifier[startswith] ( literal[string] ): keyword[with] identifier[open] ( identifier[value] [ literal[int] :]) keyword[as] identifier[vfile] : identifier[value] = identifier[vfile] . identifier[read] () identifier[click] . identifier[echo] ( identifier[click] . identifier[style] ( literal[string] % identifier[get_datetime] (), identifier[fg] = literal[string] )) identifier[check_and_print] ( identifier[DKCloudCommandRunner] . identifier[secret_write] ( identifier[backend] . identifier[dki] , identifier[path] , identifier[value] ))
def secret_write(backend, entry): """ Write a secret """ (path, value) = entry.split('=') if value.startswith('@'): with open(value[1:]) as vfile: value = vfile.read() # depends on [control=['with'], data=['vfile']] # depends on [control=['if'], data=[]] click.echo(click.style('%s - Writing secret' % get_datetime(), fg='green')) check_and_print(DKCloudCommandRunner.secret_write(backend.dki, path, value))
def trunc_str(s: str) -> str: """Truncate strings to maximum length.""" if len(s) > max_str_size: i = max(0, (max_str_size - 3) // 2) j = max(0, max_str_size - 3 - i) s = s[:i] + "..." + s[-j:] return s
def function[trunc_str, parameter[s]]: constant[Truncate strings to maximum length.] if compare[call[name[len], parameter[name[s]]] greater[>] name[max_str_size]] begin[:] variable[i] assign[=] call[name[max], parameter[constant[0], binary_operation[binary_operation[name[max_str_size] - constant[3]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]] variable[j] assign[=] call[name[max], parameter[constant[0], binary_operation[binary_operation[name[max_str_size] - constant[3]] - name[i]]]] variable[s] assign[=] binary_operation[binary_operation[call[name[s]][<ast.Slice object at 0x7da1b2260790>] + constant[...]] + call[name[s]][<ast.Slice object at 0x7da1b2263be0>]] return[name[s]]
keyword[def] identifier[trunc_str] ( identifier[s] : identifier[str] )-> identifier[str] : literal[string] keyword[if] identifier[len] ( identifier[s] )> identifier[max_str_size] : identifier[i] = identifier[max] ( literal[int] ,( identifier[max_str_size] - literal[int] )// literal[int] ) identifier[j] = identifier[max] ( literal[int] , identifier[max_str_size] - literal[int] - identifier[i] ) identifier[s] = identifier[s] [: identifier[i] ]+ literal[string] + identifier[s] [- identifier[j] :] keyword[return] identifier[s]
def trunc_str(s: str) -> str: """Truncate strings to maximum length.""" if len(s) > max_str_size: i = max(0, (max_str_size - 3) // 2) j = max(0, max_str_size - 3 - i) s = s[:i] + '...' + s[-j:] # depends on [control=['if'], data=['max_str_size']] return s
def add_alignment(self, ref_seq, annotation) -> Annotation: """ add_alignment - method for adding the alignment to an annotation :param ref_seq: List of reference sequences :type ref_seq: List :param annotation: The complete annotation :type annotation: Annotation :rtype: Annotation """ seq_features = get_seqs(ref_seq) annoated_align = {} allele = ref_seq.description.split(",")[0] locus = allele.split("*")[0].split("-")[1] for feat in seq_features: if feat in annotation.annotation: if isinstance(annotation.annotation[feat], DBSeq): seq_len = len(str(annotation.annotation[feat])) ref_len = len(seq_features[feat]) else: seq_len = len(str(annotation.annotation[feat].seq)) ref_len = len(seq_features[feat]) if seq_len == ref_len: seq = list(annotation.annotation[feat].seq) gaps = self.refdata.annoated_alignments[locus][allele][feat]['Gaps'] if self.verbose and self.verbosity > 0: self.logger.info(self.logname + " Lengths match for " + feat) self.logger.info(self.logname + " Gaps at " + feat) self.logger.info(self.logname + "-".join([",".join([str(s) for s in g]) for g in gaps])) for i in range(0, len(gaps)): for j in gaps[i]: loc = j seq.insert(loc, '-') nseq = ''.join(seq) annoated_align.update({feat: nseq}) else: in_seq = str(annotation.annotation[feat].seq) ref_seq = self.refdata.annoated_alignments[locus][allele][feat]['Seq'] alignment = pairwise2.align.globalxx(in_seq, ref_seq) if self.verbose and self.verbosity > 0: self.logger.info(self.logname + " Align2 -> in_seq != ref_len " + feat) self.logger.info(self.logname + " " + str(len(in_seq)) + " == " + str(ref_len)) annoated_align.update({feat: alignment[0][0]}) else: nseq = ''.join(list(repeat('-', len(seq_features[feat])))) annoated_align.update({feat: nseq}) annotation.aligned = annoated_align return annotation
def function[add_alignment, parameter[self, ref_seq, annotation]]: constant[ add_alignment - method for adding the alignment to an annotation :param ref_seq: List of reference sequences :type ref_seq: List :param annotation: The complete annotation :type annotation: Annotation :rtype: Annotation ] variable[seq_features] assign[=] call[name[get_seqs], parameter[name[ref_seq]]] variable[annoated_align] assign[=] dictionary[[], []] variable[allele] assign[=] call[call[name[ref_seq].description.split, parameter[constant[,]]]][constant[0]] variable[locus] assign[=] call[call[call[call[name[allele].split, parameter[constant[*]]]][constant[0]].split, parameter[constant[-]]]][constant[1]] for taget[name[feat]] in starred[name[seq_features]] begin[:] if compare[name[feat] in name[annotation].annotation] begin[:] if call[name[isinstance], parameter[call[name[annotation].annotation][name[feat]], name[DBSeq]]] begin[:] variable[seq_len] assign[=] call[name[len], parameter[call[name[str], parameter[call[name[annotation].annotation][name[feat]]]]]] variable[ref_len] assign[=] call[name[len], parameter[call[name[seq_features]][name[feat]]]] if compare[name[seq_len] equal[==] name[ref_len]] begin[:] variable[seq] assign[=] call[name[list], parameter[call[name[annotation].annotation][name[feat]].seq]] variable[gaps] assign[=] call[call[call[call[name[self].refdata.annoated_alignments][name[locus]]][name[allele]]][name[feat]]][constant[Gaps]] if <ast.BoolOp object at 0x7da2054a4310> begin[:] call[name[self].logger.info, parameter[binary_operation[binary_operation[name[self].logname + constant[ Lengths match for ]] + name[feat]]]] call[name[self].logger.info, parameter[binary_operation[binary_operation[name[self].logname + constant[ Gaps at ]] + name[feat]]]] call[name[self].logger.info, parameter[binary_operation[name[self].logname + call[constant[-].join, parameter[<ast.ListComp object at 0x7da2054a7940>]]]]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], 
call[name[len], parameter[name[gaps]]]]]] begin[:] for taget[name[j]] in starred[call[name[gaps]][name[i]]] begin[:] variable[loc] assign[=] name[j] call[name[seq].insert, parameter[name[loc], constant[-]]] variable[nseq] assign[=] call[constant[].join, parameter[name[seq]]] call[name[annoated_align].update, parameter[dictionary[[<ast.Name object at 0x7da18f00fbb0>], [<ast.Name object at 0x7da18f00fb20>]]]] name[annotation].aligned assign[=] name[annoated_align] return[name[annotation]]
keyword[def] identifier[add_alignment] ( identifier[self] , identifier[ref_seq] , identifier[annotation] )-> identifier[Annotation] : literal[string] identifier[seq_features] = identifier[get_seqs] ( identifier[ref_seq] ) identifier[annoated_align] ={} identifier[allele] = identifier[ref_seq] . identifier[description] . identifier[split] ( literal[string] )[ literal[int] ] identifier[locus] = identifier[allele] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[feat] keyword[in] identifier[seq_features] : keyword[if] identifier[feat] keyword[in] identifier[annotation] . identifier[annotation] : keyword[if] identifier[isinstance] ( identifier[annotation] . identifier[annotation] [ identifier[feat] ], identifier[DBSeq] ): identifier[seq_len] = identifier[len] ( identifier[str] ( identifier[annotation] . identifier[annotation] [ identifier[feat] ])) identifier[ref_len] = identifier[len] ( identifier[seq_features] [ identifier[feat] ]) keyword[else] : identifier[seq_len] = identifier[len] ( identifier[str] ( identifier[annotation] . identifier[annotation] [ identifier[feat] ]. identifier[seq] )) identifier[ref_len] = identifier[len] ( identifier[seq_features] [ identifier[feat] ]) keyword[if] identifier[seq_len] == identifier[ref_len] : identifier[seq] = identifier[list] ( identifier[annotation] . identifier[annotation] [ identifier[feat] ]. identifier[seq] ) identifier[gaps] = identifier[self] . identifier[refdata] . identifier[annoated_alignments] [ identifier[locus] ][ identifier[allele] ][ identifier[feat] ][ literal[string] ] keyword[if] identifier[self] . identifier[verbose] keyword[and] identifier[self] . identifier[verbosity] > literal[int] : identifier[self] . identifier[logger] . identifier[info] ( identifier[self] . identifier[logname] + literal[string] + identifier[feat] ) identifier[self] . identifier[logger] . identifier[info] ( identifier[self] . 
identifier[logname] + literal[string] + identifier[feat] ) identifier[self] . identifier[logger] . identifier[info] ( identifier[self] . identifier[logname] + literal[string] . identifier[join] ([ literal[string] . identifier[join] ([ identifier[str] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[g] ]) keyword[for] identifier[g] keyword[in] identifier[gaps] ])) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[gaps] )): keyword[for] identifier[j] keyword[in] identifier[gaps] [ identifier[i] ]: identifier[loc] = identifier[j] identifier[seq] . identifier[insert] ( identifier[loc] , literal[string] ) identifier[nseq] = literal[string] . identifier[join] ( identifier[seq] ) identifier[annoated_align] . identifier[update] ({ identifier[feat] : identifier[nseq] }) keyword[else] : identifier[in_seq] = identifier[str] ( identifier[annotation] . identifier[annotation] [ identifier[feat] ]. identifier[seq] ) identifier[ref_seq] = identifier[self] . identifier[refdata] . identifier[annoated_alignments] [ identifier[locus] ][ identifier[allele] ][ identifier[feat] ][ literal[string] ] identifier[alignment] = identifier[pairwise2] . identifier[align] . identifier[globalxx] ( identifier[in_seq] , identifier[ref_seq] ) keyword[if] identifier[self] . identifier[verbose] keyword[and] identifier[self] . identifier[verbosity] > literal[int] : identifier[self] . identifier[logger] . identifier[info] ( identifier[self] . identifier[logname] + literal[string] + identifier[feat] ) identifier[self] . identifier[logger] . identifier[info] ( identifier[self] . identifier[logname] + literal[string] + identifier[str] ( identifier[len] ( identifier[in_seq] ))+ literal[string] + identifier[str] ( identifier[ref_len] )) identifier[annoated_align] . identifier[update] ({ identifier[feat] : identifier[alignment] [ literal[int] ][ literal[int] ]}) keyword[else] : identifier[nseq] = literal[string] . 
identifier[join] ( identifier[list] ( identifier[repeat] ( literal[string] , identifier[len] ( identifier[seq_features] [ identifier[feat] ])))) identifier[annoated_align] . identifier[update] ({ identifier[feat] : identifier[nseq] }) identifier[annotation] . identifier[aligned] = identifier[annoated_align] keyword[return] identifier[annotation]
def add_alignment(self, ref_seq, annotation) -> Annotation: """ add_alignment - method for adding the alignment to an annotation :param ref_seq: List of reference sequences :type ref_seq: List :param annotation: The complete annotation :type annotation: Annotation :rtype: Annotation """ seq_features = get_seqs(ref_seq) annoated_align = {} allele = ref_seq.description.split(',')[0] locus = allele.split('*')[0].split('-')[1] for feat in seq_features: if feat in annotation.annotation: if isinstance(annotation.annotation[feat], DBSeq): seq_len = len(str(annotation.annotation[feat])) ref_len = len(seq_features[feat]) # depends on [control=['if'], data=[]] else: seq_len = len(str(annotation.annotation[feat].seq)) ref_len = len(seq_features[feat]) if seq_len == ref_len: seq = list(annotation.annotation[feat].seq) gaps = self.refdata.annoated_alignments[locus][allele][feat]['Gaps'] if self.verbose and self.verbosity > 0: self.logger.info(self.logname + ' Lengths match for ' + feat) self.logger.info(self.logname + ' Gaps at ' + feat) self.logger.info(self.logname + '-'.join([','.join([str(s) for s in g]) for g in gaps])) # depends on [control=['if'], data=[]] for i in range(0, len(gaps)): for j in gaps[i]: loc = j seq.insert(loc, '-') # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] nseq = ''.join(seq) annoated_align.update({feat: nseq}) # depends on [control=['if'], data=[]] else: in_seq = str(annotation.annotation[feat].seq) ref_seq = self.refdata.annoated_alignments[locus][allele][feat]['Seq'] alignment = pairwise2.align.globalxx(in_seq, ref_seq) if self.verbose and self.verbosity > 0: self.logger.info(self.logname + ' Align2 -> in_seq != ref_len ' + feat) self.logger.info(self.logname + ' ' + str(len(in_seq)) + ' == ' + str(ref_len)) # depends on [control=['if'], data=[]] annoated_align.update({feat: alignment[0][0]}) # depends on [control=['if'], data=['feat']] else: nseq = ''.join(list(repeat('-', len(seq_features[feat])))) 
annoated_align.update({feat: nseq}) # depends on [control=['for'], data=['feat']] annotation.aligned = annoated_align return annotation
def _handle_properties(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle type substatements.""" self._handle_restrictions(stmt, sctx)
def function[_handle_properties, parameter[self, stmt, sctx]]: constant[Handle type substatements.] call[name[self]._handle_restrictions, parameter[name[stmt], name[sctx]]]
keyword[def] identifier[_handle_properties] ( identifier[self] , identifier[stmt] : identifier[Statement] , identifier[sctx] : identifier[SchemaContext] )-> keyword[None] : literal[string] identifier[self] . identifier[_handle_restrictions] ( identifier[stmt] , identifier[sctx] )
def _handle_properties(self, stmt: Statement, sctx: SchemaContext) -> None: """Handle type substatements.""" self._handle_restrictions(stmt, sctx)
def iri_to_uri(iri, kwargs=None): '''Convert an Internationalised Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from section 3.1 of RFC 3987. Returns an ASCII native string containing the encoded result. ''' if iri is None: return iri if kwargs: iri = '%s?%s' % (to_string(iri, 'latin1'), '&'.join(('%s=%s' % kv for kv in kwargs.items()))) return urlquote(unquote_unreserved(iri))
def function[iri_to_uri, parameter[iri, kwargs]]: constant[Convert an Internationalised Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from section 3.1 of RFC 3987. Returns an ASCII native string containing the encoded result. ] if compare[name[iri] is constant[None]] begin[:] return[name[iri]] if name[kwargs] begin[:] variable[iri] assign[=] binary_operation[constant[%s?%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f09c070>, <ast.Call object at 0x7da18f09fd90>]]] return[call[name[urlquote], parameter[call[name[unquote_unreserved], parameter[name[iri]]]]]]
keyword[def] identifier[iri_to_uri] ( identifier[iri] , identifier[kwargs] = keyword[None] ): literal[string] keyword[if] identifier[iri] keyword[is] keyword[None] : keyword[return] identifier[iri] keyword[if] identifier[kwargs] : identifier[iri] = literal[string] %( identifier[to_string] ( identifier[iri] , literal[string] ), literal[string] . identifier[join] (( literal[string] % identifier[kv] keyword[for] identifier[kv] keyword[in] identifier[kwargs] . identifier[items] ()))) keyword[return] identifier[urlquote] ( identifier[unquote_unreserved] ( identifier[iri] ))
def iri_to_uri(iri, kwargs=None): """Convert an Internationalised Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from section 3.1 of RFC 3987. Returns an ASCII native string containing the encoded result. """ if iri is None: return iri # depends on [control=['if'], data=['iri']] if kwargs: iri = '%s?%s' % (to_string(iri, 'latin1'), '&'.join(('%s=%s' % kv for kv in kwargs.items()))) # depends on [control=['if'], data=[]] return urlquote(unquote_unreserved(iri))
def list_tables(self): ''' Load existing tables and their descriptions. :return: ''' if not self._tables: for table_name in os.listdir(self.db_path): self._tables[table_name] = self._load_table(table_name) return self._tables.keys()
def function[list_tables, parameter[self]]: constant[ Load existing tables and their descriptions. :return: ] if <ast.UnaryOp object at 0x7da18f8129e0> begin[:] for taget[name[table_name]] in starred[call[name[os].listdir, parameter[name[self].db_path]]] begin[:] call[name[self]._tables][name[table_name]] assign[=] call[name[self]._load_table, parameter[name[table_name]]] return[call[name[self]._tables.keys, parameter[]]]
keyword[def] identifier[list_tables] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_tables] : keyword[for] identifier[table_name] keyword[in] identifier[os] . identifier[listdir] ( identifier[self] . identifier[db_path] ): identifier[self] . identifier[_tables] [ identifier[table_name] ]= identifier[self] . identifier[_load_table] ( identifier[table_name] ) keyword[return] identifier[self] . identifier[_tables] . identifier[keys] ()
def list_tables(self): """ Load existing tables and their descriptions. :return: """ if not self._tables: for table_name in os.listdir(self.db_path): self._tables[table_name] = self._load_table(table_name) # depends on [control=['for'], data=['table_name']] # depends on [control=['if'], data=[]] return self._tables.keys()
def id_and_revision_extractor(self,xml_elt): """ Function for determing an identifier (and, where applicable, timestamp/revision information) for extracted embedded content; to be used for DINGO's xml-import hook 'id_and_revision_extractor'. This function is called - for the top-level node of the XML to be imported. - for each node at which an embedded object is extracted from the XML (when this occurs is governed by the following function, the embedding_pred It must return an identifier and, where applicable, a revision and or timestamp; in the form of a dictionary {'id':<identifier>, 'timestamp': <timestamp>}. How you format the identifier is up to you, because you will have to adopt the code in function xml_import such that the Information Objects are created with the proper identifier (consisting of qualifying namespace and uri.) In OpenIOC, the identifier is contained in the 'id' attribute of an element; the top-level 'ioc' element carries a timestamp in the 'last-modified' attribute. Note: the xml_elt is an XMLNode defined by the Python libxml2 bindings. If you have never worked with these, have a look at - Mike Kneller's brief intro: http://mikekneller.com/kb/python/libxml2python/part1 - the functions in django-dingos core.xml_utils module """ result = {'id':None, 'timestamp': None} attributes = extract_attributes(xml_elt,prefix_key_char='@') # Extract identifier: if '@id' in attributes: result['id']=attributes['@id'] # Extract time-stamp if '@last-modified' in attributes: naive = parse_datetime(attributes['@last-modified'].strip()) if naive: # Make sure that information regarding the timezone is # included in the time stamp. If it is not, we chose # utc as default timezone: if we assume that the same # producer of OpenIOC data always uses the same timezone # for filling in the 'last-modified' attribute, then # this serves the main purpose of time stamps for our # means: we can find out the latest revision of a # given piece of data. 
if not timezone.is_aware(naive): aware = timezone.make_aware(naive,timezone.utc) else: aware = naive result['timestamp']= aware return result
def function[id_and_revision_extractor, parameter[self, xml_elt]]: constant[ Function for determing an identifier (and, where applicable, timestamp/revision information) for extracted embedded content; to be used for DINGO's xml-import hook 'id_and_revision_extractor'. This function is called - for the top-level node of the XML to be imported. - for each node at which an embedded object is extracted from the XML (when this occurs is governed by the following function, the embedding_pred It must return an identifier and, where applicable, a revision and or timestamp; in the form of a dictionary {'id':<identifier>, 'timestamp': <timestamp>}. How you format the identifier is up to you, because you will have to adopt the code in function xml_import such that the Information Objects are created with the proper identifier (consisting of qualifying namespace and uri.) In OpenIOC, the identifier is contained in the 'id' attribute of an element; the top-level 'ioc' element carries a timestamp in the 'last-modified' attribute. Note: the xml_elt is an XMLNode defined by the Python libxml2 bindings. 
If you have never worked with these, have a look at - Mike Kneller's brief intro: http://mikekneller.com/kb/python/libxml2python/part1 - the functions in django-dingos core.xml_utils module ] variable[result] assign[=] dictionary[[<ast.Constant object at 0x7da20e954820>, <ast.Constant object at 0x7da20e955ae0>], [<ast.Constant object at 0x7da20e957280>, <ast.Constant object at 0x7da20e957850>]] variable[attributes] assign[=] call[name[extract_attributes], parameter[name[xml_elt]]] if compare[constant[@id] in name[attributes]] begin[:] call[name[result]][constant[id]] assign[=] call[name[attributes]][constant[@id]] if compare[constant[@last-modified] in name[attributes]] begin[:] variable[naive] assign[=] call[name[parse_datetime], parameter[call[call[name[attributes]][constant[@last-modified]].strip, parameter[]]]] if name[naive] begin[:] if <ast.UnaryOp object at 0x7da20c794d30> begin[:] variable[aware] assign[=] call[name[timezone].make_aware, parameter[name[naive], name[timezone].utc]] call[name[result]][constant[timestamp]] assign[=] name[aware] return[name[result]]
keyword[def] identifier[id_and_revision_extractor] ( identifier[self] , identifier[xml_elt] ): literal[string] identifier[result] ={ literal[string] : keyword[None] , literal[string] : keyword[None] } identifier[attributes] = identifier[extract_attributes] ( identifier[xml_elt] , identifier[prefix_key_char] = literal[string] ) keyword[if] literal[string] keyword[in] identifier[attributes] : identifier[result] [ literal[string] ]= identifier[attributes] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[attributes] : identifier[naive] = identifier[parse_datetime] ( identifier[attributes] [ literal[string] ]. identifier[strip] ()) keyword[if] identifier[naive] : keyword[if] keyword[not] identifier[timezone] . identifier[is_aware] ( identifier[naive] ): identifier[aware] = identifier[timezone] . identifier[make_aware] ( identifier[naive] , identifier[timezone] . identifier[utc] ) keyword[else] : identifier[aware] = identifier[naive] identifier[result] [ literal[string] ]= identifier[aware] keyword[return] identifier[result]
def id_and_revision_extractor(self, xml_elt): """ Function for determing an identifier (and, where applicable, timestamp/revision information) for extracted embedded content; to be used for DINGO's xml-import hook 'id_and_revision_extractor'. This function is called - for the top-level node of the XML to be imported. - for each node at which an embedded object is extracted from the XML (when this occurs is governed by the following function, the embedding_pred It must return an identifier and, where applicable, a revision and or timestamp; in the form of a dictionary {'id':<identifier>, 'timestamp': <timestamp>}. How you format the identifier is up to you, because you will have to adopt the code in function xml_import such that the Information Objects are created with the proper identifier (consisting of qualifying namespace and uri.) In OpenIOC, the identifier is contained in the 'id' attribute of an element; the top-level 'ioc' element carries a timestamp in the 'last-modified' attribute. Note: the xml_elt is an XMLNode defined by the Python libxml2 bindings. If you have never worked with these, have a look at - Mike Kneller's brief intro: http://mikekneller.com/kb/python/libxml2python/part1 - the functions in django-dingos core.xml_utils module """ result = {'id': None, 'timestamp': None} attributes = extract_attributes(xml_elt, prefix_key_char='@') # Extract identifier: if '@id' in attributes: result['id'] = attributes['@id'] # depends on [control=['if'], data=['attributes']] # Extract time-stamp if '@last-modified' in attributes: naive = parse_datetime(attributes['@last-modified'].strip()) if naive: # Make sure that information regarding the timezone is # included in the time stamp. 
If it is not, we chose # utc as default timezone: if we assume that the same # producer of OpenIOC data always uses the same timezone # for filling in the 'last-modified' attribute, then # this serves the main purpose of time stamps for our # means: we can find out the latest revision of a # given piece of data. if not timezone.is_aware(naive): aware = timezone.make_aware(naive, timezone.utc) # depends on [control=['if'], data=[]] else: aware = naive result['timestamp'] = aware # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['attributes']] return result
def find_converting_reactions(model, pair): """ Find all reactions which convert a given metabolite pair. Parameters ---------- model : cobra.Model The metabolic model under investigation. pair: tuple or list A pair of metabolite identifiers without compartment suffix. Returns ------- frozenset The set of reactions that have one of the pair on their left-hand side and the other on the right-hand side. """ first = set(find_met_in_model(model, pair[0])) second = set(find_met_in_model(model, pair[1])) hits = list() for rxn in model.reactions: # FIXME: Use `set.issubset` much more idiomatic. if len(first & set(rxn.reactants)) > 0 and len( second & set(rxn.products)) > 0: hits.append(rxn) elif len(first & set(rxn.products)) > 0 and len( second & set(rxn.reactants)) > 0: hits.append(rxn) return frozenset(hits)
def function[find_converting_reactions, parameter[model, pair]]: constant[ Find all reactions which convert a given metabolite pair. Parameters ---------- model : cobra.Model The metabolic model under investigation. pair: tuple or list A pair of metabolite identifiers without compartment suffix. Returns ------- frozenset The set of reactions that have one of the pair on their left-hand side and the other on the right-hand side. ] variable[first] assign[=] call[name[set], parameter[call[name[find_met_in_model], parameter[name[model], call[name[pair]][constant[0]]]]]] variable[second] assign[=] call[name[set], parameter[call[name[find_met_in_model], parameter[name[model], call[name[pair]][constant[1]]]]]] variable[hits] assign[=] call[name[list], parameter[]] for taget[name[rxn]] in starred[name[model].reactions] begin[:] if <ast.BoolOp object at 0x7da20c76ec20> begin[:] call[name[hits].append, parameter[name[rxn]]] return[call[name[frozenset], parameter[name[hits]]]]
keyword[def] identifier[find_converting_reactions] ( identifier[model] , identifier[pair] ): literal[string] identifier[first] = identifier[set] ( identifier[find_met_in_model] ( identifier[model] , identifier[pair] [ literal[int] ])) identifier[second] = identifier[set] ( identifier[find_met_in_model] ( identifier[model] , identifier[pair] [ literal[int] ])) identifier[hits] = identifier[list] () keyword[for] identifier[rxn] keyword[in] identifier[model] . identifier[reactions] : keyword[if] identifier[len] ( identifier[first] & identifier[set] ( identifier[rxn] . identifier[reactants] ))> literal[int] keyword[and] identifier[len] ( identifier[second] & identifier[set] ( identifier[rxn] . identifier[products] ))> literal[int] : identifier[hits] . identifier[append] ( identifier[rxn] ) keyword[elif] identifier[len] ( identifier[first] & identifier[set] ( identifier[rxn] . identifier[products] ))> literal[int] keyword[and] identifier[len] ( identifier[second] & identifier[set] ( identifier[rxn] . identifier[reactants] ))> literal[int] : identifier[hits] . identifier[append] ( identifier[rxn] ) keyword[return] identifier[frozenset] ( identifier[hits] )
def find_converting_reactions(model, pair): """ Find all reactions which convert a given metabolite pair. Parameters ---------- model : cobra.Model The metabolic model under investigation. pair: tuple or list A pair of metabolite identifiers without compartment suffix. Returns ------- frozenset The set of reactions that have one of the pair on their left-hand side and the other on the right-hand side. """ first = set(find_met_in_model(model, pair[0])) second = set(find_met_in_model(model, pair[1])) hits = list() for rxn in model.reactions: # FIXME: Use `set.issubset` much more idiomatic. if len(first & set(rxn.reactants)) > 0 and len(second & set(rxn.products)) > 0: hits.append(rxn) # depends on [control=['if'], data=[]] elif len(first & set(rxn.products)) > 0 and len(second & set(rxn.reactants)) > 0: hits.append(rxn) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rxn']] return frozenset(hits)
def run_command(self, args):
    """
    Parse command line arguments and dispatch to the registered handler.

    If the parsed namespace carries a ``func`` attribute (set via
    ``set_defaults`` on a sub-parser), it is invoked with the namespace;
    otherwise the parser's help text is printed.

    :param args: [str] command line arguments
    """
    _missing = object()
    parsed_args = self.parser.parse_args(args)
    handler = getattr(parsed_args, 'func', _missing)
    if handler is _missing:
        # no command registered a handler: fall back to the usage screen
        self.parser.print_help()
    else:
        handler(parsed_args)
def function[run_command, parameter[self, args]]: constant[ Parse command line arguments and run function registered for the appropriate command. :param args: [str] command line arguments ] variable[parsed_args] assign[=] call[name[self].parser.parse_args, parameter[name[args]]] if call[name[hasattr], parameter[name[parsed_args], constant[func]]] begin[:] call[name[parsed_args].func, parameter[name[parsed_args]]]
keyword[def] identifier[run_command] ( identifier[self] , identifier[args] ): literal[string] identifier[parsed_args] = identifier[self] . identifier[parser] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[hasattr] ( identifier[parsed_args] , literal[string] ): identifier[parsed_args] . identifier[func] ( identifier[parsed_args] ) keyword[else] : identifier[self] . identifier[parser] . identifier[print_help] ()
def run_command(self, args): """ Parse command line arguments and run function registered for the appropriate command. :param args: [str] command line arguments """ parsed_args = self.parser.parse_args(args) if hasattr(parsed_args, 'func'): parsed_args.func(parsed_args) # depends on [control=['if'], data=[]] else: self.parser.print_help()
def split_line(self, line):
    """
    Split *line* with shlex, salvaging any unparsable tail.

    shlex.split is attempted on progressively shorter prefixes of the
    input; the characters trimmed from the end are re-attached, in their
    original order, as one final argument once a prefix parses cleanly.
    """
    trimmed = []
    args = None
    while args is None:
        try:
            args = shlex.split(line)
        except ValueError:
            # unparsable (e.g. unbalanced quote): drop the last char and retry
            trimmed.append(line[-1])
            line = line[:-1]
    if trimmed:
        args.append(''.join(reversed(trimmed)))
    return args
def function[split_line, parameter[self, line]]: constant[ Try to do pure shlex.split unless it can't parse the line. In that case we trim the input line until shlex can split the args and tack the unparsable portion on as the last argument. ] variable[remainder] assign[=] list[[]] while constant[True] begin[:] <ast.Try object at 0x7da18dc9a080>
keyword[def] identifier[split_line] ( identifier[self] , identifier[line] ): literal[string] identifier[remainder] =[] keyword[while] keyword[True] : keyword[try] : identifier[args] = identifier[shlex] . identifier[split] ( identifier[line] ) keyword[except] identifier[ValueError] : identifier[remainder] . identifier[append] ( identifier[line] [- literal[int] ]) identifier[line] = identifier[line] [:- literal[int] ] keyword[else] : keyword[if] identifier[remainder] : identifier[args] . identifier[append] ( literal[string] . identifier[join] ( identifier[reversed] ( identifier[remainder] ))) keyword[return] identifier[args]
def split_line(self, line): """ Try to do pure shlex.split unless it can't parse the line. In that case we trim the input line until shlex can split the args and tack the unparsable portion on as the last argument. """ remainder = [] while True: try: args = shlex.split(line) # depends on [control=['try'], data=[]] except ValueError: remainder.append(line[-1]) line = line[:-1] # depends on [control=['except'], data=[]] else: if remainder: args.append(''.join(reversed(remainder))) # depends on [control=['if'], data=[]] return args # depends on [control=['while'], data=[]]
def export_agg_losses_ebr(ekey, dstore):
    """
    Export the event loss table (ELT) for event-based risk as CSV.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: list of file names written (empty if there are no ruptures)
    """
    # Nothing to export if no ruptures were stored.
    if 'ruptures' not in dstore:
        logging.warning('There are no ruptures in the datastore')
        return []
    name, ext = export.keyfunc(ekey)
    agg_losses = dstore['losses_by_event']
    has_rup_data = 'ruptures' in dstore
    # Extra per-rupture columns are only emitted when rupture data exists.
    extra_list = [('magnitude', F32),
                  ('centroid_lon', F32),
                  ('centroid_lat', F32),
                  ('centroid_depth', F32)] if has_rup_data else []
    oq = dstore['oqparam']
    lti = oq.lti
    # Record layout: ids/year + optional rupture columns + one loss column
    # per loss type.
    dtlist = ([('event_id', U64), ('rup_id', U32), ('year', U32)]
              + extra_list + oq.loss_dt_list())
    elt_dt = numpy.dtype(dtlist)
    elt = numpy.zeros(len(agg_losses), elt_dt)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    events = dstore['events'].value
    events_by_rupid = collections.defaultdict(list)
    for event in events:
        # NOTE(review): event ids appear to pack the rupture serial in the
        # high 32 bits, hence the integer division by TWO32 -- confirm
        # against the event-id encoding used elsewhere in the project.
        rupid = event['eid'] // TWO32
        events_by_rupid[rupid].append(event)
    year_of = year_dict(events['eid'], oq.investigation_time, oq.ses_seed)
    rup_data = {}
    event_by_eid = {}  # eid -> event
    # populate rup_data and event_by_eid
    # TODO: avoid reading the events twice
    for rgetter in getters.gen_rupture_getters(dstore):
        ruptures = rgetter.get_ruptures()
        for ebr in ruptures:
            for event in events_by_rupid[ebr.serial]:
                event_by_eid[event['eid']] = event
        if has_rup_data:
            rup_data.update(get_rup_data(ruptures))
    # Fill one ELT record per aggregate-loss row.
    for r, row in enumerate(agg_losses):
        rec = elt[r]
        event = event_by_eid[row['eid']]
        rec['event_id'] = eid = event['eid']
        rec['year'] = year_of[eid]
        if rup_data:
            rec['rup_id'] = rup_id = event['eid'] // TWO32
            (rec['magnitude'], rec['centroid_lon'], rec['centroid_lat'],
             rec['centroid_depth']) = rup_data[rup_id]
        # Copy the loss of each loss type into its named column.
        for lt, i in lti.items():
            rec[lt] = row['loss'][i]
    # Sort for a stable, human-friendly CSV ordering.
    elt.sort(order=['year', 'event_id'])
    dest = dstore.build_fname('elt', '', 'csv')
    writer.save(elt, dest)
    return writer.getsaved()
def function[export_agg_losses_ebr, parameter[ekey, dstore]]: constant[ :param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object ] if compare[constant[ruptures] <ast.NotIn object at 0x7da2590d7190> name[dstore]] begin[:] call[name[logging].warning, parameter[constant[There are no ruptures in the datastore]]] return[list[[]]] <ast.Tuple object at 0x7da1b14c6470> assign[=] call[name[export].keyfunc, parameter[name[ekey]]] variable[agg_losses] assign[=] call[name[dstore]][constant[losses_by_event]] variable[has_rup_data] assign[=] compare[constant[ruptures] in name[dstore]] variable[extra_list] assign[=] <ast.IfExp object at 0x7da1b14c4a60> variable[oq] assign[=] call[name[dstore]][constant[oqparam]] variable[lti] assign[=] name[oq].lti variable[dtlist] assign[=] binary_operation[binary_operation[list[[<ast.Tuple object at 0x7da1b14c7ee0>, <ast.Tuple object at 0x7da1b14c4eb0>, <ast.Tuple object at 0x7da1b14c6380>]] + name[extra_list]] + call[name[oq].loss_dt_list, parameter[]]] variable[elt_dt] assign[=] call[name[numpy].dtype, parameter[name[dtlist]]] variable[elt] assign[=] call[name[numpy].zeros, parameter[call[name[len], parameter[name[agg_losses]]], name[elt_dt]]] variable[writer] assign[=] call[name[writers].CsvWriter, parameter[]] variable[events] assign[=] call[name[dstore]][constant[events]].value variable[events_by_rupid] assign[=] call[name[collections].defaultdict, parameter[name[list]]] for taget[name[event]] in starred[name[events]] begin[:] variable[rupid] assign[=] binary_operation[call[name[event]][constant[eid]] <ast.FloorDiv object at 0x7da2590d6bc0> name[TWO32]] call[call[name[events_by_rupid]][name[rupid]].append, parameter[name[event]]] variable[year_of] assign[=] call[name[year_dict], parameter[call[name[events]][constant[eid]], name[oq].investigation_time, name[oq].ses_seed]] variable[rup_data] assign[=] dictionary[[], []] variable[event_by_eid] assign[=] dictionary[[], []] for taget[name[rgetter]] in 
starred[call[name[getters].gen_rupture_getters, parameter[name[dstore]]]] begin[:] variable[ruptures] assign[=] call[name[rgetter].get_ruptures, parameter[]] for taget[name[ebr]] in starred[name[ruptures]] begin[:] for taget[name[event]] in starred[call[name[events_by_rupid]][name[ebr].serial]] begin[:] call[name[event_by_eid]][call[name[event]][constant[eid]]] assign[=] name[event] if name[has_rup_data] begin[:] call[name[rup_data].update, parameter[call[name[get_rup_data], parameter[name[ruptures]]]]] for taget[tuple[[<ast.Name object at 0x7da1b26ac220>, <ast.Name object at 0x7da1b26aebc0>]]] in starred[call[name[enumerate], parameter[name[agg_losses]]]] begin[:] variable[rec] assign[=] call[name[elt]][name[r]] variable[event] assign[=] call[name[event_by_eid]][call[name[row]][constant[eid]]] call[name[rec]][constant[event_id]] assign[=] call[name[event]][constant[eid]] call[name[rec]][constant[year]] assign[=] call[name[year_of]][name[eid]] if name[rup_data] begin[:] call[name[rec]][constant[rup_id]] assign[=] binary_operation[call[name[event]][constant[eid]] <ast.FloorDiv object at 0x7da2590d6bc0> name[TWO32]] <ast.Tuple object at 0x7da207f9bd90> assign[=] call[name[rup_data]][name[rup_id]] for taget[tuple[[<ast.Name object at 0x7da207f9bdf0>, <ast.Name object at 0x7da207f98c10>]]] in starred[call[name[lti].items, parameter[]]] begin[:] call[name[rec]][name[lt]] assign[=] call[call[name[row]][constant[loss]]][name[i]] call[name[elt].sort, parameter[]] variable[dest] assign[=] call[name[dstore].build_fname, parameter[constant[elt], constant[], constant[csv]]] call[name[writer].save, parameter[name[elt], name[dest]]] return[call[name[writer].getsaved, parameter[]]]
keyword[def] identifier[export_agg_losses_ebr] ( identifier[ekey] , identifier[dstore] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[dstore] : identifier[logging] . identifier[warning] ( literal[string] ) keyword[return] [] identifier[name] , identifier[ext] = identifier[export] . identifier[keyfunc] ( identifier[ekey] ) identifier[agg_losses] = identifier[dstore] [ literal[string] ] identifier[has_rup_data] = literal[string] keyword[in] identifier[dstore] identifier[extra_list] =[( literal[string] , identifier[F32] ), ( literal[string] , identifier[F32] ), ( literal[string] , identifier[F32] ), ( literal[string] , identifier[F32] )] keyword[if] identifier[has_rup_data] keyword[else] [] identifier[oq] = identifier[dstore] [ literal[string] ] identifier[lti] = identifier[oq] . identifier[lti] identifier[dtlist] =([( literal[string] , identifier[U64] ),( literal[string] , identifier[U32] ),( literal[string] , identifier[U32] )] + identifier[extra_list] + identifier[oq] . identifier[loss_dt_list] ()) identifier[elt_dt] = identifier[numpy] . identifier[dtype] ( identifier[dtlist] ) identifier[elt] = identifier[numpy] . identifier[zeros] ( identifier[len] ( identifier[agg_losses] ), identifier[elt_dt] ) identifier[writer] = identifier[writers] . identifier[CsvWriter] ( identifier[fmt] = identifier[writers] . identifier[FIVEDIGITS] ) identifier[events] = identifier[dstore] [ literal[string] ]. identifier[value] identifier[events_by_rupid] = identifier[collections] . identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[event] keyword[in] identifier[events] : identifier[rupid] = identifier[event] [ literal[string] ]// identifier[TWO32] identifier[events_by_rupid] [ identifier[rupid] ]. identifier[append] ( identifier[event] ) identifier[year_of] = identifier[year_dict] ( identifier[events] [ literal[string] ], identifier[oq] . identifier[investigation_time] , identifier[oq] . 
identifier[ses_seed] ) identifier[rup_data] ={} identifier[event_by_eid] ={} keyword[for] identifier[rgetter] keyword[in] identifier[getters] . identifier[gen_rupture_getters] ( identifier[dstore] ): identifier[ruptures] = identifier[rgetter] . identifier[get_ruptures] () keyword[for] identifier[ebr] keyword[in] identifier[ruptures] : keyword[for] identifier[event] keyword[in] identifier[events_by_rupid] [ identifier[ebr] . identifier[serial] ]: identifier[event_by_eid] [ identifier[event] [ literal[string] ]]= identifier[event] keyword[if] identifier[has_rup_data] : identifier[rup_data] . identifier[update] ( identifier[get_rup_data] ( identifier[ruptures] )) keyword[for] identifier[r] , identifier[row] keyword[in] identifier[enumerate] ( identifier[agg_losses] ): identifier[rec] = identifier[elt] [ identifier[r] ] identifier[event] = identifier[event_by_eid] [ identifier[row] [ literal[string] ]] identifier[rec] [ literal[string] ]= identifier[eid] = identifier[event] [ literal[string] ] identifier[rec] [ literal[string] ]= identifier[year_of] [ identifier[eid] ] keyword[if] identifier[rup_data] : identifier[rec] [ literal[string] ]= identifier[rup_id] = identifier[event] [ literal[string] ]// identifier[TWO32] ( identifier[rec] [ literal[string] ], identifier[rec] [ literal[string] ], identifier[rec] [ literal[string] ], identifier[rec] [ literal[string] ])= identifier[rup_data] [ identifier[rup_id] ] keyword[for] identifier[lt] , identifier[i] keyword[in] identifier[lti] . identifier[items] (): identifier[rec] [ identifier[lt] ]= identifier[row] [ literal[string] ][ identifier[i] ] identifier[elt] . identifier[sort] ( identifier[order] =[ literal[string] , literal[string] ]) identifier[dest] = identifier[dstore] . identifier[build_fname] ( literal[string] , literal[string] , literal[string] ) identifier[writer] . identifier[save] ( identifier[elt] , identifier[dest] ) keyword[return] identifier[writer] . identifier[getsaved] ()
def export_agg_losses_ebr(ekey, dstore): """ :param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object """ if 'ruptures' not in dstore: logging.warning('There are no ruptures in the datastore') return [] # depends on [control=['if'], data=[]] (name, ext) = export.keyfunc(ekey) agg_losses = dstore['losses_by_event'] has_rup_data = 'ruptures' in dstore extra_list = [('magnitude', F32), ('centroid_lon', F32), ('centroid_lat', F32), ('centroid_depth', F32)] if has_rup_data else [] oq = dstore['oqparam'] lti = oq.lti dtlist = [('event_id', U64), ('rup_id', U32), ('year', U32)] + extra_list + oq.loss_dt_list() elt_dt = numpy.dtype(dtlist) elt = numpy.zeros(len(agg_losses), elt_dt) writer = writers.CsvWriter(fmt=writers.FIVEDIGITS) events = dstore['events'].value events_by_rupid = collections.defaultdict(list) for event in events: rupid = event['eid'] // TWO32 events_by_rupid[rupid].append(event) # depends on [control=['for'], data=['event']] year_of = year_dict(events['eid'], oq.investigation_time, oq.ses_seed) rup_data = {} event_by_eid = {} # eid -> event # populate rup_data and event_by_eid # TODO: avoid reading the events twice for rgetter in getters.gen_rupture_getters(dstore): ruptures = rgetter.get_ruptures() for ebr in ruptures: for event in events_by_rupid[ebr.serial]: event_by_eid[event['eid']] = event # depends on [control=['for'], data=['event']] # depends on [control=['for'], data=['ebr']] if has_rup_data: rup_data.update(get_rup_data(ruptures)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rgetter']] for (r, row) in enumerate(agg_losses): rec = elt[r] event = event_by_eid[row['eid']] rec['event_id'] = eid = event['eid'] rec['year'] = year_of[eid] if rup_data: rec['rup_id'] = rup_id = event['eid'] // TWO32 (rec['magnitude'], rec['centroid_lon'], rec['centroid_lat'], rec['centroid_depth']) = rup_data[rup_id] # depends on [control=['if'], data=[]] for (lt, i) in lti.items(): rec[lt] = 
row['loss'][i] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] elt.sort(order=['year', 'event_id']) dest = dstore.build_fname('elt', '', 'csv') writer.save(elt, dest) return writer.getsaved()
def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) elif self._flat_values_list: # the user has requested flattened list (1 value per row) return lambda row: row.popitem()[1] else: return lambda row: self._get_row_value_list(self._only_fields, row)
def function[_get_result_constructor, parameter[self]]: constant[ Returns a function that will be used to instantiate query results ] if <ast.UnaryOp object at 0x7da20c6c7040> begin[:] return[<ast.Lambda object at 0x7da18f723970>]
keyword[def] identifier[_get_result_constructor] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_values_list] : keyword[return] keyword[lambda] identifier[rows] : identifier[self] . identifier[model] . identifier[_construct_instance] ( identifier[rows] ) keyword[elif] identifier[self] . identifier[_flat_values_list] : keyword[return] keyword[lambda] identifier[row] : identifier[row] . identifier[popitem] ()[ literal[int] ] keyword[else] : keyword[return] keyword[lambda] identifier[row] : identifier[self] . identifier[_get_row_value_list] ( identifier[self] . identifier[_only_fields] , identifier[row] )
def _get_result_constructor(self): """ Returns a function that will be used to instantiate query results """ if not self._values_list: # we want models return lambda rows: self.model._construct_instance(rows) # depends on [control=['if'], data=[]] elif self._flat_values_list: # the user has requested flattened list (1 value per row) return lambda row: row.popitem()[1] # depends on [control=['if'], data=[]] else: return lambda row: self._get_row_value_list(self._only_fields, row)
def listdir(store, path=None):
    """Obtain a directory listing for the given path.

    If `store` provides a `listdir` method, this will be called,
    otherwise will fall back to implementation via the
    `MutableMapping` interface."""
    path = normalize_storage_path(path)
    if not hasattr(store, 'listdir'):
        # slow fallback: derive the listing by scanning every key
        return _listdir_from_keys(store, path)
    # delegate to the store's native implementation
    return store.listdir(path)
def function[listdir, parameter[store, path]]: constant[Obtain a directory listing for the given path. If `store` provides a `listdir` method, this will be called, otherwise will fall back to implementation via the `MutableMapping` interface.] variable[path] assign[=] call[name[normalize_storage_path], parameter[name[path]]] if call[name[hasattr], parameter[name[store], constant[listdir]]] begin[:] return[call[name[store].listdir, parameter[name[path]]]]
keyword[def] identifier[listdir] ( identifier[store] , identifier[path] = keyword[None] ): literal[string] identifier[path] = identifier[normalize_storage_path] ( identifier[path] ) keyword[if] identifier[hasattr] ( identifier[store] , literal[string] ): keyword[return] identifier[store] . identifier[listdir] ( identifier[path] ) keyword[else] : keyword[return] identifier[_listdir_from_keys] ( identifier[store] , identifier[path] )
def listdir(store, path=None): """Obtain a directory listing for the given path. If `store` provides a `listdir` method, this will be called, otherwise will fall back to implementation via the `MutableMapping` interface.""" path = normalize_storage_path(path) if hasattr(store, 'listdir'): # pass through return store.listdir(path) # depends on [control=['if'], data=[]] else: # slow version, iterate through all keys return _listdir_from_keys(store, path)
def _delete_file(self, line=""):
    """
    Delete an ontology file from the local models folder, after an
    interactive confirmation.

    2016-04-11: not a direct command anymore

    :param line: substring used to filter the locally available ontologies
    """
    if not self.all_ontologies:
        # nothing stored locally: show the standard "no files" help
        self._help_nofiles()
    else:
        # collect candidates whose name contains the filter string
        out = []
        for each in self.all_ontologies:
            if line in each:
                out += [each]
        choice = self._selectFromList(out, line)
        if choice:
            fullpath = self.LOCAL_MODELS + "/" + choice
            if os.path.isfile(fullpath):
                self._print("--------------")
                self._print("Are you sure? [Y/N]")
                var = input()
                if var == "y" or var == "Y":
                    # remove the file and its cached (pickled) version,
                    # then refresh the in-memory ontology list
                    os.remove(fullpath)
                    manager.del_pickled_ontology(choice)
                    self._print("<%s> was deleted succesfully." % choice)
                    self.all_ontologies = manager.get_localontologies()
                else:
                    # user aborted: leave everything untouched
                    return
            else:
                self._print("File not found.")

            # delete
            # if the deleted ontology was the active one, reset session state
            if self.current and self.current['fullpath'] == fullpath:
                self.current = None
                self.currentEntity = None
                self.prompt = _get_prompt()
    return
def function[_delete_file, parameter[self, line]]: constant[ Delete an ontology 2016-04-11: not a direct command anymore ] if <ast.UnaryOp object at 0x7da1b1115ab0> begin[:] call[name[self]._help_nofiles, parameter[]] return[None]
keyword[def] identifier[_delete_file] ( identifier[self] , identifier[line] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[all_ontologies] : identifier[self] . identifier[_help_nofiles] () keyword[else] : identifier[out] =[] keyword[for] identifier[each] keyword[in] identifier[self] . identifier[all_ontologies] : keyword[if] identifier[line] keyword[in] identifier[each] : identifier[out] +=[ identifier[each] ] identifier[choice] = identifier[self] . identifier[_selectFromList] ( identifier[out] , identifier[line] ) keyword[if] identifier[choice] : identifier[fullpath] = identifier[self] . identifier[LOCAL_MODELS] + literal[string] + identifier[choice] keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[fullpath] ): identifier[self] . identifier[_print] ( literal[string] ) identifier[self] . identifier[_print] ( literal[string] ) identifier[var] = identifier[input] () keyword[if] identifier[var] == literal[string] keyword[or] identifier[var] == literal[string] : identifier[os] . identifier[remove] ( identifier[fullpath] ) identifier[manager] . identifier[del_pickled_ontology] ( identifier[choice] ) identifier[self] . identifier[_print] ( literal[string] % identifier[choice] ) identifier[self] . identifier[all_ontologies] = identifier[manager] . identifier[get_localontologies] () keyword[else] : keyword[return] keyword[else] : identifier[self] . identifier[_print] ( literal[string] ) keyword[if] identifier[self] . identifier[current] keyword[and] identifier[self] . identifier[current] [ literal[string] ]== identifier[fullpath] : identifier[self] . identifier[current] = keyword[None] identifier[self] . identifier[currentEntity] = keyword[None] identifier[self] . identifier[prompt] = identifier[_get_prompt] () keyword[return]
def _delete_file(self, line=''): """ Delete an ontology 2016-04-11: not a direct command anymore """ if not self.all_ontologies: self._help_nofiles() # depends on [control=['if'], data=[]] else: out = [] for each in self.all_ontologies: if line in each: out += [each] # depends on [control=['if'], data=['each']] # depends on [control=['for'], data=['each']] choice = self._selectFromList(out, line) if choice: fullpath = self.LOCAL_MODELS + '/' + choice if os.path.isfile(fullpath): self._print('--------------') self._print('Are you sure? [Y/N]') var = input() if var == 'y' or var == 'Y': os.remove(fullpath) manager.del_pickled_ontology(choice) self._print('<%s> was deleted succesfully.' % choice) self.all_ontologies = manager.get_localontologies() # depends on [control=['if'], data=[]] else: return # depends on [control=['if'], data=[]] else: self._print('File not found.') # delete if self.current and self.current['fullpath'] == fullpath: self.current = None self.currentEntity = None self.prompt = _get_prompt() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return
def convergence_from_grid(self, grid):
    """Compute the summed convergence of the galaxy's mass profiles using a grid of Cartesian (y,x) \
    coordinates.

    If the galaxy has no mass profiles, a grid of zeros is returned.

    See *profiles.mass_profiles* module for details of how this is performed.

    Parameters
    ----------
    grid : ndarray
        The (y, x) coordinates in the original reference frame of the grid.
    """
    if not self.has_mass_profile:
        # no mass profiles: convergence is identically zero on the grid
        return np.zeros((grid.shape[0],))
    return sum(profile.convergence_from_grid(grid)
               for profile in self.mass_profiles)
def function[convergence_from_grid, parameter[self, grid]]: constant[Compute the summed convergence of the galaxy's mass profiles using a grid of Cartesian (y,x) coordinates. If the galaxy has no mass profiles, a grid of zeros is returned. See *profiles.mass_profiles* module for details of how this is performed. Parameters ---------- grid : ndarray The (y, x) coordinates in the original reference frame of the grid. ] if name[self].has_mass_profile begin[:] return[call[name[sum], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da204620190>, name[self].mass_profiles]]]]]
keyword[def] identifier[convergence_from_grid] ( identifier[self] , identifier[grid] ): literal[string] keyword[if] identifier[self] . identifier[has_mass_profile] : keyword[return] identifier[sum] ( identifier[map] ( keyword[lambda] identifier[p] : identifier[p] . identifier[convergence_from_grid] ( identifier[grid] ), identifier[self] . identifier[mass_profiles] )) keyword[else] : keyword[return] identifier[np] . identifier[zeros] (( identifier[grid] . identifier[shape] [ literal[int] ],))
def convergence_from_grid(self, grid): """Compute the summed convergence of the galaxy's mass profiles using a grid of Cartesian (y,x) coordinates. If the galaxy has no mass profiles, a grid of zeros is returned. See *profiles.mass_profiles* module for details of how this is performed. Parameters ---------- grid : ndarray The (y, x) coordinates in the original reference frame of the grid. """ if self.has_mass_profile: return sum(map(lambda p: p.convergence_from_grid(grid), self.mass_profiles)) # depends on [control=['if'], data=[]] else: return np.zeros((grid.shape[0],))
def _init_metadata(self):
    """stub"""
    super(EulerRotationAnswerFormRecord, self)._init_metadata()
    # Metadata describing the euler-angle rotation element of this form;
    # key insertion order matches the previous literal definition.
    self._euler_rotation_metadata = dict(
        element_id=Id(self.my_osid_object_form._authority,
                      self.my_osid_object_form._namespace,
                      'angle_values'),
        element_label='Euler Angle Values',
        instructions='Provide X, Y, and Z euler angle rotation values',
        required=True,
        read_only=False,
        linked=True,
        array=False,
        default_object_values=[{}],
        syntax='OBJECT',
        object_set=[])
def function[_init_metadata, parameter[self]]: constant[stub] call[call[name[super], parameter[name[EulerRotationAnswerFormRecord], name[self]]]._init_metadata, parameter[]] name[self]._euler_rotation_metadata assign[=] dictionary[[<ast.Constant object at 0x7da20c6ab310>, <ast.Constant object at 0x7da20c6ab7f0>, <ast.Constant object at 0x7da20c6aa4a0>, <ast.Constant object at 0x7da20c6aa9e0>, <ast.Constant object at 0x7da20c6a9060>, <ast.Constant object at 0x7da20c6ab460>, <ast.Constant object at 0x7da20c6aa770>, <ast.Constant object at 0x7da20c6ab4c0>, <ast.Constant object at 0x7da20c6a82e0>, <ast.Constant object at 0x7da20c6ab610>], [<ast.Call object at 0x7da20c6aa020>, <ast.Constant object at 0x7da20c6abca0>, <ast.Constant object at 0x7da20c6ab3d0>, <ast.Constant object at 0x7da20c6a9c30>, <ast.Constant object at 0x7da20c6aa6e0>, <ast.Constant object at 0x7da20c6aa6b0>, <ast.Constant object at 0x7da20c6a8370>, <ast.List object at 0x7da20c6aaf80>, <ast.Constant object at 0x7da20c6a8b20>, <ast.List object at 0x7da20c6a9f90>]]
keyword[def] identifier[_init_metadata] ( identifier[self] ): literal[string] identifier[super] ( identifier[EulerRotationAnswerFormRecord] , identifier[self] ). identifier[_init_metadata] () identifier[self] . identifier[_euler_rotation_metadata] ={ literal[string] : identifier[Id] ( identifier[self] . identifier[my_osid_object_form] . identifier[_authority] , identifier[self] . identifier[my_osid_object_form] . identifier[_namespace] , literal[string] ), literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[True] , literal[string] : keyword[False] , literal[string] : keyword[True] , literal[string] : keyword[False] , literal[string] :[{}], literal[string] : literal[string] , literal[string] :[] }
def _init_metadata(self): """stub""" super(EulerRotationAnswerFormRecord, self)._init_metadata() self._euler_rotation_metadata = {'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'angle_values'), 'element_label': 'Euler Angle Values', 'instructions': 'Provide X, Y, and Z euler angle rotation values', 'required': True, 'read_only': False, 'linked': True, 'array': False, 'default_object_values': [{}], 'syntax': 'OBJECT', 'object_set': []}
def trace_job(self, jobId):
    """
    Get information about the specified remote job.

    :param jobId: the job identifier
    :return: a dictionary with the information
    :raises ValueError: when the service answers with a non-200 status
    """
    auth_header = self.__check_authentication()
    trace_url = self.address + "/jobs/" + jobId + "/trace"
    response = requests.get(trace_url, headers=auth_header)
    if response.status_code == 200:
        return response.json()
    raise ValueError("Code {}. {}".format(response.status_code,
                                          response.json().get("error")))
def function[trace_job, parameter[self, jobId]]: constant[ Get information about the specified remote job :param jobId: the job identifier :return: a dictionary with the information ] variable[header] assign[=] call[name[self].__check_authentication, parameter[]] variable[status_url] assign[=] binary_operation[binary_operation[binary_operation[name[self].address + constant[/jobs/]] + name[jobId]] + constant[/trace]] variable[status_resp] assign[=] call[name[requests].get, parameter[name[status_url]]] if compare[name[status_resp].status_code not_equal[!=] constant[200]] begin[:] <ast.Raise object at 0x7da20c76dc60> return[call[name[status_resp].json, parameter[]]]
keyword[def] identifier[trace_job] ( identifier[self] , identifier[jobId] ): literal[string] identifier[header] = identifier[self] . identifier[__check_authentication] () identifier[status_url] = identifier[self] . identifier[address] + literal[string] + identifier[jobId] + literal[string] identifier[status_resp] = identifier[requests] . identifier[get] ( identifier[status_url] , identifier[headers] = identifier[header] ) keyword[if] identifier[status_resp] . identifier[status_code] != literal[int] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[status_resp] . identifier[status_code] , identifier[status_resp] . identifier[json] (). identifier[get] ( literal[string] ))) keyword[return] identifier[status_resp] . identifier[json] ()
def trace_job(self, jobId): """ Get information about the specified remote job :param jobId: the job identifier :return: a dictionary with the information """ header = self.__check_authentication() status_url = self.address + '/jobs/' + jobId + '/trace' status_resp = requests.get(status_url, headers=header) if status_resp.status_code != 200: raise ValueError('Code {}. {}'.format(status_resp.status_code, status_resp.json().get('error'))) # depends on [control=['if'], data=[]] return status_resp.json()
def get_process_pid(process_name):
    """
    Check for the process' pid file and return the pid stored there.

    :param process_name: name used to locate the pid file
    :return: the pid as an int, or None when the file is missing,
        unreadable, or does not contain a valid integer
    """
    try:
        pid_filename = get_pid_filename(process_name)
        with open(pid_filename, mode='r') as pid_file:
            # ValueError guards against a corrupt/empty pid file, which
            # previously escaped as an uncaught exception from int()
            pid = int(pid_file.read().strip())
    except (IOError, ValueError):
        pid = None
    return pid
def function[get_process_pid, parameter[process_name]]: constant[ check for process' pid file and returns pid from there ] <ast.Try object at 0x7da1b269a800> return[name[pid]]
keyword[def] identifier[get_process_pid] ( identifier[process_name] ): literal[string] keyword[try] : identifier[pid_filename] = identifier[get_pid_filename] ( identifier[process_name] ) keyword[with] identifier[open] ( identifier[pid_filename] , identifier[mode] = literal[string] ) keyword[as] identifier[pid_file] : identifier[pid] = identifier[int] ( identifier[pid_file] . identifier[read] (). identifier[strip] ()) keyword[except] identifier[IOError] : identifier[pid] = keyword[None] keyword[return] identifier[pid]
def get_process_pid(process_name): """ check for process' pid file and returns pid from there """ try: pid_filename = get_pid_filename(process_name) with open(pid_filename, mode='r') as pid_file: pid = int(pid_file.read().strip()) # depends on [control=['with'], data=['pid_file']] # depends on [control=['try'], data=[]] except IOError: pid = None # depends on [control=['except'], data=[]] return pid
def visit(folder, provenance_id, step_name, previous_step_id=None, config=None, db_url=None, is_organised=True):
    """Record all files from a folder into the database.

    Note:
    If a file has been copied from a previous processing step without any transformation, it will be detected and
    marked in the DB. The type of file will be detected and stored in the DB (NIFTI, DICOM, ...). If a files (e.g.
    a DICOM file) contains some meta-data, those will be stored in the DB.

    Arguments:
    :param folder: folder path.
    :param provenance_id: provenance label.
    :param step_name: Name of the processing step that produced the folder to visit.
    :param previous_step_id: (optional) previous processing step ID. If not defined, we assume this is the first
    processing step.
    :param config: List of flags:
        - boost: (optional) When enabled, we consider that all the files from a same folder share the same meta-data.
        When enabled, the processing is (about 2 times) faster. This option is enabled by default.
        - session_id_by_patient: Rarely, a data set might use study IDs which are unique by patient (not for the whole
        study).
        E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session
        ID.
        - visit_id_in_patient_id: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a
        case, you have to enable this flag. This will try to split PatientID into VisitID and PatientID.
        - visit_id_from_path: Enable this flag to get the visit ID from the folder hierarchy instead of DICOM meta-data
        (e.g. can be useful for PPMI).
        - repetition_from_path: Enable this flag to get the repetition ID from the folder hierarchy instead of DICOM
        meta-data (e.g. can be useful for PPMI).
    :param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file.
    :param is_organised: (optional) Disable this flag when scanning a folder that has not been organised yet
    (should only affect nifti files).
    :return: return processing step ID.
    """
    config = config if config else []
    logging.info("Visiting %s", folder)
    logging.info("-> is_organised=%s", str(is_organised))
    logging.info("-> config=%s", str(config))
    logging.info("Connecting to database...")
    db_conn = connection.Connection(db_url)
    step_id = _create_step(db_conn, step_name, provenance_id, previous_step_id)
    # Hashes of files produced by the previous step, used to flag untouched copies.
    previous_files_hash = _get_files_hash_from_step(db_conn, previous_step_id)
    # Maps a leaf folder to its repetition ID so that, in 'boost' mode, files
    # sharing a folder reuse the meta-data extracted from the first one.
    checked = dict()

    def process_file(file_path):
        """Detect the type of one file and record it into the DB."""
        logging.debug("Processing '%s'" % file_path)
        file_type = _find_type(file_path)
        if "DICOM" == file_type:
            is_copy = _hash_file(file_path) in previous_files_hash
            leaf_folder = os.path.split(file_path)[0]
            if leaf_folder not in checked or 'boost' not in config:
                # BUG FIX: the third flag must be 'visit_id_from_path' (documented
                # above); previously 'visit_id_in_patient_id' was passed twice and
                # the visit_id_from_path option was silently ignored.
                ret = dicom_import.dicom2db(file_path, file_type, is_copy, step_id, db_conn,
                                            'session_id_by_patient' in config,
                                            'visit_id_in_patient_id' in config,
                                            'visit_id_from_path' in config,
                                            'repetition_from_path' in config)
                try:
                    checked[leaf_folder] = ret['repetition_id']
                except KeyError:  # TODO: Remove it when dicom2db will be more stable
                    logging.warning("Cannot find repetition ID !")
            else:
                # Boost mode: reuse the repetition ID already found for this folder.
                dicom_import.extract_dicom(
                    file_path, file_type, is_copy, checked[leaf_folder], step_id)
        elif "NIFTI" == file_type and is_organised:
            is_copy = _hash_file(file_path) in previous_files_hash
            nifti_import.nifti2db(file_path, file_type, is_copy, step_id, db_conn,
                                  'session_id_by_patient' in config,
                                  'visit_id_in_patient_id' in config)
        elif file_type:
            is_copy = _hash_file(file_path) in previous_files_hash
            others_import.others2db(
                file_path, file_type, is_copy, step_id, db_conn)

    if sys.version_info.major == 3 and sys.version_info.minor < 5:
        # glob's recursive=True only exists since Python 3.5: walk manually.
        matches = []
        for root, dirnames, filenames in os.walk(folder):
            for filename in fnmatch.filter(filenames, '*'):
                matches.append(os.path.join(root, filename))
        for file_path in matches:
            process_file(file_path)
    else:
        for file_path in glob.iglob(os.path.join(folder, "**/*"), recursive=True):
            process_file(file_path)

    logging.info("Closing database connection...")
    db_conn.close()
    return step_id
def function[visit, parameter[folder, provenance_id, step_name, previous_step_id, config, db_url, is_organised]]: constant[Record all files from a folder into the database. Note: If a file has been copied from a previous processing step without any transformation, it will be detected and marked in the DB. The type of file will be detected and stored in the DB (NIFTI, DICOM, ...). If a files (e.g. a DICOM file) contains some meta-data, those will be stored in the DB. Arguments: :param folder: folder path. :param provenance_id: provenance label. :param step_name: Name of the processing step that produced the folder to visit. :param previous_step_id: (optional) previous processing step ID. If not defined, we assume this is the first processing step. :param config: List of flags: - boost: (optional) When enabled, we consider that all the files from a same folder share the same meta-data. When enabled, the processing is (about 2 times) faster. This option is enabled by default. - session_id_by_patient: Rarely, a data set might use study IDs which are unique by patient (not for the whole study). E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID. - visit_id_in_patient_id: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a case, you have to enable this flag. This will try to split PatientID into VisitID and PatientID. - visit_id_from_path: Enable this flag to get the visit ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). - repetition_from_path: Enable this flag to get the repetition ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). :param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file. :param is_organised: (optional) Disable this flag when scanning a folder that has not been organised yet (should only affect nifti files). :return: return processing step ID. 
] variable[config] assign[=] <ast.IfExp object at 0x7da20e748730> call[name[logging].info, parameter[constant[Visiting %s], name[folder]]] call[name[logging].info, parameter[constant[-> is_organised=%s], call[name[str], parameter[name[is_organised]]]]] call[name[logging].info, parameter[constant[-> config=%s], call[name[str], parameter[name[config]]]]] call[name[logging].info, parameter[constant[Connecting to database...]]] variable[db_conn] assign[=] call[name[connection].Connection, parameter[name[db_url]]] variable[step_id] assign[=] call[name[_create_step], parameter[name[db_conn], name[step_name], name[provenance_id], name[previous_step_id]]] variable[previous_files_hash] assign[=] call[name[_get_files_hash_from_step], parameter[name[db_conn], name[previous_step_id]]] variable[checked] assign[=] call[name[dict], parameter[]] def function[process_file, parameter[file_path]]: call[name[logging].debug, parameter[binary_operation[constant[Processing '%s'] <ast.Mod object at 0x7da2590d6920> name[file_path]]]] variable[file_type] assign[=] call[name[_find_type], parameter[name[file_path]]] if compare[constant[DICOM] equal[==] name[file_type]] begin[:] variable[is_copy] assign[=] compare[call[name[_hash_file], parameter[name[file_path]]] in name[previous_files_hash]] variable[leaf_folder] assign[=] call[call[name[os].path.split, parameter[name[file_path]]]][constant[0]] if <ast.BoolOp object at 0x7da20c6e7550> begin[:] variable[ret] assign[=] call[name[dicom_import].dicom2db, parameter[name[file_path], name[file_type], name[is_copy], name[step_id], name[db_conn], compare[constant[session_id_by_patient] in name[config]], compare[constant[visit_id_in_patient_id] in name[config]], compare[constant[visit_id_in_patient_id] in name[config]], compare[constant[repetition_from_path] in name[config]]]] <ast.Try object at 0x7da20c6e4730> if <ast.BoolOp object at 0x7da20eb29630> begin[:] variable[matches] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20e957af0>, 
<ast.Name object at 0x7da20e9541f0>, <ast.Name object at 0x7da20e956a10>]]] in starred[call[name[os].walk, parameter[name[folder]]]] begin[:] for taget[name[filename]] in starred[call[name[fnmatch].filter, parameter[name[filenames], constant[*]]]] begin[:] call[name[matches].append, parameter[call[name[os].path.join, parameter[name[root], name[filename]]]]] for taget[name[file_path]] in starred[name[matches]] begin[:] call[name[process_file], parameter[name[file_path]]] call[name[logging].info, parameter[constant[Closing database connection...]]] call[name[db_conn].close, parameter[]] return[name[step_id]]
keyword[def] identifier[visit] ( identifier[folder] , identifier[provenance_id] , identifier[step_name] , identifier[previous_step_id] = keyword[None] , identifier[config] = keyword[None] , identifier[db_url] = keyword[None] , identifier[is_organised] = keyword[True] ): literal[string] identifier[config] = identifier[config] keyword[if] identifier[config] keyword[else] [] identifier[logging] . identifier[info] ( literal[string] , identifier[folder] ) identifier[logging] . identifier[info] ( literal[string] , identifier[str] ( identifier[is_organised] )) identifier[logging] . identifier[info] ( literal[string] , identifier[str] ( identifier[config] )) identifier[logging] . identifier[info] ( literal[string] ) identifier[db_conn] = identifier[connection] . identifier[Connection] ( identifier[db_url] ) identifier[step_id] = identifier[_create_step] ( identifier[db_conn] , identifier[step_name] , identifier[provenance_id] , identifier[previous_step_id] ) identifier[previous_files_hash] = identifier[_get_files_hash_from_step] ( identifier[db_conn] , identifier[previous_step_id] ) identifier[checked] = identifier[dict] () keyword[def] identifier[process_file] ( identifier[file_path] ): identifier[logging] . identifier[debug] ( literal[string] % identifier[file_path] ) identifier[file_type] = identifier[_find_type] ( identifier[file_path] ) keyword[if] literal[string] == identifier[file_type] : identifier[is_copy] = identifier[_hash_file] ( identifier[file_path] ) keyword[in] identifier[previous_files_hash] identifier[leaf_folder] = identifier[os] . identifier[path] . identifier[split] ( identifier[file_path] )[ literal[int] ] keyword[if] identifier[leaf_folder] keyword[not] keyword[in] identifier[checked] keyword[or] literal[string] keyword[not] keyword[in] identifier[config] : identifier[ret] = identifier[dicom_import] . 
identifier[dicom2db] ( identifier[file_path] , identifier[file_type] , identifier[is_copy] , identifier[step_id] , identifier[db_conn] , literal[string] keyword[in] identifier[config] , literal[string] keyword[in] identifier[config] , literal[string] keyword[in] identifier[config] , literal[string] keyword[in] identifier[config] ) keyword[try] : identifier[checked] [ identifier[leaf_folder] ]= identifier[ret] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[logging] . identifier[warning] ( literal[string] ) keyword[else] : identifier[dicom_import] . identifier[extract_dicom] ( identifier[file_path] , identifier[file_type] , identifier[is_copy] , identifier[checked] [ identifier[leaf_folder] ], identifier[step_id] ) keyword[elif] literal[string] == identifier[file_type] keyword[and] identifier[is_organised] : identifier[is_copy] = identifier[_hash_file] ( identifier[file_path] ) keyword[in] identifier[previous_files_hash] identifier[nifti_import] . identifier[nifti2db] ( identifier[file_path] , identifier[file_type] , identifier[is_copy] , identifier[step_id] , identifier[db_conn] , literal[string] keyword[in] identifier[config] , literal[string] keyword[in] identifier[config] ) keyword[elif] identifier[file_type] : identifier[is_copy] = identifier[_hash_file] ( identifier[file_path] ) keyword[in] identifier[previous_files_hash] identifier[others_import] . identifier[others2db] ( identifier[file_path] , identifier[file_type] , identifier[is_copy] , identifier[step_id] , identifier[db_conn] ) keyword[if] identifier[sys] . identifier[version_info] . identifier[major] == literal[int] keyword[and] identifier[sys] . identifier[version_info] . identifier[minor] < literal[int] : identifier[matches] =[] keyword[for] identifier[root] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[folder] ): keyword[for] identifier[filename] keyword[in] identifier[fnmatch] . 
identifier[filter] ( identifier[filenames] , literal[string] ): identifier[matches] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] )) keyword[for] identifier[file_path] keyword[in] identifier[matches] : identifier[process_file] ( identifier[file_path] ) keyword[else] : keyword[for] identifier[file_path] keyword[in] identifier[glob] . identifier[iglob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , literal[string] ), identifier[recursive] = keyword[True] ): identifier[process_file] ( identifier[file_path] ) identifier[logging] . identifier[info] ( literal[string] ) identifier[db_conn] . identifier[close] () keyword[return] identifier[step_id]
def visit(folder, provenance_id, step_name, previous_step_id=None, config=None, db_url=None, is_organised=True): """Record all files from a folder into the database. Note: If a file has been copied from a previous processing step without any transformation, it will be detected and marked in the DB. The type of file will be detected and stored in the DB (NIFTI, DICOM, ...). If a files (e.g. a DICOM file) contains some meta-data, those will be stored in the DB. Arguments: :param folder: folder path. :param provenance_id: provenance label. :param step_name: Name of the processing step that produced the folder to visit. :param previous_step_id: (optional) previous processing step ID. If not defined, we assume this is the first processing step. :param config: List of flags: - boost: (optional) When enabled, we consider that all the files from a same folder share the same meta-data. When enabled, the processing is (about 2 times) faster. This option is enabled by default. - session_id_by_patient: Rarely, a data set might use study IDs which are unique by patient (not for the whole study). E.g.: LREN data. In such a case, you have to enable this flag. This will use PatientID + StudyID as a session ID. - visit_id_in_patient_id: Rarely, a data set might mix patient IDs and visit IDs. E.g. : LREN data. In such a case, you have to enable this flag. This will try to split PatientID into VisitID and PatientID. - visit_id_from_path: Enable this flag to get the visit ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). - repetition_from_path: Enable this flag to get the repetition ID from the folder hierarchy instead of DICOM meta-data (e.g. can be useful for PPMI). :param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file. :param is_organised: (optional) Disable this flag when scanning a folder that has not been organised yet (should only affect nifti files). :return: return processing step ID. 
""" config = config if config else [] logging.info('Visiting %s', folder) logging.info('-> is_organised=%s', str(is_organised)) logging.info('-> config=%s', str(config)) logging.info('Connecting to database...') db_conn = connection.Connection(db_url) step_id = _create_step(db_conn, step_name, provenance_id, previous_step_id) previous_files_hash = _get_files_hash_from_step(db_conn, previous_step_id) checked = dict() def process_file(file_path): logging.debug("Processing '%s'" % file_path) file_type = _find_type(file_path) if 'DICOM' == file_type: is_copy = _hash_file(file_path) in previous_files_hash leaf_folder = os.path.split(file_path)[0] if leaf_folder not in checked or 'boost' not in config: ret = dicom_import.dicom2db(file_path, file_type, is_copy, step_id, db_conn, 'session_id_by_patient' in config, 'visit_id_in_patient_id' in config, 'visit_id_in_patient_id' in config, 'repetition_from_path' in config) try: checked[leaf_folder] = ret['repetition_id'] # depends on [control=['try'], data=[]] except KeyError: # TODO: Remove it when dicom2db will be more stable logging.warning('Cannot find repetition ID !') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: dicom_import.extract_dicom(file_path, file_type, is_copy, checked[leaf_folder], step_id) # depends on [control=['if'], data=['file_type']] elif 'NIFTI' == file_type and is_organised: is_copy = _hash_file(file_path) in previous_files_hash nifti_import.nifti2db(file_path, file_type, is_copy, step_id, db_conn, 'session_id_by_patient' in config, 'visit_id_in_patient_id' in config) # depends on [control=['if'], data=[]] elif file_type: is_copy = _hash_file(file_path) in previous_files_hash others_import.others2db(file_path, file_type, is_copy, step_id, db_conn) # depends on [control=['if'], data=[]] if sys.version_info.major == 3 and sys.version_info.minor < 5: matches = [] for (root, dirnames, filenames) in os.walk(folder): for filename in fnmatch.filter(filenames, '*'): 
matches.append(os.path.join(root, filename)) # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]] for file_path in matches: process_file(file_path) # depends on [control=['for'], data=['file_path']] # depends on [control=['if'], data=[]] else: for file_path in glob.iglob(os.path.join(folder, '**/*'), recursive=True): process_file(file_path) # depends on [control=['for'], data=['file_path']] logging.info('Closing database connection...') db_conn.close() return step_id
def state_type_changed(self, model, prop_name, info):
    """Reopen state editor when state type is changed

    When the type of the observed state changes, a new model is created. The look of this controller's view
    depends on the kind of model. Therefore, we have to destroy this editor and open a new one with the new model.
    """
    msg = info['arg']
    # Only react after the type-change action has actually been performed.
    if msg.action in ('change_state_type', 'change_root_state_type') and msg.after:
        import rafcon.gui.singleton as gui_singletons
        new_state_m = msg.affected_models[-1]
        states_editor_ctrl = gui_singletons.main_window_controller.get_controller('states_editor_ctrl')
        states_editor_ctrl.recreate_state_editor(self.model, new_state_m)
def function[state_type_changed, parameter[self, model, prop_name, info]]: constant[Reopen state editor when state type is changed When the type of the observed state changes, a new model is created. The look of this controller's view depends on the kind of model. Therefore, we have to destroy this editor and open a new one with the new model. ] variable[msg] assign[=] call[name[info]][constant[arg]] if <ast.BoolOp object at 0x7da1b1b9fc10> begin[:] import module[rafcon.gui.singleton] as alias[gui_singletons] variable[msg] assign[=] call[name[info]][constant[arg]] variable[new_state_m] assign[=] call[name[msg].affected_models][<ast.UnaryOp object at 0x7da1b1b9c4c0>] variable[states_editor_ctrl] assign[=] call[name[gui_singletons].main_window_controller.get_controller, parameter[constant[states_editor_ctrl]]] call[name[states_editor_ctrl].recreate_state_editor, parameter[name[self].model, name[new_state_m]]]
keyword[def] identifier[state_type_changed] ( identifier[self] , identifier[model] , identifier[prop_name] , identifier[info] ): literal[string] identifier[msg] = identifier[info] [ literal[string] ] keyword[if] identifier[msg] . identifier[action] keyword[in] [ literal[string] , literal[string] ] keyword[and] identifier[msg] . identifier[after] : keyword[import] identifier[rafcon] . identifier[gui] . identifier[singleton] keyword[as] identifier[gui_singletons] identifier[msg] = identifier[info] [ literal[string] ] identifier[new_state_m] = identifier[msg] . identifier[affected_models] [- literal[int] ] identifier[states_editor_ctrl] = identifier[gui_singletons] . identifier[main_window_controller] . identifier[get_controller] ( literal[string] ) identifier[states_editor_ctrl] . identifier[recreate_state_editor] ( identifier[self] . identifier[model] , identifier[new_state_m] )
def state_type_changed(self, model, prop_name, info): """Reopen state editor when state type is changed When the type of the observed state changes, a new model is created. The look of this controller's view depends on the kind of model. Therefore, we have to destroy this editor and open a new one with the new model. """ msg = info['arg'] # print(self.__class__.__name__, "state_type_changed check", info) if msg.action in ['change_state_type', 'change_root_state_type'] and msg.after: # print(self.__class__.__name__, "state_type_changed") import rafcon.gui.singleton as gui_singletons msg = info['arg'] new_state_m = msg.affected_models[-1] states_editor_ctrl = gui_singletons.main_window_controller.get_controller('states_editor_ctrl') states_editor_ctrl.recreate_state_editor(self.model, new_state_m) # depends on [control=['if'], data=[]]
def is_disconnect(e, connection, cursor):
    """
    Connection state check from SQLAlchemy:
    https://bitbucket.org/sqlalchemy/sqlalchemy/src/tip/lib/sqlalchemy/dialects/postgresql/psycopg2.py
    """
    message = str(e)
    if isinstance(e, OperationalError):
        # these error messages from libpq: interfaces/libpq/fe-misc.c.
        # TODO: these are sent through gettext in libpq and we can't
        # check within other locales - consider using connection.closed
        markers = ('terminating connection',
                   'closed the connection',
                   'connection not open',
                   'could not receive data from server')
    elif isinstance(e, InterfaceError):
        # psycopg2 client errors, psycopg2/conenction.h, psycopg2/cursor.h
        markers = ('connection already closed',
                   'cursor already closed')
    elif isinstance(e, ProgrammingError):
        # not sure where this path is originally from, it may
        # be obsolete. It really says "losed", not "closed".
        markers = ("closed the connection unexpectedly",)
    else:
        return False
    return any(marker in message for marker in markers)
def function[is_disconnect, parameter[e, connection, cursor]]: constant[ Connection state check from SQLAlchemy: https://bitbucket.org/sqlalchemy/sqlalchemy/src/tip/lib/sqlalchemy/dialects/postgresql/psycopg2.py ] if call[name[isinstance], parameter[name[e], name[OperationalError]]] begin[:] return[<ast.BoolOp object at 0x7da1b184ba00>]
keyword[def] identifier[is_disconnect] ( identifier[e] , identifier[connection] , identifier[cursor] ): literal[string] keyword[if] identifier[isinstance] ( identifier[e] , identifier[OperationalError] ): keyword[return] literal[string] keyword[in] identifier[str] ( identifier[e] ) keyword[or] literal[string] keyword[in] identifier[str] ( identifier[e] ) keyword[or] literal[string] keyword[in] identifier[str] ( identifier[e] ) keyword[or] literal[string] keyword[in] identifier[str] ( identifier[e] ) keyword[elif] identifier[isinstance] ( identifier[e] , identifier[InterfaceError] ): keyword[return] literal[string] keyword[in] identifier[str] ( identifier[e] ) keyword[or] literal[string] keyword[in] identifier[str] ( identifier[e] ) keyword[elif] identifier[isinstance] ( identifier[e] , identifier[ProgrammingError] ): keyword[return] literal[string] keyword[in] identifier[str] ( identifier[e] ) keyword[else] : keyword[return] keyword[False]
def is_disconnect(e, connection, cursor): """ Connection state check from SQLAlchemy: https://bitbucket.org/sqlalchemy/sqlalchemy/src/tip/lib/sqlalchemy/dialects/postgresql/psycopg2.py """ if isinstance(e, OperationalError): # these error messages from libpq: interfaces/libpq/fe-misc.c. # TODO: these are sent through gettext in libpq and we can't # check within other locales - consider using connection.closed return 'terminating connection' in str(e) or 'closed the connection' in str(e) or 'connection not open' in str(e) or ('could not receive data from server' in str(e)) # depends on [control=['if'], data=[]] elif isinstance(e, InterfaceError): # psycopg2 client errors, psycopg2/conenction.h, psycopg2/cursor.h return 'connection already closed' in str(e) or 'cursor already closed' in str(e) # depends on [control=['if'], data=[]] elif isinstance(e, ProgrammingError): # not sure where this path is originally from, it may # be obsolete. It really says "losed", not "closed". return 'closed the connection unexpectedly' in str(e) # depends on [control=['if'], data=[]] else: return False
def read_data(self, chan=None, begtime=None, endtime=None, begsam=None,
              endsam=None, s_freq=None):
    """Read the data and creates a ChanTime instance

    Parameters
    ----------
    chan : list of strings
        names of the channels to read
    begtime : int or datedelta or datetime or list
        start of the data to read;
        if it's int or float, it's assumed it's s;
        if it's timedelta, it's assumed from the start of the recording;
        if it's datetime, it's assumed it's absolute time.
        It can also be a list of any of the above type.
    endtime : int or datedelta or datetime
        end of the data to read;
        if it's int or float, it's assumed it's s;
        if it's timedelta, it's assumed from the start of the recording;
        if it's datetime, it's assumed it's absolute time.
        It can also be a list of any of the above type.
    begsam : int
        first sample (this sample will be included)
    endsam : int
        last sample (this sample will NOT be included)
    s_freq : int
        sampling frequency of the data

    Returns
    -------
    An instance of ChanTime

    Notes
    -----
    begsam and endsam follow Python convention, which starts at zero,
    includes begsam but DOES NOT include endsam.

    If begtime and endtime are a list, they both need the exact same length
    and the data will be stored in trials.

    If neither begtime or begsam are specified, it starts from the first
    sample. If neither endtime or endsam are specified, it reads until the
    end.
    """
    data = ChanTime()
    data.start_time = self.header['start_time']
    data.s_freq = s_freq = s_freq if s_freq else self.header['s_freq']

    if chan is None:
        chan = self.header['chan_name']
    if not (isinstance(chan, list) or isinstance(chan, tuple)):
        raise TypeError('Parameter "chan" should be a list')

    add_ref = False
    if '_REF' in chan:
        add_ref = True
        # BUG FIX: rebind instead of `chan[:] = ...`, which mutated the
        # caller's list in place as a side effect.
        chan = [x for x in chan if x != '_REF']
    idx_chan = [self.header['chan_name'].index(x) for x in chan]

    if begtime is None and begsam is None:
        begsam = 0
    if endtime is None and endsam is None:
        endsam = self.header['n_samples']

    if begtime is not None:
        if not isinstance(begtime, list):
            begtime = [begtime]
        begsam = []
        for one_begtime in begtime:
            begsam.append(_convert_time_to_sample(one_begtime, self))
    if endtime is not None:
        if not isinstance(endtime, list):
            endtime = [endtime]
        endsam = []
        for one_endtime in endtime:
            endsam.append(_convert_time_to_sample(one_endtime, self))

    if not isinstance(begsam, list):
        begsam = [begsam]
    if not isinstance(endsam, list):
        endsam = [endsam]
    if len(begsam) != len(endsam):
        raise ValueError('There should be the same number of start and ' +
                         'end point')
    n_trl = len(begsam)

    # One entry per trial; each trial may have a different length.
    data.axis['chan'] = empty(n_trl, dtype='O')
    data.axis['time'] = empty(n_trl, dtype='O')
    data.data = empty(n_trl, dtype='O')

    for i, one_begsam, one_endsam in zip(range(n_trl), begsam, endsam):
        dataset = self.dataset
        lg.debug('begsam {0: 6}, endsam {1: 6}'.format(one_begsam,
                                                       one_endsam))
        dat = dataset.return_dat(idx_chan, one_begsam, one_endsam)

        # BUG FIX: copy the channel list for each trial. Previously
        # `chan_in_dat = chan` aliased the same list, so '_REF' was appended
        # once per trial, corrupting the channel axis of trials after the
        # first (and the caller's list).
        chan_in_dat = list(chan)
        if add_ref:
            # Append a flat zero reference channel to the data.
            zero_ref = zeros((1, one_endsam - one_begsam))
            dat = concatenate((dat, zero_ref), axis=0)
            chan_in_dat.append('_REF')
        data.data[i] = dat
        data.axis['chan'][i] = asarray(chan_in_dat, dtype='U')
        data.axis['time'][i] = (arange(one_begsam, one_endsam) / s_freq)

    return data
def function[read_data, parameter[self, chan, begtime, endtime, begsam, endsam, s_freq]]: constant[Read the data and creates a ChanTime instance Parameters ---------- chan : list of strings names of the channels to read begtime : int or datedelta or datetime or list start of the data to read; if it's int or float, it's assumed it's s; if it's timedelta, it's assumed from the start of the recording; if it's datetime, it's assumed it's absolute time. It can also be a list of any of the above type. endtime : int or datedelta or datetime end of the data to read; if it's int or float, it's assumed it's s; if it's timedelta, it's assumed from the start of the recording; if it's datetime, it's assumed it's absolute time. It can also be a list of any of the above type. begsam : int first sample (this sample will be included) endsam : int last sample (this sample will NOT be included) s_freq : int sampling frequency of the data Returns ------- An instance of ChanTime Notes ----- begsam and endsam follow Python convention, which starts at zero, includes begsam but DOES NOT include endsam. If begtime and endtime are a list, they both need the exact same length and the data will be stored in trials. If neither begtime or begsam are specified, it starts from the first sample. If neither endtime or endsam are specified, it reads until the end. 
] variable[data] assign[=] call[name[ChanTime], parameter[]] name[data].start_time assign[=] call[name[self].header][constant[start_time]] name[data].s_freq assign[=] <ast.IfExp object at 0x7da18dc98b20> if compare[name[chan] is constant[None]] begin[:] variable[chan] assign[=] call[name[self].header][constant[chan_name]] if <ast.UnaryOp object at 0x7da18dc9bac0> begin[:] <ast.Raise object at 0x7da18dc99c00> variable[add_ref] assign[=] constant[False] if compare[constant[_REF] in name[chan]] begin[:] variable[add_ref] assign[=] constant[True] call[name[chan]][<ast.Slice object at 0x7da18dc99810>] assign[=] <ast.ListComp object at 0x7da18dc9a740> variable[idx_chan] assign[=] <ast.ListComp object at 0x7da18dc9a800> if <ast.BoolOp object at 0x7da18dc9ba30> begin[:] variable[begsam] assign[=] constant[0] if <ast.BoolOp object at 0x7da18dc99a50> begin[:] variable[endsam] assign[=] call[name[self].header][constant[n_samples]] if compare[name[begtime] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da18dc9bdc0> begin[:] variable[begtime] assign[=] list[[<ast.Name object at 0x7da18dc996f0>]] variable[begsam] assign[=] list[[]] for taget[name[one_begtime]] in starred[name[begtime]] begin[:] call[name[begsam].append, parameter[call[name[_convert_time_to_sample], parameter[name[one_begtime], name[self]]]]] if compare[name[endtime] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da18dc9a620> begin[:] variable[endtime] assign[=] list[[<ast.Name object at 0x7da18dc9bd30>]] variable[endsam] assign[=] list[[]] for taget[name[one_endtime]] in starred[name[endtime]] begin[:] call[name[endsam].append, parameter[call[name[_convert_time_to_sample], parameter[name[one_endtime], name[self]]]]] if <ast.UnaryOp object at 0x7da18dc98400> begin[:] variable[begsam] assign[=] list[[<ast.Name object at 0x7da18dc98e50>]] if <ast.UnaryOp object at 0x7da18dc99210> begin[:] variable[endsam] assign[=] list[[<ast.Name object at 0x7da18dc9a950>]] if compare[call[name[len], 
parameter[name[begsam]]] not_equal[!=] call[name[len], parameter[name[endsam]]]] begin[:] <ast.Raise object at 0x7da18dc99e40> variable[n_trl] assign[=] call[name[len], parameter[name[begsam]]] call[name[data].axis][constant[chan]] assign[=] call[name[empty], parameter[name[n_trl]]] call[name[data].axis][constant[time]] assign[=] call[name[empty], parameter[name[n_trl]]] name[data].data assign[=] call[name[empty], parameter[name[n_trl]]] for taget[tuple[[<ast.Name object at 0x7da2044c1e70>, <ast.Name object at 0x7da2044c2c50>, <ast.Name object at 0x7da2044c3610>]]] in starred[call[name[zip], parameter[call[name[range], parameter[name[n_trl]]], name[begsam], name[endsam]]]] begin[:] variable[dataset] assign[=] name[self].dataset call[name[lg].debug, parameter[call[constant[begsam {0: 6}, endsam {1: 6}].format, parameter[name[one_begsam], name[one_endsam]]]]] variable[dat] assign[=] call[name[dataset].return_dat, parameter[name[idx_chan], name[one_begsam], name[one_endsam]]] variable[chan_in_dat] assign[=] name[chan] if name[add_ref] begin[:] variable[zero_ref] assign[=] call[name[zeros], parameter[tuple[[<ast.Constant object at 0x7da2044c11e0>, <ast.BinOp object at 0x7da2044c23e0>]]]] variable[dat] assign[=] call[name[concatenate], parameter[tuple[[<ast.Name object at 0x7da2044c2dd0>, <ast.Name object at 0x7da2044c2e30>]]]] call[name[chan_in_dat].append, parameter[constant[_REF]]] call[name[data].data][name[i]] assign[=] name[dat] call[call[name[data].axis][constant[chan]]][name[i]] assign[=] call[name[asarray], parameter[name[chan_in_dat]]] call[call[name[data].axis][constant[time]]][name[i]] assign[=] binary_operation[call[name[arange], parameter[name[one_begsam], name[one_endsam]]] / name[s_freq]] return[name[data]]
keyword[def] identifier[read_data] ( identifier[self] , identifier[chan] = keyword[None] , identifier[begtime] = keyword[None] , identifier[endtime] = keyword[None] , identifier[begsam] = keyword[None] , identifier[endsam] = keyword[None] , identifier[s_freq] = keyword[None] ): literal[string] identifier[data] = identifier[ChanTime] () identifier[data] . identifier[start_time] = identifier[self] . identifier[header] [ literal[string] ] identifier[data] . identifier[s_freq] = identifier[s_freq] = identifier[s_freq] keyword[if] identifier[s_freq] keyword[else] identifier[self] . identifier[header] [ literal[string] ] keyword[if] identifier[chan] keyword[is] keyword[None] : identifier[chan] = identifier[self] . identifier[header] [ literal[string] ] keyword[if] keyword[not] ( identifier[isinstance] ( identifier[chan] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[chan] , identifier[tuple] )): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[add_ref] = keyword[False] keyword[if] literal[string] keyword[in] identifier[chan] : identifier[add_ref] = keyword[True] identifier[chan] [:]=[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[chan] keyword[if] identifier[x] != literal[string] ] identifier[idx_chan] =[ identifier[self] . identifier[header] [ literal[string] ]. identifier[index] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[chan] ] keyword[if] identifier[begtime] keyword[is] keyword[None] keyword[and] identifier[begsam] keyword[is] keyword[None] : identifier[begsam] = literal[int] keyword[if] identifier[endtime] keyword[is] keyword[None] keyword[and] identifier[endsam] keyword[is] keyword[None] : identifier[endsam] = identifier[self] . 
identifier[header] [ literal[string] ] keyword[if] identifier[begtime] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[isinstance] ( identifier[begtime] , identifier[list] ): identifier[begtime] =[ identifier[begtime] ] identifier[begsam] =[] keyword[for] identifier[one_begtime] keyword[in] identifier[begtime] : identifier[begsam] . identifier[append] ( identifier[_convert_time_to_sample] ( identifier[one_begtime] , identifier[self] )) keyword[if] identifier[endtime] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[isinstance] ( identifier[endtime] , identifier[list] ): identifier[endtime] =[ identifier[endtime] ] identifier[endsam] =[] keyword[for] identifier[one_endtime] keyword[in] identifier[endtime] : identifier[endsam] . identifier[append] ( identifier[_convert_time_to_sample] ( identifier[one_endtime] , identifier[self] )) keyword[if] keyword[not] identifier[isinstance] ( identifier[begsam] , identifier[list] ): identifier[begsam] =[ identifier[begsam] ] keyword[if] keyword[not] identifier[isinstance] ( identifier[endsam] , identifier[list] ): identifier[endsam] =[ identifier[endsam] ] keyword[if] identifier[len] ( identifier[begsam] )!= identifier[len] ( identifier[endsam] ): keyword[raise] identifier[ValueError] ( literal[string] + literal[string] ) identifier[n_trl] = identifier[len] ( identifier[begsam] ) identifier[data] . identifier[axis] [ literal[string] ]= identifier[empty] ( identifier[n_trl] , identifier[dtype] = literal[string] ) identifier[data] . identifier[axis] [ literal[string] ]= identifier[empty] ( identifier[n_trl] , identifier[dtype] = literal[string] ) identifier[data] . 
identifier[data] = identifier[empty] ( identifier[n_trl] , identifier[dtype] = literal[string] ) keyword[for] identifier[i] , identifier[one_begsam] , identifier[one_endsam] keyword[in] identifier[zip] ( identifier[range] ( identifier[n_trl] ), identifier[begsam] , identifier[endsam] ): identifier[dataset] = identifier[self] . identifier[dataset] identifier[lg] . identifier[debug] ( literal[string] . identifier[format] ( identifier[one_begsam] , identifier[one_endsam] )) identifier[dat] = identifier[dataset] . identifier[return_dat] ( identifier[idx_chan] , identifier[one_begsam] , identifier[one_endsam] ) identifier[chan_in_dat] = identifier[chan] keyword[if] identifier[add_ref] : identifier[zero_ref] = identifier[zeros] (( literal[int] , identifier[one_endsam] - identifier[one_begsam] )) identifier[dat] = identifier[concatenate] (( identifier[dat] , identifier[zero_ref] ), identifier[axis] = literal[int] ) identifier[chan_in_dat] . identifier[append] ( literal[string] ) identifier[data] . identifier[data] [ identifier[i] ]= identifier[dat] identifier[data] . identifier[axis] [ literal[string] ][ identifier[i] ]= identifier[asarray] ( identifier[chan_in_dat] , identifier[dtype] = literal[string] ) identifier[data] . identifier[axis] [ literal[string] ][ identifier[i] ]=( identifier[arange] ( identifier[one_begsam] , identifier[one_endsam] )/ identifier[s_freq] ) keyword[return] identifier[data]
def read_data(self, chan=None, begtime=None, endtime=None, begsam=None, endsam=None, s_freq=None): """Read the data and creates a ChanTime instance Parameters ---------- chan : list of strings names of the channels to read begtime : int or datedelta or datetime or list start of the data to read; if it's int or float, it's assumed it's s; if it's timedelta, it's assumed from the start of the recording; if it's datetime, it's assumed it's absolute time. It can also be a list of any of the above type. endtime : int or datedelta or datetime end of the data to read; if it's int or float, it's assumed it's s; if it's timedelta, it's assumed from the start of the recording; if it's datetime, it's assumed it's absolute time. It can also be a list of any of the above type. begsam : int first sample (this sample will be included) endsam : int last sample (this sample will NOT be included) s_freq : int sampling frequency of the data Returns ------- An instance of ChanTime Notes ----- begsam and endsam follow Python convention, which starts at zero, includes begsam but DOES NOT include endsam. If begtime and endtime are a list, they both need the exact same length and the data will be stored in trials. If neither begtime or begsam are specified, it starts from the first sample. If neither endtime or endsam are specified, it reads until the end. 
""" data = ChanTime() data.start_time = self.header['start_time'] data.s_freq = s_freq = s_freq if s_freq else self.header['s_freq'] if chan is None: chan = self.header['chan_name'] # depends on [control=['if'], data=['chan']] if not (isinstance(chan, list) or isinstance(chan, tuple)): raise TypeError('Parameter "chan" should be a list') # depends on [control=['if'], data=[]] add_ref = False if '_REF' in chan: add_ref = True chan[:] = [x for x in chan if x != '_REF'] # depends on [control=['if'], data=['chan']] idx_chan = [self.header['chan_name'].index(x) for x in chan] if begtime is None and begsam is None: begsam = 0 # depends on [control=['if'], data=[]] if endtime is None and endsam is None: endsam = self.header['n_samples'] # depends on [control=['if'], data=[]] if begtime is not None: if not isinstance(begtime, list): begtime = [begtime] # depends on [control=['if'], data=[]] begsam = [] for one_begtime in begtime: begsam.append(_convert_time_to_sample(one_begtime, self)) # depends on [control=['for'], data=['one_begtime']] # depends on [control=['if'], data=['begtime']] if endtime is not None: if not isinstance(endtime, list): endtime = [endtime] # depends on [control=['if'], data=[]] endsam = [] for one_endtime in endtime: endsam.append(_convert_time_to_sample(one_endtime, self)) # depends on [control=['for'], data=['one_endtime']] # depends on [control=['if'], data=['endtime']] if not isinstance(begsam, list): begsam = [begsam] # depends on [control=['if'], data=[]] if not isinstance(endsam, list): endsam = [endsam] # depends on [control=['if'], data=[]] if len(begsam) != len(endsam): raise ValueError('There should be the same number of start and ' + 'end point') # depends on [control=['if'], data=[]] n_trl = len(begsam) data.axis['chan'] = empty(n_trl, dtype='O') data.axis['time'] = empty(n_trl, dtype='O') data.data = empty(n_trl, dtype='O') for (i, one_begsam, one_endsam) in zip(range(n_trl), begsam, endsam): dataset = self.dataset lg.debug('begsam {0: 
6}, endsam {1: 6}'.format(one_begsam, one_endsam)) dat = dataset.return_dat(idx_chan, one_begsam, one_endsam) chan_in_dat = chan if add_ref: zero_ref = zeros((1, one_endsam - one_begsam)) dat = concatenate((dat, zero_ref), axis=0) chan_in_dat.append('_REF') # depends on [control=['if'], data=[]] data.data[i] = dat data.axis['chan'][i] = asarray(chan_in_dat, dtype='U') data.axis['time'][i] = arange(one_begsam, one_endsam) / s_freq # depends on [control=['for'], data=[]] return data
def _parse_mtu(self, config): """Parses the config block and returns the configured IP MTU value The provided configuration block is scanned and the configured value for the IP MTU is returned as a dict object. The IP MTU value is expected to always be present in the provided config block Args: config (str): The interface configuration block to parse Return: dict: A dict object intended to be merged into the resource dict """ match = re.search(r'mtu (\d+)', config) return dict(mtu=int(match.group(1)))
def function[_parse_mtu, parameter[self, config]]: constant[Parses the config block and returns the configured IP MTU value The provided configuration block is scanned and the configured value for the IP MTU is returned as a dict object. The IP MTU value is expected to always be present in the provided config block Args: config (str): The interface configuration block to parse Return: dict: A dict object intended to be merged into the resource dict ] variable[match] assign[=] call[name[re].search, parameter[constant[mtu (\d+)], name[config]]] return[call[name[dict], parameter[]]]
keyword[def] identifier[_parse_mtu] ( identifier[self] , identifier[config] ): literal[string] identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[config] ) keyword[return] identifier[dict] ( identifier[mtu] = identifier[int] ( identifier[match] . identifier[group] ( literal[int] )))
def _parse_mtu(self, config): """Parses the config block and returns the configured IP MTU value The provided configuration block is scanned and the configured value for the IP MTU is returned as a dict object. The IP MTU value is expected to always be present in the provided config block Args: config (str): The interface configuration block to parse Return: dict: A dict object intended to be merged into the resource dict """ match = re.search('mtu (\\d+)', config) return dict(mtu=int(match.group(1)))
def update_user_password(client, userpass): """Will update the password for a userpass user""" vault_path = '' user = '' user_path_bits = userpass.split('/') if len(user_path_bits) == 1: user = user_path_bits[0] vault_path = "auth/userpass/users/%s/password" % user LOG.debug("Updating password for user %s at the default path", user) elif len(user_path_bits) == 2: mount = user_path_bits[0] user = user_path_bits[1] vault_path = "auth/%s/users/%s/password" % (mount, user) LOG.debug("Updating password for user %s at path %s", user, mount) else: client.revoke_self_token() raise aomi.exceptions.AomiCommand("invalid user path") new_password = get_password() obj = { 'user': user, 'password': new_password } client.write(vault_path, **obj)
def function[update_user_password, parameter[client, userpass]]: constant[Will update the password for a userpass user] variable[vault_path] assign[=] constant[] variable[user] assign[=] constant[] variable[user_path_bits] assign[=] call[name[userpass].split, parameter[constant[/]]] if compare[call[name[len], parameter[name[user_path_bits]]] equal[==] constant[1]] begin[:] variable[user] assign[=] call[name[user_path_bits]][constant[0]] variable[vault_path] assign[=] binary_operation[constant[auth/userpass/users/%s/password] <ast.Mod object at 0x7da2590d6920> name[user]] call[name[LOG].debug, parameter[constant[Updating password for user %s at the default path], name[user]]] variable[new_password] assign[=] call[name[get_password], parameter[]] variable[obj] assign[=] dictionary[[<ast.Constant object at 0x7da1b183ab30>, <ast.Constant object at 0x7da1b1838e20>], [<ast.Name object at 0x7da1b183a0e0>, <ast.Name object at 0x7da1b183bd00>]] call[name[client].write, parameter[name[vault_path]]]
keyword[def] identifier[update_user_password] ( identifier[client] , identifier[userpass] ): literal[string] identifier[vault_path] = literal[string] identifier[user] = literal[string] identifier[user_path_bits] = identifier[userpass] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[user_path_bits] )== literal[int] : identifier[user] = identifier[user_path_bits] [ literal[int] ] identifier[vault_path] = literal[string] % identifier[user] identifier[LOG] . identifier[debug] ( literal[string] , identifier[user] ) keyword[elif] identifier[len] ( identifier[user_path_bits] )== literal[int] : identifier[mount] = identifier[user_path_bits] [ literal[int] ] identifier[user] = identifier[user_path_bits] [ literal[int] ] identifier[vault_path] = literal[string] %( identifier[mount] , identifier[user] ) identifier[LOG] . identifier[debug] ( literal[string] , identifier[user] , identifier[mount] ) keyword[else] : identifier[client] . identifier[revoke_self_token] () keyword[raise] identifier[aomi] . identifier[exceptions] . identifier[AomiCommand] ( literal[string] ) identifier[new_password] = identifier[get_password] () identifier[obj] ={ literal[string] : identifier[user] , literal[string] : identifier[new_password] } identifier[client] . identifier[write] ( identifier[vault_path] ,** identifier[obj] )
def update_user_password(client, userpass): """Will update the password for a userpass user""" vault_path = '' user = '' user_path_bits = userpass.split('/') if len(user_path_bits) == 1: user = user_path_bits[0] vault_path = 'auth/userpass/users/%s/password' % user LOG.debug('Updating password for user %s at the default path', user) # depends on [control=['if'], data=[]] elif len(user_path_bits) == 2: mount = user_path_bits[0] user = user_path_bits[1] vault_path = 'auth/%s/users/%s/password' % (mount, user) LOG.debug('Updating password for user %s at path %s', user, mount) # depends on [control=['if'], data=[]] else: client.revoke_self_token() raise aomi.exceptions.AomiCommand('invalid user path') new_password = get_password() obj = {'user': user, 'password': new_password} client.write(vault_path, **obj)
def _poly_eval_0(self, u, ids): """Evaluate internal polynomial.""" return u * (u * (self._a[ids] * u + self._b[ids]) + self._c[ids]) + self._d[ids]
def function[_poly_eval_0, parameter[self, u, ids]]: constant[Evaluate internal polynomial.] return[binary_operation[binary_operation[name[u] * binary_operation[binary_operation[name[u] * binary_operation[binary_operation[call[name[self]._a][name[ids]] * name[u]] + call[name[self]._b][name[ids]]]] + call[name[self]._c][name[ids]]]] + call[name[self]._d][name[ids]]]]
keyword[def] identifier[_poly_eval_0] ( identifier[self] , identifier[u] , identifier[ids] ): literal[string] keyword[return] identifier[u] *( identifier[u] *( identifier[self] . identifier[_a] [ identifier[ids] ]* identifier[u] + identifier[self] . identifier[_b] [ identifier[ids] ])+ identifier[self] . identifier[_c] [ identifier[ids] ])+ identifier[self] . identifier[_d] [ identifier[ids] ]
def _poly_eval_0(self, u, ids): """Evaluate internal polynomial.""" return u * (u * (self._a[ids] * u + self._b[ids]) + self._c[ids]) + self._d[ids]
def read(self, *names): # type: ignore # noqa: F811 # type: (Any) -> Any r"""Read results from FORM. Wait for a response of FORM to obtain the results specified by the given names and return a corresponding string or (nested) list of strings. Objects to be read from FORM are expressions, $-variables and preprocessor variables. ========== ============================= name meaning ========== ============================= "F" expression F "$x" $-variable $x "$x[]" factorized $-variable $x "\`A'" preprocessor variable A ========== ============================= Note that the communication for the reading is performed within the preprocessor of FORM (i.e., at compile-time), so one may need to write ".sort" to get the correct result. If non-string objects are passed, they are considered as sequences, and the return value becomes a list corresponding to the arguments. If a sequence is passed as the argument to this method, it is guaranteed that the return value is always a list: >>> import form >>> f = form.open() >>> f.write(''' ... S a1,...,a3; ... L F1 = a1; ... L F2 = a2; ... L F3 = a3; ... .sort ... ''') >>> f.read(['F1']) ['a1'] >>> f.read(['F1', 'F2']) ['a1', 'a2'] >>> f.read(['F1', 'F2', 'F3']) ['a1', 'a2', 'a3'] A more complicated example, which returns a nested list, is >>> f.read('F1', ['F2', 'F3']) ['a1', ['a2', 'a3']] >>> f.close() """ if self._closed: raise IOError('tried to read from closed connection') if len(names) == 1 and not isinstance(names[0], string_types): names = tuple(names[0]) if len(names) == 1: return [self.read(*names)] # Guarantee to return a list. 
else: return self.read(*names) if any(not isinstance(x, string_types) for x in names): return [self.read(x) for x in names] assert self._parentin is not None assert self._parentout is not None assert self._loggingin is not None for e in names: if len(e) >= 2 and e[0] == '`' and e[-1] == "'": self._parentout.write( '#toexternal "{0}{1}"\n'.format(e, self._END_MARK)) elif len(e) >= 3 and e[0] == '$' and e[-2:] == '[]': # Special syntax "$x[]" for factorized $-variables. # NOTE: (1) isfactorized($x) is zero when $x is 0 or $x has # only one factor even after FactArg is performed. # (2) `$x[0]' is accessible even if FactArg has not been # performed. Use `$x[0]' rather than # `isfactorized($x)`. # (3) `$x[1]' is not accessible (segfault) with versions # before Sep 3 2015, if $x has only one factor and # `$x[0]' gives 1. self._parentout.write(( "#if `${0}[0]'\n" "#toexternal \"(%$)\",${0}[1]\n" "#do i=2,`${0}[0]'\n" "#toexternal \"*(%$)\",${0}[`i']\n" "#enddo\n" "#else\n" "#if termsin(${0})\n" "#toexternal \"%$\",${0}\n" "#else\n" "#toexternal \"(0)\"\n" "#endif\n" "#endif\n" "#toexternal \"{1}\"\n" ).format(e[1:-2], self._END_MARK)) elif len(e) >= 1 and e[0] == '$': self._parentout.write( '#toexternal "%${1}",{0}\n'.format(e, self._END_MARK)) else: self._parentout.write( '#toexternal "%E{1}",{0}\n'.format(e, self._END_MARK)) self._parentout.write('#redefine FORMLINKLOOPVAR "0"') self._parentout.write(self._PROMPT) self._parentout.flush() result = [] out = self._parentin.read0() out_start = 0 # start position for searching _END_MARK. 
for _e in names: while True: i = out.find(self._END_MARK, out_start) if i >= 0: result.append(out[:i]) out = out[i + self._END_MARK_LEN:] out_start = 0 break out_start = max(len(out) - self._END_MARK_LEN, 0) r, _, _ = select.select((self._parentin, self._loggingin), (), ()) if self._loggingin in r: s = self._loggingin.read() if s: i = s.rfind('\n') if i >= 0: msgs = s[:i].split('\n') if self._log is not None: self._log.extend(msgs) for msg in msgs: if (msg.find('-->') >= 0 or msg.find('==>') >= 0): if self._log: msg += '\n' msg += '\n'.join(self._log) self.close() raise FormError(msg) self._loggingin.unread(s[i + 1:]) if self._parentin in r: out += (self._parentin.read() .replace('\n', '') .replace('\\', '') .replace(' ', '')) self._parentin.unread(out) if len(names) == 0: return None elif len(names) == 1: return result[0] else: return result
def function[read, parameter[self]]: constant[Read results from FORM. Wait for a response of FORM to obtain the results specified by the given names and return a corresponding string or (nested) list of strings. Objects to be read from FORM are expressions, $-variables and preprocessor variables. ========== ============================= name meaning ========== ============================= "F" expression F "$x" $-variable $x "$x[]" factorized $-variable $x "\`A'" preprocessor variable A ========== ============================= Note that the communication for the reading is performed within the preprocessor of FORM (i.e., at compile-time), so one may need to write ".sort" to get the correct result. If non-string objects are passed, they are considered as sequences, and the return value becomes a list corresponding to the arguments. If a sequence is passed as the argument to this method, it is guaranteed that the return value is always a list: >>> import form >>> f = form.open() >>> f.write(''' ... S a1,...,a3; ... L F1 = a1; ... L F2 = a2; ... L F3 = a3; ... .sort ... 
''') >>> f.read(['F1']) ['a1'] >>> f.read(['F1', 'F2']) ['a1', 'a2'] >>> f.read(['F1', 'F2', 'F3']) ['a1', 'a2', 'a3'] A more complicated example, which returns a nested list, is >>> f.read('F1', ['F2', 'F3']) ['a1', ['a2', 'a3']] >>> f.close() ] if name[self]._closed begin[:] <ast.Raise object at 0x7da18c4cd0c0> if <ast.BoolOp object at 0x7da18c4cf6a0> begin[:] variable[names] assign[=] call[name[tuple], parameter[call[name[names]][constant[0]]]] if compare[call[name[len], parameter[name[names]]] equal[==] constant[1]] begin[:] return[list[[<ast.Call object at 0x7da18c4cdc00>]]] if call[name[any], parameter[<ast.GeneratorExp object at 0x7da18c4cf7c0>]] begin[:] return[<ast.ListComp object at 0x7da18c4cc970>] assert[compare[name[self]._parentin is_not constant[None]]] assert[compare[name[self]._parentout is_not constant[None]]] assert[compare[name[self]._loggingin is_not constant[None]]] for taget[name[e]] in starred[name[names]] begin[:] if <ast.BoolOp object at 0x7da18eb57a90> begin[:] call[name[self]._parentout.write, parameter[call[constant[#toexternal "{0}{1}" ].format, parameter[name[e], name[self]._END_MARK]]]] call[name[self]._parentout.write, parameter[constant[#redefine FORMLINKLOOPVAR "0"]]] call[name[self]._parentout.write, parameter[name[self]._PROMPT]] call[name[self]._parentout.flush, parameter[]] variable[result] assign[=] list[[]] variable[out] assign[=] call[name[self]._parentin.read0, parameter[]] variable[out_start] assign[=] constant[0] for taget[name[_e]] in starred[name[names]] begin[:] while constant[True] begin[:] variable[i] assign[=] call[name[out].find, parameter[name[self]._END_MARK, name[out_start]]] if compare[name[i] greater_or_equal[>=] constant[0]] begin[:] call[name[result].append, parameter[call[name[out]][<ast.Slice object at 0x7da18eb56c20>]]] variable[out] assign[=] call[name[out]][<ast.Slice object at 0x7da18eb56b00>] variable[out_start] assign[=] constant[0] break variable[out_start] assign[=] call[name[max], 
parameter[binary_operation[call[name[len], parameter[name[out]]] - name[self]._END_MARK_LEN], constant[0]]] <ast.Tuple object at 0x7da18eb55270> assign[=] call[name[select].select, parameter[tuple[[<ast.Attribute object at 0x7da18eb562c0>, <ast.Attribute object at 0x7da18eb56aa0>]], tuple[[]], tuple[[]]]] if compare[name[self]._loggingin in name[r]] begin[:] variable[s] assign[=] call[name[self]._loggingin.read, parameter[]] if name[s] begin[:] variable[i] assign[=] call[name[s].rfind, parameter[constant[ ]]] if compare[name[i] greater_or_equal[>=] constant[0]] begin[:] variable[msgs] assign[=] call[call[name[s]][<ast.Slice object at 0x7da18eb569e0>].split, parameter[constant[ ]]] if compare[name[self]._log is_not constant[None]] begin[:] call[name[self]._log.extend, parameter[name[msgs]]] for taget[name[msg]] in starred[name[msgs]] begin[:] if <ast.BoolOp object at 0x7da18eb54be0> begin[:] if name[self]._log begin[:] <ast.AugAssign object at 0x7da18eb56f20> <ast.AugAssign object at 0x7da18eb54220> call[name[self].close, parameter[]] <ast.Raise object at 0x7da2054a6c20> call[name[self]._loggingin.unread, parameter[call[name[s]][<ast.Slice object at 0x7da2054a7a60>]]] if compare[name[self]._parentin in name[r]] begin[:] <ast.AugAssign object at 0x7da2054a6740> call[name[self]._parentin.unread, parameter[name[out]]] if compare[call[name[len], parameter[name[names]]] equal[==] constant[0]] begin[:] return[constant[None]]
keyword[def] identifier[read] ( identifier[self] ,* identifier[names] ): literal[string] keyword[if] identifier[self] . identifier[_closed] : keyword[raise] identifier[IOError] ( literal[string] ) keyword[if] identifier[len] ( identifier[names] )== literal[int] keyword[and] keyword[not] identifier[isinstance] ( identifier[names] [ literal[int] ], identifier[string_types] ): identifier[names] = identifier[tuple] ( identifier[names] [ literal[int] ]) keyword[if] identifier[len] ( identifier[names] )== literal[int] : keyword[return] [ identifier[self] . identifier[read] (* identifier[names] )] keyword[else] : keyword[return] identifier[self] . identifier[read] (* identifier[names] ) keyword[if] identifier[any] ( keyword[not] identifier[isinstance] ( identifier[x] , identifier[string_types] ) keyword[for] identifier[x] keyword[in] identifier[names] ): keyword[return] [ identifier[self] . identifier[read] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[names] ] keyword[assert] identifier[self] . identifier[_parentin] keyword[is] keyword[not] keyword[None] keyword[assert] identifier[self] . identifier[_parentout] keyword[is] keyword[not] keyword[None] keyword[assert] identifier[self] . identifier[_loggingin] keyword[is] keyword[not] keyword[None] keyword[for] identifier[e] keyword[in] identifier[names] : keyword[if] identifier[len] ( identifier[e] )>= literal[int] keyword[and] identifier[e] [ literal[int] ]== literal[string] keyword[and] identifier[e] [- literal[int] ]== literal[string] : identifier[self] . identifier[_parentout] . identifier[write] ( literal[string] . identifier[format] ( identifier[e] , identifier[self] . identifier[_END_MARK] )) keyword[elif] identifier[len] ( identifier[e] )>= literal[int] keyword[and] identifier[e] [ literal[int] ]== literal[string] keyword[and] identifier[e] [- literal[int] :]== literal[string] : identifier[self] . identifier[_parentout] . 
identifier[write] (( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ). identifier[format] ( identifier[e] [ literal[int] :- literal[int] ], identifier[self] . identifier[_END_MARK] )) keyword[elif] identifier[len] ( identifier[e] )>= literal[int] keyword[and] identifier[e] [ literal[int] ]== literal[string] : identifier[self] . identifier[_parentout] . identifier[write] ( literal[string] . identifier[format] ( identifier[e] , identifier[self] . identifier[_END_MARK] )) keyword[else] : identifier[self] . identifier[_parentout] . identifier[write] ( literal[string] . identifier[format] ( identifier[e] , identifier[self] . identifier[_END_MARK] )) identifier[self] . identifier[_parentout] . identifier[write] ( literal[string] ) identifier[self] . identifier[_parentout] . identifier[write] ( identifier[self] . identifier[_PROMPT] ) identifier[self] . identifier[_parentout] . identifier[flush] () identifier[result] =[] identifier[out] = identifier[self] . identifier[_parentin] . identifier[read0] () identifier[out_start] = literal[int] keyword[for] identifier[_e] keyword[in] identifier[names] : keyword[while] keyword[True] : identifier[i] = identifier[out] . identifier[find] ( identifier[self] . identifier[_END_MARK] , identifier[out_start] ) keyword[if] identifier[i] >= literal[int] : identifier[result] . identifier[append] ( identifier[out] [: identifier[i] ]) identifier[out] = identifier[out] [ identifier[i] + identifier[self] . identifier[_END_MARK_LEN] :] identifier[out_start] = literal[int] keyword[break] identifier[out_start] = identifier[max] ( identifier[len] ( identifier[out] )- identifier[self] . identifier[_END_MARK_LEN] , literal[int] ) identifier[r] , identifier[_] , identifier[_] = identifier[select] . identifier[select] (( identifier[self] . identifier[_parentin] , identifier[self] . 
identifier[_loggingin] ), (),()) keyword[if] identifier[self] . identifier[_loggingin] keyword[in] identifier[r] : identifier[s] = identifier[self] . identifier[_loggingin] . identifier[read] () keyword[if] identifier[s] : identifier[i] = identifier[s] . identifier[rfind] ( literal[string] ) keyword[if] identifier[i] >= literal[int] : identifier[msgs] = identifier[s] [: identifier[i] ]. identifier[split] ( literal[string] ) keyword[if] identifier[self] . identifier[_log] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_log] . identifier[extend] ( identifier[msgs] ) keyword[for] identifier[msg] keyword[in] identifier[msgs] : keyword[if] ( identifier[msg] . identifier[find] ( literal[string] )>= literal[int] keyword[or] identifier[msg] . identifier[find] ( literal[string] )>= literal[int] ): keyword[if] identifier[self] . identifier[_log] : identifier[msg] += literal[string] identifier[msg] += literal[string] . identifier[join] ( identifier[self] . identifier[_log] ) identifier[self] . identifier[close] () keyword[raise] identifier[FormError] ( identifier[msg] ) identifier[self] . identifier[_loggingin] . identifier[unread] ( identifier[s] [ identifier[i] + literal[int] :]) keyword[if] identifier[self] . identifier[_parentin] keyword[in] identifier[r] : identifier[out] +=( identifier[self] . identifier[_parentin] . identifier[read] () . identifier[replace] ( literal[string] , literal[string] ) . identifier[replace] ( literal[string] , literal[string] ) . identifier[replace] ( literal[string] , literal[string] )) identifier[self] . identifier[_parentin] . identifier[unread] ( identifier[out] ) keyword[if] identifier[len] ( identifier[names] )== literal[int] : keyword[return] keyword[None] keyword[elif] identifier[len] ( identifier[names] )== literal[int] : keyword[return] identifier[result] [ literal[int] ] keyword[else] : keyword[return] identifier[result]
def read(self, *names): # type: ignore # noqa: F811 # type: (Any) -> Any 'Read results from FORM.\n\n Wait for a response of FORM to obtain the results specified by\n the given names and return a corresponding string or (nested) list of\n strings. Objects to be read from FORM are expressions, $-variables and\n preprocessor variables.\n\n ========== =============================\n name meaning\n ========== =============================\n "F" expression F\n "$x" $-variable $x\n "$x[]" factorized $-variable $x\n "\\`A\'" preprocessor variable A\n ========== =============================\n\n Note that the communication for the reading is performed within the\n preprocessor of FORM (i.e., at compile-time), so one may need to write\n ".sort" to get the correct result.\n\n If non-string objects are passed, they are considered as sequences, and\n the return value becomes a list corresponding to the arguments. If\n a sequence is passed as the argument to this method, it is guaranteed\n that the return value is always a list:\n\n >>> import form\n >>> f = form.open()\n >>> f.write(\'\'\'\n ... S a1,...,a3;\n ... L F1 = a1;\n ... L F2 = a2;\n ... L F3 = a3;\n ... .sort\n ... \'\'\')\n\n >>> f.read([\'F1\'])\n [\'a1\']\n >>> f.read([\'F1\', \'F2\'])\n [\'a1\', \'a2\']\n >>> f.read([\'F1\', \'F2\', \'F3\'])\n [\'a1\', \'a2\', \'a3\']\n\n A more complicated example, which returns a nested list, is\n\n >>> f.read(\'F1\', [\'F2\', \'F3\'])\n [\'a1\', [\'a2\', \'a3\']]\n\n >>> f.close()\n ' if self._closed: raise IOError('tried to read from closed connection') # depends on [control=['if'], data=[]] if len(names) == 1 and (not isinstance(names[0], string_types)): names = tuple(names[0]) if len(names) == 1: return [self.read(*names)] # Guarantee to return a list. 
# depends on [control=['if'], data=[]] else: return self.read(*names) # depends on [control=['if'], data=[]] if any((not isinstance(x, string_types) for x in names)): return [self.read(x) for x in names] # depends on [control=['if'], data=[]] assert self._parentin is not None assert self._parentout is not None assert self._loggingin is not None for e in names: if len(e) >= 2 and e[0] == '`' and (e[-1] == "'"): self._parentout.write('#toexternal "{0}{1}"\n'.format(e, self._END_MARK)) # depends on [control=['if'], data=[]] elif len(e) >= 3 and e[0] == '$' and (e[-2:] == '[]'): # Special syntax "$x[]" for factorized $-variables. # NOTE: (1) isfactorized($x) is zero when $x is 0 or $x has # only one factor even after FactArg is performed. # (2) `$x[0]' is accessible even if FactArg has not been # performed. Use `$x[0]' rather than # `isfactorized($x)`. # (3) `$x[1]' is not accessible (segfault) with versions # before Sep 3 2015, if $x has only one factor and # `$x[0]' gives 1. self._parentout.write('#if `${0}[0]\'\n#toexternal "(%$)",${0}[1]\n#do i=2,`${0}[0]\'\n#toexternal "*(%$)",${0}[`i\']\n#enddo\n#else\n#if termsin(${0})\n#toexternal "%$",${0}\n#else\n#toexternal "(0)"\n#endif\n#endif\n#toexternal "{1}"\n'.format(e[1:-2], self._END_MARK)) # depends on [control=['if'], data=[]] elif len(e) >= 1 and e[0] == '$': self._parentout.write('#toexternal "%${1}",{0}\n'.format(e, self._END_MARK)) # depends on [control=['if'], data=[]] else: self._parentout.write('#toexternal "%E{1}",{0}\n'.format(e, self._END_MARK)) # depends on [control=['for'], data=['e']] self._parentout.write('#redefine FORMLINKLOOPVAR "0"') self._parentout.write(self._PROMPT) self._parentout.flush() result = [] out = self._parentin.read0() out_start = 0 # start position for searching _END_MARK. 
for _e in names: while True: i = out.find(self._END_MARK, out_start) if i >= 0: result.append(out[:i]) out = out[i + self._END_MARK_LEN:] out_start = 0 break # depends on [control=['if'], data=['i']] out_start = max(len(out) - self._END_MARK_LEN, 0) (r, _, _) = select.select((self._parentin, self._loggingin), (), ()) if self._loggingin in r: s = self._loggingin.read() if s: i = s.rfind('\n') if i >= 0: msgs = s[:i].split('\n') if self._log is not None: self._log.extend(msgs) # depends on [control=['if'], data=[]] for msg in msgs: if msg.find('-->') >= 0 or msg.find('==>') >= 0: if self._log: msg += '\n' msg += '\n'.join(self._log) # depends on [control=['if'], data=[]] self.close() raise FormError(msg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['msg']] # depends on [control=['if'], data=['i']] self._loggingin.unread(s[i + 1:]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if self._parentin in r: out += self._parentin.read().replace('\n', '').replace('\\', '').replace(' ', '') # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['for'], data=[]] self._parentin.unread(out) if len(names) == 0: return None # depends on [control=['if'], data=[]] elif len(names) == 1: return result[0] # depends on [control=['if'], data=[]] else: return result
def _lex(s):
    """
    Lex the input string according to _tsql_lex_re.

    Yields (gid, token, line_number) tuples, where ``gid`` is the index of
    the regex group that matched and ``token`` is the matched text.

    Raises:
        TSQLSyntaxError: when group 11 (the "unexpected input" catch-all)
            matches, carrying the line number, offset, and offending line.
    """
    s += '.'  # make sure there's a terminator to know when to stop parsing
    try:
        for lineno, line in enumerate(s.splitlines(), 1):
            for m in _tsql_lex_re.finditer(line):
                gid = m.lastindex
                if gid == 11:  # catch-all group: unrecognized input
                    raise TSQLSyntaxError('unexpected input',
                                          lineno=lineno,
                                          offset=m.start(),
                                          text=line)
                yield (gid, m.group(gid), lineno)
    except StopIteration:
        # NOTE(review): vestigial guard — on Python 3.7+ (PEP 479) a
        # StopIteration raised inside a generator is converted to
        # RuntimeError, so this branch should never fire; kept to avoid
        # behavior change on older interpreters.
        pass
def function[_lex, parameter[s]]: constant[ Lex the input string according to _tsql_lex_re. Yields (gid, token, line_number) ] <ast.AugAssign object at 0x7da18f09d480> variable[lines] assign[=] call[name[enumerate], parameter[call[name[s].splitlines, parameter[]], constant[1]]] variable[lineno] assign[=] constant[0] <ast.Try object at 0x7da18f09ef50>
keyword[def] identifier[_lex] ( identifier[s] ): literal[string] identifier[s] += literal[string] identifier[lines] = identifier[enumerate] ( identifier[s] . identifier[splitlines] (), literal[int] ) identifier[lineno] = identifier[pos] = literal[int] keyword[try] : keyword[for] identifier[lineno] , identifier[line] keyword[in] identifier[lines] : identifier[matches] = identifier[_tsql_lex_re] . identifier[finditer] ( identifier[line] ) keyword[for] identifier[m] keyword[in] identifier[matches] : identifier[gid] = identifier[m] . identifier[lastindex] keyword[if] identifier[gid] == literal[int] : keyword[raise] identifier[TSQLSyntaxError] ( literal[string] , identifier[lineno] = identifier[lineno] , identifier[offset] = identifier[m] . identifier[start] (), identifier[text] = identifier[line] ) keyword[else] : identifier[token] = identifier[m] . identifier[group] ( identifier[gid] ) keyword[yield] ( identifier[gid] , identifier[token] , identifier[lineno] ) keyword[except] identifier[StopIteration] : keyword[pass]
def _lex(s): """ Lex the input string according to _tsql_lex_re. Yields (gid, token, line_number) """ s += '.' # make sure there's a terminator to know when to stop parsing lines = enumerate(s.splitlines(), 1) lineno = pos = 0 try: for (lineno, line) in lines: matches = _tsql_lex_re.finditer(line) for m in matches: gid = m.lastindex if gid == 11: raise TSQLSyntaxError('unexpected input', lineno=lineno, offset=m.start(), text=line) # depends on [control=['if'], data=[]] else: token = m.group(gid) yield (gid, token, lineno) # depends on [control=['for'], data=['m']] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]] except StopIteration: pass # depends on [control=['except'], data=[]]
def get_sample_frame(self):
    """Return the first available image in the observation result.

    Frames take precedence; when there are none, the first entry of the
    results mapping is used instead. Returns None when both are empty.
    """
    for candidate in list(self.frames) + list(self.results.values()):
        return candidate.open()
    return None
def function[get_sample_frame, parameter[self]]: constant[Return first available image in observation result] for taget[name[frame]] in starred[name[self].frames] begin[:] return[call[name[frame].open, parameter[]]] for taget[name[res]] in starred[call[name[self].results.values, parameter[]]] begin[:] return[call[name[res].open, parameter[]]] return[constant[None]]
keyword[def] identifier[get_sample_frame] ( identifier[self] ): literal[string] keyword[for] identifier[frame] keyword[in] identifier[self] . identifier[frames] : keyword[return] identifier[frame] . identifier[open] () keyword[for] identifier[res] keyword[in] identifier[self] . identifier[results] . identifier[values] (): keyword[return] identifier[res] . identifier[open] () keyword[return] keyword[None]
def get_sample_frame(self): """Return first available image in observation result""" for frame in self.frames: return frame.open() # depends on [control=['for'], data=['frame']] for res in self.results.values(): return res.open() # depends on [control=['for'], data=['res']] return None
async def invites(self):
    """|coro|

    Returns a list of all active instant invites from this channel.

    You must have :attr:`~.Permissions.manage_guild` to get this information.

    Raises
    -------
    Forbidden
        You do not have proper permissions to get the information.
    HTTPException
        An error occurred while fetching the information.

    Returns
    -------
    List[:class:`Invite`]
        The list of invites that are currently active.
    """
    state = self._state
    payload = await state.http.invites_from_channel(self.id)
    # The raw payload entries lack channel/guild context; attach it
    # before wrapping each entry in an Invite.
    for entry in payload:
        entry['channel'] = self
        entry['guild'] = self.guild
    return [Invite(state=state, data=entry) for entry in payload]
<ast.AsyncFunctionDef object at 0x7da1b20c8130>
keyword[async] keyword[def] identifier[invites] ( identifier[self] ): literal[string] identifier[state] = identifier[self] . identifier[_state] identifier[data] = keyword[await] identifier[state] . identifier[http] . identifier[invites_from_channel] ( identifier[self] . identifier[id] ) identifier[result] =[] keyword[for] identifier[invite] keyword[in] identifier[data] : identifier[invite] [ literal[string] ]= identifier[self] identifier[invite] [ literal[string] ]= identifier[self] . identifier[guild] identifier[result] . identifier[append] ( identifier[Invite] ( identifier[state] = identifier[state] , identifier[data] = identifier[invite] )) keyword[return] identifier[result]
async def invites(self): """|coro| Returns a list of all active instant invites from this channel. You must have :attr:`~.Permissions.manage_guild` to get this information. Raises ------- Forbidden You do not have proper permissions to get the information. HTTPException An error occurred while fetching the information. Returns ------- List[:class:`Invite`] The list of invites that are currently active. """ state = self._state data = await state.http.invites_from_channel(self.id) result = [] for invite in data: invite['channel'] = self invite['guild'] = self.guild result.append(Invite(state=state, data=invite)) # depends on [control=['for'], data=['invite']] return result
async def jsk_show(self, ctx: commands.Context):
    """
    Shows Jishaku in the help command.
    """
    if self.jsk.hidden:
        # Currently hidden: flip the flag and confirm visibility.
        self.jsk.hidden = False
        await ctx.send("Jishaku is now visible.")
        return
    # Already visible: report it, handing back the sent message.
    return await ctx.send("Jishaku is already visible.")
<ast.AsyncFunctionDef object at 0x7da1b1e69150>
keyword[async] keyword[def] identifier[jsk_show] ( identifier[self] , identifier[ctx] : identifier[commands] . identifier[Context] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[jsk] . identifier[hidden] : keyword[return] keyword[await] identifier[ctx] . identifier[send] ( literal[string] ) identifier[self] . identifier[jsk] . identifier[hidden] = keyword[False] keyword[await] identifier[ctx] . identifier[send] ( literal[string] )
async def jsk_show(self, ctx: commands.Context): """ Shows Jishaku in the help command. """ if not self.jsk.hidden: return await ctx.send('Jishaku is already visible.') # depends on [control=['if'], data=[]] self.jsk.hidden = False await ctx.send('Jishaku is now visible.')
def fetch_exac_constraint():
    """Fetch the file with ExAC constraint scores.

    Tries the Broad Institute FTP server first; if that is unreachable,
    falls back to the copy hosted in the gnomAD Google Cloud bucket.

    Returns:
        exac_lines(iterable(str)): lines of the constraint-score file
    """
    file_name = 'fordist_cleaned_exac_r03_march16_z_pli_rec_null_data.txt'
    url = ('ftp://ftp.broadinstitute.org/pub/ExAC_release/release0.3/functional_gene_constraint'
           '/{0}').format(file_name)

    LOG.info("Fetching ExAC genes")

    try:
        exac_lines = fetch_resource(url)
    except URLError:  # binding was unused; the error itself is only a fallback trigger
        LOG.info("Failed to fetch exac constraint scores file from ftp server")
        LOG.info("Try to fetch from google bucket...")
        url = ("https://storage.googleapis.com/gnomad-public/legacy/exacv1_downloads/release0.3.1"
               "/manuscript_data/forweb_cleaned_exac_r03_march16_z_data_pLI.txt.gz")
        exac_lines = fetch_resource(url)

    return exac_lines
def function[fetch_exac_constraint, parameter[]]: constant[Fetch the file with exac constraint scores Returns: exac_lines(iterable(str)) ] variable[file_name] assign[=] constant[fordist_cleaned_exac_r03_march16_z_pli_rec_null_data.txt] variable[url] assign[=] call[constant[ftp://ftp.broadinstitute.org/pub/ExAC_release/release0.3/functional_gene_constraint/{0}].format, parameter[name[file_name]]] call[name[LOG].info, parameter[constant[Fetching ExAC genes]]] <ast.Try object at 0x7da18fe901c0> variable[exac_lines] assign[=] call[name[fetch_resource], parameter[name[url]]] return[name[exac_lines]]
keyword[def] identifier[fetch_exac_constraint] (): literal[string] identifier[file_name] = literal[string] identifier[url] =( literal[string] literal[string] ). identifier[format] ( identifier[file_name] ) identifier[LOG] . identifier[info] ( literal[string] ) keyword[try] : identifier[exac_lines] = identifier[fetch_resource] ( identifier[url] ) keyword[except] identifier[URLError] keyword[as] identifier[err] : identifier[LOG] . identifier[info] ( literal[string] ) identifier[LOG] . identifier[info] ( literal[string] ) identifier[url] =( literal[string] literal[string] ) identifier[exac_lines] = identifier[fetch_resource] ( identifier[url] ) keyword[return] identifier[exac_lines]
def fetch_exac_constraint(): """Fetch the file with exac constraint scores Returns: exac_lines(iterable(str)) """ file_name = 'fordist_cleaned_exac_r03_march16_z_pli_rec_null_data.txt' url = 'ftp://ftp.broadinstitute.org/pub/ExAC_release/release0.3/functional_gene_constraint/{0}'.format(file_name) LOG.info('Fetching ExAC genes') try: exac_lines = fetch_resource(url) # depends on [control=['try'], data=[]] except URLError as err: LOG.info('Failed to fetch exac constraint scores file from ftp server') LOG.info('Try to fetch from google bucket...') url = 'https://storage.googleapis.com/gnomad-public/legacy/exacv1_downloads/release0.3.1/manuscript_data/forweb_cleaned_exac_r03_march16_z_data_pLI.txt.gz' # depends on [control=['except'], data=[]] exac_lines = fetch_resource(url) return exac_lines