code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def camera_position(self):
    """Return the camera position of the active render window.

    The result is a three-element list: the camera's position, its
    focal point, and its view-up vector, in that order.
    """
    cam = self.camera
    return [cam.GetPosition(), cam.GetFocalPoint(), cam.GetViewUp()]
def function[camera_position, parameter[self]]: constant[ Returns camera position of active render window ] return[list[[<ast.Call object at 0x7da20c992770>, <ast.Call object at 0x7da20c993250>, <ast.Call object at 0x7da20c993100>]]]
keyword[def] identifier[camera_position] ( identifier[self] ): literal[string] keyword[return] [ identifier[self] . identifier[camera] . identifier[GetPosition] (), identifier[self] . identifier[camera] . identifier[GetFocalPoint] (), identifier[self] . identifier[camera] . identifier[GetViewUp] ()]
def camera_position(self): """ Returns camera position of active render window """ return [self.camera.GetPosition(), self.camera.GetFocalPoint(), self.camera.GetViewUp()]
def handle_relative_import(self, node):
    """Handle a relative ``from ... import`` statement.

    The number of leading dots is reflected in ``node.level``:
        from A           means node.level == 0
        from . import B  means node.level == 1
        from .A          means node.level == 1
    """
    # Directory containing the file currently being analysed.
    current_dir = os.path.abspath(os.path.join(self.filenames[-1], os.pardir))
    skip_init = False

    # Each dot beyond the first moves one directory up.
    base_dir = current_dir
    for _ in range(node.level - 1):
        base_dir = os.path.abspath(os.path.join(base_dir, os.pardir))

    if node.module:
        name_with_dir = os.path.join(base_dir, node.module.replace('.', '/'))
        if not os.path.isdir(name_with_dir):
            name_with_dir = name_with_dir + '.py'
    else:
        # e.g. ``from . import X`` / ``from .. import X``
        name_with_dir = base_dir
        if node.level == 1:
            # We do not want to analyse the init file of the current directory.
            skip_init = True

    # A ``.py`` suffix means the target resolved to a single module file.
    if name_with_dir.endswith('.py'):
        return self.add_module(
            module=(node.module, name_with_dir),
            module_or_package_name=None,
            local_names=as_alias_handler(node.names),
            import_alias_mapping=retrieve_import_alias_mapping(node.names),
            from_from=True
        )
    return self.from_directory_import(
        (node.module, name_with_dir),
        not_as_alias_handler(node.names),
        as_alias_handler(node.names),
        retrieve_import_alias_mapping(node.names),
        skip_init=skip_init
    )
def function[handle_relative_import, parameter[self, node]]: constant[ from A means node.level == 0 from . import B means node.level == 1 from .A means node.level == 1 ] variable[no_file] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[call[name[self].filenames][<ast.UnaryOp object at 0x7da1b1e07bb0>], name[os].pardir]]]] variable[skip_init] assign[=] constant[False] if compare[name[node].level equal[==] constant[1]] begin[:] if name[node].module begin[:] variable[name_with_dir] assign[=] call[name[os].path.join, parameter[name[no_file], call[name[node].module.replace, parameter[constant[.], constant[/]]]]] if <ast.UnaryOp object at 0x7da1b1eccfa0> begin[:] variable[name_with_dir] assign[=] binary_operation[name[name_with_dir] + constant[.py]] if call[name[name_with_dir].endswith, parameter[constant[.py]]] begin[:] return[call[name[self].add_module, parameter[]]] return[call[name[self].from_directory_import, parameter[tuple[[<ast.Attribute object at 0x7da1b1edb9d0>, <ast.Name object at 0x7da1b1edafb0>]], call[name[not_as_alias_handler], parameter[name[node].names]], call[name[as_alias_handler], parameter[name[node].names]], call[name[retrieve_import_alias_mapping], parameter[name[node].names]]]]]
keyword[def] identifier[handle_relative_import] ( identifier[self] , identifier[node] ): literal[string] identifier[no_file] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[filenames] [- literal[int] ], identifier[os] . identifier[pardir] )) identifier[skip_init] = keyword[False] keyword[if] identifier[node] . identifier[level] == literal[int] : keyword[if] identifier[node] . identifier[module] : identifier[name_with_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[no_file] , identifier[node] . identifier[module] . identifier[replace] ( literal[string] , literal[string] )) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[name_with_dir] ): identifier[name_with_dir] = identifier[name_with_dir] + literal[string] keyword[else] : identifier[name_with_dir] = identifier[no_file] identifier[skip_init] = keyword[True] keyword[else] : identifier[parent] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[no_file] , identifier[os] . identifier[pardir] )) keyword[if] identifier[node] . identifier[level] > literal[int] : keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , identifier[node] . identifier[level] - literal[int] ): identifier[parent] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[parent] , identifier[os] . identifier[pardir] )) keyword[if] identifier[node] . identifier[module] : identifier[name_with_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[parent] , identifier[node] . identifier[module] . identifier[replace] ( literal[string] , literal[string] )) keyword[if] keyword[not] identifier[os] . identifier[path] . 
identifier[isdir] ( identifier[name_with_dir] ): identifier[name_with_dir] = identifier[name_with_dir] + literal[string] keyword[else] : identifier[name_with_dir] = identifier[parent] keyword[if] identifier[name_with_dir] . identifier[endswith] ( literal[string] ): keyword[return] identifier[self] . identifier[add_module] ( identifier[module] =( identifier[node] . identifier[module] , identifier[name_with_dir] ), identifier[module_or_package_name] = keyword[None] , identifier[local_names] = identifier[as_alias_handler] ( identifier[node] . identifier[names] ), identifier[import_alias_mapping] = identifier[retrieve_import_alias_mapping] ( identifier[node] . identifier[names] ), identifier[from_from] = keyword[True] ) keyword[return] identifier[self] . identifier[from_directory_import] ( ( identifier[node] . identifier[module] , identifier[name_with_dir] ), identifier[not_as_alias_handler] ( identifier[node] . identifier[names] ), identifier[as_alias_handler] ( identifier[node] . identifier[names] ), identifier[retrieve_import_alias_mapping] ( identifier[node] . identifier[names] ), identifier[skip_init] = identifier[skip_init] )
def handle_relative_import(self, node): """ from A means node.level == 0 from . import B means node.level == 1 from .A means node.level == 1 """ no_file = os.path.abspath(os.path.join(self.filenames[-1], os.pardir)) skip_init = False if node.level == 1: # Same directory as current file if node.module: name_with_dir = os.path.join(no_file, node.module.replace('.', '/')) if not os.path.isdir(name_with_dir): name_with_dir = name_with_dir + '.py' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # e.g. from . import X name_with_dir = no_file # We do not want to analyse the init file of the current directory skip_init = True # depends on [control=['if'], data=[]] else: parent = os.path.abspath(os.path.join(no_file, os.pardir)) if node.level > 2: # Perform extra `cd ..` however many times for _ in range(0, node.level - 2): parent = os.path.abspath(os.path.join(parent, os.pardir)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] if node.module: name_with_dir = os.path.join(parent, node.module.replace('.', '/')) if not os.path.isdir(name_with_dir): name_with_dir = name_with_dir + '.py' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # e.g. from .. import X name_with_dir = parent # Is it a file? if name_with_dir.endswith('.py'): return self.add_module(module=(node.module, name_with_dir), module_or_package_name=None, local_names=as_alias_handler(node.names), import_alias_mapping=retrieve_import_alias_mapping(node.names), from_from=True) # depends on [control=['if'], data=[]] return self.from_directory_import((node.module, name_with_dir), not_as_alias_handler(node.names), as_alias_handler(node.names), retrieve_import_alias_mapping(node.names), skip_init=skip_init)
def get_fieldname(self, field, arnum):
    """Generate a new fieldname carrying a ``-<arnum>`` suffix."""
    # Drop any existing suffix first so the name never accumulates
    # more than one.
    base_name = field.getName().split("-")[0]
    return "{}-{}".format(base_name, arnum)
def function[get_fieldname, parameter[self, field, arnum]]: constant[Generate a new fieldname with a '-<arnum>' suffix ] variable[name] assign[=] call[name[field].getName, parameter[]] variable[base_name] assign[=] call[call[name[name].split, parameter[constant[-]]]][constant[0]] variable[suffix] assign[=] call[constant[-{}].format, parameter[name[arnum]]] return[call[constant[{}{}].format, parameter[name[base_name], name[suffix]]]]
keyword[def] identifier[get_fieldname] ( identifier[self] , identifier[field] , identifier[arnum] ): literal[string] identifier[name] = identifier[field] . identifier[getName] () identifier[base_name] = identifier[name] . identifier[split] ( literal[string] )[ literal[int] ] identifier[suffix] = literal[string] . identifier[format] ( identifier[arnum] ) keyword[return] literal[string] . identifier[format] ( identifier[base_name] , identifier[suffix] )
def get_fieldname(self, field, arnum): """Generate a new fieldname with a '-<arnum>' suffix """ name = field.getName() # ensure we have only *one* suffix base_name = name.split('-')[0] suffix = '-{}'.format(arnum) return '{}{}'.format(base_name, suffix)
def find_cross_contamination(databases, pair, tmpdir='tmp', log='log.txt', threads=1):
    """
    Uses mash to find out whether or not a sample has more than one genus present, indicating
    cross-contamination.
    :param databases: A databases folder, which must contain refseq.msh, a mash sketch that has
    one representative per genus from refseq.
    :param pair: Array with path to forward reads at index 0 and path to reverse reads at index 1.
    :param tmpdir: Temporary directory to store mash result files in.
    :param log: Logfile to write to.
    :param threads: Number of threads to run mash with.
    :return: genera_present: A string. If only one genus is found, string is just genus.
    If more than one genus is found, the string is a list of genera present, separated by colons
    (i.e. for Escherichia and Salmonella found, string would be 'Escherichia:Salmonella').
    If no genus found, return 'NA'.
    """
    genera_present = list()
    out, err, cmd = mash.screen('{}/refseq.msh'.format(databases), pair[0],
                                pair[1],
                                threads=threads,
                                w='',
                                i='0.95',
                                output_file=os.path.join(tmpdir, 'screen.tab'),
                                returncmd=True)
    write_to_logfile(log, out, err, cmd)
    screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
    for item in screen_output:
        # The genus is the third-from-last component of the reference path.
        mash_genus = item.query_id.split('/')[-3]
        # Shigella is genomically Escherichia; fold it in to avoid a false positive.
        if mash_genus == 'Shigella':
            mash_genus = 'Escherichia'
        if mash_genus not in genera_present:
            genera_present.append(mash_genus)
    if not genera_present:
        return 'NA'
    # A single genus joins to itself; multiple genera are colon-separated.
    return ':'.join(genera_present)
def function[find_cross_contamination, parameter[databases, pair, tmpdir, log, threads]]: constant[ Usese mash to find out whether or not a sample has more than one genus present, indicating cross-contamination. :param databases: A databases folder, which must contain refseq.msh, a mash sketch that has one representative per genus from refseq. :param tmpdir: Temporary directory to store mash result files in. :param pair: Array with path to forward reads at index 0 and path to reverse reads at index o :param log: Logfile to write to. :param threads: Number of threads to run mash wit. :return: cross_contam: a bool that is True if more than one genus is found, and False otherwise. :return: genera_present: A string. If only one genus is found, string is just genus. If more than one genus is found, the string is a list of genera present, separated by colons (i.e. for Escherichia and Salmonella found, string would be 'Escherichia:Salmonella'. If no genus found, return 'NA' ] variable[genera_present] assign[=] call[name[list], parameter[]] <ast.Tuple object at 0x7da1b19f1c30> assign[=] call[name[mash].screen, parameter[call[constant[{}/refseq.msh].format, parameter[name[databases]]], call[name[pair]][constant[0]], call[name[pair]][constant[1]]]] call[name[write_to_logfile], parameter[name[log], name[out], name[err], name[cmd]]] variable[screen_output] assign[=] call[name[mash].read_mash_screen, parameter[call[name[os].path.join, parameter[name[tmpdir], constant[screen.tab]]]]] for taget[name[item]] in starred[name[screen_output]] begin[:] variable[mash_genus] assign[=] call[call[name[item].query_id.split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b19f05e0>] if compare[name[mash_genus] equal[==] constant[Shigella]] begin[:] variable[mash_genus] assign[=] constant[Escherichia] if compare[name[mash_genus] <ast.NotIn object at 0x7da2590d7190> name[genera_present]] begin[:] call[name[genera_present].append, parameter[name[mash_genus]]] if compare[call[name[len], 
parameter[name[genera_present]]] equal[==] constant[1]] begin[:] variable[genera_present] assign[=] call[name[genera_present]][constant[0]] return[name[genera_present]]
keyword[def] identifier[find_cross_contamination] ( identifier[databases] , identifier[pair] , identifier[tmpdir] = literal[string] , identifier[log] = literal[string] , identifier[threads] = literal[int] ): literal[string] identifier[genera_present] = identifier[list] () identifier[out] , identifier[err] , identifier[cmd] = identifier[mash] . identifier[screen] ( literal[string] . identifier[format] ( identifier[databases] ), identifier[pair] [ literal[int] ], identifier[pair] [ literal[int] ], identifier[threads] = identifier[threads] , identifier[w] = literal[string] , identifier[i] = literal[string] , identifier[output_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmpdir] , literal[string] ), identifier[returncmd] = keyword[True] ) identifier[write_to_logfile] ( identifier[log] , identifier[out] , identifier[err] , identifier[cmd] ) identifier[screen_output] = identifier[mash] . identifier[read_mash_screen] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tmpdir] , literal[string] )) keyword[for] identifier[item] keyword[in] identifier[screen_output] : identifier[mash_genus] = identifier[item] . identifier[query_id] . identifier[split] ( literal[string] )[- literal[int] ] keyword[if] identifier[mash_genus] == literal[string] : identifier[mash_genus] = literal[string] keyword[if] identifier[mash_genus] keyword[not] keyword[in] identifier[genera_present] : identifier[genera_present] . 
identifier[append] ( identifier[mash_genus] ) keyword[if] identifier[len] ( identifier[genera_present] )== literal[int] : identifier[genera_present] = identifier[genera_present] [ literal[int] ] keyword[elif] identifier[len] ( identifier[genera_present] )== literal[int] : identifier[genera_present] = literal[string] keyword[else] : identifier[tmpstr] = literal[string] keyword[for] identifier[mash_genus] keyword[in] identifier[genera_present] : identifier[tmpstr] += identifier[mash_genus] + literal[string] identifier[genera_present] = identifier[tmpstr] [:- literal[int] ] keyword[return] identifier[genera_present]
def find_cross_contamination(databases, pair, tmpdir='tmp', log='log.txt', threads=1): """ Usese mash to find out whether or not a sample has more than one genus present, indicating cross-contamination. :param databases: A databases folder, which must contain refseq.msh, a mash sketch that has one representative per genus from refseq. :param tmpdir: Temporary directory to store mash result files in. :param pair: Array with path to forward reads at index 0 and path to reverse reads at index o :param log: Logfile to write to. :param threads: Number of threads to run mash wit. :return: cross_contam: a bool that is True if more than one genus is found, and False otherwise. :return: genera_present: A string. If only one genus is found, string is just genus. If more than one genus is found, the string is a list of genera present, separated by colons (i.e. for Escherichia and Salmonella found, string would be 'Escherichia:Salmonella'. If no genus found, return 'NA' """ genera_present = list() (out, err, cmd) = mash.screen('{}/refseq.msh'.format(databases), pair[0], pair[1], threads=threads, w='', i='0.95', output_file=os.path.join(tmpdir, 'screen.tab'), returncmd=True) write_to_logfile(log, out, err, cmd) screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab')) for item in screen_output: mash_genus = item.query_id.split('/')[-3] if mash_genus == 'Shigella': mash_genus = 'Escherichia' # depends on [control=['if'], data=['mash_genus']] if mash_genus not in genera_present: genera_present.append(mash_genus) # depends on [control=['if'], data=['mash_genus', 'genera_present']] # depends on [control=['for'], data=['item']] if len(genera_present) == 1: genera_present = genera_present[0] # depends on [control=['if'], data=[]] elif len(genera_present) == 0: genera_present = 'NA' # depends on [control=['if'], data=[]] else: tmpstr = '' for mash_genus in genera_present: tmpstr += mash_genus + ':' # depends on [control=['for'], data=['mash_genus']] genera_present = 
tmpstr[:-1] return genera_present
def __skeleton_difference(graph, image, boundary_term, spacing):
    r"""
    A skeleton for the calculation of intensity difference based boundary terms.

    Iterates over the images dimensions and generates for each an array of absolute
    neighbouring voxel :math:`(p, q)` intensity differences :math:`|I_p, I_q|`. These are
    then passed to the supplied function :math:`g(\cdot)` for boundary term computation.
    Finally the returned edge weights are added to the graph.

    Formally for each edge :math:`(p, q)` of the image, their edge weight is computed as

    .. math::

        w(p,q) = g(|I_p - I_q|)

    ,where :math:`g(\cdot)` is the supplied boundary term function.

    The boundary term function has to take an array of intensity differences as only
    parameter and return an array of the same shape containing the edge weights. For the
    implemented function the condition :math:`g(\cdot)\in(0, 1]` must hold true, i.e., it
    has to be strictly positive with :math:`1` as the upper limit.

    @note the underlying neighbourhood connectivity is 4 for 2D, 6 for 3D, etc.

    @note This function is able to work with images of arbitrary dimensions, but was only
    tested for 2D and 3D cases.

    @param graph An initialized graph.GCGraph object
    @type graph.GCGraph
    @param image The image to compute on
    @type image numpy.ndarray
    @param boundary_term A function to compute the boundary term over an array of
                         absolute intensity differences
    @type boundary_term function
    @param spacing A sequence containing the slice spacing used for weighting the
                   computed neighbourhood weight value for different dimensions. If
                   False, no distance based weighting of the graph edges is performed.
    @param spacing sequence | False
    """
    def intensity_difference(neighbour_one, neighbour_two):
        """
        Takes two voxel arrays constituting neighbours and computes the absolute
        intensity differences.
        """
        # NOTE(review): scipy.absolute is a deprecated numpy re-export; newer scipy
        # versions drop it — consider numpy.absolute if the file's imports allow.
        return scipy.absolute(neighbour_one - neighbour_two)

    __skeleton_base(graph, image, boundary_term, intensity_difference, spacing)
def function[__skeleton_difference, parameter[graph, image, boundary_term, spacing]]: constant[ A skeleton for the calculation of intensity difference based boundary terms. Iterates over the images dimensions and generates for each an array of absolute neighbouring voxel :math:`(p, q)` intensity differences :math:`|I_p, I_q|`. These are then passed to the supplied function :math:`g(\cdot)` for for boundary term computation. Finally the returned edge weights are added to the graph. Formally for each edge :math:`(p, q)` of the image, their edge weight is computed as .. math:: w(p,q) = g(|I_p - I_q|) ,where :math:`g(\cdot)` is the supplied boundary term function. The boundary term function has to take an array of intensity differences as only parameter and return an array of the same shape containing the edge weights. For the implemented function the condition :math:`g(\cdot)\in(0, 1]` must hold true, i.e., it has to be strictly positive with :math:`1` as the upper limit. @note the underlying neighbourhood connectivity is 4 for 2D, 6 for 3D, etc. @note This function is able to work with images of arbitrary dimensions, but was only tested for 2D and 3D cases. @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image to compute on @type image numpy.ndarray @param boundary_term A function to compute the boundary term over an array of absolute intensity differences @type boundary_term function @param spacing A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If False, no distance based weighting of the graph edges is performed. @param spacing sequence | False ] def function[intensity_difference, parameter[neighbour_one, neighbour_two]]: constant[ Takes two voxel arrays constituting neighbours and computes the absolute intensity differences. 
] return[call[name[scipy].absolute, parameter[binary_operation[name[neighbour_one] - name[neighbour_two]]]]] call[name[__skeleton_base], parameter[name[graph], name[image], name[boundary_term], name[intensity_difference], name[spacing]]]
keyword[def] identifier[__skeleton_difference] ( identifier[graph] , identifier[image] , identifier[boundary_term] , identifier[spacing] ): literal[string] keyword[def] identifier[intensity_difference] ( identifier[neighbour_one] , identifier[neighbour_two] ): literal[string] keyword[return] identifier[scipy] . identifier[absolute] ( identifier[neighbour_one] - identifier[neighbour_two] ) identifier[__skeleton_base] ( identifier[graph] , identifier[image] , identifier[boundary_term] , identifier[intensity_difference] , identifier[spacing] )
def __skeleton_difference(graph, image, boundary_term, spacing): """ A skeleton for the calculation of intensity difference based boundary terms. Iterates over the images dimensions and generates for each an array of absolute neighbouring voxel :math:`(p, q)` intensity differences :math:`|I_p, I_q|`. These are then passed to the supplied function :math:`g(\\cdot)` for for boundary term computation. Finally the returned edge weights are added to the graph. Formally for each edge :math:`(p, q)` of the image, their edge weight is computed as .. math:: w(p,q) = g(|I_p - I_q|) ,where :math:`g(\\cdot)` is the supplied boundary term function. The boundary term function has to take an array of intensity differences as only parameter and return an array of the same shape containing the edge weights. For the implemented function the condition :math:`g(\\cdot)\\in(0, 1]` must hold true, i.e., it has to be strictly positive with :math:`1` as the upper limit. @note the underlying neighbourhood connectivity is 4 for 2D, 6 for 3D, etc. @note This function is able to work with images of arbitrary dimensions, but was only tested for 2D and 3D cases. @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image to compute on @type image numpy.ndarray @param boundary_term A function to compute the boundary term over an array of absolute intensity differences @type boundary_term function @param spacing A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If False, no distance based weighting of the graph edges is performed. @param spacing sequence | False """ def intensity_difference(neighbour_one, neighbour_two): """ Takes two voxel arrays constituting neighbours and computes the absolute intensity differences. """ return scipy.absolute(neighbour_one - neighbour_two) __skeleton_base(graph, image, boundary_term, intensity_difference, spacing)
def delete_dataset(self, owner, id, **kwargs):
    """
    Delete a dataset.

    Permanently deletes a dataset and all data associated with it. This operation
    cannot be undone, although a new dataset may be created with the same id.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function to be invoked
    when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete_dataset(owner, id, callback=callback_function)

    :param callback function: The callback function for asynchronous request. (optional)
    :param str owner: User name and unique identifier of the creator of a dataset or
        project. For example, in the URL:
        [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset),
        jonloyens is the unique identifier of the owner. (required)
    :param str id: Dataset unique identifier. For example, in the URL:
        [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset),
        an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
    :return: SuccessMessage
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Whether synchronous (data) or asynchronous (a request thread, when a
    # `callback` kwarg is supplied), the helper's result is returned as-is.
    return self.delete_dataset_with_http_info(owner, id, **kwargs)
def function[delete_dataset, parameter[self, owner, id]]: constant[ Delete a dataset Permanently deletes a dataset and all data associated with it. This operation cannot be undone, although a new dataset may be created with the same id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_dataset(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[callback]]] begin[:] return[call[name[self].delete_dataset_with_http_info, parameter[name[owner], name[id]]]]
keyword[def] identifier[delete_dataset] ( identifier[self] , identifier[owner] , identifier[id] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[delete_dataset_with_http_info] ( identifier[owner] , identifier[id] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[delete_dataset_with_http_info] ( identifier[owner] , identifier[id] ,** identifier[kwargs] ) keyword[return] identifier[data]
def delete_dataset(self, owner, id, **kwargs): """ Delete a dataset Permanently deletes a dataset and all data associated with it. This operation cannot be undone, although a new dataset may be created with the same id. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_dataset(owner, id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required) :param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required) :return: SuccessMessage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_dataset_with_http_info(owner, id, **kwargs) # depends on [control=['if'], data=[]] else: data = self.delete_dataset_with_http_info(owner, id, **kwargs) return data
def dist(self, x1, x2):
    """Return the weighted distance between ``x1`` and ``x2``.

    Parameters
    ----------
    x1, x2 : `NumpyTensor`
        Tensors whose mutual distance is calculated.

    Returns
    -------
    dist : float
        The distance between the tensors.
    """
    diff = x1 - x2
    if self.exponent == 2.0:
        # For the 2-norm, sqrt(const) factors out of the norm.
        return float(np.sqrt(self.const) * _norm_default(diff))
    if self.exponent == float('inf'):
        # The inf-norm is scaled by the constant directly.
        return float(self.const * _pnorm_default(diff, self.exponent))
    # General p-norm: const**(1/p) factors out.
    return float(self.const ** (1 / self.exponent) *
                 _pnorm_default(diff, self.exponent))
def function[dist, parameter[self, x1, x2]]: constant[Return the weighted distance between ``x1`` and ``x2``. Parameters ---------- x1, x2 : `NumpyTensor` Tensors whose mutual distance is calculated. Returns ------- dist : float The distance between the tensors. ] if compare[name[self].exponent equal[==] constant[2.0]] begin[:] return[call[name[float], parameter[binary_operation[call[name[np].sqrt, parameter[name[self].const]] * call[name[_norm_default], parameter[binary_operation[name[x1] - name[x2]]]]]]]]
keyword[def] identifier[dist] ( identifier[self] , identifier[x1] , identifier[x2] ): literal[string] keyword[if] identifier[self] . identifier[exponent] == literal[int] : keyword[return] identifier[float] ( identifier[np] . identifier[sqrt] ( identifier[self] . identifier[const] )* identifier[_norm_default] ( identifier[x1] - identifier[x2] )) keyword[elif] identifier[self] . identifier[exponent] == identifier[float] ( literal[string] ): keyword[return] identifier[float] ( identifier[self] . identifier[const] * identifier[_pnorm_default] ( identifier[x1] - identifier[x2] , identifier[self] . identifier[exponent] )) keyword[else] : keyword[return] identifier[float] (( identifier[self] . identifier[const] **( literal[int] / identifier[self] . identifier[exponent] )* identifier[_pnorm_default] ( identifier[x1] - identifier[x2] , identifier[self] . identifier[exponent] )))
def dist(self, x1, x2): """Return the weighted distance between ``x1`` and ``x2``. Parameters ---------- x1, x2 : `NumpyTensor` Tensors whose mutual distance is calculated. Returns ------- dist : float The distance between the tensors. """ if self.exponent == 2.0: return float(np.sqrt(self.const) * _norm_default(x1 - x2)) # depends on [control=['if'], data=[]] elif self.exponent == float('inf'): return float(self.const * _pnorm_default(x1 - x2, self.exponent)) # depends on [control=['if'], data=[]] else: return float(self.const ** (1 / self.exponent) * _pnorm_default(x1 - x2, self.exponent))
def acquire(self, **kwargs): """ Copy the file and return its path Returns ------- str or None The path of the file or None if it does not exist or if verification failed. """ path = path_string(self.path) if os.path.exists(path): if config.verify_file(path, self.sha256): return path return None
def function[acquire, parameter[self]]: constant[ Copy the file and return its path Returns ------- str or None The path of the file or None if it does not exist or if verification failed. ] variable[path] assign[=] call[name[path_string], parameter[name[self].path]] if call[name[os].path.exists, parameter[name[path]]] begin[:] if call[name[config].verify_file, parameter[name[path], name[self].sha256]] begin[:] return[name[path]] return[constant[None]]
keyword[def] identifier[acquire] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[path] = identifier[path_string] ( identifier[self] . identifier[path] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[if] identifier[config] . identifier[verify_file] ( identifier[path] , identifier[self] . identifier[sha256] ): keyword[return] identifier[path] keyword[return] keyword[None]
def acquire(self, **kwargs): """ Copy the file and return its path Returns ------- str or None The path of the file or None if it does not exist or if verification failed. """ path = path_string(self.path) if os.path.exists(path): if config.verify_file(path, self.sha256): return path # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return None
def _pys2shape(self, line): """Updates shape in code_array""" self.code_array.shape = self._get_key(*self._split_tidy(line))
def function[_pys2shape, parameter[self, line]]: constant[Updates shape in code_array] name[self].code_array.shape assign[=] call[name[self]._get_key, parameter[<ast.Starred object at 0x7da1b16a7dc0>]]
keyword[def] identifier[_pys2shape] ( identifier[self] , identifier[line] ): literal[string] identifier[self] . identifier[code_array] . identifier[shape] = identifier[self] . identifier[_get_key] (* identifier[self] . identifier[_split_tidy] ( identifier[line] ))
def _pys2shape(self, line): """Updates shape in code_array""" self.code_array.shape = self._get_key(*self._split_tidy(line))
def ValidateRequiredFieldsAreNotEmpty(gtfs_object, required_field_names, problems=None): """ Validates whether all required fields of an object have a value: - if value empty adds MissingValue errors (if problems accumulator is provided) """ no_missing_value = True for name in required_field_names: if IsEmpty(getattr(gtfs_object, name, None)): if problems: problems.MissingValue(name) no_missing_value = False return no_missing_value
def function[ValidateRequiredFieldsAreNotEmpty, parameter[gtfs_object, required_field_names, problems]]: constant[ Validates whether all required fields of an object have a value: - if value empty adds MissingValue errors (if problems accumulator is provided) ] variable[no_missing_value] assign[=] constant[True] for taget[name[name]] in starred[name[required_field_names]] begin[:] if call[name[IsEmpty], parameter[call[name[getattr], parameter[name[gtfs_object], name[name], constant[None]]]]] begin[:] if name[problems] begin[:] call[name[problems].MissingValue, parameter[name[name]]] variable[no_missing_value] assign[=] constant[False] return[name[no_missing_value]]
keyword[def] identifier[ValidateRequiredFieldsAreNotEmpty] ( identifier[gtfs_object] , identifier[required_field_names] , identifier[problems] = keyword[None] ): literal[string] identifier[no_missing_value] = keyword[True] keyword[for] identifier[name] keyword[in] identifier[required_field_names] : keyword[if] identifier[IsEmpty] ( identifier[getattr] ( identifier[gtfs_object] , identifier[name] , keyword[None] )): keyword[if] identifier[problems] : identifier[problems] . identifier[MissingValue] ( identifier[name] ) identifier[no_missing_value] = keyword[False] keyword[return] identifier[no_missing_value]
def ValidateRequiredFieldsAreNotEmpty(gtfs_object, required_field_names, problems=None): """ Validates whether all required fields of an object have a value: - if value empty adds MissingValue errors (if problems accumulator is provided) """ no_missing_value = True for name in required_field_names: if IsEmpty(getattr(gtfs_object, name, None)): if problems: problems.MissingValue(name) # depends on [control=['if'], data=[]] no_missing_value = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] return no_missing_value
def first_key(res): """ Returns the first result for the given command. If more then 1 result is returned then a `RedisClusterException` is raised. """ if not isinstance(res, dict): raise ValueError('Value should be of dict type') if len(res.keys()) != 1: raise RedisClusterException("More then 1 result from command") return list(res.values())[0]
def function[first_key, parameter[res]]: constant[ Returns the first result for the given command. If more then 1 result is returned then a `RedisClusterException` is raised. ] if <ast.UnaryOp object at 0x7da1b0841240> begin[:] <ast.Raise object at 0x7da1b077b520> if compare[call[name[len], parameter[call[name[res].keys, parameter[]]]] not_equal[!=] constant[1]] begin[:] <ast.Raise object at 0x7da1b0778730> return[call[call[name[list], parameter[call[name[res].values, parameter[]]]]][constant[0]]]
keyword[def] identifier[first_key] ( identifier[res] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[res] , identifier[dict] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[res] . identifier[keys] ())!= literal[int] : keyword[raise] identifier[RedisClusterException] ( literal[string] ) keyword[return] identifier[list] ( identifier[res] . identifier[values] ())[ literal[int] ]
def first_key(res): """ Returns the first result for the given command. If more then 1 result is returned then a `RedisClusterException` is raised. """ if not isinstance(res, dict): raise ValueError('Value should be of dict type') # depends on [control=['if'], data=[]] if len(res.keys()) != 1: raise RedisClusterException('More then 1 result from command') # depends on [control=['if'], data=[]] return list(res.values())[0]
def get_user(session, user_id): """Get user.""" try: user_id = int(user_id) except ValueError: user_id = find_user(session, user_id) resp = _make_request(session, USER_URL, user_id) if not resp: raise VooblyError('user id not found') return resp[0]
def function[get_user, parameter[session, user_id]]: constant[Get user.] <ast.Try object at 0x7da1b18b8580> variable[resp] assign[=] call[name[_make_request], parameter[name[session], name[USER_URL], name[user_id]]] if <ast.UnaryOp object at 0x7da1b18b8970> begin[:] <ast.Raise object at 0x7da1b18b9b40> return[call[name[resp]][constant[0]]]
keyword[def] identifier[get_user] ( identifier[session] , identifier[user_id] ): literal[string] keyword[try] : identifier[user_id] = identifier[int] ( identifier[user_id] ) keyword[except] identifier[ValueError] : identifier[user_id] = identifier[find_user] ( identifier[session] , identifier[user_id] ) identifier[resp] = identifier[_make_request] ( identifier[session] , identifier[USER_URL] , identifier[user_id] ) keyword[if] keyword[not] identifier[resp] : keyword[raise] identifier[VooblyError] ( literal[string] ) keyword[return] identifier[resp] [ literal[int] ]
def get_user(session, user_id): """Get user.""" try: user_id = int(user_id) # depends on [control=['try'], data=[]] except ValueError: user_id = find_user(session, user_id) # depends on [control=['except'], data=[]] resp = _make_request(session, USER_URL, user_id) if not resp: raise VooblyError('user id not found') # depends on [control=['if'], data=[]] return resp[0]
def is_dir_inside(dir, check_dirs): """ Check to see if dir is a subdirectory of (or matches) check_dir directories. Return True if the dir is a subdirectory of or matches any of the check directories. :param dir: String path of directory that may or may not be a subdirectory of or match the directories listed in check_dirs. :param check_dirs: List of string paths of directories that may be parents of dir :return: True if dir is a child/matches any of the directories in check. False otherwise """ for check_dir in check_dirs: if os.path.commonprefix([dir, check_dir]) == check_dir: return True return False
def function[is_dir_inside, parameter[dir, check_dirs]]: constant[ Check to see if dir is a subdirectory of (or matches) check_dir directories. Return True if the dir is a subdirectory of or matches any of the check directories. :param dir: String path of directory that may or may not be a subdirectory of or match the directories listed in check_dirs. :param check_dirs: List of string paths of directories that may be parents of dir :return: True if dir is a child/matches any of the directories in check. False otherwise ] for taget[name[check_dir]] in starred[name[check_dirs]] begin[:] if compare[call[name[os].path.commonprefix, parameter[list[[<ast.Name object at 0x7da1b09bf9a0>, <ast.Name object at 0x7da1b09bd180>]]]] equal[==] name[check_dir]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[is_dir_inside] ( identifier[dir] , identifier[check_dirs] ): literal[string] keyword[for] identifier[check_dir] keyword[in] identifier[check_dirs] : keyword[if] identifier[os] . identifier[path] . identifier[commonprefix] ([ identifier[dir] , identifier[check_dir] ])== identifier[check_dir] : keyword[return] keyword[True] keyword[return] keyword[False]
def is_dir_inside(dir, check_dirs): """ Check to see if dir is a subdirectory of (or matches) check_dir directories. Return True if the dir is a subdirectory of or matches any of the check directories. :param dir: String path of directory that may or may not be a subdirectory of or match the directories listed in check_dirs. :param check_dirs: List of string paths of directories that may be parents of dir :return: True if dir is a child/matches any of the directories in check. False otherwise """ for check_dir in check_dirs: if os.path.commonprefix([dir, check_dir]) == check_dir: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['check_dir']] return False
def _validate_privileges(object_type, privs, privileges): ''' Validate the supplied privileges ''' if object_type != 'group': _perms = [_PRIVILEGES_MAP[perm] for perm in _PRIVILEGE_TYPE_MAP[object_type]] _perms.append('ALL') if object_type not in _PRIVILEGES_OBJECTS: raise SaltInvocationError( 'Invalid object_type: {0} provided'.format(object_type)) if not set(privs).issubset(set(_perms)): raise SaltInvocationError( 'Invalid privilege(s): {0} provided for object {1}'.format( privileges, object_type)) else: if privileges: raise SaltInvocationError( 'The privileges option should not ' 'be set for object_type group')
def function[_validate_privileges, parameter[object_type, privs, privileges]]: constant[ Validate the supplied privileges ] if compare[name[object_type] not_equal[!=] constant[group]] begin[:] variable[_perms] assign[=] <ast.ListComp object at 0x7da18dc079a0> call[name[_perms].append, parameter[constant[ALL]]] if compare[name[object_type] <ast.NotIn object at 0x7da2590d7190> name[_PRIVILEGES_OBJECTS]] begin[:] <ast.Raise object at 0x7da2041da770> if <ast.UnaryOp object at 0x7da2041dbd60> begin[:] <ast.Raise object at 0x7da2041da380>
keyword[def] identifier[_validate_privileges] ( identifier[object_type] , identifier[privs] , identifier[privileges] ): literal[string] keyword[if] identifier[object_type] != literal[string] : identifier[_perms] =[ identifier[_PRIVILEGES_MAP] [ identifier[perm] ] keyword[for] identifier[perm] keyword[in] identifier[_PRIVILEGE_TYPE_MAP] [ identifier[object_type] ]] identifier[_perms] . identifier[append] ( literal[string] ) keyword[if] identifier[object_type] keyword[not] keyword[in] identifier[_PRIVILEGES_OBJECTS] : keyword[raise] identifier[SaltInvocationError] ( literal[string] . identifier[format] ( identifier[object_type] )) keyword[if] keyword[not] identifier[set] ( identifier[privs] ). identifier[issubset] ( identifier[set] ( identifier[_perms] )): keyword[raise] identifier[SaltInvocationError] ( literal[string] . identifier[format] ( identifier[privileges] , identifier[object_type] )) keyword[else] : keyword[if] identifier[privileges] : keyword[raise] identifier[SaltInvocationError] ( literal[string] literal[string] )
def _validate_privileges(object_type, privs, privileges): """ Validate the supplied privileges """ if object_type != 'group': _perms = [_PRIVILEGES_MAP[perm] for perm in _PRIVILEGE_TYPE_MAP[object_type]] _perms.append('ALL') if object_type not in _PRIVILEGES_OBJECTS: raise SaltInvocationError('Invalid object_type: {0} provided'.format(object_type)) # depends on [control=['if'], data=['object_type']] if not set(privs).issubset(set(_perms)): raise SaltInvocationError('Invalid privilege(s): {0} provided for object {1}'.format(privileges, object_type)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['object_type']] elif privileges: raise SaltInvocationError('The privileges option should not be set for object_type group') # depends on [control=['if'], data=[]]
def freqs_to_heights( matrix ): """ Calculate logo height using the method of: Schneider TD, Stephens RM. "Sequence logos: a new way to display consensus sequences." Nucleic Acids Res. 1990 Oct 25;18(20):6097-100. """ # Columns are sequence positions, rows are symbol counts/frequencies f = matrix.values.transpose() n, m = f.shape # Ensure normalized f = f / sum( f, axis=0 ) # Shannon entropy (the where replaces 0 with 1 so that '0 log 0 == 0') H = - sum( f * log2( where( f, f, 1 ) ), axis=0 ) # Height return transpose( f * ( log2( n ) - H ) )
def function[freqs_to_heights, parameter[matrix]]: constant[ Calculate logo height using the method of: Schneider TD, Stephens RM. "Sequence logos: a new way to display consensus sequences." Nucleic Acids Res. 1990 Oct 25;18(20):6097-100. ] variable[f] assign[=] call[name[matrix].values.transpose, parameter[]] <ast.Tuple object at 0x7da1b0d89450> assign[=] name[f].shape variable[f] assign[=] binary_operation[name[f] / call[name[sum], parameter[name[f]]]] variable[H] assign[=] <ast.UnaryOp object at 0x7da1b0d891b0> return[call[name[transpose], parameter[binary_operation[name[f] * binary_operation[call[name[log2], parameter[name[n]]] - name[H]]]]]]
keyword[def] identifier[freqs_to_heights] ( identifier[matrix] ): literal[string] identifier[f] = identifier[matrix] . identifier[values] . identifier[transpose] () identifier[n] , identifier[m] = identifier[f] . identifier[shape] identifier[f] = identifier[f] / identifier[sum] ( identifier[f] , identifier[axis] = literal[int] ) identifier[H] =- identifier[sum] ( identifier[f] * identifier[log2] ( identifier[where] ( identifier[f] , identifier[f] , literal[int] )), identifier[axis] = literal[int] ) keyword[return] identifier[transpose] ( identifier[f] *( identifier[log2] ( identifier[n] )- identifier[H] ))
def freqs_to_heights(matrix): """ Calculate logo height using the method of: Schneider TD, Stephens RM. "Sequence logos: a new way to display consensus sequences." Nucleic Acids Res. 1990 Oct 25;18(20):6097-100. """ # Columns are sequence positions, rows are symbol counts/frequencies f = matrix.values.transpose() (n, m) = f.shape # Ensure normalized f = f / sum(f, axis=0) # Shannon entropy (the where replaces 0 with 1 so that '0 log 0 == 0') H = -sum(f * log2(where(f, f, 1)), axis=0) # Height return transpose(f * (log2(n) - H))
def diet(filename, configuration): ''' Squeeze files if there is a pipeline defined for them or leave them be otherwise. :param filename: filename of the file to process :param configuration: configuration dict describing commands and pipelines :type configuration: dict :return: has file changed :rtype: bool ''' changed = False if not isfile(filename): raise NotFileDietException('Passed filename does not point to a file') conf = copy.deepcopy(DEFAULT_CONFIG) if not configuration.get('parsed'): new_config = parse_configuration(configuration) else: new_config = configuration update_configuration(conf, new_config) filetype = determine_type(filename) squeeze_cmd = conf['pipelines'].get(filetype) if squeeze_cmd: tmpbackup_ext = 'diet_internal' ext = conf.get('backup', tmpbackup_ext) backup = backup_file(filename, ext) size = os.stat(filename).st_size new_size = squeeze(squeeze_cmd, filename, backup) if not conf.get('keep_processed', False) and new_size > size: copy_if_different(backup, filename) # Delete backup, if it was internal if not conf.get('backup'): os.remove(backup) changed = True return changed
def function[diet, parameter[filename, configuration]]: constant[ Squeeze files if there is a pipeline defined for them or leave them be otherwise. :param filename: filename of the file to process :param configuration: configuration dict describing commands and pipelines :type configuration: dict :return: has file changed :rtype: bool ] variable[changed] assign[=] constant[False] if <ast.UnaryOp object at 0x7da18bcc8fd0> begin[:] <ast.Raise object at 0x7da18bccb970> variable[conf] assign[=] call[name[copy].deepcopy, parameter[name[DEFAULT_CONFIG]]] if <ast.UnaryOp object at 0x7da18bccad40> begin[:] variable[new_config] assign[=] call[name[parse_configuration], parameter[name[configuration]]] call[name[update_configuration], parameter[name[conf], name[new_config]]] variable[filetype] assign[=] call[name[determine_type], parameter[name[filename]]] variable[squeeze_cmd] assign[=] call[call[name[conf]][constant[pipelines]].get, parameter[name[filetype]]] if name[squeeze_cmd] begin[:] variable[tmpbackup_ext] assign[=] constant[diet_internal] variable[ext] assign[=] call[name[conf].get, parameter[constant[backup], name[tmpbackup_ext]]] variable[backup] assign[=] call[name[backup_file], parameter[name[filename], name[ext]]] variable[size] assign[=] call[name[os].stat, parameter[name[filename]]].st_size variable[new_size] assign[=] call[name[squeeze], parameter[name[squeeze_cmd], name[filename], name[backup]]] if <ast.BoolOp object at 0x7da18bcc8190> begin[:] call[name[copy_if_different], parameter[name[backup], name[filename]]] if <ast.UnaryOp object at 0x7da18bccb6a0> begin[:] call[name[os].remove, parameter[name[backup]]] variable[changed] assign[=] constant[True] return[name[changed]]
keyword[def] identifier[diet] ( identifier[filename] , identifier[configuration] ): literal[string] identifier[changed] = keyword[False] keyword[if] keyword[not] identifier[isfile] ( identifier[filename] ): keyword[raise] identifier[NotFileDietException] ( literal[string] ) identifier[conf] = identifier[copy] . identifier[deepcopy] ( identifier[DEFAULT_CONFIG] ) keyword[if] keyword[not] identifier[configuration] . identifier[get] ( literal[string] ): identifier[new_config] = identifier[parse_configuration] ( identifier[configuration] ) keyword[else] : identifier[new_config] = identifier[configuration] identifier[update_configuration] ( identifier[conf] , identifier[new_config] ) identifier[filetype] = identifier[determine_type] ( identifier[filename] ) identifier[squeeze_cmd] = identifier[conf] [ literal[string] ]. identifier[get] ( identifier[filetype] ) keyword[if] identifier[squeeze_cmd] : identifier[tmpbackup_ext] = literal[string] identifier[ext] = identifier[conf] . identifier[get] ( literal[string] , identifier[tmpbackup_ext] ) identifier[backup] = identifier[backup_file] ( identifier[filename] , identifier[ext] ) identifier[size] = identifier[os] . identifier[stat] ( identifier[filename] ). identifier[st_size] identifier[new_size] = identifier[squeeze] ( identifier[squeeze_cmd] , identifier[filename] , identifier[backup] ) keyword[if] keyword[not] identifier[conf] . identifier[get] ( literal[string] , keyword[False] ) keyword[and] identifier[new_size] > identifier[size] : identifier[copy_if_different] ( identifier[backup] , identifier[filename] ) keyword[if] keyword[not] identifier[conf] . identifier[get] ( literal[string] ): identifier[os] . identifier[remove] ( identifier[backup] ) identifier[changed] = keyword[True] keyword[return] identifier[changed]
def diet(filename, configuration): """ Squeeze files if there is a pipeline defined for them or leave them be otherwise. :param filename: filename of the file to process :param configuration: configuration dict describing commands and pipelines :type configuration: dict :return: has file changed :rtype: bool """ changed = False if not isfile(filename): raise NotFileDietException('Passed filename does not point to a file') # depends on [control=['if'], data=[]] conf = copy.deepcopy(DEFAULT_CONFIG) if not configuration.get('parsed'): new_config = parse_configuration(configuration) # depends on [control=['if'], data=[]] else: new_config = configuration update_configuration(conf, new_config) filetype = determine_type(filename) squeeze_cmd = conf['pipelines'].get(filetype) if squeeze_cmd: tmpbackup_ext = 'diet_internal' ext = conf.get('backup', tmpbackup_ext) backup = backup_file(filename, ext) size = os.stat(filename).st_size new_size = squeeze(squeeze_cmd, filename, backup) if not conf.get('keep_processed', False) and new_size > size: copy_if_different(backup, filename) # depends on [control=['if'], data=[]] # Delete backup, if it was internal if not conf.get('backup'): os.remove(backup) # depends on [control=['if'], data=[]] changed = True # depends on [control=['if'], data=[]] return changed
def delete_one_letter(self, letter=RIGHT): """Delete one letter the right or the the left of the cursor.""" assert letter in (self.RIGHT, self.LEFT) if letter == self.LEFT: papy = self.cursor self.text = self.text[:self.cursor - 1] + self.text[self.cursor:] self.cursor = papy - 1 else: self.text = self.text[:self.cursor] + self.text[self.cursor + 1:]
def function[delete_one_letter, parameter[self, letter]]: constant[Delete one letter the right or the the left of the cursor.] assert[compare[name[letter] in tuple[[<ast.Attribute object at 0x7da20c990970>, <ast.Attribute object at 0x7da20c992590>]]]] if compare[name[letter] equal[==] name[self].LEFT] begin[:] variable[papy] assign[=] name[self].cursor name[self].text assign[=] binary_operation[call[name[self].text][<ast.Slice object at 0x7da18f720b50>] + call[name[self].text][<ast.Slice object at 0x7da20c7957b0>]] name[self].cursor assign[=] binary_operation[name[papy] - constant[1]]
keyword[def] identifier[delete_one_letter] ( identifier[self] , identifier[letter] = identifier[RIGHT] ): literal[string] keyword[assert] identifier[letter] keyword[in] ( identifier[self] . identifier[RIGHT] , identifier[self] . identifier[LEFT] ) keyword[if] identifier[letter] == identifier[self] . identifier[LEFT] : identifier[papy] = identifier[self] . identifier[cursor] identifier[self] . identifier[text] = identifier[self] . identifier[text] [: identifier[self] . identifier[cursor] - literal[int] ]+ identifier[self] . identifier[text] [ identifier[self] . identifier[cursor] :] identifier[self] . identifier[cursor] = identifier[papy] - literal[int] keyword[else] : identifier[self] . identifier[text] = identifier[self] . identifier[text] [: identifier[self] . identifier[cursor] ]+ identifier[self] . identifier[text] [ identifier[self] . identifier[cursor] + literal[int] :]
def delete_one_letter(self, letter=RIGHT): """Delete one letter the right or the the left of the cursor.""" assert letter in (self.RIGHT, self.LEFT) if letter == self.LEFT: papy = self.cursor self.text = self.text[:self.cursor - 1] + self.text[self.cursor:] self.cursor = papy - 1 # depends on [control=['if'], data=[]] else: self.text = self.text[:self.cursor] + self.text[self.cursor + 1:]
def _parseKeyName(self, name): """ Returns dict with fullpath, to, from. """ if name.endswith(Store.theInfoExtension): return {'type': 'info'} match = self.keyPattern.match(name) if not match: return None match = match.groupdict() match.update(type='diff') return match
def function[_parseKeyName, parameter[self, name]]: constant[ Returns dict with fullpath, to, from. ] if call[name[name].endswith, parameter[name[Store].theInfoExtension]] begin[:] return[dictionary[[<ast.Constant object at 0x7da2041d9ae0>], [<ast.Constant object at 0x7da2041d8b20>]]] variable[match] assign[=] call[name[self].keyPattern.match, parameter[name[name]]] if <ast.UnaryOp object at 0x7da2041d9810> begin[:] return[constant[None]] variable[match] assign[=] call[name[match].groupdict, parameter[]] call[name[match].update, parameter[]] return[name[match]]
keyword[def] identifier[_parseKeyName] ( identifier[self] , identifier[name] ): literal[string] keyword[if] identifier[name] . identifier[endswith] ( identifier[Store] . identifier[theInfoExtension] ): keyword[return] { literal[string] : literal[string] } identifier[match] = identifier[self] . identifier[keyPattern] . identifier[match] ( identifier[name] ) keyword[if] keyword[not] identifier[match] : keyword[return] keyword[None] identifier[match] = identifier[match] . identifier[groupdict] () identifier[match] . identifier[update] ( identifier[type] = literal[string] ) keyword[return] identifier[match]
def _parseKeyName(self, name): """ Returns dict with fullpath, to, from. """ if name.endswith(Store.theInfoExtension): return {'type': 'info'} # depends on [control=['if'], data=[]] match = self.keyPattern.match(name) if not match: return None # depends on [control=['if'], data=[]] match = match.groupdict() match.update(type='diff') return match
def unbuffered_write(self, buf): """Performs an unbuffered write, the default unless socket.send does not send everything, in which case an unbuffered write is done and the write method is set to be a buffered write until the buffer is empty once again. buf -- bytes to send """ if self.closed: raise ConnectionClosed() result = 0 try: result = self.sock.send(buf) except EnvironmentError as e: # if the socket is simply backed up ignore the error if e.errno != errno.EAGAIN: self._close(e) return # when the socket buffers are full/backed up then we need to poll to see # when we can write again if result != len(buf): self.write = self.buffered_write self.write_watcher.start() self.write(buf[result:])
def function[unbuffered_write, parameter[self, buf]]: constant[Performs an unbuffered write, the default unless socket.send does not send everything, in which case an unbuffered write is done and the write method is set to be a buffered write until the buffer is empty once again. buf -- bytes to send ] if name[self].closed begin[:] <ast.Raise object at 0x7da1b094a530> variable[result] assign[=] constant[0] <ast.Try object at 0x7da1b0948760> if compare[name[result] not_equal[!=] call[name[len], parameter[name[buf]]]] begin[:] name[self].write assign[=] name[self].buffered_write call[name[self].write_watcher.start, parameter[]] call[name[self].write, parameter[call[name[buf]][<ast.Slice object at 0x7da1b0804970>]]]
keyword[def] identifier[unbuffered_write] ( identifier[self] , identifier[buf] ): literal[string] keyword[if] identifier[self] . identifier[closed] : keyword[raise] identifier[ConnectionClosed] () identifier[result] = literal[int] keyword[try] : identifier[result] = identifier[self] . identifier[sock] . identifier[send] ( identifier[buf] ) keyword[except] identifier[EnvironmentError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[errno] != identifier[errno] . identifier[EAGAIN] : identifier[self] . identifier[_close] ( identifier[e] ) keyword[return] keyword[if] identifier[result] != identifier[len] ( identifier[buf] ): identifier[self] . identifier[write] = identifier[self] . identifier[buffered_write] identifier[self] . identifier[write_watcher] . identifier[start] () identifier[self] . identifier[write] ( identifier[buf] [ identifier[result] :])
def unbuffered_write(self, buf): """Performs an unbuffered write, the default unless socket.send does not send everything, in which case an unbuffered write is done and the write method is set to be a buffered write until the buffer is empty once again. buf -- bytes to send """ if self.closed: raise ConnectionClosed() # depends on [control=['if'], data=[]] result = 0 try: result = self.sock.send(buf) # depends on [control=['try'], data=[]] except EnvironmentError as e: # if the socket is simply backed up ignore the error if e.errno != errno.EAGAIN: self._close(e) return # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # when the socket buffers are full/backed up then we need to poll to see # when we can write again if result != len(buf): self.write = self.buffered_write self.write_watcher.start() self.write(buf[result:]) # depends on [control=['if'], data=['result']]
def client_connection_made(self, transport): """ Called by twisted protocol when a connection attempt has succeeded. """ with self.lock: self.is_closed = False self.transport = transport self._send_options_message()
def function[client_connection_made, parameter[self, transport]]: constant[ Called by twisted protocol when a connection attempt has succeeded. ] with name[self].lock begin[:] name[self].is_closed assign[=] constant[False] name[self].transport assign[=] name[transport] call[name[self]._send_options_message, parameter[]]
keyword[def] identifier[client_connection_made] ( identifier[self] , identifier[transport] ): literal[string] keyword[with] identifier[self] . identifier[lock] : identifier[self] . identifier[is_closed] = keyword[False] identifier[self] . identifier[transport] = identifier[transport] identifier[self] . identifier[_send_options_message] ()
def client_connection_made(self, transport): """ Called by twisted protocol when a connection attempt has succeeded. """ with self.lock: self.is_closed = False # depends on [control=['with'], data=[]] self.transport = transport self._send_options_message()
def monitor_session_span_command_direction(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span") session = ET.SubElement(monitor, "session") session_number_key = ET.SubElement(session, "session-number") session_number_key.text = kwargs.pop('session_number') span_command = ET.SubElement(session, "span-command") direction = ET.SubElement(span_command, "direction") direction.text = kwargs.pop('direction') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[monitor_session_span_command_direction, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[monitor] assign[=] call[name[ET].SubElement, parameter[name[config], constant[monitor]]] variable[session] assign[=] call[name[ET].SubElement, parameter[name[monitor], constant[session]]] variable[session_number_key] assign[=] call[name[ET].SubElement, parameter[name[session], constant[session-number]]] name[session_number_key].text assign[=] call[name[kwargs].pop, parameter[constant[session_number]]] variable[span_command] assign[=] call[name[ET].SubElement, parameter[name[session], constant[span-command]]] variable[direction] assign[=] call[name[ET].SubElement, parameter[name[span_command], constant[direction]]] name[direction].text assign[=] call[name[kwargs].pop, parameter[constant[direction]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[monitor_session_span_command_direction] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[monitor] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[session] = identifier[ET] . identifier[SubElement] ( identifier[monitor] , literal[string] ) identifier[session_number_key] = identifier[ET] . identifier[SubElement] ( identifier[session] , literal[string] ) identifier[session_number_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[span_command] = identifier[ET] . identifier[SubElement] ( identifier[session] , literal[string] ) identifier[direction] = identifier[ET] . identifier[SubElement] ( identifier[span_command] , literal[string] ) identifier[direction] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def monitor_session_span_command_direction(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') monitor = ET.SubElement(config, 'monitor', xmlns='urn:brocade.com:mgmt:brocade-span') session = ET.SubElement(monitor, 'session') session_number_key = ET.SubElement(session, 'session-number') session_number_key.text = kwargs.pop('session_number') span_command = ET.SubElement(session, 'span-command') direction = ET.SubElement(span_command, 'direction') direction.text = kwargs.pop('direction') callback = kwargs.pop('callback', self._callback) return callback(config)
def pre_build(self, traj, brian_list, network_dict): """Pre-builds the connections. Pre-build is only performed if none of the relevant parameters is explored and the relevant neuron groups exist. :param traj: Trajectory container :param brian_list: List of objects passed to BRIAN network constructor. Adds: Connections, amount depends on clustering :param network_dict: Dictionary of elements shared among the components Expects: 'neurons_i': Inhibitory neuron group 'neurons_e': Excitatory neuron group Adds: Connections, amount depends on clustering """ self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.connections) self._pre_build = (self._pre_build and 'neurons_i' in network_dict and 'neurons_e' in network_dict) if self._pre_build: self._build_connections(traj, brian_list, network_dict)
def function[pre_build, parameter[self, traj, brian_list, network_dict]]: constant[Pre-builds the connections. Pre-build is only performed if none of the relevant parameters is explored and the relevant neuron groups exist. :param traj: Trajectory container :param brian_list: List of objects passed to BRIAN network constructor. Adds: Connections, amount depends on clustering :param network_dict: Dictionary of elements shared among the components Expects: 'neurons_i': Inhibitory neuron group 'neurons_e': Excitatory neuron group Adds: Connections, amount depends on clustering ] name[self]._pre_build assign[=] <ast.UnaryOp object at 0x7da1b26ad510> name[self]._pre_build assign[=] <ast.BoolOp object at 0x7da1b26acc70> if name[self]._pre_build begin[:] call[name[self]._build_connections, parameter[name[traj], name[brian_list], name[network_dict]]]
keyword[def] identifier[pre_build] ( identifier[self] , identifier[traj] , identifier[brian_list] , identifier[network_dict] ): literal[string] identifier[self] . identifier[_pre_build] = keyword[not] identifier[_explored_parameters_in_group] ( identifier[traj] , identifier[traj] . identifier[parameters] . identifier[connections] ) identifier[self] . identifier[_pre_build] =( identifier[self] . identifier[_pre_build] keyword[and] literal[string] keyword[in] identifier[network_dict] keyword[and] literal[string] keyword[in] identifier[network_dict] ) keyword[if] identifier[self] . identifier[_pre_build] : identifier[self] . identifier[_build_connections] ( identifier[traj] , identifier[brian_list] , identifier[network_dict] )
def pre_build(self, traj, brian_list, network_dict): """Pre-builds the connections. Pre-build is only performed if none of the relevant parameters is explored and the relevant neuron groups exist. :param traj: Trajectory container :param brian_list: List of objects passed to BRIAN network constructor. Adds: Connections, amount depends on clustering :param network_dict: Dictionary of elements shared among the components Expects: 'neurons_i': Inhibitory neuron group 'neurons_e': Excitatory neuron group Adds: Connections, amount depends on clustering """ self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.connections) self._pre_build = self._pre_build and 'neurons_i' in network_dict and ('neurons_e' in network_dict) if self._pre_build: self._build_connections(traj, brian_list, network_dict) # depends on [control=['if'], data=[]]
def _read_data(self): """ Reads data from the connection and adds it to _push_packet, until the connection is closed or the task in cancelled. """ while True: try: data = yield from self._socket.recv() except asyncio.CancelledError: break except ConnectionClosed: break self._push_packet(data) self._loop.call_soon(self.close)
def function[_read_data, parameter[self]]: constant[ Reads data from the connection and adds it to _push_packet, until the connection is closed or the task in cancelled. ] while constant[True] begin[:] <ast.Try object at 0x7da207f03d60> call[name[self]._push_packet, parameter[name[data]]] call[name[self]._loop.call_soon, parameter[name[self].close]]
keyword[def] identifier[_read_data] ( identifier[self] ): literal[string] keyword[while] keyword[True] : keyword[try] : identifier[data] = keyword[yield] keyword[from] identifier[self] . identifier[_socket] . identifier[recv] () keyword[except] identifier[asyncio] . identifier[CancelledError] : keyword[break] keyword[except] identifier[ConnectionClosed] : keyword[break] identifier[self] . identifier[_push_packet] ( identifier[data] ) identifier[self] . identifier[_loop] . identifier[call_soon] ( identifier[self] . identifier[close] )
def _read_data(self): """ Reads data from the connection and adds it to _push_packet, until the connection is closed or the task in cancelled. """ while True: try: data = (yield from self._socket.recv()) # depends on [control=['try'], data=[]] except asyncio.CancelledError: break # depends on [control=['except'], data=[]] except ConnectionClosed: break # depends on [control=['except'], data=[]] self._push_packet(data) # depends on [control=['while'], data=[]] self._loop.call_soon(self.close)
def get_summaries_log_dir(decode_hp, output_dir, dataset_split): """Get nested summaries_log_dir based on decode_hp.""" child_dir = decode_hp.summaries_log_dir level_dir = "".join([str(level) for level in decode_hp.level_interp]) if decode_hp.channel_interp == "all": rank_dir = "all" else: rank_dir = "rank_%d" % decode_hp.rank_interp child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir) if dataset_split is not None: child_dir += "_{}".format(dataset_split) return os.path.join(output_dir, child_dir)
def function[get_summaries_log_dir, parameter[decode_hp, output_dir, dataset_split]]: constant[Get nested summaries_log_dir based on decode_hp.] variable[child_dir] assign[=] name[decode_hp].summaries_log_dir variable[level_dir] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da1b1efab60>]] if compare[name[decode_hp].channel_interp equal[==] constant[all]] begin[:] variable[rank_dir] assign[=] constant[all] variable[child_dir] assign[=] binary_operation[constant[%s/%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1fe0070>, <ast.Name object at 0x7da1b1fe0550>, <ast.Name object at 0x7da1b1fe0040>]]] if compare[name[dataset_split] is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da1b1fe0130> return[call[name[os].path.join, parameter[name[output_dir], name[child_dir]]]]
keyword[def] identifier[get_summaries_log_dir] ( identifier[decode_hp] , identifier[output_dir] , identifier[dataset_split] ): literal[string] identifier[child_dir] = identifier[decode_hp] . identifier[summaries_log_dir] identifier[level_dir] = literal[string] . identifier[join] ([ identifier[str] ( identifier[level] ) keyword[for] identifier[level] keyword[in] identifier[decode_hp] . identifier[level_interp] ]) keyword[if] identifier[decode_hp] . identifier[channel_interp] == literal[string] : identifier[rank_dir] = literal[string] keyword[else] : identifier[rank_dir] = literal[string] % identifier[decode_hp] . identifier[rank_interp] identifier[child_dir] = literal[string] %( identifier[child_dir] , identifier[level_dir] , identifier[rank_dir] ) keyword[if] identifier[dataset_split] keyword[is] keyword[not] keyword[None] : identifier[child_dir] += literal[string] . identifier[format] ( identifier[dataset_split] ) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[output_dir] , identifier[child_dir] )
def get_summaries_log_dir(decode_hp, output_dir, dataset_split): """Get nested summaries_log_dir based on decode_hp.""" child_dir = decode_hp.summaries_log_dir level_dir = ''.join([str(level) for level in decode_hp.level_interp]) if decode_hp.channel_interp == 'all': rank_dir = 'all' # depends on [control=['if'], data=[]] else: rank_dir = 'rank_%d' % decode_hp.rank_interp child_dir = '%s/%s_%s' % (child_dir, level_dir, rank_dir) if dataset_split is not None: child_dir += '_{}'.format(dataset_split) # depends on [control=['if'], data=['dataset_split']] return os.path.join(output_dir, child_dir)
def replace(s, replace): """Replace multiple values in a string""" for r in replace: s = s.replace(*r) return s
def function[replace, parameter[s, replace]]: constant[Replace multiple values in a string] for taget[name[r]] in starred[name[replace]] begin[:] variable[s] assign[=] call[name[s].replace, parameter[<ast.Starred object at 0x7da1b2344250>]] return[name[s]]
keyword[def] identifier[replace] ( identifier[s] , identifier[replace] ): literal[string] keyword[for] identifier[r] keyword[in] identifier[replace] : identifier[s] = identifier[s] . identifier[replace] (* identifier[r] ) keyword[return] identifier[s]
def replace(s, replace): """Replace multiple values in a string""" for r in replace: s = s.replace(*r) # depends on [control=['for'], data=['r']] return s
def n_atom(self, node): """atom ::= ('(' [yield_expr|testlist_gexp] ')' | '[' [listmaker] ']' | '{' [dictmaker] '}' | '`' testlist1 '`' | NAME | NUMBER | STRING+) """ length = len(node) if length == 1: self.preorder(node[0]) elif length == 3: self.preorder(node[0]) self.preorder(node[1]) self.preorder(node[2]) else: assert False, "Expecting atom to have length 1 or 3" self.prune()
def function[n_atom, parameter[self, node]]: constant[atom ::= ('(' [yield_expr|testlist_gexp] ')' | '[' [listmaker] ']' | '{' [dictmaker] '}' | '`' testlist1 '`' | NAME | NUMBER | STRING+) ] variable[length] assign[=] call[name[len], parameter[name[node]]] if compare[name[length] equal[==] constant[1]] begin[:] call[name[self].preorder, parameter[call[name[node]][constant[0]]]] call[name[self].prune, parameter[]]
keyword[def] identifier[n_atom] ( identifier[self] , identifier[node] ): literal[string] identifier[length] = identifier[len] ( identifier[node] ) keyword[if] identifier[length] == literal[int] : identifier[self] . identifier[preorder] ( identifier[node] [ literal[int] ]) keyword[elif] identifier[length] == literal[int] : identifier[self] . identifier[preorder] ( identifier[node] [ literal[int] ]) identifier[self] . identifier[preorder] ( identifier[node] [ literal[int] ]) identifier[self] . identifier[preorder] ( identifier[node] [ literal[int] ]) keyword[else] : keyword[assert] keyword[False] , literal[string] identifier[self] . identifier[prune] ()
def n_atom(self, node): """atom ::= ('(' [yield_expr|testlist_gexp] ')' | '[' [listmaker] ']' | '{' [dictmaker] '}' | '`' testlist1 '`' | NAME | NUMBER | STRING+) """ length = len(node) if length == 1: self.preorder(node[0]) # depends on [control=['if'], data=[]] elif length == 3: self.preorder(node[0]) self.preorder(node[1]) self.preorder(node[2]) # depends on [control=['if'], data=[]] else: assert False, 'Expecting atom to have length 1 or 3' self.prune()
def create_record(cls, dump): """Create a new record from dump.""" # Reserve record identifier, create record and recid pid in one # operation. timestamp, data = dump.latest record = Record.create(data) record.model.created = dump.created.replace(tzinfo=None) record.model.updated = timestamp.replace(tzinfo=None) RecordIdentifier.insert(dump.recid) PersistentIdentifier.create( pid_type='recid', pid_value=str(dump.recid), object_type='rec', object_uuid=str(record.id), status=PIDStatus.REGISTERED ) db.session.commit() return cls.update_record(revisions=dump.rest, record=record, created=dump.created)
def function[create_record, parameter[cls, dump]]: constant[Create a new record from dump.] <ast.Tuple object at 0x7da1b012ca00> assign[=] name[dump].latest variable[record] assign[=] call[name[Record].create, parameter[name[data]]] name[record].model.created assign[=] call[name[dump].created.replace, parameter[]] name[record].model.updated assign[=] call[name[timestamp].replace, parameter[]] call[name[RecordIdentifier].insert, parameter[name[dump].recid]] call[name[PersistentIdentifier].create, parameter[]] call[name[db].session.commit, parameter[]] return[call[name[cls].update_record, parameter[]]]
keyword[def] identifier[create_record] ( identifier[cls] , identifier[dump] ): literal[string] identifier[timestamp] , identifier[data] = identifier[dump] . identifier[latest] identifier[record] = identifier[Record] . identifier[create] ( identifier[data] ) identifier[record] . identifier[model] . identifier[created] = identifier[dump] . identifier[created] . identifier[replace] ( identifier[tzinfo] = keyword[None] ) identifier[record] . identifier[model] . identifier[updated] = identifier[timestamp] . identifier[replace] ( identifier[tzinfo] = keyword[None] ) identifier[RecordIdentifier] . identifier[insert] ( identifier[dump] . identifier[recid] ) identifier[PersistentIdentifier] . identifier[create] ( identifier[pid_type] = literal[string] , identifier[pid_value] = identifier[str] ( identifier[dump] . identifier[recid] ), identifier[object_type] = literal[string] , identifier[object_uuid] = identifier[str] ( identifier[record] . identifier[id] ), identifier[status] = identifier[PIDStatus] . identifier[REGISTERED] ) identifier[db] . identifier[session] . identifier[commit] () keyword[return] identifier[cls] . identifier[update_record] ( identifier[revisions] = identifier[dump] . identifier[rest] , identifier[record] = identifier[record] , identifier[created] = identifier[dump] . identifier[created] )
def create_record(cls, dump): """Create a new record from dump.""" # Reserve record identifier, create record and recid pid in one # operation. (timestamp, data) = dump.latest record = Record.create(data) record.model.created = dump.created.replace(tzinfo=None) record.model.updated = timestamp.replace(tzinfo=None) RecordIdentifier.insert(dump.recid) PersistentIdentifier.create(pid_type='recid', pid_value=str(dump.recid), object_type='rec', object_uuid=str(record.id), status=PIDStatus.REGISTERED) db.session.commit() return cls.update_record(revisions=dump.rest, record=record, created=dump.created)
def on_close(self, filename): """Move this file to destination folder.""" shutil.move(filename, self.destination_folder) path, fn = os.path.split(filename) return os.path.join(self.destination_folder, fn)
def function[on_close, parameter[self, filename]]: constant[Move this file to destination folder.] call[name[shutil].move, parameter[name[filename], name[self].destination_folder]] <ast.Tuple object at 0x7da18f813ca0> assign[=] call[name[os].path.split, parameter[name[filename]]] return[call[name[os].path.join, parameter[name[self].destination_folder, name[fn]]]]
keyword[def] identifier[on_close] ( identifier[self] , identifier[filename] ): literal[string] identifier[shutil] . identifier[move] ( identifier[filename] , identifier[self] . identifier[destination_folder] ) identifier[path] , identifier[fn] = identifier[os] . identifier[path] . identifier[split] ( identifier[filename] ) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[destination_folder] , identifier[fn] )
def on_close(self, filename): """Move this file to destination folder.""" shutil.move(filename, self.destination_folder) (path, fn) = os.path.split(filename) return os.path.join(self.destination_folder, fn)
def calculate_heading_longpath(locator1, locator2): """calculates the heading from the first to the second locator (long path) Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Long path heading in deg Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the long path heading from locator1 to locator2 >>> from pyhamtools.locator import calculate_heading_longpath >>> calculate_heading_longpath("JN48QM", "QF67bf") 254.3136 """ heading = calculate_heading(locator1, locator2) lp = (heading + 180)%360 return lp
def function[calculate_heading_longpath, parameter[locator1, locator2]]: constant[calculates the heading from the first to the second locator (long path) Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Long path heading in deg Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the long path heading from locator1 to locator2 >>> from pyhamtools.locator import calculate_heading_longpath >>> calculate_heading_longpath("JN48QM", "QF67bf") 254.3136 ] variable[heading] assign[=] call[name[calculate_heading], parameter[name[locator1], name[locator2]]] variable[lp] assign[=] binary_operation[binary_operation[name[heading] + constant[180]] <ast.Mod object at 0x7da2590d6920> constant[360]] return[name[lp]]
keyword[def] identifier[calculate_heading_longpath] ( identifier[locator1] , identifier[locator2] ): literal[string] identifier[heading] = identifier[calculate_heading] ( identifier[locator1] , identifier[locator2] ) identifier[lp] =( identifier[heading] + literal[int] )% literal[int] keyword[return] identifier[lp]
def calculate_heading_longpath(locator1, locator2): """calculates the heading from the first to the second locator (long path) Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Long path heading in deg Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the long path heading from locator1 to locator2 >>> from pyhamtools.locator import calculate_heading_longpath >>> calculate_heading_longpath("JN48QM", "QF67bf") 254.3136 """ heading = calculate_heading(locator1, locator2) lp = (heading + 180) % 360 return lp
def get_arguments(self): """ Extracts the specific arguments of this CLI """ # ApiCli.get_arguments(self) if self.args.file_name is not None: self.file_name = self.args.file_name
def function[get_arguments, parameter[self]]: constant[ Extracts the specific arguments of this CLI ] if compare[name[self].args.file_name is_not constant[None]] begin[:] name[self].file_name assign[=] name[self].args.file_name
keyword[def] identifier[get_arguments] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[args] . identifier[file_name] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[file_name] = identifier[self] . identifier[args] . identifier[file_name]
def get_arguments(self): """ Extracts the specific arguments of this CLI """ # ApiCli.get_arguments(self) if self.args.file_name is not None: self.file_name = self.args.file_name # depends on [control=['if'], data=[]]
def split_on_condition(seq, condition): """Split a sequence into two iterables without looping twice""" l1, l2 = tee((condition(item), item) for item in seq) return (i for p, i in l1 if p), (i for p, i in l2 if not p)
def function[split_on_condition, parameter[seq, condition]]: constant[Split a sequence into two iterables without looping twice] <ast.Tuple object at 0x7da1b253a410> assign[=] call[name[tee], parameter[<ast.GeneratorExp object at 0x7da1b253b9d0>]] return[tuple[[<ast.GeneratorExp object at 0x7da18f00ffd0>, <ast.GeneratorExp object at 0x7da18f00fac0>]]]
keyword[def] identifier[split_on_condition] ( identifier[seq] , identifier[condition] ): literal[string] identifier[l1] , identifier[l2] = identifier[tee] (( identifier[condition] ( identifier[item] ), identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[seq] ) keyword[return] ( identifier[i] keyword[for] identifier[p] , identifier[i] keyword[in] identifier[l1] keyword[if] identifier[p] ),( identifier[i] keyword[for] identifier[p] , identifier[i] keyword[in] identifier[l2] keyword[if] keyword[not] identifier[p] )
def split_on_condition(seq, condition): """Split a sequence into two iterables without looping twice""" (l1, l2) = tee(((condition(item), item) for item in seq)) return ((i for (p, i) in l1 if p), (i for (p, i) in l2 if not p))
def delete(self, cluster): """Deletes the cluster from persistent state. :param cluster: cluster to delete from persistent state :type cluster: :py:class:`elasticluster.cluster.Cluster` """ path = self._get_cluster_storage_path(cluster.name) if os.path.exists(path): os.unlink(path)
def function[delete, parameter[self, cluster]]: constant[Deletes the cluster from persistent state. :param cluster: cluster to delete from persistent state :type cluster: :py:class:`elasticluster.cluster.Cluster` ] variable[path] assign[=] call[name[self]._get_cluster_storage_path, parameter[name[cluster].name]] if call[name[os].path.exists, parameter[name[path]]] begin[:] call[name[os].unlink, parameter[name[path]]]
keyword[def] identifier[delete] ( identifier[self] , identifier[cluster] ): literal[string] identifier[path] = identifier[self] . identifier[_get_cluster_storage_path] ( identifier[cluster] . identifier[name] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): identifier[os] . identifier[unlink] ( identifier[path] )
def delete(self, cluster): """Deletes the cluster from persistent state. :param cluster: cluster to delete from persistent state :type cluster: :py:class:`elasticluster.cluster.Cluster` """ path = self._get_cluster_storage_path(cluster.name) if os.path.exists(path): os.unlink(path) # depends on [control=['if'], data=[]]
def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" from hashlib import sha1 hash = sha1(namespace.bytes + name).digest() return UUID(bytes=hash[:16], version=5)
def function[uuid5, parameter[namespace, name]]: constant[Generate a UUID from the SHA-1 hash of a namespace UUID and a name.] from relative_module[hashlib] import module[sha1] variable[hash] assign[=] call[call[name[sha1], parameter[binary_operation[name[namespace].bytes + name[name]]]].digest, parameter[]] return[call[name[UUID], parameter[]]]
keyword[def] identifier[uuid5] ( identifier[namespace] , identifier[name] ): literal[string] keyword[from] identifier[hashlib] keyword[import] identifier[sha1] identifier[hash] = identifier[sha1] ( identifier[namespace] . identifier[bytes] + identifier[name] ). identifier[digest] () keyword[return] identifier[UUID] ( identifier[bytes] = identifier[hash] [: literal[int] ], identifier[version] = literal[int] )
def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" from hashlib import sha1 hash = sha1(namespace.bytes + name).digest() return UUID(bytes=hash[:16], version=5)
def plash_map(*args): from subprocess import check_output 'thin wrapper around plash map' out = check_output(['plash', 'map'] + list(args)) if out == '': return None return out.decode().strip('\n')
def function[plash_map, parameter[]]: from relative_module[subprocess] import module[check_output] constant[thin wrapper around plash map] variable[out] assign[=] call[name[check_output], parameter[binary_operation[list[[<ast.Constant object at 0x7da18f09cc70>, <ast.Constant object at 0x7da18f09f8e0>]] + call[name[list], parameter[name[args]]]]]] if compare[name[out] equal[==] constant[]] begin[:] return[constant[None]] return[call[call[name[out].decode, parameter[]].strip, parameter[constant[ ]]]]
keyword[def] identifier[plash_map] (* identifier[args] ): keyword[from] identifier[subprocess] keyword[import] identifier[check_output] literal[string] identifier[out] = identifier[check_output] ([ literal[string] , literal[string] ]+ identifier[list] ( identifier[args] )) keyword[if] identifier[out] == literal[string] : keyword[return] keyword[None] keyword[return] identifier[out] . identifier[decode] (). identifier[strip] ( literal[string] )
def plash_map(*args): from subprocess import check_output 'thin wrapper around plash map' out = check_output(['plash', 'map'] + list(args)) if out == '': return None # depends on [control=['if'], data=[]] return out.decode().strip('\n')
def _get_field_types(self): 'Retrieve the field types. Useful for debugging.' mapping = self.conn.indices.get_mapping( index=self.index, doc_type=self.type) return mapping[self.index]['mappings'][self.type]['properties']
def function[_get_field_types, parameter[self]]: constant[Retrieve the field types. Useful for debugging.] variable[mapping] assign[=] call[name[self].conn.indices.get_mapping, parameter[]] return[call[call[call[call[name[mapping]][name[self].index]][constant[mappings]]][name[self].type]][constant[properties]]]
keyword[def] identifier[_get_field_types] ( identifier[self] ): literal[string] identifier[mapping] = identifier[self] . identifier[conn] . identifier[indices] . identifier[get_mapping] ( identifier[index] = identifier[self] . identifier[index] , identifier[doc_type] = identifier[self] . identifier[type] ) keyword[return] identifier[mapping] [ identifier[self] . identifier[index] ][ literal[string] ][ identifier[self] . identifier[type] ][ literal[string] ]
def _get_field_types(self): """Retrieve the field types. Useful for debugging.""" mapping = self.conn.indices.get_mapping(index=self.index, doc_type=self.type) return mapping[self.index]['mappings'][self.type]['properties']
def __getSequenceVariants(self, x1, polyStart, polyStop, listSequence) : """polyStop, is the polymorphisme at wixh number where the calcul of combinaisons stops""" if polyStart < len(self.polymorphisms) and polyStart < polyStop: sequence = copy.copy(listSequence) ret = [] pk = self.polymorphisms[polyStart] posInSequence = pk[0]-x1 if posInSequence < len(listSequence) : for allele in pk[1] : sequence[posInSequence] = allele ret.extend(self.__getSequenceVariants(x1, polyStart+1, polyStop, sequence)) return ret else : return [''.join(listSequence)]
def function[__getSequenceVariants, parameter[self, x1, polyStart, polyStop, listSequence]]: constant[polyStop, is the polymorphisme at wixh number where the calcul of combinaisons stops] if <ast.BoolOp object at 0x7da18fe93e50> begin[:] variable[sequence] assign[=] call[name[copy].copy, parameter[name[listSequence]]] variable[ret] assign[=] list[[]] variable[pk] assign[=] call[name[self].polymorphisms][name[polyStart]] variable[posInSequence] assign[=] binary_operation[call[name[pk]][constant[0]] - name[x1]] if compare[name[posInSequence] less[<] call[name[len], parameter[name[listSequence]]]] begin[:] for taget[name[allele]] in starred[call[name[pk]][constant[1]]] begin[:] call[name[sequence]][name[posInSequence]] assign[=] name[allele] call[name[ret].extend, parameter[call[name[self].__getSequenceVariants, parameter[name[x1], binary_operation[name[polyStart] + constant[1]], name[polyStop], name[sequence]]]]] return[name[ret]]
keyword[def] identifier[__getSequenceVariants] ( identifier[self] , identifier[x1] , identifier[polyStart] , identifier[polyStop] , identifier[listSequence] ): literal[string] keyword[if] identifier[polyStart] < identifier[len] ( identifier[self] . identifier[polymorphisms] ) keyword[and] identifier[polyStart] < identifier[polyStop] : identifier[sequence] = identifier[copy] . identifier[copy] ( identifier[listSequence] ) identifier[ret] =[] identifier[pk] = identifier[self] . identifier[polymorphisms] [ identifier[polyStart] ] identifier[posInSequence] = identifier[pk] [ literal[int] ]- identifier[x1] keyword[if] identifier[posInSequence] < identifier[len] ( identifier[listSequence] ): keyword[for] identifier[allele] keyword[in] identifier[pk] [ literal[int] ]: identifier[sequence] [ identifier[posInSequence] ]= identifier[allele] identifier[ret] . identifier[extend] ( identifier[self] . identifier[__getSequenceVariants] ( identifier[x1] , identifier[polyStart] + literal[int] , identifier[polyStop] , identifier[sequence] )) keyword[return] identifier[ret] keyword[else] : keyword[return] [ literal[string] . identifier[join] ( identifier[listSequence] )]
def __getSequenceVariants(self, x1, polyStart, polyStop, listSequence): """polyStop, is the polymorphisme at wixh number where the calcul of combinaisons stops""" if polyStart < len(self.polymorphisms) and polyStart < polyStop: sequence = copy.copy(listSequence) ret = [] pk = self.polymorphisms[polyStart] posInSequence = pk[0] - x1 if posInSequence < len(listSequence): for allele in pk[1]: sequence[posInSequence] = allele ret.extend(self.__getSequenceVariants(x1, polyStart + 1, polyStop, sequence)) # depends on [control=['for'], data=['allele']] # depends on [control=['if'], data=['posInSequence']] return ret # depends on [control=['if'], data=[]] else: return [''.join(listSequence)]
def add_or_update(data, item, value): """ Add or update value in configuration file format used by proftpd. Args: data (str): Configuration file as string. item (str): What option will be added/updated. value (str): Value of option. Returns: str: updated configuration """ data = data.splitlines() # to list of bytearrays (this is useful, because their reference passed to # other functions can be changed, and it will change objects in arrays # unlike strings) data = map(lambda x: bytearray(x), data) # search for the item in raw (ucommented) values conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data) if conf: conf[0][:] = conf[0].strip().split()[0] + " " + value else: # search for the item in commented values, if found, uncomment it comments = filter( lambda x: x.strip().startswith("#") and len(x.split("#")) >= 2 and x.split("#")[1].split() and x.split("#")[1].split()[0] == item, data ) if comments: comments[0][:] = comments[0].split("#")[1].split()[0] + " " + value else: # add item, if not found in raw/commented values data.append(item + " " + value + "\n") return "\n".join(map(lambda x: str(x), data))
def function[add_or_update, parameter[data, item, value]]: constant[ Add or update value in configuration file format used by proftpd. Args: data (str): Configuration file as string. item (str): What option will be added/updated. value (str): Value of option. Returns: str: updated configuration ] variable[data] assign[=] call[name[data].splitlines, parameter[]] variable[data] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da18dc99270>, name[data]]] variable[conf] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da18dc9acb0>, name[data]]] if name[conf] begin[:] call[call[name[conf]][constant[0]]][<ast.Slice object at 0x7da18dc9b7c0>] assign[=] binary_operation[binary_operation[call[call[call[call[name[conf]][constant[0]].strip, parameter[]].split, parameter[]]][constant[0]] + constant[ ]] + name[value]] return[call[constant[ ].join, parameter[call[name[map], parameter[<ast.Lambda object at 0x7da18f00d0c0>, name[data]]]]]]
keyword[def] identifier[add_or_update] ( identifier[data] , identifier[item] , identifier[value] ): literal[string] identifier[data] = identifier[data] . identifier[splitlines] () identifier[data] = identifier[map] ( keyword[lambda] identifier[x] : identifier[bytearray] ( identifier[x] ), identifier[data] ) identifier[conf] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] . identifier[strip] () keyword[and] identifier[x] . identifier[strip] (). identifier[split] ()[ literal[int] ]== identifier[item] , identifier[data] ) keyword[if] identifier[conf] : identifier[conf] [ literal[int] ][:]= identifier[conf] [ literal[int] ]. identifier[strip] (). identifier[split] ()[ literal[int] ]+ literal[string] + identifier[value] keyword[else] : identifier[comments] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] . identifier[strip] (). identifier[startswith] ( literal[string] ) keyword[and] identifier[len] ( identifier[x] . identifier[split] ( literal[string] ))>= literal[int] keyword[and] identifier[x] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] () keyword[and] identifier[x] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ()[ literal[int] ]== identifier[item] , identifier[data] ) keyword[if] identifier[comments] : identifier[comments] [ literal[int] ][:]= identifier[comments] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ()[ literal[int] ]+ literal[string] + identifier[value] keyword[else] : identifier[data] . identifier[append] ( identifier[item] + literal[string] + identifier[value] + literal[string] ) keyword[return] literal[string] . identifier[join] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[str] ( identifier[x] ), identifier[data] ))
def add_or_update(data, item, value): """ Add or update value in configuration file format used by proftpd. Args: data (str): Configuration file as string. item (str): What option will be added/updated. value (str): Value of option. Returns: str: updated configuration """ data = data.splitlines() # to list of bytearrays (this is useful, because their reference passed to # other functions can be changed, and it will change objects in arrays # unlike strings) data = map(lambda x: bytearray(x), data) # search for the item in raw (ucommented) values conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data) if conf: conf[0][:] = conf[0].strip().split()[0] + ' ' + value # depends on [control=['if'], data=[]] else: # search for the item in commented values, if found, uncomment it comments = filter(lambda x: x.strip().startswith('#') and len(x.split('#')) >= 2 and x.split('#')[1].split() and (x.split('#')[1].split()[0] == item), data) if comments: comments[0][:] = comments[0].split('#')[1].split()[0] + ' ' + value # depends on [control=['if'], data=[]] else: # add item, if not found in raw/commented values data.append(item + ' ' + value + '\n') return '\n'.join(map(lambda x: str(x), data))
def bounding_ellipsoids(points, pointvol=0., vol_dec=0.5, vol_check=2.): """ Calculate a set of ellipsoids that bound the collection of points. Parameters ---------- points : `~numpy.ndarray` with shape (npoints, ndim) A set of coordinates. pointvol : float, optional Volume represented by a single point. When provided, used to set a minimum bound on the ellipsoid volume as `npoints * pointvol`. Default is `0.`. vol_dec : float, optional The required fractional reduction in volume after splitting an ellipsoid in order to to accept the split. Default is `0.5`. vol_check : float, optional The factor used to when checking whether the volume of the original bounding ellipsoid is large enough to warrant more trial splits via `ell.vol > vol_check * npoints * pointvol`. Default is `2.0`. Returns ------- mell : :class:`MultiEllipsoid` object The :class:`MultiEllipsoid` object used to bound the collection of points. """ if not HAVE_KMEANS: raise ValueError("scipy.cluster.vq.kmeans2 is required to compute " "ellipsoid decompositions.") # pragma: no cover # Calculate the bounding ellipsoid for the points possibly # enlarged to a minimum volume. ell = bounding_ellipsoid(points, pointvol=pointvol) # Recursively split the bounding ellipsoid until the volume of each # split no longer decreases by a factor of `vol_dec`. ells = _bounding_ellipsoids(points, ell, pointvol=pointvol, vol_dec=vol_dec, vol_check=vol_check) return MultiEllipsoid(ells=ells)
def function[bounding_ellipsoids, parameter[points, pointvol, vol_dec, vol_check]]: constant[ Calculate a set of ellipsoids that bound the collection of points. Parameters ---------- points : `~numpy.ndarray` with shape (npoints, ndim) A set of coordinates. pointvol : float, optional Volume represented by a single point. When provided, used to set a minimum bound on the ellipsoid volume as `npoints * pointvol`. Default is `0.`. vol_dec : float, optional The required fractional reduction in volume after splitting an ellipsoid in order to to accept the split. Default is `0.5`. vol_check : float, optional The factor used to when checking whether the volume of the original bounding ellipsoid is large enough to warrant more trial splits via `ell.vol > vol_check * npoints * pointvol`. Default is `2.0`. Returns ------- mell : :class:`MultiEllipsoid` object The :class:`MultiEllipsoid` object used to bound the collection of points. ] if <ast.UnaryOp object at 0x7da1b1d50700> begin[:] <ast.Raise object at 0x7da1b20e8070> variable[ell] assign[=] call[name[bounding_ellipsoid], parameter[name[points]]] variable[ells] assign[=] call[name[_bounding_ellipsoids], parameter[name[points], name[ell]]] return[call[name[MultiEllipsoid], parameter[]]]
keyword[def] identifier[bounding_ellipsoids] ( identifier[points] , identifier[pointvol] = literal[int] , identifier[vol_dec] = literal[int] , identifier[vol_check] = literal[int] ): literal[string] keyword[if] keyword[not] identifier[HAVE_KMEANS] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[ell] = identifier[bounding_ellipsoid] ( identifier[points] , identifier[pointvol] = identifier[pointvol] ) identifier[ells] = identifier[_bounding_ellipsoids] ( identifier[points] , identifier[ell] , identifier[pointvol] = identifier[pointvol] , identifier[vol_dec] = identifier[vol_dec] , identifier[vol_check] = identifier[vol_check] ) keyword[return] identifier[MultiEllipsoid] ( identifier[ells] = identifier[ells] )
def bounding_ellipsoids(points, pointvol=0.0, vol_dec=0.5, vol_check=2.0): """ Calculate a set of ellipsoids that bound the collection of points. Parameters ---------- points : `~numpy.ndarray` with shape (npoints, ndim) A set of coordinates. pointvol : float, optional Volume represented by a single point. When provided, used to set a minimum bound on the ellipsoid volume as `npoints * pointvol`. Default is `0.`. vol_dec : float, optional The required fractional reduction in volume after splitting an ellipsoid in order to to accept the split. Default is `0.5`. vol_check : float, optional The factor used to when checking whether the volume of the original bounding ellipsoid is large enough to warrant more trial splits via `ell.vol > vol_check * npoints * pointvol`. Default is `2.0`. Returns ------- mell : :class:`MultiEllipsoid` object The :class:`MultiEllipsoid` object used to bound the collection of points. """ if not HAVE_KMEANS: raise ValueError('scipy.cluster.vq.kmeans2 is required to compute ellipsoid decompositions.') # pragma: no cover # depends on [control=['if'], data=[]] # Calculate the bounding ellipsoid for the points possibly # enlarged to a minimum volume. ell = bounding_ellipsoid(points, pointvol=pointvol) # Recursively split the bounding ellipsoid until the volume of each # split no longer decreases by a factor of `vol_dec`. ells = _bounding_ellipsoids(points, ell, pointvol=pointvol, vol_dec=vol_dec, vol_check=vol_check) return MultiEllipsoid(ells=ells)
def rebuild(self, **kwargs): '''Repopulate the node-tracking data structures. Shouldn't really ever be needed. ''' self.nodes = [] self.node_types = [] self.id_dict = {} self.type_dict = {} self.add_node(self.root)
def function[rebuild, parameter[self]]: constant[Repopulate the node-tracking data structures. Shouldn't really ever be needed. ] name[self].nodes assign[=] list[[]] name[self].node_types assign[=] list[[]] name[self].id_dict assign[=] dictionary[[], []] name[self].type_dict assign[=] dictionary[[], []] call[name[self].add_node, parameter[name[self].root]]
keyword[def] identifier[rebuild] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[nodes] =[] identifier[self] . identifier[node_types] =[] identifier[self] . identifier[id_dict] ={} identifier[self] . identifier[type_dict] ={} identifier[self] . identifier[add_node] ( identifier[self] . identifier[root] )
def rebuild(self, **kwargs): """Repopulate the node-tracking data structures. Shouldn't really ever be needed. """ self.nodes = [] self.node_types = [] self.id_dict = {} self.type_dict = {} self.add_node(self.root)
def write_line(self, values): """ Process a list of values according to the format specified to generate a line of output. """ if not self._write_line_init: self.init_write_line() if len(self._out_widths) > len(values): raise For2PyError(f"ERROR: too few values for format {self._format_list}\n") out_strs = [] for i in range(len(self._out_widths)): out_fmt = self._out_gen_fmt[i] out_width = self._out_widths[i] out_val = out_fmt.format(values[i]) if len(out_val) > out_width: # value too big for field out_val = "*" * out_width out_strs.append(out_val) out_str_exp = ( '"' + self._output_fmt + '".format' + str(tuple(out_strs)) ) out_str = eval(out_str_exp) return out_str + "\n"
def function[write_line, parameter[self, values]]: constant[ Process a list of values according to the format specified to generate a line of output. ] if <ast.UnaryOp object at 0x7da1b04fd4b0> begin[:] call[name[self].init_write_line, parameter[]] if compare[call[name[len], parameter[name[self]._out_widths]] greater[>] call[name[len], parameter[name[values]]]] begin[:] <ast.Raise object at 0x7da1b04fe4a0> variable[out_strs] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]._out_widths]]]]] begin[:] variable[out_fmt] assign[=] call[name[self]._out_gen_fmt][name[i]] variable[out_width] assign[=] call[name[self]._out_widths][name[i]] variable[out_val] assign[=] call[name[out_fmt].format, parameter[call[name[values]][name[i]]]] if compare[call[name[len], parameter[name[out_val]]] greater[>] name[out_width]] begin[:] variable[out_val] assign[=] binary_operation[constant[*] * name[out_width]] call[name[out_strs].append, parameter[name[out_val]]] variable[out_str_exp] assign[=] binary_operation[binary_operation[binary_operation[constant["] + name[self]._output_fmt] + constant[".format]] + call[name[str], parameter[call[name[tuple], parameter[name[out_strs]]]]]] variable[out_str] assign[=] call[name[eval], parameter[name[out_str_exp]]] return[binary_operation[name[out_str] + constant[ ]]]
keyword[def] identifier[write_line] ( identifier[self] , identifier[values] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_write_line_init] : identifier[self] . identifier[init_write_line] () keyword[if] identifier[len] ( identifier[self] . identifier[_out_widths] )> identifier[len] ( identifier[values] ): keyword[raise] identifier[For2PyError] ( literal[string] ) identifier[out_strs] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[_out_widths] )): identifier[out_fmt] = identifier[self] . identifier[_out_gen_fmt] [ identifier[i] ] identifier[out_width] = identifier[self] . identifier[_out_widths] [ identifier[i] ] identifier[out_val] = identifier[out_fmt] . identifier[format] ( identifier[values] [ identifier[i] ]) keyword[if] identifier[len] ( identifier[out_val] )> identifier[out_width] : identifier[out_val] = literal[string] * identifier[out_width] identifier[out_strs] . identifier[append] ( identifier[out_val] ) identifier[out_str_exp] =( literal[string] + identifier[self] . identifier[_output_fmt] + literal[string] + identifier[str] ( identifier[tuple] ( identifier[out_strs] )) ) identifier[out_str] = identifier[eval] ( identifier[out_str_exp] ) keyword[return] identifier[out_str] + literal[string]
def write_line(self, values): """ Process a list of values according to the format specified to generate a line of output. """ if not self._write_line_init: self.init_write_line() # depends on [control=['if'], data=[]] if len(self._out_widths) > len(values): raise For2PyError(f'ERROR: too few values for format {self._format_list}\n') # depends on [control=['if'], data=[]] out_strs = [] for i in range(len(self._out_widths)): out_fmt = self._out_gen_fmt[i] out_width = self._out_widths[i] out_val = out_fmt.format(values[i]) if len(out_val) > out_width: # value too big for field out_val = '*' * out_width # depends on [control=['if'], data=['out_width']] out_strs.append(out_val) # depends on [control=['for'], data=['i']] out_str_exp = '"' + self._output_fmt + '".format' + str(tuple(out_strs)) out_str = eval(out_str_exp) return out_str + '\n'
def bm3_dPdV(v, v0, k0, k0p, precision=1.e-5): """ calculate dP/dV for numerical calculation of bulk modulus according to test this differs from analytical result by 1.e-5 :param v: volume :param v0: volume at reference conditions :param k0: bulk modulus at reference conditions :param k0p: pressure derivative of bulk modulus at different conditions :param precision: precision for numerical calculation (default = 1.e-5*v0) :return: dP/dV """ def f_scalar(v, v0, k0, k0p, precision=precision): return derivative(bm3_p, v, args=(v0, k0, k0p), dx=v0 * precision) f_v = np.vectorize(f_scalar, excluded=[1, 2, 3, 4]) return f_v(v, v0, k0, k0p, precision=precision)
def function[bm3_dPdV, parameter[v, v0, k0, k0p, precision]]: constant[ calculate dP/dV for numerical calculation of bulk modulus according to test this differs from analytical result by 1.e-5 :param v: volume :param v0: volume at reference conditions :param k0: bulk modulus at reference conditions :param k0p: pressure derivative of bulk modulus at different conditions :param precision: precision for numerical calculation (default = 1.e-5*v0) :return: dP/dV ] def function[f_scalar, parameter[v, v0, k0, k0p, precision]]: return[call[name[derivative], parameter[name[bm3_p], name[v]]]] variable[f_v] assign[=] call[name[np].vectorize, parameter[name[f_scalar]]] return[call[name[f_v], parameter[name[v], name[v0], name[k0], name[k0p]]]]
keyword[def] identifier[bm3_dPdV] ( identifier[v] , identifier[v0] , identifier[k0] , identifier[k0p] , identifier[precision] = literal[int] ): literal[string] keyword[def] identifier[f_scalar] ( identifier[v] , identifier[v0] , identifier[k0] , identifier[k0p] , identifier[precision] = identifier[precision] ): keyword[return] identifier[derivative] ( identifier[bm3_p] , identifier[v] , identifier[args] =( identifier[v0] , identifier[k0] , identifier[k0p] ), identifier[dx] = identifier[v0] * identifier[precision] ) identifier[f_v] = identifier[np] . identifier[vectorize] ( identifier[f_scalar] , identifier[excluded] =[ literal[int] , literal[int] , literal[int] , literal[int] ]) keyword[return] identifier[f_v] ( identifier[v] , identifier[v0] , identifier[k0] , identifier[k0p] , identifier[precision] = identifier[precision] )
def bm3_dPdV(v, v0, k0, k0p, precision=1e-05): """ calculate dP/dV for numerical calculation of bulk modulus according to test this differs from analytical result by 1.e-5 :param v: volume :param v0: volume at reference conditions :param k0: bulk modulus at reference conditions :param k0p: pressure derivative of bulk modulus at different conditions :param precision: precision for numerical calculation (default = 1.e-5*v0) :return: dP/dV """ def f_scalar(v, v0, k0, k0p, precision=precision): return derivative(bm3_p, v, args=(v0, k0, k0p), dx=v0 * precision) f_v = np.vectorize(f_scalar, excluded=[1, 2, 3, 4]) return f_v(v, v0, k0, k0p, precision=precision)
def _cut_to_heading( self, content: str, to_heading: str or None = None, options={} ) -> str: '''Cut part of Markdown string from the start to a certain heading, set internal heading level, and remove top heading. If not heading is defined, the whole string is returned. Heading shift and top heading elimination are optional. :param content: Markdown content :param to_heading: Ending heading (will not be incuded in the output) :param options: ``sethead``, ``nohead`` :returns: Part of the Markdown content from the start to ``to_heading``, with internal headings adjusted ''' self.logger.debug(f'Cutting to heading: {to_heading}, options: {options}') content_buffer = StringIO(content) first_line = content_buffer.readline() if self._heading_pattern.fullmatch(first_line): from_heading_line = first_line from_heading_level = len(self._heading_pattern.match(from_heading_line).group('hashes')) result = content_buffer.read() else: from_heading_line = '' from_heading_level = self._find_top_heading_level(content) result = content self.logger.debug(f'From heading level: {from_heading_level}') if to_heading: to_heading_pattern = re.compile(r'^\#{1,6}\s+' + rf'{to_heading}\s*$', flags=re.MULTILINE) result = to_heading_pattern.split(result)[0] if not options.get('nohead'): result = from_heading_line + result if options.get('sethead'): if options['sethead'] > 0: result = self._shift_headings( result, options['sethead'] - from_heading_level ) return result
def function[_cut_to_heading, parameter[self, content, to_heading, options]]: constant[Cut part of Markdown string from the start to a certain heading, set internal heading level, and remove top heading. If not heading is defined, the whole string is returned. Heading shift and top heading elimination are optional. :param content: Markdown content :param to_heading: Ending heading (will not be incuded in the output) :param options: ``sethead``, ``nohead`` :returns: Part of the Markdown content from the start to ``to_heading``, with internal headings adjusted ] call[name[self].logger.debug, parameter[<ast.JoinedStr object at 0x7da18bc71390>]] variable[content_buffer] assign[=] call[name[StringIO], parameter[name[content]]] variable[first_line] assign[=] call[name[content_buffer].readline, parameter[]] if call[name[self]._heading_pattern.fullmatch, parameter[name[first_line]]] begin[:] variable[from_heading_line] assign[=] name[first_line] variable[from_heading_level] assign[=] call[name[len], parameter[call[call[name[self]._heading_pattern.match, parameter[name[from_heading_line]]].group, parameter[constant[hashes]]]]] variable[result] assign[=] call[name[content_buffer].read, parameter[]] call[name[self].logger.debug, parameter[<ast.JoinedStr object at 0x7da20c9902b0>]] if name[to_heading] begin[:] variable[to_heading_pattern] assign[=] call[name[re].compile, parameter[binary_operation[constant[^\#{1,6}\s+] + <ast.JoinedStr object at 0x7da20c992a40>]]] variable[result] assign[=] call[call[name[to_heading_pattern].split, parameter[name[result]]]][constant[0]] if <ast.UnaryOp object at 0x7da20c9907f0> begin[:] variable[result] assign[=] binary_operation[name[from_heading_line] + name[result]] if call[name[options].get, parameter[constant[sethead]]] begin[:] if compare[call[name[options]][constant[sethead]] greater[>] constant[0]] begin[:] variable[result] assign[=] call[name[self]._shift_headings, parameter[name[result], 
binary_operation[call[name[options]][constant[sethead]] - name[from_heading_level]]]] return[name[result]]
keyword[def] identifier[_cut_to_heading] ( identifier[self] , identifier[content] : identifier[str] , identifier[to_heading] : identifier[str] keyword[or] keyword[None] = keyword[None] , identifier[options] ={} )-> identifier[str] : literal[string] identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) identifier[content_buffer] = identifier[StringIO] ( identifier[content] ) identifier[first_line] = identifier[content_buffer] . identifier[readline] () keyword[if] identifier[self] . identifier[_heading_pattern] . identifier[fullmatch] ( identifier[first_line] ): identifier[from_heading_line] = identifier[first_line] identifier[from_heading_level] = identifier[len] ( identifier[self] . identifier[_heading_pattern] . identifier[match] ( identifier[from_heading_line] ). identifier[group] ( literal[string] )) identifier[result] = identifier[content_buffer] . identifier[read] () keyword[else] : identifier[from_heading_line] = literal[string] identifier[from_heading_level] = identifier[self] . identifier[_find_top_heading_level] ( identifier[content] ) identifier[result] = identifier[content] identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) keyword[if] identifier[to_heading] : identifier[to_heading_pattern] = identifier[re] . identifier[compile] ( literal[string] + literal[string] , identifier[flags] = identifier[re] . identifier[MULTILINE] ) identifier[result] = identifier[to_heading_pattern] . identifier[split] ( identifier[result] )[ literal[int] ] keyword[if] keyword[not] identifier[options] . identifier[get] ( literal[string] ): identifier[result] = identifier[from_heading_line] + identifier[result] keyword[if] identifier[options] . identifier[get] ( literal[string] ): keyword[if] identifier[options] [ literal[string] ]> literal[int] : identifier[result] = identifier[self] . 
identifier[_shift_headings] ( identifier[result] , identifier[options] [ literal[string] ]- identifier[from_heading_level] ) keyword[return] identifier[result]
def _cut_to_heading(self, content: str, to_heading: str or None=None, options={}) -> str: """Cut part of Markdown string from the start to a certain heading, set internal heading level, and remove top heading. If not heading is defined, the whole string is returned. Heading shift and top heading elimination are optional. :param content: Markdown content :param to_heading: Ending heading (will not be incuded in the output) :param options: ``sethead``, ``nohead`` :returns: Part of the Markdown content from the start to ``to_heading``, with internal headings adjusted """ self.logger.debug(f'Cutting to heading: {to_heading}, options: {options}') content_buffer = StringIO(content) first_line = content_buffer.readline() if self._heading_pattern.fullmatch(first_line): from_heading_line = first_line from_heading_level = len(self._heading_pattern.match(from_heading_line).group('hashes')) result = content_buffer.read() # depends on [control=['if'], data=[]] else: from_heading_line = '' from_heading_level = self._find_top_heading_level(content) result = content self.logger.debug(f'From heading level: {from_heading_level}') if to_heading: to_heading_pattern = re.compile('^\\#{1,6}\\s+' + f'{to_heading}\\s*$', flags=re.MULTILINE) result = to_heading_pattern.split(result)[0] # depends on [control=['if'], data=[]] if not options.get('nohead'): result = from_heading_line + result # depends on [control=['if'], data=[]] if options.get('sethead'): if options['sethead'] > 0: result = self._shift_headings(result, options['sethead'] - from_heading_level) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return result
def from_value(cls, value): """ Return the EChoice object associated with this value, if any. Parameters ---------- value In the type of the `value` field, as set when instantiating this EChoice. Returns ------- EChoice Raises ------ KeyError if `value` does not exist in any element """ warnings.warn("{0}.{1} will be deprecated in a future release. " "Please use {0}.{2} instead".format(cls.__name__, cls.from_value.__name__, cls.get.__name__), PendingDeprecationWarning) return cls[value]
def function[from_value, parameter[cls, value]]: constant[ Return the EChoice object associated with this value, if any. Parameters ---------- value In the type of the `value` field, as set when instantiating this EChoice. Returns ------- EChoice Raises ------ KeyError if `value` does not exist in any element ] call[name[warnings].warn, parameter[call[constant[{0}.{1} will be deprecated in a future release. Please use {0}.{2} instead].format, parameter[name[cls].__name__, name[cls].from_value.__name__, name[cls].get.__name__]], name[PendingDeprecationWarning]]] return[call[name[cls]][name[value]]]
keyword[def] identifier[from_value] ( identifier[cls] , identifier[value] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] literal[string] . identifier[format] ( identifier[cls] . identifier[__name__] , identifier[cls] . identifier[from_value] . identifier[__name__] , identifier[cls] . identifier[get] . identifier[__name__] ), identifier[PendingDeprecationWarning] ) keyword[return] identifier[cls] [ identifier[value] ]
def from_value(cls, value): """ Return the EChoice object associated with this value, if any. Parameters ---------- value In the type of the `value` field, as set when instantiating this EChoice. Returns ------- EChoice Raises ------ KeyError if `value` does not exist in any element """ warnings.warn('{0}.{1} will be deprecated in a future release. Please use {0}.{2} instead'.format(cls.__name__, cls.from_value.__name__, cls.get.__name__), PendingDeprecationWarning) return cls[value]
def depth_bounds(self): """Depth at grid interfaces (m) :getter: Returns the bounds of axis ``'depth'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'depth'`` axis can be found. """ try: for domname, dom in self.domains.items(): try: thisdepth = dom.axes['depth'].bounds except: pass return thisdepth except: raise ValueError('Can\'t resolve a depth axis.')
def function[depth_bounds, parameter[self]]: constant[Depth at grid interfaces (m) :getter: Returns the bounds of axis ``'depth'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'depth'`` axis can be found. ] <ast.Try object at 0x7da1b12f2770>
keyword[def] identifier[depth_bounds] ( identifier[self] ): literal[string] keyword[try] : keyword[for] identifier[domname] , identifier[dom] keyword[in] identifier[self] . identifier[domains] . identifier[items] (): keyword[try] : identifier[thisdepth] = identifier[dom] . identifier[axes] [ literal[string] ]. identifier[bounds] keyword[except] : keyword[pass] keyword[return] identifier[thisdepth] keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] )
def depth_bounds(self): """Depth at grid interfaces (m) :getter: Returns the bounds of axis ``'depth'`` if availible in the process's domains. :type: array :raises: :exc:`ValueError` if no ``'depth'`` axis can be found. """ try: for (domname, dom) in self.domains.items(): try: thisdepth = dom.axes['depth'].bounds # depends on [control=['try'], data=[]] except: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] return thisdepth # depends on [control=['try'], data=[]] except: raise ValueError("Can't resolve a depth axis.") # depends on [control=['except'], data=[]]
def _get_placeholders(sql_statement, parameters): """ Retrieve the list of placeholders and their type defined in an SQL statement. @param sql_statement: a parameterized statement. @param parameters: the list of parameters used in the SQL statement. @return: a dictionary of placeholders where the key represents the name of a placeholder, the value corresponds to a tuple:: (``type:PlaceholderType``, ``value``) where : * ``type``: type of the placeholder * ``value``: value to replace the placeholder. """ # Find the list of placeholders, and their type, defined in the SQL # statement. placeholders = {} try: for match in REGEX_PATTERN_SQL_PLACEHOLDERS.findall(sql_statement): for (i, placeholder_type) in enumerate(PlaceholderType._values): placeholder_name = match[i] if placeholder_name: placeholder_value = parameters[placeholder_name] if placeholder_type == PlaceholderType.nested_list \ and (isinstance(placeholder_value, tuple) and len(placeholder_value) == 1) \ and not isinstance(placeholder_value, (list, set, tuple)): raise ValueError('The value to replace the placeholder "%s" is not a list as expected' % placeholder_name) placeholders[placeholder_name] = (placeholder_type, placeholder_value) break except KeyError: raise ValueError('The placeholder %s has no corresponding parameter' % placeholder_name) # Check whether all the specified parameters have their corresponding # placeholder in the SQL statement. undefined_placeholders = [ parameter for parameter in parameters.iterkeys() if parameter not in placeholders ] if undefined_placeholders: raise ValueError('The placeholders %s are missing from the extended pyformat SQL statement\n%s' \ % (', '.join([ '"%s"' % _ for _ in undefined_placeholders ]), sql_statement)) return placeholders
def function[_get_placeholders, parameter[sql_statement, parameters]]: constant[ Retrieve the list of placeholders and their type defined in an SQL statement. @param sql_statement: a parameterized statement. @param parameters: the list of parameters used in the SQL statement. @return: a dictionary of placeholders where the key represents the name of a placeholder, the value corresponds to a tuple:: (``type:PlaceholderType``, ``value``) where : * ``type``: type of the placeholder * ``value``: value to replace the placeholder. ] variable[placeholders] assign[=] dictionary[[], []] <ast.Try object at 0x7da20e9551b0> variable[undefined_placeholders] assign[=] <ast.ListComp object at 0x7da20c6e53c0> if name[undefined_placeholders] begin[:] <ast.Raise object at 0x7da20c6e7850> return[name[placeholders]]
keyword[def] identifier[_get_placeholders] ( identifier[sql_statement] , identifier[parameters] ): literal[string] identifier[placeholders] ={} keyword[try] : keyword[for] identifier[match] keyword[in] identifier[REGEX_PATTERN_SQL_PLACEHOLDERS] . identifier[findall] ( identifier[sql_statement] ): keyword[for] ( identifier[i] , identifier[placeholder_type] ) keyword[in] identifier[enumerate] ( identifier[PlaceholderType] . identifier[_values] ): identifier[placeholder_name] = identifier[match] [ identifier[i] ] keyword[if] identifier[placeholder_name] : identifier[placeholder_value] = identifier[parameters] [ identifier[placeholder_name] ] keyword[if] identifier[placeholder_type] == identifier[PlaceholderType] . identifier[nested_list] keyword[and] ( identifier[isinstance] ( identifier[placeholder_value] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[placeholder_value] )== literal[int] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[placeholder_value] ,( identifier[list] , identifier[set] , identifier[tuple] )): keyword[raise] identifier[ValueError] ( literal[string] % identifier[placeholder_name] ) identifier[placeholders] [ identifier[placeholder_name] ]=( identifier[placeholder_type] , identifier[placeholder_value] ) keyword[break] keyword[except] identifier[KeyError] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[placeholder_name] ) identifier[undefined_placeholders] =[ identifier[parameter] keyword[for] identifier[parameter] keyword[in] identifier[parameters] . identifier[iterkeys] () keyword[if] identifier[parameter] keyword[not] keyword[in] identifier[placeholders] ] keyword[if] identifier[undefined_placeholders] : keyword[raise] identifier[ValueError] ( literal[string] %( literal[string] . identifier[join] ([ literal[string] % identifier[_] keyword[for] identifier[_] keyword[in] identifier[undefined_placeholders] ]), identifier[sql_statement] )) keyword[return] identifier[placeholders]
def _get_placeholders(sql_statement, parameters): """ Retrieve the list of placeholders and their type defined in an SQL statement. @param sql_statement: a parameterized statement. @param parameters: the list of parameters used in the SQL statement. @return: a dictionary of placeholders where the key represents the name of a placeholder, the value corresponds to a tuple:: (``type:PlaceholderType``, ``value``) where : * ``type``: type of the placeholder * ``value``: value to replace the placeholder. """ # Find the list of placeholders, and their type, defined in the SQL # statement. placeholders = {} try: for match in REGEX_PATTERN_SQL_PLACEHOLDERS.findall(sql_statement): for (i, placeholder_type) in enumerate(PlaceholderType._values): placeholder_name = match[i] if placeholder_name: placeholder_value = parameters[placeholder_name] if placeholder_type == PlaceholderType.nested_list and (isinstance(placeholder_value, tuple) and len(placeholder_value) == 1) and (not isinstance(placeholder_value, (list, set, tuple))): raise ValueError('The value to replace the placeholder "%s" is not a list as expected' % placeholder_name) # depends on [control=['if'], data=[]] placeholders[placeholder_name] = (placeholder_type, placeholder_value) break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['match']] # depends on [control=['try'], data=[]] except KeyError: raise ValueError('The placeholder %s has no corresponding parameter' % placeholder_name) # depends on [control=['except'], data=[]] # Check whether all the specified parameters have their corresponding # placeholder in the SQL statement. 
undefined_placeholders = [parameter for parameter in parameters.iterkeys() if parameter not in placeholders] if undefined_placeholders: raise ValueError('The placeholders %s are missing from the extended pyformat SQL statement\n%s' % (', '.join(['"%s"' % _ for _ in undefined_placeholders]), sql_statement)) # depends on [control=['if'], data=[]] return placeholders
def device_query_list(self, **kwargs): # noqa: E501 """List device queries. # noqa: E501 List all device queries. The result will be paged into pages of 100. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.device_query_list(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: How many objects to retrieve in the page. :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`. :param str after: The ID of The item after which to retrieve the next page. :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`. :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>created_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>etag</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>name</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>query</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>updated_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> </tbody> </table> &nbsp; The examples below show the queries in *unencoded* form. ###### By device query properties (all properties are filterable): For example: ```description={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. 
There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to &ndash; field name suffixed with ```__gte``` * less than or equal to &ndash; field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```query_id=0158d38771f70000000000010010038c&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z``` Encoded: ```filter=query_id%3D0158d38771f70000000000010010038c%26created_at__gte%3D2016-11-30T16%3A25%3A12.1234Z%26created_at__lte%3D2016-11-30T00%3A00%3A00Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `name__nin=query1,query2` :return: DeviceQueryPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.device_query_list_with_http_info(**kwargs) # noqa: E501 else: (data) = self.device_query_list_with_http_info(**kwargs) # noqa: E501 return data
def function[device_query_list, parameter[self]]: constant[List device queries. # noqa: E501 List all device queries. The result will be paged into pages of 100. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.device_query_list(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: How many objects to retrieve in the page. :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`. :param str after: The ID of The item after which to retrieve the next page. :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`. :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>created_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>etag</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>name</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>query</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>updated_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> </tbody> </table> &nbsp; The examples below show the queries in *unencoded* form. ###### By device query properties (all properties are filterable): For example: ```description={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. 
There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to &ndash; field name suffixed with ```__gte``` * less than or equal to &ndash; field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```query_id=0158d38771f70000000000010010038c&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z``` Encoded: ```filter=query_id%3D0158d38771f70000000000010010038c%26created_at__gte%3D2016-11-30T16%3A25%3A12.1234Z%26created_at__lte%3D2016-11-30T00%3A00%3A00Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `name__nin=query1,query2` :return: DeviceQueryPage If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:] return[call[name[self].device_query_list_with_http_info, parameter[]]]
keyword[def] identifier[device_query_list] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[device_query_list_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[device_query_list_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def device_query_list(self, **kwargs): # noqa: E501 'List device queries. # noqa: E501\n\n List all device queries. The result will be paged into pages of 100. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.device_query_list(asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param int limit: How many objects to retrieve in the page.\n :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`.\n :param str after: The ID of The item after which to retrieve the next page.\n :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`.\n :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>created_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>etag</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>name</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>query</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr> <tr> <td>updated_at</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr> </tbody> </table> &nbsp; The examples below show the queries in *unencoded* form. 
###### By device query properties (all properties are filterable): For example: ```description={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to &ndash; field name suffixed with ```__gte``` * less than or equal to &ndash; field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```query_id=0158d38771f70000000000010010038c&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z``` Encoded: ```filter=query_id%3D0158d38771f70000000000010010038c%26created_at__gte%3D2016-11-30T16%3A25%3A12.1234Z%26created_at__lte%3D2016-11-30T00%3A00%3A00Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `name__nin=query1,query2`\n :return: DeviceQueryPage\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.device_query_list_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.device_query_list_with_http_info(**kwargs) # noqa: E501 return data
def _print_layers(targets, components, tasks): """ Print dependency information, grouping components based on their position in the dependency graph. Components with no dependnecies will be in layer 0, components that only depend on layer 0 will be in layer 1, and so on. If there's a circular dependency, those nodes and their dependencies will be colored red. Arguments targets - the targets explicitly requested components - full configuration for all components in a project """ layer = 0 expected_count = len(tasks) counts = {} def _add_layer(resolved, dep_fn): nonlocal layer nonlocal counts nonlocal expected_count really_resolved = [] for resolved_task in resolved: resolved_component_tasks = counts.get(resolved_task[0], []) resolved_component_tasks.append(resolved_task) if len(resolved_component_tasks) == expected_count: really_resolved.extend(resolved_component_tasks) del counts[resolved_task[0]] else: counts[resolved_task[0]] = resolved_component_tasks if really_resolved: indentation = " " * 4 print("{}subgraph cluster_{} {{".format(indentation, layer)) print('{}label="Layer {}"'.format(indentation * 2, layer)) dep_fn(indentation * 2, really_resolved) print("{}}}".format(indentation)) layer += 1 _do_dot(targets, components, tasks, _add_layer)
def function[_print_layers, parameter[targets, components, tasks]]: constant[ Print dependency information, grouping components based on their position in the dependency graph. Components with no dependnecies will be in layer 0, components that only depend on layer 0 will be in layer 1, and so on. If there's a circular dependency, those nodes and their dependencies will be colored red. Arguments targets - the targets explicitly requested components - full configuration for all components in a project ] variable[layer] assign[=] constant[0] variable[expected_count] assign[=] call[name[len], parameter[name[tasks]]] variable[counts] assign[=] dictionary[[], []] def function[_add_layer, parameter[resolved, dep_fn]]: <ast.Nonlocal object at 0x7da18ede44c0> <ast.Nonlocal object at 0x7da18ede4970> <ast.Nonlocal object at 0x7da18ede5b10> variable[really_resolved] assign[=] list[[]] for taget[name[resolved_task]] in starred[name[resolved]] begin[:] variable[resolved_component_tasks] assign[=] call[name[counts].get, parameter[call[name[resolved_task]][constant[0]], list[[]]]] call[name[resolved_component_tasks].append, parameter[name[resolved_task]]] if compare[call[name[len], parameter[name[resolved_component_tasks]]] equal[==] name[expected_count]] begin[:] call[name[really_resolved].extend, parameter[name[resolved_component_tasks]]] <ast.Delete object at 0x7da1b25804f0> if name[really_resolved] begin[:] variable[indentation] assign[=] binary_operation[constant[ ] * constant[4]] call[name[print], parameter[call[constant[{}subgraph cluster_{} {{].format, parameter[name[indentation], name[layer]]]]] call[name[print], parameter[call[constant[{}label="Layer {}"].format, parameter[binary_operation[name[indentation] * constant[2]], name[layer]]]]] call[name[dep_fn], parameter[binary_operation[name[indentation] * constant[2]], name[really_resolved]]] call[name[print], parameter[call[constant[{}}}].format, parameter[name[indentation]]]]] <ast.AugAssign object at 0x7da1b2581c30> 
call[name[_do_dot], parameter[name[targets], name[components], name[tasks], name[_add_layer]]]
keyword[def] identifier[_print_layers] ( identifier[targets] , identifier[components] , identifier[tasks] ): literal[string] identifier[layer] = literal[int] identifier[expected_count] = identifier[len] ( identifier[tasks] ) identifier[counts] ={} keyword[def] identifier[_add_layer] ( identifier[resolved] , identifier[dep_fn] ): keyword[nonlocal] identifier[layer] keyword[nonlocal] identifier[counts] keyword[nonlocal] identifier[expected_count] identifier[really_resolved] =[] keyword[for] identifier[resolved_task] keyword[in] identifier[resolved] : identifier[resolved_component_tasks] = identifier[counts] . identifier[get] ( identifier[resolved_task] [ literal[int] ],[]) identifier[resolved_component_tasks] . identifier[append] ( identifier[resolved_task] ) keyword[if] identifier[len] ( identifier[resolved_component_tasks] )== identifier[expected_count] : identifier[really_resolved] . identifier[extend] ( identifier[resolved_component_tasks] ) keyword[del] identifier[counts] [ identifier[resolved_task] [ literal[int] ]] keyword[else] : identifier[counts] [ identifier[resolved_task] [ literal[int] ]]= identifier[resolved_component_tasks] keyword[if] identifier[really_resolved] : identifier[indentation] = literal[string] * literal[int] identifier[print] ( literal[string] . identifier[format] ( identifier[indentation] , identifier[layer] )) identifier[print] ( literal[string] . identifier[format] ( identifier[indentation] * literal[int] , identifier[layer] )) identifier[dep_fn] ( identifier[indentation] * literal[int] , identifier[really_resolved] ) identifier[print] ( literal[string] . identifier[format] ( identifier[indentation] )) identifier[layer] += literal[int] identifier[_do_dot] ( identifier[targets] , identifier[components] , identifier[tasks] , identifier[_add_layer] )
def _print_layers(targets, components, tasks): """ Print dependency information, grouping components based on their position in the dependency graph. Components with no dependnecies will be in layer 0, components that only depend on layer 0 will be in layer 1, and so on. If there's a circular dependency, those nodes and their dependencies will be colored red. Arguments targets - the targets explicitly requested components - full configuration for all components in a project """ layer = 0 expected_count = len(tasks) counts = {} def _add_layer(resolved, dep_fn): nonlocal layer nonlocal counts nonlocal expected_count really_resolved = [] for resolved_task in resolved: resolved_component_tasks = counts.get(resolved_task[0], []) resolved_component_tasks.append(resolved_task) if len(resolved_component_tasks) == expected_count: really_resolved.extend(resolved_component_tasks) del counts[resolved_task[0]] # depends on [control=['if'], data=[]] else: counts[resolved_task[0]] = resolved_component_tasks # depends on [control=['for'], data=['resolved_task']] if really_resolved: indentation = ' ' * 4 print('{}subgraph cluster_{} {{'.format(indentation, layer)) print('{}label="Layer {}"'.format(indentation * 2, layer)) dep_fn(indentation * 2, really_resolved) print('{}}}'.format(indentation)) layer += 1 # depends on [control=['if'], data=[]] _do_dot(targets, components, tasks, _add_layer)
def floats(self, n: int = 2) -> List[float]: """Generate a list of random float numbers. :param n: Raise 10 to the 'n' power. :return: The list of floating-point numbers. """ nums = [self.random.random() for _ in range(10 ** int(n))] return nums
def function[floats, parameter[self, n]]: constant[Generate a list of random float numbers. :param n: Raise 10 to the 'n' power. :return: The list of floating-point numbers. ] variable[nums] assign[=] <ast.ListComp object at 0x7da18f811ab0> return[name[nums]]
keyword[def] identifier[floats] ( identifier[self] , identifier[n] : identifier[int] = literal[int] )-> identifier[List] [ identifier[float] ]: literal[string] identifier[nums] =[ identifier[self] . identifier[random] . identifier[random] () keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] ** identifier[int] ( identifier[n] ))] keyword[return] identifier[nums]
def floats(self, n: int=2) -> List[float]: """Generate a list of random float numbers. :param n: Raise 10 to the 'n' power. :return: The list of floating-point numbers. """ nums = [self.random.random() for _ in range(10 ** int(n))] return nums
def score_cosine(self, term1, term2, **kwargs): """ Compute a weighting score based on the cosine distance between the kernel density estimates of two terms. Args: term1 (str) term2 (str) Returns: float """ t1_kde = self.kde(term1, **kwargs) t2_kde = self.kde(term2, **kwargs) return 1-distance.cosine(t1_kde, t2_kde)
def function[score_cosine, parameter[self, term1, term2]]: constant[ Compute a weighting score based on the cosine distance between the kernel density estimates of two terms. Args: term1 (str) term2 (str) Returns: float ] variable[t1_kde] assign[=] call[name[self].kde, parameter[name[term1]]] variable[t2_kde] assign[=] call[name[self].kde, parameter[name[term2]]] return[binary_operation[constant[1] - call[name[distance].cosine, parameter[name[t1_kde], name[t2_kde]]]]]
keyword[def] identifier[score_cosine] ( identifier[self] , identifier[term1] , identifier[term2] ,** identifier[kwargs] ): literal[string] identifier[t1_kde] = identifier[self] . identifier[kde] ( identifier[term1] ,** identifier[kwargs] ) identifier[t2_kde] = identifier[self] . identifier[kde] ( identifier[term2] ,** identifier[kwargs] ) keyword[return] literal[int] - identifier[distance] . identifier[cosine] ( identifier[t1_kde] , identifier[t2_kde] )
def score_cosine(self, term1, term2, **kwargs): """ Compute a weighting score based on the cosine distance between the kernel density estimates of two terms. Args: term1 (str) term2 (str) Returns: float """ t1_kde = self.kde(term1, **kwargs) t2_kde = self.kde(term2, **kwargs) return 1 - distance.cosine(t1_kde, t2_kde)
def get_state_change_with_balance_proof_by_balance_hash( storage: sqlite.SQLiteStorage, canonical_identifier: CanonicalIdentifier, balance_hash: BalanceHash, sender: Address, ) -> sqlite.StateChangeRecord: """ Returns the state change which contains the corresponding balance proof. Use this function to find a balance proof for a call to settle, which only has the blinded balance proof data. """ return storage.get_latest_state_change_by_data_field({ 'balance_proof.canonical_identifier.chain_identifier': str(canonical_identifier.chain_identifier), 'balance_proof.canonical_identifier.token_network_address': to_checksum_address( canonical_identifier.token_network_address, ), 'balance_proof.canonical_identifier.channel_identifier': str( canonical_identifier.channel_identifier, ), 'balance_proof.balance_hash': serialize_bytes(balance_hash), 'balance_proof.sender': to_checksum_address(sender), })
def function[get_state_change_with_balance_proof_by_balance_hash, parameter[storage, canonical_identifier, balance_hash, sender]]: constant[ Returns the state change which contains the corresponding balance proof. Use this function to find a balance proof for a call to settle, which only has the blinded balance proof data. ] return[call[name[storage].get_latest_state_change_by_data_field, parameter[dictionary[[<ast.Constant object at 0x7da1b17565c0>, <ast.Constant object at 0x7da1b1756590>, <ast.Constant object at 0x7da1b1756560>, <ast.Constant object at 0x7da1b1756530>, <ast.Constant object at 0x7da1b1756500>], [<ast.Call object at 0x7da1b17564a0>, <ast.Call object at 0x7da1b17563e0>, <ast.Call object at 0x7da1b1756320>, <ast.Call object at 0x7da1b1756260>, <ast.Call object at 0x7da1b17561d0>]]]]]
keyword[def] identifier[get_state_change_with_balance_proof_by_balance_hash] ( identifier[storage] : identifier[sqlite] . identifier[SQLiteStorage] , identifier[canonical_identifier] : identifier[CanonicalIdentifier] , identifier[balance_hash] : identifier[BalanceHash] , identifier[sender] : identifier[Address] , )-> identifier[sqlite] . identifier[StateChangeRecord] : literal[string] keyword[return] identifier[storage] . identifier[get_latest_state_change_by_data_field] ({ literal[string] : identifier[str] ( identifier[canonical_identifier] . identifier[chain_identifier] ), literal[string] : identifier[to_checksum_address] ( identifier[canonical_identifier] . identifier[token_network_address] , ), literal[string] : identifier[str] ( identifier[canonical_identifier] . identifier[channel_identifier] , ), literal[string] : identifier[serialize_bytes] ( identifier[balance_hash] ), literal[string] : identifier[to_checksum_address] ( identifier[sender] ), })
def get_state_change_with_balance_proof_by_balance_hash(storage: sqlite.SQLiteStorage, canonical_identifier: CanonicalIdentifier, balance_hash: BalanceHash, sender: Address) -> sqlite.StateChangeRecord: """ Returns the state change which contains the corresponding balance proof. Use this function to find a balance proof for a call to settle, which only has the blinded balance proof data. """ return storage.get_latest_state_change_by_data_field({'balance_proof.canonical_identifier.chain_identifier': str(canonical_identifier.chain_identifier), 'balance_proof.canonical_identifier.token_network_address': to_checksum_address(canonical_identifier.token_network_address), 'balance_proof.canonical_identifier.channel_identifier': str(canonical_identifier.channel_identifier), 'balance_proof.balance_hash': serialize_bytes(balance_hash), 'balance_proof.sender': to_checksum_address(sender)})
def link2html(text): ''' Turns md links to html ''' match = r'\[([^\]]+)\]\(([^)]+)\)' replace = r'<a href="\2">\1</a>' return re.sub(match, replace, text)
def function[link2html, parameter[text]]: constant[ Turns md links to html ] variable[match] assign[=] constant[\[([^\]]+)\]\(([^)]+)\)] variable[replace] assign[=] constant[<a href="\2">\1</a>] return[call[name[re].sub, parameter[name[match], name[replace], name[text]]]]
keyword[def] identifier[link2html] ( identifier[text] ): literal[string] identifier[match] = literal[string] identifier[replace] = literal[string] keyword[return] identifier[re] . identifier[sub] ( identifier[match] , identifier[replace] , identifier[text] )
def link2html(text): """ Turns md links to html """ match = '\\[([^\\]]+)\\]\\(([^)]+)\\)' replace = '<a href="\\2">\\1</a>' return re.sub(match, replace, text)
def pattern_logic_aeidon(): """Return patterns to be used for searching subtitles via aeidon.""" if Config.options.pattern_files: return prep_patterns(Config.options.pattern_files) elif Config.options.regex: return Config.REGEX else: return Config.TERMS
def function[pattern_logic_aeidon, parameter[]]: constant[Return patterns to be used for searching subtitles via aeidon.] if name[Config].options.pattern_files begin[:] return[call[name[prep_patterns], parameter[name[Config].options.pattern_files]]]
keyword[def] identifier[pattern_logic_aeidon] (): literal[string] keyword[if] identifier[Config] . identifier[options] . identifier[pattern_files] : keyword[return] identifier[prep_patterns] ( identifier[Config] . identifier[options] . identifier[pattern_files] ) keyword[elif] identifier[Config] . identifier[options] . identifier[regex] : keyword[return] identifier[Config] . identifier[REGEX] keyword[else] : keyword[return] identifier[Config] . identifier[TERMS]
def pattern_logic_aeidon(): """Return patterns to be used for searching subtitles via aeidon.""" if Config.options.pattern_files: return prep_patterns(Config.options.pattern_files) # depends on [control=['if'], data=[]] elif Config.options.regex: return Config.REGEX # depends on [control=['if'], data=[]] else: return Config.TERMS
def Copy(self, field_number=None): """Returns descriptor copy, optionally changing field number.""" new_args = self._kwargs.copy() if field_number is not None: new_args["field_number"] = field_number return ProtoRDFValue( rdf_type=self.original_proto_type_name, default=getattr(self, "default", None), **new_args)
def function[Copy, parameter[self, field_number]]: constant[Returns descriptor copy, optionally changing field number.] variable[new_args] assign[=] call[name[self]._kwargs.copy, parameter[]] if compare[name[field_number] is_not constant[None]] begin[:] call[name[new_args]][constant[field_number]] assign[=] name[field_number] return[call[name[ProtoRDFValue], parameter[]]]
keyword[def] identifier[Copy] ( identifier[self] , identifier[field_number] = keyword[None] ): literal[string] identifier[new_args] = identifier[self] . identifier[_kwargs] . identifier[copy] () keyword[if] identifier[field_number] keyword[is] keyword[not] keyword[None] : identifier[new_args] [ literal[string] ]= identifier[field_number] keyword[return] identifier[ProtoRDFValue] ( identifier[rdf_type] = identifier[self] . identifier[original_proto_type_name] , identifier[default] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ), ** identifier[new_args] )
def Copy(self, field_number=None): """Returns descriptor copy, optionally changing field number.""" new_args = self._kwargs.copy() if field_number is not None: new_args['field_number'] = field_number # depends on [control=['if'], data=['field_number']] return ProtoRDFValue(rdf_type=self.original_proto_type_name, default=getattr(self, 'default', None), **new_args)
def close(self): """Close open resources.""" super(RarExtFile, self).close() if self._fd: self._fd.close() self._fd = None
def function[close, parameter[self]]: constant[Close open resources.] call[call[name[super], parameter[name[RarExtFile], name[self]]].close, parameter[]] if name[self]._fd begin[:] call[name[self]._fd.close, parameter[]] name[self]._fd assign[=] constant[None]
keyword[def] identifier[close] ( identifier[self] ): literal[string] identifier[super] ( identifier[RarExtFile] , identifier[self] ). identifier[close] () keyword[if] identifier[self] . identifier[_fd] : identifier[self] . identifier[_fd] . identifier[close] () identifier[self] . identifier[_fd] = keyword[None]
def close(self): """Close open resources.""" super(RarExtFile, self).close() if self._fd: self._fd.close() self._fd = None # depends on [control=['if'], data=[]]
def block(self, cutoffs=None, values=None, n_bins=0, right=False, function=None): """ Block a log based on number of bins, or on cutoffs. Args: cutoffs (array) values (array): the values to map to. Defaults to [0, 1, 2,...] n_bins (int) right (bool) function (function): transform the log if you want. Returns: Curve. """ # We'll return a copy. params = self.__dict__.copy() if (values is not None) and (cutoffs is None): cutoffs = values[1:] if (cutoffs is None) and (n_bins == 0): cutoffs = np.mean(self) if (n_bins != 0) and (cutoffs is None): mi, ma = np.amin(self), np.amax(self) cutoffs = np.linspace(mi, ma, n_bins+1) cutoffs = cutoffs[:-1] try: # To use cutoff as a list. data = np.digitize(self, cutoffs, right) except ValueError: # It's just a number. data = np.digitize(self, [cutoffs], right) if (function is None) and (values is None): return Curve(data, params=params) data = data.astype(float) # Set the function for reducing. f = function or utils.null # Find the tops of the 'zones'. tops, vals = utils.find_edges(data) # End of array trick... adding this should remove the # need for the marked lines below. But it doesn't. # np.append(tops, None) # np.append(vals, None) if values is None: # Transform each segment in turn, then deal with the last segment. for top, base in zip(tops[:-1], tops[1:]): data[top:base] = f(np.copy(self[top:base])) data[base:] = f(np.copy(self[base:])) # See above else: for top, base, val in zip(tops[:-1], tops[1:], vals[:-1]): data[top:base] = values[int(val)] data[base:] = values[int(vals[-1])] # See above return Curve(data, params=params)
def function[block, parameter[self, cutoffs, values, n_bins, right, function]]: constant[ Block a log based on number of bins, or on cutoffs. Args: cutoffs (array) values (array): the values to map to. Defaults to [0, 1, 2,...] n_bins (int) right (bool) function (function): transform the log if you want. Returns: Curve. ] variable[params] assign[=] call[name[self].__dict__.copy, parameter[]] if <ast.BoolOp object at 0x7da1b2283bb0> begin[:] variable[cutoffs] assign[=] call[name[values]][<ast.Slice object at 0x7da1b2280e80>] if <ast.BoolOp object at 0x7da1b2280df0> begin[:] variable[cutoffs] assign[=] call[name[np].mean, parameter[name[self]]] if <ast.BoolOp object at 0x7da1b2280370> begin[:] <ast.Tuple object at 0x7da1b2280190> assign[=] tuple[[<ast.Call object at 0x7da1b22800d0>, <ast.Call object at 0x7da1b22816c0>]] variable[cutoffs] assign[=] call[name[np].linspace, parameter[name[mi], name[ma], binary_operation[name[n_bins] + constant[1]]]] variable[cutoffs] assign[=] call[name[cutoffs]][<ast.Slice object at 0x7da1b2282d10>] <ast.Try object at 0x7da1b2282da0> if <ast.BoolOp object at 0x7da1b2282a10> begin[:] return[call[name[Curve], parameter[name[data]]]] variable[data] assign[=] call[name[data].astype, parameter[name[float]]] variable[f] assign[=] <ast.BoolOp object at 0x7da1b22823e0> <ast.Tuple object at 0x7da1b2281cf0> assign[=] call[name[utils].find_edges, parameter[name[data]]] if compare[name[values] is constant[None]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b2282770>, <ast.Name object at 0x7da1b2280a90>]]] in starred[call[name[zip], parameter[call[name[tops]][<ast.Slice object at 0x7da1b2280970>], call[name[tops]][<ast.Slice object at 0x7da1b2280a60>]]]] begin[:] call[name[data]][<ast.Slice object at 0x7da1b2282fe0>] assign[=] call[name[f], parameter[call[name[np].copy, parameter[call[name[self]][<ast.Slice object at 0x7da1b22834c0>]]]]] call[name[data]][<ast.Slice object at 0x7da1b2283700>] assign[=] call[name[f], 
parameter[call[name[np].copy, parameter[call[name[self]][<ast.Slice object at 0x7da1b2280c70>]]]]] return[call[name[Curve], parameter[name[data]]]]
keyword[def] identifier[block] ( identifier[self] , identifier[cutoffs] = keyword[None] , identifier[values] = keyword[None] , identifier[n_bins] = literal[int] , identifier[right] = keyword[False] , identifier[function] = keyword[None] ): literal[string] identifier[params] = identifier[self] . identifier[__dict__] . identifier[copy] () keyword[if] ( identifier[values] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[cutoffs] keyword[is] keyword[None] ): identifier[cutoffs] = identifier[values] [ literal[int] :] keyword[if] ( identifier[cutoffs] keyword[is] keyword[None] ) keyword[and] ( identifier[n_bins] == literal[int] ): identifier[cutoffs] = identifier[np] . identifier[mean] ( identifier[self] ) keyword[if] ( identifier[n_bins] != literal[int] ) keyword[and] ( identifier[cutoffs] keyword[is] keyword[None] ): identifier[mi] , identifier[ma] = identifier[np] . identifier[amin] ( identifier[self] ), identifier[np] . identifier[amax] ( identifier[self] ) identifier[cutoffs] = identifier[np] . identifier[linspace] ( identifier[mi] , identifier[ma] , identifier[n_bins] + literal[int] ) identifier[cutoffs] = identifier[cutoffs] [:- literal[int] ] keyword[try] : identifier[data] = identifier[np] . identifier[digitize] ( identifier[self] , identifier[cutoffs] , identifier[right] ) keyword[except] identifier[ValueError] : identifier[data] = identifier[np] . identifier[digitize] ( identifier[self] ,[ identifier[cutoffs] ], identifier[right] ) keyword[if] ( identifier[function] keyword[is] keyword[None] ) keyword[and] ( identifier[values] keyword[is] keyword[None] ): keyword[return] identifier[Curve] ( identifier[data] , identifier[params] = identifier[params] ) identifier[data] = identifier[data] . identifier[astype] ( identifier[float] ) identifier[f] = identifier[function] keyword[or] identifier[utils] . identifier[null] identifier[tops] , identifier[vals] = identifier[utils] . 
identifier[find_edges] ( identifier[data] ) keyword[if] identifier[values] keyword[is] keyword[None] : keyword[for] identifier[top] , identifier[base] keyword[in] identifier[zip] ( identifier[tops] [:- literal[int] ], identifier[tops] [ literal[int] :]): identifier[data] [ identifier[top] : identifier[base] ]= identifier[f] ( identifier[np] . identifier[copy] ( identifier[self] [ identifier[top] : identifier[base] ])) identifier[data] [ identifier[base] :]= identifier[f] ( identifier[np] . identifier[copy] ( identifier[self] [ identifier[base] :])) keyword[else] : keyword[for] identifier[top] , identifier[base] , identifier[val] keyword[in] identifier[zip] ( identifier[tops] [:- literal[int] ], identifier[tops] [ literal[int] :], identifier[vals] [:- literal[int] ]): identifier[data] [ identifier[top] : identifier[base] ]= identifier[values] [ identifier[int] ( identifier[val] )] identifier[data] [ identifier[base] :]= identifier[values] [ identifier[int] ( identifier[vals] [- literal[int] ])] keyword[return] identifier[Curve] ( identifier[data] , identifier[params] = identifier[params] )
def block(self, cutoffs=None, values=None, n_bins=0, right=False, function=None): """ Block a log based on number of bins, or on cutoffs. Args: cutoffs (array) values (array): the values to map to. Defaults to [0, 1, 2,...] n_bins (int) right (bool) function (function): transform the log if you want. Returns: Curve. """ # We'll return a copy. params = self.__dict__.copy() if values is not None and cutoffs is None: cutoffs = values[1:] # depends on [control=['if'], data=[]] if cutoffs is None and n_bins == 0: cutoffs = np.mean(self) # depends on [control=['if'], data=[]] if n_bins != 0 and cutoffs is None: (mi, ma) = (np.amin(self), np.amax(self)) cutoffs = np.linspace(mi, ma, n_bins + 1) cutoffs = cutoffs[:-1] # depends on [control=['if'], data=[]] try: # To use cutoff as a list. data = np.digitize(self, cutoffs, right) # depends on [control=['try'], data=[]] except ValueError: # It's just a number. data = np.digitize(self, [cutoffs], right) # depends on [control=['except'], data=[]] if function is None and values is None: return Curve(data, params=params) # depends on [control=['if'], data=[]] data = data.astype(float) # Set the function for reducing. f = function or utils.null # Find the tops of the 'zones'. (tops, vals) = utils.find_edges(data) # End of array trick... adding this should remove the # need for the marked lines below. But it doesn't. # np.append(tops, None) # np.append(vals, None) if values is None: # Transform each segment in turn, then deal with the last segment. for (top, base) in zip(tops[:-1], tops[1:]): data[top:base] = f(np.copy(self[top:base])) # depends on [control=['for'], data=[]] data[base:] = f(np.copy(self[base:])) # See above # depends on [control=['if'], data=[]] else: for (top, base, val) in zip(tops[:-1], tops[1:], vals[:-1]): data[top:base] = values[int(val)] # depends on [control=['for'], data=[]] data[base:] = values[int(vals[-1])] # See above return Curve(data, params=params)
def query(self, expression, vm='python'): """Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array """ condition = self.eval(expression, vm=vm) return self.compress(condition)
def function[query, parameter[self, expression, vm]]: constant[Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array ] variable[condition] assign[=] call[name[self].eval, parameter[name[expression]]] return[call[name[self].compress, parameter[name[condition]]]]
keyword[def] identifier[query] ( identifier[self] , identifier[expression] , identifier[vm] = literal[string] ): literal[string] identifier[condition] = identifier[self] . identifier[eval] ( identifier[expression] , identifier[vm] = identifier[vm] ) keyword[return] identifier[self] . identifier[compress] ( identifier[condition] )
def query(self, expression, vm='python'): """Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array """ condition = self.eval(expression, vm=vm) return self.compress(condition)
def get_waveform_end_frequency(template=None, **kwargs): """Return the stop frequency of a template """ input_params = props(template,**kwargs) approximant = kwargs['approximant'] if approximant in _filter_ends: return _filter_ends[approximant](**input_params) else: return None
def function[get_waveform_end_frequency, parameter[template]]: constant[Return the stop frequency of a template ] variable[input_params] assign[=] call[name[props], parameter[name[template]]] variable[approximant] assign[=] call[name[kwargs]][constant[approximant]] if compare[name[approximant] in name[_filter_ends]] begin[:] return[call[call[name[_filter_ends]][name[approximant]], parameter[]]]
keyword[def] identifier[get_waveform_end_frequency] ( identifier[template] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[input_params] = identifier[props] ( identifier[template] ,** identifier[kwargs] ) identifier[approximant] = identifier[kwargs] [ literal[string] ] keyword[if] identifier[approximant] keyword[in] identifier[_filter_ends] : keyword[return] identifier[_filter_ends] [ identifier[approximant] ](** identifier[input_params] ) keyword[else] : keyword[return] keyword[None]
def get_waveform_end_frequency(template=None, **kwargs): """Return the stop frequency of a template """ input_params = props(template, **kwargs) approximant = kwargs['approximant'] if approximant in _filter_ends: return _filter_ends[approximant](**input_params) # depends on [control=['if'], data=['approximant', '_filter_ends']] else: return None
def set_chat_description(chat_id, description, **kwargs): """ Use this method to change the description of a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param description: New chat description, 0-255 characters :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :return: Returns True on success. :rtype: bool """ if len(description) > 255: raise ValueError("Chat description must be less than 255 characters.") # required args params = dict( chat_id=chat_id, description=description ) return TelegramBotRPCRequest('setChatTitle', params=params, on_result=lambda result: result, **kwargs)
def function[set_chat_description, parameter[chat_id, description]]: constant[ Use this method to change the description of a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param description: New chat description, 0-255 characters :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :return: Returns True on success. :rtype: bool ] if compare[call[name[len], parameter[name[description]]] greater[>] constant[255]] begin[:] <ast.Raise object at 0x7da18c4cd720> variable[params] assign[=] call[name[dict], parameter[]] return[call[name[TelegramBotRPCRequest], parameter[constant[setChatTitle]]]]
keyword[def] identifier[set_chat_description] ( identifier[chat_id] , identifier[description] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[len] ( identifier[description] )> literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[params] = identifier[dict] ( identifier[chat_id] = identifier[chat_id] , identifier[description] = identifier[description] ) keyword[return] identifier[TelegramBotRPCRequest] ( literal[string] , identifier[params] = identifier[params] , identifier[on_result] = keyword[lambda] identifier[result] : identifier[result] ,** identifier[kwargs] )
def set_chat_description(chat_id, description, **kwargs): """ Use this method to change the description of a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Returns True on success. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param description: New chat description, 0-255 characters :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :return: Returns True on success. :rtype: bool """ if len(description) > 255: raise ValueError('Chat description must be less than 255 characters.') # depends on [control=['if'], data=[]] # required args params = dict(chat_id=chat_id, description=description) return TelegramBotRPCRequest('setChatTitle', params=params, on_result=lambda result: result, **kwargs)
def size(ctx, dataset, kwargs): "Show dataset size" kwargs = parse_kwargs(kwargs) (print)(data(dataset, **ctx.obj).get(**kwargs).complete_set.size)
def function[size, parameter[ctx, dataset, kwargs]]: constant[Show dataset size] variable[kwargs] assign[=] call[name[parse_kwargs], parameter[name[kwargs]]] call[name[print], parameter[call[call[name[data], parameter[name[dataset]]].get, parameter[]].complete_set.size]]
keyword[def] identifier[size] ( identifier[ctx] , identifier[dataset] , identifier[kwargs] ): literal[string] identifier[kwargs] = identifier[parse_kwargs] ( identifier[kwargs] ) ( identifier[print] )( identifier[data] ( identifier[dataset] ,** identifier[ctx] . identifier[obj] ). identifier[get] (** identifier[kwargs] ). identifier[complete_set] . identifier[size] )
def size(ctx, dataset, kwargs): """Show dataset size""" kwargs = parse_kwargs(kwargs) print(data(dataset, **ctx.obj).get(**kwargs).complete_set.size)
def get_pseudo_abi_for_input(s, timeout=None, proxies=None): """ Lookup sighash from 4bytes.directory, create a pseudo api and try to decode it with the parsed abi. May return multiple results as sighashes may collide. :param s: bytes input :return: pseudo abi for method """ sighash = Utils.bytes_to_str(s[:4]) for pseudo_abi in FourByteDirectory.get_pseudo_abi_for_sighash(sighash, timeout=timeout, proxies=proxies): types = [ti["type"] for ti in pseudo_abi['inputs']] try: # test decoding _ = decode_abi(types, s[4:]) yield pseudo_abi except eth_abi.exceptions.DecodingError as e: continue
def function[get_pseudo_abi_for_input, parameter[s, timeout, proxies]]: constant[ Lookup sighash from 4bytes.directory, create a pseudo api and try to decode it with the parsed abi. May return multiple results as sighashes may collide. :param s: bytes input :return: pseudo abi for method ] variable[sighash] assign[=] call[name[Utils].bytes_to_str, parameter[call[name[s]][<ast.Slice object at 0x7da1b0fac0a0>]]] for taget[name[pseudo_abi]] in starred[call[name[FourByteDirectory].get_pseudo_abi_for_sighash, parameter[name[sighash]]]] begin[:] variable[types] assign[=] <ast.ListComp object at 0x7da1b0faf370> <ast.Try object at 0x7da1b0eda6b0>
keyword[def] identifier[get_pseudo_abi_for_input] ( identifier[s] , identifier[timeout] = keyword[None] , identifier[proxies] = keyword[None] ): literal[string] identifier[sighash] = identifier[Utils] . identifier[bytes_to_str] ( identifier[s] [: literal[int] ]) keyword[for] identifier[pseudo_abi] keyword[in] identifier[FourByteDirectory] . identifier[get_pseudo_abi_for_sighash] ( identifier[sighash] , identifier[timeout] = identifier[timeout] , identifier[proxies] = identifier[proxies] ): identifier[types] =[ identifier[ti] [ literal[string] ] keyword[for] identifier[ti] keyword[in] identifier[pseudo_abi] [ literal[string] ]] keyword[try] : identifier[_] = identifier[decode_abi] ( identifier[types] , identifier[s] [ literal[int] :]) keyword[yield] identifier[pseudo_abi] keyword[except] identifier[eth_abi] . identifier[exceptions] . identifier[DecodingError] keyword[as] identifier[e] : keyword[continue]
def get_pseudo_abi_for_input(s, timeout=None, proxies=None): """ Lookup sighash from 4bytes.directory, create a pseudo api and try to decode it with the parsed abi. May return multiple results as sighashes may collide. :param s: bytes input :return: pseudo abi for method """ sighash = Utils.bytes_to_str(s[:4]) for pseudo_abi in FourByteDirectory.get_pseudo_abi_for_sighash(sighash, timeout=timeout, proxies=proxies): types = [ti['type'] for ti in pseudo_abi['inputs']] try: # test decoding _ = decode_abi(types, s[4:]) yield pseudo_abi # depends on [control=['try'], data=[]] except eth_abi.exceptions.DecodingError as e: continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['pseudo_abi']]
def Emulation_setScriptExecutionDisabled(self, value): """ Function path: Emulation.setScriptExecutionDisabled Domain: Emulation Method name: setScriptExecutionDisabled WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'value' (type: boolean) -> Whether script execution should be disabled in the page. No return value. Description: Switches script execution in the page. """ assert isinstance(value, (bool,) ), "Argument 'value' must be of type '['bool']'. Received type: '%s'" % type( value) subdom_funcs = self.synchronous_command( 'Emulation.setScriptExecutionDisabled', value=value) return subdom_funcs
def function[Emulation_setScriptExecutionDisabled, parameter[self, value]]: constant[ Function path: Emulation.setScriptExecutionDisabled Domain: Emulation Method name: setScriptExecutionDisabled WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'value' (type: boolean) -> Whether script execution should be disabled in the page. No return value. Description: Switches script execution in the page. ] assert[call[name[isinstance], parameter[name[value], tuple[[<ast.Name object at 0x7da1b11e9bd0>]]]]] variable[subdom_funcs] assign[=] call[name[self].synchronous_command, parameter[constant[Emulation.setScriptExecutionDisabled]]] return[name[subdom_funcs]]
keyword[def] identifier[Emulation_setScriptExecutionDisabled] ( identifier[self] , identifier[value] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[value] ,( identifier[bool] ,) ), literal[string] % identifier[type] ( identifier[value] ) identifier[subdom_funcs] = identifier[self] . identifier[synchronous_command] ( literal[string] , identifier[value] = identifier[value] ) keyword[return] identifier[subdom_funcs]
def Emulation_setScriptExecutionDisabled(self, value): """ Function path: Emulation.setScriptExecutionDisabled Domain: Emulation Method name: setScriptExecutionDisabled WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'value' (type: boolean) -> Whether script execution should be disabled in the page. No return value. Description: Switches script execution in the page. """ assert isinstance(value, (bool,)), "Argument 'value' must be of type '['bool']'. Received type: '%s'" % type(value) subdom_funcs = self.synchronous_command('Emulation.setScriptExecutionDisabled', value=value) return subdom_funcs
def gps2_rtk_encode(self, time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses): ''' RTK GPS data. Gives information on the relative baseline calculation the GPS is reporting time_last_baseline_ms : Time since boot of last baseline message received in ms. (uint32_t) rtk_receiver_id : Identification of connected RTK receiver. (uint8_t) wn : GPS Week Number of last baseline (uint16_t) tow : GPS Time of Week of last baseline (uint32_t) rtk_health : GPS-specific health report for RTK data. (uint8_t) rtk_rate : Rate of baseline messages being received by GPS, in HZ (uint8_t) nsats : Current number of sats used for RTK calculation. (uint8_t) baseline_coords_type : Coordinate system of baseline. 0 == ECEF, 1 == NED (uint8_t) baseline_a_mm : Current baseline in ECEF x or NED north component in mm. (int32_t) baseline_b_mm : Current baseline in ECEF y or NED east component in mm. (int32_t) baseline_c_mm : Current baseline in ECEF z or NED down component in mm. (int32_t) accuracy : Current estimate of baseline accuracy. (uint32_t) iar_num_hypotheses : Current number of integer ambiguity hypotheses. (int32_t) ''' return MAVLink_gps2_rtk_message(time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses)
def function[gps2_rtk_encode, parameter[self, time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses]]: constant[ RTK GPS data. Gives information on the relative baseline calculation the GPS is reporting time_last_baseline_ms : Time since boot of last baseline message received in ms. (uint32_t) rtk_receiver_id : Identification of connected RTK receiver. (uint8_t) wn : GPS Week Number of last baseline (uint16_t) tow : GPS Time of Week of last baseline (uint32_t) rtk_health : GPS-specific health report for RTK data. (uint8_t) rtk_rate : Rate of baseline messages being received by GPS, in HZ (uint8_t) nsats : Current number of sats used for RTK calculation. (uint8_t) baseline_coords_type : Coordinate system of baseline. 0 == ECEF, 1 == NED (uint8_t) baseline_a_mm : Current baseline in ECEF x or NED north component in mm. (int32_t) baseline_b_mm : Current baseline in ECEF y or NED east component in mm. (int32_t) baseline_c_mm : Current baseline in ECEF z or NED down component in mm. (int32_t) accuracy : Current estimate of baseline accuracy. (uint32_t) iar_num_hypotheses : Current number of integer ambiguity hypotheses. (int32_t) ] return[call[name[MAVLink_gps2_rtk_message], parameter[name[time_last_baseline_ms], name[rtk_receiver_id], name[wn], name[tow], name[rtk_health], name[rtk_rate], name[nsats], name[baseline_coords_type], name[baseline_a_mm], name[baseline_b_mm], name[baseline_c_mm], name[accuracy], name[iar_num_hypotheses]]]]
keyword[def] identifier[gps2_rtk_encode] ( identifier[self] , identifier[time_last_baseline_ms] , identifier[rtk_receiver_id] , identifier[wn] , identifier[tow] , identifier[rtk_health] , identifier[rtk_rate] , identifier[nsats] , identifier[baseline_coords_type] , identifier[baseline_a_mm] , identifier[baseline_b_mm] , identifier[baseline_c_mm] , identifier[accuracy] , identifier[iar_num_hypotheses] ): literal[string] keyword[return] identifier[MAVLink_gps2_rtk_message] ( identifier[time_last_baseline_ms] , identifier[rtk_receiver_id] , identifier[wn] , identifier[tow] , identifier[rtk_health] , identifier[rtk_rate] , identifier[nsats] , identifier[baseline_coords_type] , identifier[baseline_a_mm] , identifier[baseline_b_mm] , identifier[baseline_c_mm] , identifier[accuracy] , identifier[iar_num_hypotheses] )
def gps2_rtk_encode(self, time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses): """ RTK GPS data. Gives information on the relative baseline calculation the GPS is reporting time_last_baseline_ms : Time since boot of last baseline message received in ms. (uint32_t) rtk_receiver_id : Identification of connected RTK receiver. (uint8_t) wn : GPS Week Number of last baseline (uint16_t) tow : GPS Time of Week of last baseline (uint32_t) rtk_health : GPS-specific health report for RTK data. (uint8_t) rtk_rate : Rate of baseline messages being received by GPS, in HZ (uint8_t) nsats : Current number of sats used for RTK calculation. (uint8_t) baseline_coords_type : Coordinate system of baseline. 0 == ECEF, 1 == NED (uint8_t) baseline_a_mm : Current baseline in ECEF x or NED north component in mm. (int32_t) baseline_b_mm : Current baseline in ECEF y or NED east component in mm. (int32_t) baseline_c_mm : Current baseline in ECEF z or NED down component in mm. (int32_t) accuracy : Current estimate of baseline accuracy. (uint32_t) iar_num_hypotheses : Current number of integer ambiguity hypotheses. (int32_t) """ return MAVLink_gps2_rtk_message(time_last_baseline_ms, rtk_receiver_id, wn, tow, rtk_health, rtk_rate, nsats, baseline_coords_type, baseline_a_mm, baseline_b_mm, baseline_c_mm, accuracy, iar_num_hypotheses)
def imatch(pattern, name): # type: (Text, Text) -> bool """Test whether a name matches a wildcard pattern (case insensitive). Arguments: pattern (str): A wildcard pattern, e.g. ``"*.py"``. name (bool): A filename. Returns: bool: `True` if the filename matches the pattern. """ try: re_pat = _PATTERN_CACHE[(pattern, False)] except KeyError: res = "(?ms)" + _translate(pattern, case_sensitive=False) + r'\Z' _PATTERN_CACHE[(pattern, False)] = re_pat = re.compile(res, re.IGNORECASE) return re_pat.match(name) is not None
def function[imatch, parameter[pattern, name]]: constant[Test whether a name matches a wildcard pattern (case insensitive). Arguments: pattern (str): A wildcard pattern, e.g. ``"*.py"``. name (bool): A filename. Returns: bool: `True` if the filename matches the pattern. ] <ast.Try object at 0x7da1b16791b0> return[compare[call[name[re_pat].match, parameter[name[name]]] is_not constant[None]]]
keyword[def] identifier[imatch] ( identifier[pattern] , identifier[name] ): literal[string] keyword[try] : identifier[re_pat] = identifier[_PATTERN_CACHE] [( identifier[pattern] , keyword[False] )] keyword[except] identifier[KeyError] : identifier[res] = literal[string] + identifier[_translate] ( identifier[pattern] , identifier[case_sensitive] = keyword[False] )+ literal[string] identifier[_PATTERN_CACHE] [( identifier[pattern] , keyword[False] )]= identifier[re_pat] = identifier[re] . identifier[compile] ( identifier[res] , identifier[re] . identifier[IGNORECASE] ) keyword[return] identifier[re_pat] . identifier[match] ( identifier[name] ) keyword[is] keyword[not] keyword[None]
def imatch(pattern, name): # type: (Text, Text) -> bool 'Test whether a name matches a wildcard pattern (case insensitive).\n\n Arguments:\n pattern (str): A wildcard pattern, e.g. ``"*.py"``.\n name (bool): A filename.\n\n Returns:\n bool: `True` if the filename matches the pattern.\n\n ' try: re_pat = _PATTERN_CACHE[pattern, False] # depends on [control=['try'], data=[]] except KeyError: res = '(?ms)' + _translate(pattern, case_sensitive=False) + '\\Z' _PATTERN_CACHE[pattern, False] = re_pat = re.compile(res, re.IGNORECASE) # depends on [control=['except'], data=[]] return re_pat.match(name) is not None
def setClients(self, *args, **kwargs): """Adds the clients for this group to a 'clients' field. The 'groupMembers' field of the group holds the encodedKeys of the member clients of the group. Since Mambu REST API accepts both ids or encodedKeys to retrieve entities, we use that here. You may wish to get the full details of each client by passing a fullDetails=True argument here. Returns the number of requests done to Mambu. """ requests = 0 if 'fullDetails' in kwargs: fullDetails = kwargs['fullDetails'] kwargs.pop('fullDetails') else: fullDetails = True clients = [] for m in self['groupMembers']: try: client = self.mambuclientclass(entid=m['clientKey'], fullDetails=fullDetails, *args, **kwargs) except AttributeError as ae: from .mambuclient import MambuClient self.mambuclientclass = MambuClient client = self.mambuclientclass(entid=m['clientKey'], fullDetails=fullDetails, *args, **kwargs) requests += 1 clients.append(client) self['clients'] = clients return requests
def function[setClients, parameter[self]]: constant[Adds the clients for this group to a 'clients' field. The 'groupMembers' field of the group holds the encodedKeys of the member clients of the group. Since Mambu REST API accepts both ids or encodedKeys to retrieve entities, we use that here. You may wish to get the full details of each client by passing a fullDetails=True argument here. Returns the number of requests done to Mambu. ] variable[requests] assign[=] constant[0] if compare[constant[fullDetails] in name[kwargs]] begin[:] variable[fullDetails] assign[=] call[name[kwargs]][constant[fullDetails]] call[name[kwargs].pop, parameter[constant[fullDetails]]] variable[clients] assign[=] list[[]] for taget[name[m]] in starred[call[name[self]][constant[groupMembers]]] begin[:] <ast.Try object at 0x7da1b26ad5d0> <ast.AugAssign object at 0x7da1b26adff0> call[name[clients].append, parameter[name[client]]] call[name[self]][constant[clients]] assign[=] name[clients] return[name[requests]]
keyword[def] identifier[setClients] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[requests] = literal[int] keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[fullDetails] = identifier[kwargs] [ literal[string] ] identifier[kwargs] . identifier[pop] ( literal[string] ) keyword[else] : identifier[fullDetails] = keyword[True] identifier[clients] =[] keyword[for] identifier[m] keyword[in] identifier[self] [ literal[string] ]: keyword[try] : identifier[client] = identifier[self] . identifier[mambuclientclass] ( identifier[entid] = identifier[m] [ literal[string] ], identifier[fullDetails] = identifier[fullDetails] ,* identifier[args] ,** identifier[kwargs] ) keyword[except] identifier[AttributeError] keyword[as] identifier[ae] : keyword[from] . identifier[mambuclient] keyword[import] identifier[MambuClient] identifier[self] . identifier[mambuclientclass] = identifier[MambuClient] identifier[client] = identifier[self] . identifier[mambuclientclass] ( identifier[entid] = identifier[m] [ literal[string] ], identifier[fullDetails] = identifier[fullDetails] ,* identifier[args] ,** identifier[kwargs] ) identifier[requests] += literal[int] identifier[clients] . identifier[append] ( identifier[client] ) identifier[self] [ literal[string] ]= identifier[clients] keyword[return] identifier[requests]
def setClients(self, *args, **kwargs): """Adds the clients for this group to a 'clients' field. The 'groupMembers' field of the group holds the encodedKeys of the member clients of the group. Since Mambu REST API accepts both ids or encodedKeys to retrieve entities, we use that here. You may wish to get the full details of each client by passing a fullDetails=True argument here. Returns the number of requests done to Mambu. """ requests = 0 if 'fullDetails' in kwargs: fullDetails = kwargs['fullDetails'] kwargs.pop('fullDetails') # depends on [control=['if'], data=['kwargs']] else: fullDetails = True clients = [] for m in self['groupMembers']: try: client = self.mambuclientclass(*args, entid=m['clientKey'], fullDetails=fullDetails, **kwargs) # depends on [control=['try'], data=[]] except AttributeError as ae: from .mambuclient import MambuClient self.mambuclientclass = MambuClient client = self.mambuclientclass(*args, entid=m['clientKey'], fullDetails=fullDetails, **kwargs) # depends on [control=['except'], data=[]] requests += 1 clients.append(client) # depends on [control=['for'], data=['m']] self['clients'] = clients return requests
def load(self, **kwargs): """Custom load method to address issue in 11.6.0 Final, where non existing objects would be True. """ if LooseVersion(self.tmos_ver) == LooseVersion('11.6.0'): return self._load_11_6(**kwargs) else: return super(Rule, self)._load(**kwargs)
def function[load, parameter[self]]: constant[Custom load method to address issue in 11.6.0 Final, where non existing objects would be True. ] if compare[call[name[LooseVersion], parameter[name[self].tmos_ver]] equal[==] call[name[LooseVersion], parameter[constant[11.6.0]]]] begin[:] return[call[name[self]._load_11_6, parameter[]]]
keyword[def] identifier[load] ( identifier[self] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[LooseVersion] ( identifier[self] . identifier[tmos_ver] )== identifier[LooseVersion] ( literal[string] ): keyword[return] identifier[self] . identifier[_load_11_6] (** identifier[kwargs] ) keyword[else] : keyword[return] identifier[super] ( identifier[Rule] , identifier[self] ). identifier[_load] (** identifier[kwargs] )
def load(self, **kwargs): """Custom load method to address issue in 11.6.0 Final, where non existing objects would be True. """ if LooseVersion(self.tmos_ver) == LooseVersion('11.6.0'): return self._load_11_6(**kwargs) # depends on [control=['if'], data=[]] else: return super(Rule, self)._load(**kwargs)
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None): """Returns a time string. :param pattern = "%Y-%m-%d %H:%M:%S" The format used. By default, an ISO-type format is used. The syntax here is identical to the one used by time.strftime() and time.strptime(). :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone timestamp = self.__timestamp__ - timezone timestamp -= LOCALTZ return _strftime(pattern, _gmtime(timestamp))
def function[timestring, parameter[self, pattern, timezone]]: constant[Returns a time string. :param pattern = "%Y-%m-%d %H:%M:%S" The format used. By default, an ISO-type format is used. The syntax here is identical to the one used by time.strftime() and time.strptime(). :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. ] if compare[name[timezone] is constant[None]] begin[:] variable[timezone] assign[=] name[self].timezone variable[timestamp] assign[=] binary_operation[name[self].__timestamp__ - name[timezone]] <ast.AugAssign object at 0x7da1b11a5e40> return[call[name[_strftime], parameter[name[pattern], call[name[_gmtime], parameter[name[timestamp]]]]]]
keyword[def] identifier[timestring] ( identifier[self] , identifier[pattern] = literal[string] , identifier[timezone] = keyword[None] ): literal[string] keyword[if] identifier[timezone] keyword[is] keyword[None] : identifier[timezone] = identifier[self] . identifier[timezone] identifier[timestamp] = identifier[self] . identifier[__timestamp__] - identifier[timezone] identifier[timestamp] -= identifier[LOCALTZ] keyword[return] identifier[_strftime] ( identifier[pattern] , identifier[_gmtime] ( identifier[timestamp] ))
def timestring(self, pattern='%Y-%m-%d %H:%M:%S', timezone=None): """Returns a time string. :param pattern = "%Y-%m-%d %H:%M:%S" The format used. By default, an ISO-type format is used. The syntax here is identical to the one used by time.strftime() and time.strptime(). :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone # depends on [control=['if'], data=['timezone']] timestamp = self.__timestamp__ - timezone timestamp -= LOCALTZ return _strftime(pattern, _gmtime(timestamp))
def fetch_resource(url): """Fetch a resource and return the resulting lines in a list Send file_name to get more clean log messages Args: url(str) Returns: lines(list(str)) """ try: data = get_request(url) lines = data.split('\n') except Exception as err: raise err return lines
def function[fetch_resource, parameter[url]]: constant[Fetch a resource and return the resulting lines in a list Send file_name to get more clean log messages Args: url(str) Returns: lines(list(str)) ] <ast.Try object at 0x7da18fe93be0> return[name[lines]]
keyword[def] identifier[fetch_resource] ( identifier[url] ): literal[string] keyword[try] : identifier[data] = identifier[get_request] ( identifier[url] ) identifier[lines] = identifier[data] . identifier[split] ( literal[string] ) keyword[except] identifier[Exception] keyword[as] identifier[err] : keyword[raise] identifier[err] keyword[return] identifier[lines]
def fetch_resource(url): """Fetch a resource and return the resulting lines in a list Send file_name to get more clean log messages Args: url(str) Returns: lines(list(str)) """ try: data = get_request(url) lines = data.split('\n') # depends on [control=['try'], data=[]] except Exception as err: raise err # depends on [control=['except'], data=['err']] return lines
def read_core_state_eigen(self): """ Read the core state eigenenergies at each ionic step. Returns: A list of dict over the atom such as [{"AO":[core state eig]}]. The core state eigenenergie list for each AO is over all ionic step. Example: The core state eigenenergie of the 2s AO of the 6th atom of the structure at the last ionic step is [5]["2s"][-1] """ with zopen(self.filename, "rt") as foutcar: line = foutcar.readline() while line != "": line = foutcar.readline() if "NIONS =" in line: natom = int(line.split("NIONS =")[1]) cl = [defaultdict(list) for i in range(natom)] if "the core state eigen" in line: iat = -1 while line != "": line = foutcar.readline() # don't know number of lines to parse without knowing # specific species, so stop parsing when we reach # "E-fermi" instead if "E-fermi" in line: break data = line.split() # data will contain odd number of elements if it is # the start of a new entry, or even number of elements # if it continues the previous entry if len(data) % 2 == 1: iat += 1 # started parsing a new ion data = data[1:] # remove element with ion number for i in range(0, len(data), 2): cl[iat][data[i]].append(float(data[i + 1])) return cl
def function[read_core_state_eigen, parameter[self]]: constant[ Read the core state eigenenergies at each ionic step. Returns: A list of dict over the atom such as [{"AO":[core state eig]}]. The core state eigenenergie list for each AO is over all ionic step. Example: The core state eigenenergie of the 2s AO of the 6th atom of the structure at the last ionic step is [5]["2s"][-1] ] with call[name[zopen], parameter[name[self].filename, constant[rt]]] begin[:] variable[line] assign[=] call[name[foutcar].readline, parameter[]] while compare[name[line] not_equal[!=] constant[]] begin[:] variable[line] assign[=] call[name[foutcar].readline, parameter[]] if compare[constant[NIONS =] in name[line]] begin[:] variable[natom] assign[=] call[name[int], parameter[call[call[name[line].split, parameter[constant[NIONS =]]]][constant[1]]]] variable[cl] assign[=] <ast.ListComp object at 0x7da1b26ac760> if compare[constant[the core state eigen] in name[line]] begin[:] variable[iat] assign[=] <ast.UnaryOp object at 0x7da1b26af100> while compare[name[line] not_equal[!=] constant[]] begin[:] variable[line] assign[=] call[name[foutcar].readline, parameter[]] if compare[constant[E-fermi] in name[line]] begin[:] break variable[data] assign[=] call[name[line].split, parameter[]] if compare[binary_operation[call[name[len], parameter[name[data]]] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[1]] begin[:] <ast.AugAssign object at 0x7da1b26ac880> variable[data] assign[=] call[name[data]][<ast.Slice object at 0x7da1b26acf40>] for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[data]]], constant[2]]]] begin[:] call[call[call[name[cl]][name[iat]]][call[name[data]][name[i]]].append, parameter[call[name[float], parameter[call[name[data]][binary_operation[name[i] + constant[1]]]]]]] return[name[cl]]
keyword[def] identifier[read_core_state_eigen] ( identifier[self] ): literal[string] keyword[with] identifier[zopen] ( identifier[self] . identifier[filename] , literal[string] ) keyword[as] identifier[foutcar] : identifier[line] = identifier[foutcar] . identifier[readline] () keyword[while] identifier[line] != literal[string] : identifier[line] = identifier[foutcar] . identifier[readline] () keyword[if] literal[string] keyword[in] identifier[line] : identifier[natom] = identifier[int] ( identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]) identifier[cl] =[ identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[natom] )] keyword[if] literal[string] keyword[in] identifier[line] : identifier[iat] =- literal[int] keyword[while] identifier[line] != literal[string] : identifier[line] = identifier[foutcar] . identifier[readline] () keyword[if] literal[string] keyword[in] identifier[line] : keyword[break] identifier[data] = identifier[line] . identifier[split] () keyword[if] identifier[len] ( identifier[data] )% literal[int] == literal[int] : identifier[iat] += literal[int] identifier[data] = identifier[data] [ literal[int] :] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[data] ), literal[int] ): identifier[cl] [ identifier[iat] ][ identifier[data] [ identifier[i] ]]. identifier[append] ( identifier[float] ( identifier[data] [ identifier[i] + literal[int] ])) keyword[return] identifier[cl]
def read_core_state_eigen(self): """ Read the core state eigenenergies at each ionic step. Returns: A list of dict over the atom such as [{"AO":[core state eig]}]. The core state eigenenergie list for each AO is over all ionic step. Example: The core state eigenenergie of the 2s AO of the 6th atom of the structure at the last ionic step is [5]["2s"][-1] """ with zopen(self.filename, 'rt') as foutcar: line = foutcar.readline() while line != '': line = foutcar.readline() if 'NIONS =' in line: natom = int(line.split('NIONS =')[1]) cl = [defaultdict(list) for i in range(natom)] # depends on [control=['if'], data=['line']] if 'the core state eigen' in line: iat = -1 while line != '': line = foutcar.readline() # don't know number of lines to parse without knowing # specific species, so stop parsing when we reach # "E-fermi" instead if 'E-fermi' in line: break # depends on [control=['if'], data=[]] data = line.split() # data will contain odd number of elements if it is # the start of a new entry, or even number of elements # if it continues the previous entry if len(data) % 2 == 1: iat += 1 # started parsing a new ion data = data[1:] # remove element with ion number # depends on [control=['if'], data=[]] for i in range(0, len(data), 2): cl[iat][data[i]].append(float(data[i + 1])) # depends on [control=['for'], data=['i']] # depends on [control=['while'], data=['line']] # depends on [control=['if'], data=['line']] # depends on [control=['while'], data=['line']] # depends on [control=['with'], data=['foutcar']] return cl
def initialize_bfd(self, abfd): """Initialize underlying libOpcodes library using BFD.""" self._ptr = _opcodes.initialize_bfd(abfd._ptr) # Already done inside opcodes.c #self.architecture = abfd.architecture #self.machine = abfd.machine #self.endian = abfd.endian # force intel syntax if self.architecture == ARCH_I386: if abfd.arch_size == 32: self.machine = MACH_I386_I386_INTEL_SYNTAX #abfd.machine = MACH_I386_I386_INTEL_SYNTAX elif abfd.arch_size == 64: self.machine = MACH_X86_64_INTEL_SYNTAX
def function[initialize_bfd, parameter[self, abfd]]: constant[Initialize underlying libOpcodes library using BFD.] name[self]._ptr assign[=] call[name[_opcodes].initialize_bfd, parameter[name[abfd]._ptr]] if compare[name[self].architecture equal[==] name[ARCH_I386]] begin[:] if compare[name[abfd].arch_size equal[==] constant[32]] begin[:] name[self].machine assign[=] name[MACH_I386_I386_INTEL_SYNTAX]
keyword[def] identifier[initialize_bfd] ( identifier[self] , identifier[abfd] ): literal[string] identifier[self] . identifier[_ptr] = identifier[_opcodes] . identifier[initialize_bfd] ( identifier[abfd] . identifier[_ptr] ) keyword[if] identifier[self] . identifier[architecture] == identifier[ARCH_I386] : keyword[if] identifier[abfd] . identifier[arch_size] == literal[int] : identifier[self] . identifier[machine] = identifier[MACH_I386_I386_INTEL_SYNTAX] keyword[elif] identifier[abfd] . identifier[arch_size] == literal[int] : identifier[self] . identifier[machine] = identifier[MACH_X86_64_INTEL_SYNTAX]
def initialize_bfd(self, abfd): """Initialize underlying libOpcodes library using BFD.""" self._ptr = _opcodes.initialize_bfd(abfd._ptr) # Already done inside opcodes.c #self.architecture = abfd.architecture #self.machine = abfd.machine #self.endian = abfd.endian # force intel syntax if self.architecture == ARCH_I386: if abfd.arch_size == 32: self.machine = MACH_I386_I386_INTEL_SYNTAX # depends on [control=['if'], data=[]] #abfd.machine = MACH_I386_I386_INTEL_SYNTAX elif abfd.arch_size == 64: self.machine = MACH_X86_64_INTEL_SYNTAX # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def get_block_raw(self, block): """ Args: block: block number (eg: 223212) block hash (eg: 0000000000000000210b10d620600dc1cc2380bb58eb2408f9767eb792ed31fa) word "last" - this will always return the latest block word "first" - this will always return the first block Returns: raw block data """ url = '{}/block/raw/{}'.format(self._url, block) return self.make_request(url)
def function[get_block_raw, parameter[self, block]]: constant[ Args: block: block number (eg: 223212) block hash (eg: 0000000000000000210b10d620600dc1cc2380bb58eb2408f9767eb792ed31fa) word "last" - this will always return the latest block word "first" - this will always return the first block Returns: raw block data ] variable[url] assign[=] call[constant[{}/block/raw/{}].format, parameter[name[self]._url, name[block]]] return[call[name[self].make_request, parameter[name[url]]]]
keyword[def] identifier[get_block_raw] ( identifier[self] , identifier[block] ): literal[string] identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[_url] , identifier[block] ) keyword[return] identifier[self] . identifier[make_request] ( identifier[url] )
def get_block_raw(self, block): """ Args: block: block number (eg: 223212) block hash (eg: 0000000000000000210b10d620600dc1cc2380bb58eb2408f9767eb792ed31fa) word "last" - this will always return the latest block word "first" - this will always return the first block Returns: raw block data """ url = '{}/block/raw/{}'.format(self._url, block) return self.make_request(url)
def list_param_combinations(param_ranges): """ Create a list of all parameter combinations from a dictionary specifying desired parameter values as lists. Example: >>> param_ranges = {'a': [1], 'b': [2, 3]} >>> list_param_combinations(param_ranges) [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}] Additionally, this function is robust in case values are not lists: >>> param_ranges = {'a': 1, 'b': [2, 3]} >>> list_param_combinations(param_ranges) [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}] """ # Convert non-list values to single-element lists # This is required to make sure product work. for key in param_ranges: if not isinstance(param_ranges[key], list): param_ranges[key] = [param_ranges[key]] return [dict(zip(param_ranges, v)) for v in product(*param_ranges.values())]
def function[list_param_combinations, parameter[param_ranges]]: constant[ Create a list of all parameter combinations from a dictionary specifying desired parameter values as lists. Example: >>> param_ranges = {'a': [1], 'b': [2, 3]} >>> list_param_combinations(param_ranges) [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}] Additionally, this function is robust in case values are not lists: >>> param_ranges = {'a': 1, 'b': [2, 3]} >>> list_param_combinations(param_ranges) [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}] ] for taget[name[key]] in starred[name[param_ranges]] begin[:] if <ast.UnaryOp object at 0x7da18bcca2c0> begin[:] call[name[param_ranges]][name[key]] assign[=] list[[<ast.Subscript object at 0x7da18bcca3e0>]] return[<ast.ListComp object at 0x7da18bccae60>]
keyword[def] identifier[list_param_combinations] ( identifier[param_ranges] ): literal[string] keyword[for] identifier[key] keyword[in] identifier[param_ranges] : keyword[if] keyword[not] identifier[isinstance] ( identifier[param_ranges] [ identifier[key] ], identifier[list] ): identifier[param_ranges] [ identifier[key] ]=[ identifier[param_ranges] [ identifier[key] ]] keyword[return] [ identifier[dict] ( identifier[zip] ( identifier[param_ranges] , identifier[v] )) keyword[for] identifier[v] keyword[in] identifier[product] (* identifier[param_ranges] . identifier[values] ())]
def list_param_combinations(param_ranges): """ Create a list of all parameter combinations from a dictionary specifying desired parameter values as lists. Example: >>> param_ranges = {'a': [1], 'b': [2, 3]} >>> list_param_combinations(param_ranges) [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}] Additionally, this function is robust in case values are not lists: >>> param_ranges = {'a': 1, 'b': [2, 3]} >>> list_param_combinations(param_ranges) [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}] """ # Convert non-list values to single-element lists # This is required to make sure product work. for key in param_ranges: if not isinstance(param_ranges[key], list): param_ranges[key] = [param_ranges[key]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] return [dict(zip(param_ranges, v)) for v in product(*param_ranges.values())]
def draw_path_collection(self, paths, path_coordinates, path_transforms, offsets, offset_coordinates, offset_order, styles, mplobj=None): """ Draw a collection of paths. The paths, offsets, and styles are all iterables, and the number of paths is max(len(paths), len(offsets)). By default, this is implemented via multiple calls to the draw_path() function. For efficiency, Renderers may choose to customize this implementation. Examples of path collections created by matplotlib are scatter plots, histograms, contour plots, and many others. Parameters ---------- paths : list list of tuples, where each tuple has two elements: (data, pathcodes). See draw_path() for a description of these. path_coordinates: string the coordinates code for the paths, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. path_transforms: array_like an array of shape (*, 3, 3), giving a series of 2D Affine transforms for the paths. These encode translations, rotations, and scalings in the standard way. offsets: array_like An array of offsets of shape (N, 2) offset_coordinates : string the coordinates code for the offsets, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. offset_order : string either "before" or "after". This specifies whether the offset is applied before the path transform, or after. The matplotlib backend equivalent is "before"->"data", "after"->"screen". styles: dictionary A dictionary in which each value is a list of length N, containing the style(s) for the paths. 
mplobj : matplotlib object the matplotlib plot element which generated this collection """ if offset_order == "before": raise NotImplementedError("offset before transform") for tup in self._iter_path_collection(paths, path_transforms, offsets, styles): (path, path_transform, offset, ec, lw, fc) = tup vertices, pathcodes = path path_transform = transforms.Affine2D(path_transform) vertices = path_transform.transform(vertices) # This is a hack: if path_coordinates == "figure": path_coordinates = "points" style = {"edgecolor": utils.color_to_hex(ec), "facecolor": utils.color_to_hex(fc), "edgewidth": lw, "dasharray": "10,0", "alpha": styles['alpha'], "zorder": styles['zorder']} self.draw_path(data=vertices, coordinates=path_coordinates, pathcodes=pathcodes, style=style, offset=offset, offset_coordinates=offset_coordinates, mplobj=mplobj)
def function[draw_path_collection, parameter[self, paths, path_coordinates, path_transforms, offsets, offset_coordinates, offset_order, styles, mplobj]]: constant[ Draw a collection of paths. The paths, offsets, and styles are all iterables, and the number of paths is max(len(paths), len(offsets)). By default, this is implemented via multiple calls to the draw_path() function. For efficiency, Renderers may choose to customize this implementation. Examples of path collections created by matplotlib are scatter plots, histograms, contour plots, and many others. Parameters ---------- paths : list list of tuples, where each tuple has two elements: (data, pathcodes). See draw_path() for a description of these. path_coordinates: string the coordinates code for the paths, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. path_transforms: array_like an array of shape (*, 3, 3), giving a series of 2D Affine transforms for the paths. These encode translations, rotations, and scalings in the standard way. offsets: array_like An array of offsets of shape (N, 2) offset_coordinates : string the coordinates code for the offsets, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. offset_order : string either "before" or "after". This specifies whether the offset is applied before the path transform, or after. The matplotlib backend equivalent is "before"->"data", "after"->"screen". styles: dictionary A dictionary in which each value is a list of length N, containing the style(s) for the paths. 
mplobj : matplotlib object the matplotlib plot element which generated this collection ] if compare[name[offset_order] equal[==] constant[before]] begin[:] <ast.Raise object at 0x7da18c4cf070> for taget[name[tup]] in starred[call[name[self]._iter_path_collection, parameter[name[paths], name[path_transforms], name[offsets], name[styles]]]] begin[:] <ast.Tuple object at 0x7da18c4cc4f0> assign[=] name[tup] <ast.Tuple object at 0x7da18c4ccbb0> assign[=] name[path] variable[path_transform] assign[=] call[name[transforms].Affine2D, parameter[name[path_transform]]] variable[vertices] assign[=] call[name[path_transform].transform, parameter[name[vertices]]] if compare[name[path_coordinates] equal[==] constant[figure]] begin[:] variable[path_coordinates] assign[=] constant[points] variable[style] assign[=] dictionary[[<ast.Constant object at 0x7da1b0fef040>, <ast.Constant object at 0x7da1b0fee4a0>, <ast.Constant object at 0x7da1b0fee290>, <ast.Constant object at 0x7da1b0fece20>, <ast.Constant object at 0x7da1b0fef340>, <ast.Constant object at 0x7da1b0feef80>], [<ast.Call object at 0x7da1b0fece50>, <ast.Call object at 0x7da1b0fedf00>, <ast.Name object at 0x7da1b0fecb20>, <ast.Constant object at 0x7da1b0feed10>, <ast.Subscript object at 0x7da1b0fede40>, <ast.Subscript object at 0x7da1b0fee800>]] call[name[self].draw_path, parameter[]]
keyword[def] identifier[draw_path_collection] ( identifier[self] , identifier[paths] , identifier[path_coordinates] , identifier[path_transforms] , identifier[offsets] , identifier[offset_coordinates] , identifier[offset_order] , identifier[styles] , identifier[mplobj] = keyword[None] ): literal[string] keyword[if] identifier[offset_order] == literal[string] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) keyword[for] identifier[tup] keyword[in] identifier[self] . identifier[_iter_path_collection] ( identifier[paths] , identifier[path_transforms] , identifier[offsets] , identifier[styles] ): ( identifier[path] , identifier[path_transform] , identifier[offset] , identifier[ec] , identifier[lw] , identifier[fc] )= identifier[tup] identifier[vertices] , identifier[pathcodes] = identifier[path] identifier[path_transform] = identifier[transforms] . identifier[Affine2D] ( identifier[path_transform] ) identifier[vertices] = identifier[path_transform] . identifier[transform] ( identifier[vertices] ) keyword[if] identifier[path_coordinates] == literal[string] : identifier[path_coordinates] = literal[string] identifier[style] ={ literal[string] : identifier[utils] . identifier[color_to_hex] ( identifier[ec] ), literal[string] : identifier[utils] . identifier[color_to_hex] ( identifier[fc] ), literal[string] : identifier[lw] , literal[string] : literal[string] , literal[string] : identifier[styles] [ literal[string] ], literal[string] : identifier[styles] [ literal[string] ]} identifier[self] . identifier[draw_path] ( identifier[data] = identifier[vertices] , identifier[coordinates] = identifier[path_coordinates] , identifier[pathcodes] = identifier[pathcodes] , identifier[style] = identifier[style] , identifier[offset] = identifier[offset] , identifier[offset_coordinates] = identifier[offset_coordinates] , identifier[mplobj] = identifier[mplobj] )
def draw_path_collection(self, paths, path_coordinates, path_transforms, offsets, offset_coordinates, offset_order, styles, mplobj=None): """ Draw a collection of paths. The paths, offsets, and styles are all iterables, and the number of paths is max(len(paths), len(offsets)). By default, this is implemented via multiple calls to the draw_path() function. For efficiency, Renderers may choose to customize this implementation. Examples of path collections created by matplotlib are scatter plots, histograms, contour plots, and many others. Parameters ---------- paths : list list of tuples, where each tuple has two elements: (data, pathcodes). See draw_path() for a description of these. path_coordinates: string the coordinates code for the paths, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. path_transforms: array_like an array of shape (*, 3, 3), giving a series of 2D Affine transforms for the paths. These encode translations, rotations, and scalings in the standard way. offsets: array_like An array of offsets of shape (N, 2) offset_coordinates : string the coordinates code for the offsets, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. offset_order : string either "before" or "after". This specifies whether the offset is applied before the path transform, or after. The matplotlib backend equivalent is "before"->"data", "after"->"screen". styles: dictionary A dictionary in which each value is a list of length N, containing the style(s) for the paths. 
mplobj : matplotlib object the matplotlib plot element which generated this collection """ if offset_order == 'before': raise NotImplementedError('offset before transform') # depends on [control=['if'], data=[]] for tup in self._iter_path_collection(paths, path_transforms, offsets, styles): (path, path_transform, offset, ec, lw, fc) = tup (vertices, pathcodes) = path path_transform = transforms.Affine2D(path_transform) vertices = path_transform.transform(vertices) # This is a hack: if path_coordinates == 'figure': path_coordinates = 'points' # depends on [control=['if'], data=['path_coordinates']] style = {'edgecolor': utils.color_to_hex(ec), 'facecolor': utils.color_to_hex(fc), 'edgewidth': lw, 'dasharray': '10,0', 'alpha': styles['alpha'], 'zorder': styles['zorder']} self.draw_path(data=vertices, coordinates=path_coordinates, pathcodes=pathcodes, style=style, offset=offset, offset_coordinates=offset_coordinates, mplobj=mplobj) # depends on [control=['for'], data=['tup']]
def from_string(string_data, file_format="xyz"): """ Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object """ mols = pb.readstring(str(file_format), str(string_data)) return BabelMolAdaptor(mols.OBMol)
def function[from_string, parameter[string_data, file_format]]: constant[ Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object ] variable[mols] assign[=] call[name[pb].readstring, parameter[call[name[str], parameter[name[file_format]]], call[name[str], parameter[name[string_data]]]]] return[call[name[BabelMolAdaptor], parameter[name[mols].OBMol]]]
keyword[def] identifier[from_string] ( identifier[string_data] , identifier[file_format] = literal[string] ): literal[string] identifier[mols] = identifier[pb] . identifier[readstring] ( identifier[str] ( identifier[file_format] ), identifier[str] ( identifier[string_data] )) keyword[return] identifier[BabelMolAdaptor] ( identifier[mols] . identifier[OBMol] )
def from_string(string_data, file_format='xyz'): """ Uses OpenBabel to read a molecule from a string in all supported formats. Args: string_data: String containing molecule data. file_format: String specifying any OpenBabel supported formats. Returns: BabelMolAdaptor object """ mols = pb.readstring(str(file_format), str(string_data)) return BabelMolAdaptor(mols.OBMol)
def label_from_instance(self, obj): """ Creates labels which represent the tree level of each node when generating option labels. """ return '%s %s' % (self.level_indicator * getattr(obj, obj._mptt_meta.level_attr), obj)
def function[label_from_instance, parameter[self, obj]]: constant[ Creates labels which represent the tree level of each node when generating option labels. ] return[binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da2054a6bc0>, <ast.Name object at 0x7da2054a60b0>]]]]
keyword[def] identifier[label_from_instance] ( identifier[self] , identifier[obj] ): literal[string] keyword[return] literal[string] %( identifier[self] . identifier[level_indicator] * identifier[getattr] ( identifier[obj] , identifier[obj] . identifier[_mptt_meta] . identifier[level_attr] ), identifier[obj] )
def label_from_instance(self, obj): """ Creates labels which represent the tree level of each node when generating option labels. """ return '%s %s' % (self.level_indicator * getattr(obj, obj._mptt_meta.level_attr), obj)
def parse(cls, buf, options=None): """ According to Philip, an abuf is like a TARDIS: it's bigger on the inside """ error = [] do_header = True do_question = True do_answer = True do_authority = True do_additional = True do_options = True if options and isinstance(options, dict): if 'DO_Header' in options and not options['DO_Header']: do_header = options['DO_Header'] if 'DO_Question' in options and not options['DO_Question']: do_question = options['DO_Question'] if 'DO_Answer' in options and not options['DO_Answer']: do_answer = options['DO_Answer'] if 'DO_Authority' in options and not options['DO_Authority']: do_authority = options['DO_Authority'] if 'DO_Additional' in options and not options['DO_Additional']: do_additional = options['DO_Additional'] if 'DO_Options' in options and not options['DO_Options']: do_options = options['DO_Options'] dnsres = {} offset = 0 offset, hdr = cls._parse_header(buf, offset, error) if do_header: dnsres['HEADER'] = hdr for i in range(hdr['QDCOUNT']): res = cls._do_query(buf, offset, error) if res is None: e = ('additional', offset, ('_do_query failed, additional record %d' % i)) error.append(e) dnsres['ERROR'] = error return dnsres offset, qry = res if do_question: if i == 0: dnsres['QuestionSection'] = [qry] else: dnsres['QuestionSection'].append(qry) for i in range(hdr['ANCOUNT']): res = cls._do_rr(buf, offset, error, hdr) if res is None: e = ('additional', offset, ('_do_rr failed, additional record %d' % i)) error.append(e) dnsres['ERROR'] = error return dnsres offset, rr = res if do_answer: if i == 0: dnsres['AnswerSection'] = [rr] else: dnsres['AnswerSection'].append(rr) for i in range(hdr['NSCOUNT']): res = cls._do_rr(buf, offset, error, hdr) if res is None: e = ('additional', offset, ('_do_rr failed, additional record %d' % i)) error.append(e) dnsres['ERROR'] = error return dnsres offset, rr = res if do_authority: if i == 0: dnsres['AuthoritySection'] = [rr] else: dnsres['AuthoritySection'].append(rr) for i in 
range(hdr['ARCOUNT']): res = cls._do_rr(buf, offset, error, hdr) if res is None: e = ('additional', offset, ('_do_rr failed, additional record %d' % i)) error.append(e) dnsres['ERROR'] = error return dnsres offset, rr = res if do_options: if "EDNS0" in rr: dnsres['EDNS0'] = rr['EDNS0'] continue if do_additional: if 'AdditionalSection' in dnsres: dnsres['AdditionalSection'].append(rr) else: dnsres['AdditionalSection'] = [rr] hdr['ReturnCode'] = cls._rcode_to_text(hdr['ReturnCode']) if offset < len(buf): e = ('end', offset, 'trailing garbage, buf size = %d' % len(buf)) error.append(e) #result['decodedabufs_with_ERROR'] += 1 dnsres['ERROR'] = error return dnsres
def function[parse, parameter[cls, buf, options]]: constant[ According to Philip, an abuf is like a TARDIS: it's bigger on the inside ] variable[error] assign[=] list[[]] variable[do_header] assign[=] constant[True] variable[do_question] assign[=] constant[True] variable[do_answer] assign[=] constant[True] variable[do_authority] assign[=] constant[True] variable[do_additional] assign[=] constant[True] variable[do_options] assign[=] constant[True] if <ast.BoolOp object at 0x7da20e9b1060> begin[:] if <ast.BoolOp object at 0x7da20e9b09a0> begin[:] variable[do_header] assign[=] call[name[options]][constant[DO_Header]] if <ast.BoolOp object at 0x7da20e9b1300> begin[:] variable[do_question] assign[=] call[name[options]][constant[DO_Question]] if <ast.BoolOp object at 0x7da20e9b3610> begin[:] variable[do_answer] assign[=] call[name[options]][constant[DO_Answer]] if <ast.BoolOp object at 0x7da20e9b1b70> begin[:] variable[do_authority] assign[=] call[name[options]][constant[DO_Authority]] if <ast.BoolOp object at 0x7da20e9b2ec0> begin[:] variable[do_additional] assign[=] call[name[options]][constant[DO_Additional]] if <ast.BoolOp object at 0x7da18ede7e20> begin[:] variable[do_options] assign[=] call[name[options]][constant[DO_Options]] variable[dnsres] assign[=] dictionary[[], []] variable[offset] assign[=] constant[0] <ast.Tuple object at 0x7da18ede64d0> assign[=] call[name[cls]._parse_header, parameter[name[buf], name[offset], name[error]]] if name[do_header] begin[:] call[name[dnsres]][constant[HEADER]] assign[=] name[hdr] for taget[name[i]] in starred[call[name[range], parameter[call[name[hdr]][constant[QDCOUNT]]]]] begin[:] variable[res] assign[=] call[name[cls]._do_query, parameter[name[buf], name[offset], name[error]]] if compare[name[res] is constant[None]] begin[:] variable[e] assign[=] tuple[[<ast.Constant object at 0x7da18ede63e0>, <ast.Name object at 0x7da18ede4850>, <ast.BinOp object at 0x7da18ede5600>]] call[name[error].append, parameter[name[e]]] 
call[name[dnsres]][constant[ERROR]] assign[=] name[error] return[name[dnsres]] <ast.Tuple object at 0x7da18ede7310> assign[=] name[res] if name[do_question] begin[:] if compare[name[i] equal[==] constant[0]] begin[:] call[name[dnsres]][constant[QuestionSection]] assign[=] list[[<ast.Name object at 0x7da18ede6a10>]] for taget[name[i]] in starred[call[name[range], parameter[call[name[hdr]][constant[ANCOUNT]]]]] begin[:] variable[res] assign[=] call[name[cls]._do_rr, parameter[name[buf], name[offset], name[error], name[hdr]]] if compare[name[res] is constant[None]] begin[:] variable[e] assign[=] tuple[[<ast.Constant object at 0x7da20c6e4340>, <ast.Name object at 0x7da20c6e6830>, <ast.BinOp object at 0x7da20c6e70a0>]] call[name[error].append, parameter[name[e]]] call[name[dnsres]][constant[ERROR]] assign[=] name[error] return[name[dnsres]] <ast.Tuple object at 0x7da20c6e6d40> assign[=] name[res] if name[do_answer] begin[:] if compare[name[i] equal[==] constant[0]] begin[:] call[name[dnsres]][constant[AnswerSection]] assign[=] list[[<ast.Name object at 0x7da18f09ca60>]] for taget[name[i]] in starred[call[name[range], parameter[call[name[hdr]][constant[NSCOUNT]]]]] begin[:] variable[res] assign[=] call[name[cls]._do_rr, parameter[name[buf], name[offset], name[error], name[hdr]]] if compare[name[res] is constant[None]] begin[:] variable[e] assign[=] tuple[[<ast.Constant object at 0x7da18f09e6b0>, <ast.Name object at 0x7da18f09c940>, <ast.BinOp object at 0x7da18f09ec20>]] call[name[error].append, parameter[name[e]]] call[name[dnsres]][constant[ERROR]] assign[=] name[error] return[name[dnsres]] <ast.Tuple object at 0x7da18f09d4b0> assign[=] name[res] if name[do_authority] begin[:] if compare[name[i] equal[==] constant[0]] begin[:] call[name[dnsres]][constant[AuthoritySection]] assign[=] list[[<ast.Name object at 0x7da18f09fbe0>]] for taget[name[i]] in starred[call[name[range], parameter[call[name[hdr]][constant[ARCOUNT]]]]] begin[:] variable[res] assign[=] 
call[name[cls]._do_rr, parameter[name[buf], name[offset], name[error], name[hdr]]] if compare[name[res] is constant[None]] begin[:] variable[e] assign[=] tuple[[<ast.Constant object at 0x7da18f09fc40>, <ast.Name object at 0x7da18f09dd20>, <ast.BinOp object at 0x7da18f09da50>]] call[name[error].append, parameter[name[e]]] call[name[dnsres]][constant[ERROR]] assign[=] name[error] return[name[dnsres]] <ast.Tuple object at 0x7da18f09cc10> assign[=] name[res] if name[do_options] begin[:] if compare[constant[EDNS0] in name[rr]] begin[:] call[name[dnsres]][constant[EDNS0]] assign[=] call[name[rr]][constant[EDNS0]] continue if name[do_additional] begin[:] if compare[constant[AdditionalSection] in name[dnsres]] begin[:] call[call[name[dnsres]][constant[AdditionalSection]].append, parameter[name[rr]]] call[name[hdr]][constant[ReturnCode]] assign[=] call[name[cls]._rcode_to_text, parameter[call[name[hdr]][constant[ReturnCode]]]] if compare[name[offset] less[<] call[name[len], parameter[name[buf]]]] begin[:] variable[e] assign[=] tuple[[<ast.Constant object at 0x7da18f811f30>, <ast.Name object at 0x7da18f8126b0>, <ast.BinOp object at 0x7da18f812f20>]] call[name[error].append, parameter[name[e]]] call[name[dnsres]][constant[ERROR]] assign[=] name[error] return[name[dnsres]]
keyword[def] identifier[parse] ( identifier[cls] , identifier[buf] , identifier[options] = keyword[None] ): literal[string] identifier[error] =[] identifier[do_header] = keyword[True] identifier[do_question] = keyword[True] identifier[do_answer] = keyword[True] identifier[do_authority] = keyword[True] identifier[do_additional] = keyword[True] identifier[do_options] = keyword[True] keyword[if] identifier[options] keyword[and] identifier[isinstance] ( identifier[options] , identifier[dict] ): keyword[if] literal[string] keyword[in] identifier[options] keyword[and] keyword[not] identifier[options] [ literal[string] ]: identifier[do_header] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[and] keyword[not] identifier[options] [ literal[string] ]: identifier[do_question] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[and] keyword[not] identifier[options] [ literal[string] ]: identifier[do_answer] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[and] keyword[not] identifier[options] [ literal[string] ]: identifier[do_authority] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[and] keyword[not] identifier[options] [ literal[string] ]: identifier[do_additional] = identifier[options] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[options] keyword[and] keyword[not] identifier[options] [ literal[string] ]: identifier[do_options] = identifier[options] [ literal[string] ] identifier[dnsres] ={} identifier[offset] = literal[int] identifier[offset] , identifier[hdr] = identifier[cls] . 
identifier[_parse_header] ( identifier[buf] , identifier[offset] , identifier[error] ) keyword[if] identifier[do_header] : identifier[dnsres] [ literal[string] ]= identifier[hdr] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[hdr] [ literal[string] ]): identifier[res] = identifier[cls] . identifier[_do_query] ( identifier[buf] , identifier[offset] , identifier[error] ) keyword[if] identifier[res] keyword[is] keyword[None] : identifier[e] =( literal[string] , identifier[offset] ,( literal[string] % identifier[i] )) identifier[error] . identifier[append] ( identifier[e] ) identifier[dnsres] [ literal[string] ]= identifier[error] keyword[return] identifier[dnsres] identifier[offset] , identifier[qry] = identifier[res] keyword[if] identifier[do_question] : keyword[if] identifier[i] == literal[int] : identifier[dnsres] [ literal[string] ]=[ identifier[qry] ] keyword[else] : identifier[dnsres] [ literal[string] ]. identifier[append] ( identifier[qry] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[hdr] [ literal[string] ]): identifier[res] = identifier[cls] . identifier[_do_rr] ( identifier[buf] , identifier[offset] , identifier[error] , identifier[hdr] ) keyword[if] identifier[res] keyword[is] keyword[None] : identifier[e] =( literal[string] , identifier[offset] ,( literal[string] % identifier[i] )) identifier[error] . identifier[append] ( identifier[e] ) identifier[dnsres] [ literal[string] ]= identifier[error] keyword[return] identifier[dnsres] identifier[offset] , identifier[rr] = identifier[res] keyword[if] identifier[do_answer] : keyword[if] identifier[i] == literal[int] : identifier[dnsres] [ literal[string] ]=[ identifier[rr] ] keyword[else] : identifier[dnsres] [ literal[string] ]. identifier[append] ( identifier[rr] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[hdr] [ literal[string] ]): identifier[res] = identifier[cls] . 
identifier[_do_rr] ( identifier[buf] , identifier[offset] , identifier[error] , identifier[hdr] ) keyword[if] identifier[res] keyword[is] keyword[None] : identifier[e] =( literal[string] , identifier[offset] ,( literal[string] % identifier[i] )) identifier[error] . identifier[append] ( identifier[e] ) identifier[dnsres] [ literal[string] ]= identifier[error] keyword[return] identifier[dnsres] identifier[offset] , identifier[rr] = identifier[res] keyword[if] identifier[do_authority] : keyword[if] identifier[i] == literal[int] : identifier[dnsres] [ literal[string] ]=[ identifier[rr] ] keyword[else] : identifier[dnsres] [ literal[string] ]. identifier[append] ( identifier[rr] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[hdr] [ literal[string] ]): identifier[res] = identifier[cls] . identifier[_do_rr] ( identifier[buf] , identifier[offset] , identifier[error] , identifier[hdr] ) keyword[if] identifier[res] keyword[is] keyword[None] : identifier[e] =( literal[string] , identifier[offset] ,( literal[string] % identifier[i] )) identifier[error] . identifier[append] ( identifier[e] ) identifier[dnsres] [ literal[string] ]= identifier[error] keyword[return] identifier[dnsres] identifier[offset] , identifier[rr] = identifier[res] keyword[if] identifier[do_options] : keyword[if] literal[string] keyword[in] identifier[rr] : identifier[dnsres] [ literal[string] ]= identifier[rr] [ literal[string] ] keyword[continue] keyword[if] identifier[do_additional] : keyword[if] literal[string] keyword[in] identifier[dnsres] : identifier[dnsres] [ literal[string] ]. identifier[append] ( identifier[rr] ) keyword[else] : identifier[dnsres] [ literal[string] ]=[ identifier[rr] ] identifier[hdr] [ literal[string] ]= identifier[cls] . 
identifier[_rcode_to_text] ( identifier[hdr] [ literal[string] ]) keyword[if] identifier[offset] < identifier[len] ( identifier[buf] ): identifier[e] =( literal[string] , identifier[offset] , literal[string] % identifier[len] ( identifier[buf] )) identifier[error] . identifier[append] ( identifier[e] ) identifier[dnsres] [ literal[string] ]= identifier[error] keyword[return] identifier[dnsres]
def parse(cls, buf, options=None): """ According to Philip, an abuf is like a TARDIS: it's bigger on the inside """ error = [] do_header = True do_question = True do_answer = True do_authority = True do_additional = True do_options = True if options and isinstance(options, dict): if 'DO_Header' in options and (not options['DO_Header']): do_header = options['DO_Header'] # depends on [control=['if'], data=[]] if 'DO_Question' in options and (not options['DO_Question']): do_question = options['DO_Question'] # depends on [control=['if'], data=[]] if 'DO_Answer' in options and (not options['DO_Answer']): do_answer = options['DO_Answer'] # depends on [control=['if'], data=[]] if 'DO_Authority' in options and (not options['DO_Authority']): do_authority = options['DO_Authority'] # depends on [control=['if'], data=[]] if 'DO_Additional' in options and (not options['DO_Additional']): do_additional = options['DO_Additional'] # depends on [control=['if'], data=[]] if 'DO_Options' in options and (not options['DO_Options']): do_options = options['DO_Options'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] dnsres = {} offset = 0 (offset, hdr) = cls._parse_header(buf, offset, error) if do_header: dnsres['HEADER'] = hdr # depends on [control=['if'], data=[]] for i in range(hdr['QDCOUNT']): res = cls._do_query(buf, offset, error) if res is None: e = ('additional', offset, '_do_query failed, additional record %d' % i) error.append(e) dnsres['ERROR'] = error return dnsres # depends on [control=['if'], data=[]] (offset, qry) = res if do_question: if i == 0: dnsres['QuestionSection'] = [qry] # depends on [control=['if'], data=[]] else: dnsres['QuestionSection'].append(qry) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] for i in range(hdr['ANCOUNT']): res = cls._do_rr(buf, offset, error, hdr) if res is None: e = ('additional', offset, '_do_rr failed, additional record %d' % i) error.append(e) dnsres['ERROR'] = error return 
dnsres # depends on [control=['if'], data=[]] (offset, rr) = res if do_answer: if i == 0: dnsres['AnswerSection'] = [rr] # depends on [control=['if'], data=[]] else: dnsres['AnswerSection'].append(rr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] for i in range(hdr['NSCOUNT']): res = cls._do_rr(buf, offset, error, hdr) if res is None: e = ('additional', offset, '_do_rr failed, additional record %d' % i) error.append(e) dnsres['ERROR'] = error return dnsres # depends on [control=['if'], data=[]] (offset, rr) = res if do_authority: if i == 0: dnsres['AuthoritySection'] = [rr] # depends on [control=['if'], data=[]] else: dnsres['AuthoritySection'].append(rr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] for i in range(hdr['ARCOUNT']): res = cls._do_rr(buf, offset, error, hdr) if res is None: e = ('additional', offset, '_do_rr failed, additional record %d' % i) error.append(e) dnsres['ERROR'] = error return dnsres # depends on [control=['if'], data=[]] (offset, rr) = res if do_options: if 'EDNS0' in rr: dnsres['EDNS0'] = rr['EDNS0'] continue # depends on [control=['if'], data=['rr']] # depends on [control=['if'], data=[]] if do_additional: if 'AdditionalSection' in dnsres: dnsres['AdditionalSection'].append(rr) # depends on [control=['if'], data=['dnsres']] else: dnsres['AdditionalSection'] = [rr] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] hdr['ReturnCode'] = cls._rcode_to_text(hdr['ReturnCode']) if offset < len(buf): e = ('end', offset, 'trailing garbage, buf size = %d' % len(buf)) error.append(e) #result['decodedabufs_with_ERROR'] += 1 dnsres['ERROR'] = error # depends on [control=['if'], data=['offset']] return dnsres
def sent2examples(self, sent): """ Convert ngrams into feature vectors.""" # TODO(rmyeid): use expanders. words = [w if w in self.embeddings else TaggerBase.UNK for w in sent] ngrams = TaggerBase.ngrams(words, self.context, self.transfer) fvs = [] for word, ngram in zip(sent, ngrams): fv = np.array([self.embeddings.get(w, self.embeddings.zero_vector()) for w in ngram]).flatten() if self.add_bias: fv = np.hstack((fv, np.array(1))) yield word, fv
def function[sent2examples, parameter[self, sent]]: constant[ Convert ngrams into feature vectors.] variable[words] assign[=] <ast.ListComp object at 0x7da20c76ec80> variable[ngrams] assign[=] call[name[TaggerBase].ngrams, parameter[name[words], name[self].context, name[self].transfer]] variable[fvs] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da20c76d690>, <ast.Name object at 0x7da20c76f2e0>]]] in starred[call[name[zip], parameter[name[sent], name[ngrams]]]] begin[:] variable[fv] assign[=] call[call[name[np].array, parameter[<ast.ListComp object at 0x7da20c76c520>]].flatten, parameter[]] if name[self].add_bias begin[:] variable[fv] assign[=] call[name[np].hstack, parameter[tuple[[<ast.Name object at 0x7da20c76c430>, <ast.Call object at 0x7da20c76d870>]]]] <ast.Yield object at 0x7da20c76ea70>
keyword[def] identifier[sent2examples] ( identifier[self] , identifier[sent] ): literal[string] identifier[words] =[ identifier[w] keyword[if] identifier[w] keyword[in] identifier[self] . identifier[embeddings] keyword[else] identifier[TaggerBase] . identifier[UNK] keyword[for] identifier[w] keyword[in] identifier[sent] ] identifier[ngrams] = identifier[TaggerBase] . identifier[ngrams] ( identifier[words] , identifier[self] . identifier[context] , identifier[self] . identifier[transfer] ) identifier[fvs] =[] keyword[for] identifier[word] , identifier[ngram] keyword[in] identifier[zip] ( identifier[sent] , identifier[ngrams] ): identifier[fv] = identifier[np] . identifier[array] ([ identifier[self] . identifier[embeddings] . identifier[get] ( identifier[w] , identifier[self] . identifier[embeddings] . identifier[zero_vector] ()) keyword[for] identifier[w] keyword[in] identifier[ngram] ]). identifier[flatten] () keyword[if] identifier[self] . identifier[add_bias] : identifier[fv] = identifier[np] . identifier[hstack] (( identifier[fv] , identifier[np] . identifier[array] ( literal[int] ))) keyword[yield] identifier[word] , identifier[fv]
def sent2examples(self, sent): """ Convert ngrams into feature vectors.""" # TODO(rmyeid): use expanders. words = [w if w in self.embeddings else TaggerBase.UNK for w in sent] ngrams = TaggerBase.ngrams(words, self.context, self.transfer) fvs = [] for (word, ngram) in zip(sent, ngrams): fv = np.array([self.embeddings.get(w, self.embeddings.zero_vector()) for w in ngram]).flatten() if self.add_bias: fv = np.hstack((fv, np.array(1))) # depends on [control=['if'], data=[]] yield (word, fv) # depends on [control=['for'], data=[]]
def expose_request(func): """ A decorator that adds an expose_request flag to the underlying callable. @raise TypeError: C{func} must be callable. """ if not python.callable(func): raise TypeError("func must be callable") if isinstance(func, types.UnboundMethodType): setattr(func.im_func, '_pyamf_expose_request', True) else: setattr(func, '_pyamf_expose_request', True) return func
def function[expose_request, parameter[func]]: constant[ A decorator that adds an expose_request flag to the underlying callable. @raise TypeError: C{func} must be callable. ] if <ast.UnaryOp object at 0x7da2054a7340> begin[:] <ast.Raise object at 0x7da2054a7d90> if call[name[isinstance], parameter[name[func], name[types].UnboundMethodType]] begin[:] call[name[setattr], parameter[name[func].im_func, constant[_pyamf_expose_request], constant[True]]] return[name[func]]
keyword[def] identifier[expose_request] ( identifier[func] ): literal[string] keyword[if] keyword[not] identifier[python] . identifier[callable] ( identifier[func] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[func] , identifier[types] . identifier[UnboundMethodType] ): identifier[setattr] ( identifier[func] . identifier[im_func] , literal[string] , keyword[True] ) keyword[else] : identifier[setattr] ( identifier[func] , literal[string] , keyword[True] ) keyword[return] identifier[func]
def expose_request(func): """ A decorator that adds an expose_request flag to the underlying callable. @raise TypeError: C{func} must be callable. """ if not python.callable(func): raise TypeError('func must be callable') # depends on [control=['if'], data=[]] if isinstance(func, types.UnboundMethodType): setattr(func.im_func, '_pyamf_expose_request', True) # depends on [control=['if'], data=[]] else: setattr(func, '_pyamf_expose_request', True) return func
def collect_samples(agents, sample_batch_size, num_envs_per_worker, train_batch_size): """Collects at least train_batch_size samples, never discarding any.""" num_timesteps_so_far = 0 trajectories = [] agent_dict = {} for agent in agents: fut_sample = agent.sample.remote() agent_dict[fut_sample] = agent while agent_dict: [fut_sample], _ = ray.wait(list(agent_dict)) agent = agent_dict.pop(fut_sample) next_sample = ray_get_and_free(fut_sample) assert next_sample.count >= sample_batch_size * num_envs_per_worker num_timesteps_so_far += next_sample.count trajectories.append(next_sample) # Only launch more tasks if we don't already have enough pending pending = len(agent_dict) * sample_batch_size * num_envs_per_worker if num_timesteps_so_far + pending < train_batch_size: fut_sample2 = agent.sample.remote() agent_dict[fut_sample2] = agent return SampleBatch.concat_samples(trajectories)
def function[collect_samples, parameter[agents, sample_batch_size, num_envs_per_worker, train_batch_size]]: constant[Collects at least train_batch_size samples, never discarding any.] variable[num_timesteps_so_far] assign[=] constant[0] variable[trajectories] assign[=] list[[]] variable[agent_dict] assign[=] dictionary[[], []] for taget[name[agent]] in starred[name[agents]] begin[:] variable[fut_sample] assign[=] call[name[agent].sample.remote, parameter[]] call[name[agent_dict]][name[fut_sample]] assign[=] name[agent] while name[agent_dict] begin[:] <ast.Tuple object at 0x7da20c7c9060> assign[=] call[name[ray].wait, parameter[call[name[list], parameter[name[agent_dict]]]]] variable[agent] assign[=] call[name[agent_dict].pop, parameter[name[fut_sample]]] variable[next_sample] assign[=] call[name[ray_get_and_free], parameter[name[fut_sample]]] assert[compare[name[next_sample].count greater_or_equal[>=] binary_operation[name[sample_batch_size] * name[num_envs_per_worker]]]] <ast.AugAssign object at 0x7da20c7cb2b0> call[name[trajectories].append, parameter[name[next_sample]]] variable[pending] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[agent_dict]]] * name[sample_batch_size]] * name[num_envs_per_worker]] if compare[binary_operation[name[num_timesteps_so_far] + name[pending]] less[<] name[train_batch_size]] begin[:] variable[fut_sample2] assign[=] call[name[agent].sample.remote, parameter[]] call[name[agent_dict]][name[fut_sample2]] assign[=] name[agent] return[call[name[SampleBatch].concat_samples, parameter[name[trajectories]]]]
keyword[def] identifier[collect_samples] ( identifier[agents] , identifier[sample_batch_size] , identifier[num_envs_per_worker] , identifier[train_batch_size] ): literal[string] identifier[num_timesteps_so_far] = literal[int] identifier[trajectories] =[] identifier[agent_dict] ={} keyword[for] identifier[agent] keyword[in] identifier[agents] : identifier[fut_sample] = identifier[agent] . identifier[sample] . identifier[remote] () identifier[agent_dict] [ identifier[fut_sample] ]= identifier[agent] keyword[while] identifier[agent_dict] : [ identifier[fut_sample] ], identifier[_] = identifier[ray] . identifier[wait] ( identifier[list] ( identifier[agent_dict] )) identifier[agent] = identifier[agent_dict] . identifier[pop] ( identifier[fut_sample] ) identifier[next_sample] = identifier[ray_get_and_free] ( identifier[fut_sample] ) keyword[assert] identifier[next_sample] . identifier[count] >= identifier[sample_batch_size] * identifier[num_envs_per_worker] identifier[num_timesteps_so_far] += identifier[next_sample] . identifier[count] identifier[trajectories] . identifier[append] ( identifier[next_sample] ) identifier[pending] = identifier[len] ( identifier[agent_dict] )* identifier[sample_batch_size] * identifier[num_envs_per_worker] keyword[if] identifier[num_timesteps_so_far] + identifier[pending] < identifier[train_batch_size] : identifier[fut_sample2] = identifier[agent] . identifier[sample] . identifier[remote] () identifier[agent_dict] [ identifier[fut_sample2] ]= identifier[agent] keyword[return] identifier[SampleBatch] . identifier[concat_samples] ( identifier[trajectories] )
def collect_samples(agents, sample_batch_size, num_envs_per_worker, train_batch_size): """Collects at least train_batch_size samples, never discarding any.""" num_timesteps_so_far = 0 trajectories = [] agent_dict = {} for agent in agents: fut_sample = agent.sample.remote() agent_dict[fut_sample] = agent # depends on [control=['for'], data=['agent']] while agent_dict: ([fut_sample], _) = ray.wait(list(agent_dict)) agent = agent_dict.pop(fut_sample) next_sample = ray_get_and_free(fut_sample) assert next_sample.count >= sample_batch_size * num_envs_per_worker num_timesteps_so_far += next_sample.count trajectories.append(next_sample) # Only launch more tasks if we don't already have enough pending pending = len(agent_dict) * sample_batch_size * num_envs_per_worker if num_timesteps_so_far + pending < train_batch_size: fut_sample2 = agent.sample.remote() agent_dict[fut_sample2] = agent # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] return SampleBatch.concat_samples(trajectories)
def get_access_token(self): """ Obtain the access token to access private resources at the API endpoint. """ if self.access_token is None: request_token = self._get_rt_from_session() oauth = OAuth1( self.consumer_key, client_secret=self.consumer_secret, resource_owner_key=request_token['oauth_token'], resource_owner_secret=request_token['oauth_token_secret']) at_url = self.access_token_url # Passing along oauth_verifier is required according to: # http://groups.google.com/group/twitter-development-talk/browse_frm/thread/472500cfe9e7cdb9# # Though, the custom oauth_callback seems to work without it? oauth_verifier = get_request_param(self.request, 'oauth_verifier') if oauth_verifier: at_url = at_url + '?' + urlencode( {'oauth_verifier': oauth_verifier}) response = requests.post(url=at_url, auth=oauth) if response.status_code not in [200, 201]: raise OAuthError( _('Invalid response while obtaining access token' ' from "%s".') % get_token_prefix( self.request_token_url)) self.access_token = dict(parse_qsl(response.text)) self.request.session['oauth_%s_access_token' % get_token_prefix( self.request_token_url)] = self.access_token return self.access_token
def function[get_access_token, parameter[self]]: constant[ Obtain the access token to access private resources at the API endpoint. ] if compare[name[self].access_token is constant[None]] begin[:] variable[request_token] assign[=] call[name[self]._get_rt_from_session, parameter[]] variable[oauth] assign[=] call[name[OAuth1], parameter[name[self].consumer_key]] variable[at_url] assign[=] name[self].access_token_url variable[oauth_verifier] assign[=] call[name[get_request_param], parameter[name[self].request, constant[oauth_verifier]]] if name[oauth_verifier] begin[:] variable[at_url] assign[=] binary_operation[binary_operation[name[at_url] + constant[?]] + call[name[urlencode], parameter[dictionary[[<ast.Constant object at 0x7da18bcc9960>], [<ast.Name object at 0x7da18bccb820>]]]]] variable[response] assign[=] call[name[requests].post, parameter[]] if compare[name[response].status_code <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18f810e20>, <ast.Constant object at 0x7da18f811600>]]] begin[:] <ast.Raise object at 0x7da18f8121a0> name[self].access_token assign[=] call[name[dict], parameter[call[name[parse_qsl], parameter[name[response].text]]]] call[name[self].request.session][binary_operation[constant[oauth_%s_access_token] <ast.Mod object at 0x7da2590d6920> call[name[get_token_prefix], parameter[name[self].request_token_url]]]] assign[=] name[self].access_token return[name[self].access_token]
keyword[def] identifier[get_access_token] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[access_token] keyword[is] keyword[None] : identifier[request_token] = identifier[self] . identifier[_get_rt_from_session] () identifier[oauth] = identifier[OAuth1] ( identifier[self] . identifier[consumer_key] , identifier[client_secret] = identifier[self] . identifier[consumer_secret] , identifier[resource_owner_key] = identifier[request_token] [ literal[string] ], identifier[resource_owner_secret] = identifier[request_token] [ literal[string] ]) identifier[at_url] = identifier[self] . identifier[access_token_url] identifier[oauth_verifier] = identifier[get_request_param] ( identifier[self] . identifier[request] , literal[string] ) keyword[if] identifier[oauth_verifier] : identifier[at_url] = identifier[at_url] + literal[string] + identifier[urlencode] ( { literal[string] : identifier[oauth_verifier] }) identifier[response] = identifier[requests] . identifier[post] ( identifier[url] = identifier[at_url] , identifier[auth] = identifier[oauth] ) keyword[if] identifier[response] . identifier[status_code] keyword[not] keyword[in] [ literal[int] , literal[int] ]: keyword[raise] identifier[OAuthError] ( identifier[_] ( literal[string] literal[string] )% identifier[get_token_prefix] ( identifier[self] . identifier[request_token_url] )) identifier[self] . identifier[access_token] = identifier[dict] ( identifier[parse_qsl] ( identifier[response] . identifier[text] )) identifier[self] . identifier[request] . identifier[session] [ literal[string] % identifier[get_token_prefix] ( identifier[self] . identifier[request_token_url] )]= identifier[self] . identifier[access_token] keyword[return] identifier[self] . identifier[access_token]
def get_access_token(self): """ Obtain the access token to access private resources at the API endpoint. """ if self.access_token is None: request_token = self._get_rt_from_session() oauth = OAuth1(self.consumer_key, client_secret=self.consumer_secret, resource_owner_key=request_token['oauth_token'], resource_owner_secret=request_token['oauth_token_secret']) at_url = self.access_token_url # Passing along oauth_verifier is required according to: # http://groups.google.com/group/twitter-development-talk/browse_frm/thread/472500cfe9e7cdb9# # Though, the custom oauth_callback seems to work without it? oauth_verifier = get_request_param(self.request, 'oauth_verifier') if oauth_verifier: at_url = at_url + '?' + urlencode({'oauth_verifier': oauth_verifier}) # depends on [control=['if'], data=[]] response = requests.post(url=at_url, auth=oauth) if response.status_code not in [200, 201]: raise OAuthError(_('Invalid response while obtaining access token from "%s".') % get_token_prefix(self.request_token_url)) # depends on [control=['if'], data=[]] self.access_token = dict(parse_qsl(response.text)) self.request.session['oauth_%s_access_token' % get_token_prefix(self.request_token_url)] = self.access_token # depends on [control=['if'], data=[]] return self.access_token
def parse_multiexpreq(self, tup_tree): # pylint: disable=unused-argument """ Not Implemented. Because this request is generally not implemented by platforms, It will probably never be implemented. """ raise CIMXMLParseError( _format("Internal Error: Parsing support for element {0!A} is not " "implemented", name(tup_tree)), conn_id=self.conn_id)
def function[parse_multiexpreq, parameter[self, tup_tree]]: constant[ Not Implemented. Because this request is generally not implemented by platforms, It will probably never be implemented. ] <ast.Raise object at 0x7da1b0ddc370>
keyword[def] identifier[parse_multiexpreq] ( identifier[self] , identifier[tup_tree] ): literal[string] keyword[raise] identifier[CIMXMLParseError] ( identifier[_format] ( literal[string] literal[string] , identifier[name] ( identifier[tup_tree] )), identifier[conn_id] = identifier[self] . identifier[conn_id] )
def parse_multiexpreq(self, tup_tree): # pylint: disable=unused-argument '\n Not Implemented. Because this request is generally not implemented\n by platforms, It will probably never be implemented.\n ' raise CIMXMLParseError(_format('Internal Error: Parsing support for element {0!A} is not implemented', name(tup_tree)), conn_id=self.conn_id)
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name): u""" Returns string with the name of the kwargs dict if the params after the first star need fixing Otherwise returns empty string """ found_kwargs = False needs_fix = False for t in raw_params[2:]: if t.type == token.COMMA: # Commas are irrelevant at this stage. continue elif t.type == token.NAME and not found_kwargs: # Keyword-only argument: definitely need to fix. needs_fix = True elif t.type == token.NAME and found_kwargs: # Return 'foobar' of **foobar, if needed. return t.value if needs_fix else u'' elif t.type == token.DOUBLESTAR: # Found either '*' from **foobar. found_kwargs = True else: # Never found **foobar. Return a synthetic name, if needed. return kwargs_default if needs_fix else u''
def function[needs_fixing, parameter[raw_params, kwargs_default]]: constant[ Returns string with the name of the kwargs dict if the params after the first star need fixing Otherwise returns empty string ] variable[found_kwargs] assign[=] constant[False] variable[needs_fix] assign[=] constant[False] for taget[name[t]] in starred[call[name[raw_params]][<ast.Slice object at 0x7da20c6ab8b0>]] begin[:] if compare[name[t].type equal[==] name[token].COMMA] begin[:] continue
keyword[def] identifier[needs_fixing] ( identifier[raw_params] , identifier[kwargs_default] = identifier[_kwargs_default_name] ): literal[string] identifier[found_kwargs] = keyword[False] identifier[needs_fix] = keyword[False] keyword[for] identifier[t] keyword[in] identifier[raw_params] [ literal[int] :]: keyword[if] identifier[t] . identifier[type] == identifier[token] . identifier[COMMA] : keyword[continue] keyword[elif] identifier[t] . identifier[type] == identifier[token] . identifier[NAME] keyword[and] keyword[not] identifier[found_kwargs] : identifier[needs_fix] = keyword[True] keyword[elif] identifier[t] . identifier[type] == identifier[token] . identifier[NAME] keyword[and] identifier[found_kwargs] : keyword[return] identifier[t] . identifier[value] keyword[if] identifier[needs_fix] keyword[else] literal[string] keyword[elif] identifier[t] . identifier[type] == identifier[token] . identifier[DOUBLESTAR] : identifier[found_kwargs] = keyword[True] keyword[else] : keyword[return] identifier[kwargs_default] keyword[if] identifier[needs_fix] keyword[else] literal[string]
def needs_fixing(raw_params, kwargs_default=_kwargs_default_name): u""" Returns string with the name of the kwargs dict if the params after the first star need fixing Otherwise returns empty string """ found_kwargs = False needs_fix = False for t in raw_params[2:]: if t.type == token.COMMA: # Commas are irrelevant at this stage. continue # depends on [control=['if'], data=[]] elif t.type == token.NAME and (not found_kwargs): # Keyword-only argument: definitely need to fix. needs_fix = True # depends on [control=['if'], data=[]] elif t.type == token.NAME and found_kwargs: # Return 'foobar' of **foobar, if needed. return t.value if needs_fix else u'' # depends on [control=['if'], data=[]] elif t.type == token.DOUBLESTAR: # Found either '*' from **foobar. found_kwargs = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] else: # Never found **foobar. Return a synthetic name, if needed. return kwargs_default if needs_fix else u''
def delete_lower(script, layer_num=None): """ Delete all layers below the specified one. Useful for MeshLab ver 2016.12, whcih will only output layer 0. """ if layer_num is None: layer_num = script.current_layer() if layer_num != 0: change(script, 0) for i in range(layer_num): delete(script, 0) return None
def function[delete_lower, parameter[script, layer_num]]: constant[ Delete all layers below the specified one. Useful for MeshLab ver 2016.12, whcih will only output layer 0. ] if compare[name[layer_num] is constant[None]] begin[:] variable[layer_num] assign[=] call[name[script].current_layer, parameter[]] if compare[name[layer_num] not_equal[!=] constant[0]] begin[:] call[name[change], parameter[name[script], constant[0]]] for taget[name[i]] in starred[call[name[range], parameter[name[layer_num]]]] begin[:] call[name[delete], parameter[name[script], constant[0]]] return[constant[None]]
keyword[def] identifier[delete_lower] ( identifier[script] , identifier[layer_num] = keyword[None] ): literal[string] keyword[if] identifier[layer_num] keyword[is] keyword[None] : identifier[layer_num] = identifier[script] . identifier[current_layer] () keyword[if] identifier[layer_num] != literal[int] : identifier[change] ( identifier[script] , literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[layer_num] ): identifier[delete] ( identifier[script] , literal[int] ) keyword[return] keyword[None]
def delete_lower(script, layer_num=None): """ Delete all layers below the specified one. Useful for MeshLab ver 2016.12, whcih will only output layer 0. """ if layer_num is None: layer_num = script.current_layer() # depends on [control=['if'], data=['layer_num']] if layer_num != 0: change(script, 0) # depends on [control=['if'], data=[]] for i in range(layer_num): delete(script, 0) # depends on [control=['for'], data=[]] return None
def brew_query_parts(self): """ Make columns, group_bys, filters, havings """ columns, group_bys, filters, havings = [], [], set(), set() for ingredient in self.ingredients(): if ingredient.query_columns: columns.extend(ingredient.query_columns) if ingredient.group_by: group_bys.extend(ingredient.group_by) if ingredient.filters: filters.update(ingredient.filters) if ingredient.havings: havings.update(ingredient.havings) return { 'columns': columns, 'group_bys': group_bys, 'filters': filters, 'havings': havings, }
def function[brew_query_parts, parameter[self]]: constant[ Make columns, group_bys, filters, havings ] <ast.Tuple object at 0x7da18f09e110> assign[=] tuple[[<ast.List object at 0x7da18f09cb50>, <ast.List object at 0x7da18f09e740>, <ast.Call object at 0x7da18f09e440>, <ast.Call object at 0x7da18f09f880>]] for taget[name[ingredient]] in starred[call[name[self].ingredients, parameter[]]] begin[:] if name[ingredient].query_columns begin[:] call[name[columns].extend, parameter[name[ingredient].query_columns]] if name[ingredient].group_by begin[:] call[name[group_bys].extend, parameter[name[ingredient].group_by]] if name[ingredient].filters begin[:] call[name[filters].update, parameter[name[ingredient].filters]] if name[ingredient].havings begin[:] call[name[havings].update, parameter[name[ingredient].havings]] return[dictionary[[<ast.Constant object at 0x7da18f09cfa0>, <ast.Constant object at 0x7da18f09d2d0>, <ast.Constant object at 0x7da18f09d660>, <ast.Constant object at 0x7da18f09c940>], [<ast.Name object at 0x7da18f09c3a0>, <ast.Name object at 0x7da18f09df90>, <ast.Name object at 0x7da18f09c250>, <ast.Name object at 0x7da18f09d540>]]]
keyword[def] identifier[brew_query_parts] ( identifier[self] ): literal[string] identifier[columns] , identifier[group_bys] , identifier[filters] , identifier[havings] =[],[], identifier[set] (), identifier[set] () keyword[for] identifier[ingredient] keyword[in] identifier[self] . identifier[ingredients] (): keyword[if] identifier[ingredient] . identifier[query_columns] : identifier[columns] . identifier[extend] ( identifier[ingredient] . identifier[query_columns] ) keyword[if] identifier[ingredient] . identifier[group_by] : identifier[group_bys] . identifier[extend] ( identifier[ingredient] . identifier[group_by] ) keyword[if] identifier[ingredient] . identifier[filters] : identifier[filters] . identifier[update] ( identifier[ingredient] . identifier[filters] ) keyword[if] identifier[ingredient] . identifier[havings] : identifier[havings] . identifier[update] ( identifier[ingredient] . identifier[havings] ) keyword[return] { literal[string] : identifier[columns] , literal[string] : identifier[group_bys] , literal[string] : identifier[filters] , literal[string] : identifier[havings] , }
def brew_query_parts(self): """ Make columns, group_bys, filters, havings """ (columns, group_bys, filters, havings) = ([], [], set(), set()) for ingredient in self.ingredients(): if ingredient.query_columns: columns.extend(ingredient.query_columns) # depends on [control=['if'], data=[]] if ingredient.group_by: group_bys.extend(ingredient.group_by) # depends on [control=['if'], data=[]] if ingredient.filters: filters.update(ingredient.filters) # depends on [control=['if'], data=[]] if ingredient.havings: havings.update(ingredient.havings) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ingredient']] return {'columns': columns, 'group_bys': group_bys, 'filters': filters, 'havings': havings}
async def up(self): """Press key up.""" await self._send_commands( self._move('Down', 0, 20, 275), self._move('Move', 1, 20, 270), self._move('Move', 2, 20, 265), self._move('Move', 3, 20, 260), self._move('Move', 4, 20, 255), self._move('Move', 5, 20, 250), self._move('Up', 6, 20, 250))
<ast.AsyncFunctionDef object at 0x7da18fe90550>
keyword[async] keyword[def] identifier[up] ( identifier[self] ): literal[string] keyword[await] identifier[self] . identifier[_send_commands] ( identifier[self] . identifier[_move] ( literal[string] , literal[int] , literal[int] , literal[int] ), identifier[self] . identifier[_move] ( literal[string] , literal[int] , literal[int] , literal[int] ), identifier[self] . identifier[_move] ( literal[string] , literal[int] , literal[int] , literal[int] ), identifier[self] . identifier[_move] ( literal[string] , literal[int] , literal[int] , literal[int] ), identifier[self] . identifier[_move] ( literal[string] , literal[int] , literal[int] , literal[int] ), identifier[self] . identifier[_move] ( literal[string] , literal[int] , literal[int] , literal[int] ), identifier[self] . identifier[_move] ( literal[string] , literal[int] , literal[int] , literal[int] ))
async def up(self): """Press key up.""" await self._send_commands(self._move('Down', 0, 20, 275), self._move('Move', 1, 20, 270), self._move('Move', 2, 20, 265), self._move('Move', 3, 20, 260), self._move('Move', 4, 20, 255), self._move('Move', 5, 20, 250), self._move('Up', 6, 20, 250))
def init_parser(): """ Initialize the arguments parser. """ parser = argparse.ArgumentParser( description="Automated development environment initialization") parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) subparsers = parser.add_subparsers(title="commands") # List command parser_list = subparsers.add_parser("list", help="list all the available environment types") # New command parser_new = subparsers.add_parser("new", help="define a new environment type") parser_new.add_argument("environment", help="name for the environment") # Remove command parser_remove = subparsers.add_parser("remove", help="remove an environment type from the configuration file") parser_remove.add_argument("environment", help="name of the environment") # Show command parser_show = subparsers.add_parser("show", help="show the commands performed for a specific environment") parser_show.add_argument("environment", help="name of the environment") # Start command parser_start = subparsers.add_parser("start", help="start a new environment") parser_start.add_argument("environment", help="name of the environment") parser_start.add_argument("path", nargs="?", help="path in which to initialize the environment") return parser
def function[init_parser, parameter[]]: constant[ Initialize the arguments parser. ] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[parser].add_argument, parameter[constant[--version]]] variable[subparsers] assign[=] call[name[parser].add_subparsers, parameter[]] variable[parser_list] assign[=] call[name[subparsers].add_parser, parameter[constant[list]]] variable[parser_new] assign[=] call[name[subparsers].add_parser, parameter[constant[new]]] call[name[parser_new].add_argument, parameter[constant[environment]]] variable[parser_remove] assign[=] call[name[subparsers].add_parser, parameter[constant[remove]]] call[name[parser_remove].add_argument, parameter[constant[environment]]] variable[parser_show] assign[=] call[name[subparsers].add_parser, parameter[constant[show]]] call[name[parser_show].add_argument, parameter[constant[environment]]] variable[parser_start] assign[=] call[name[subparsers].add_parser, parameter[constant[start]]] call[name[parser_start].add_argument, parameter[constant[environment]]] call[name[parser_start].add_argument, parameter[constant[path]]] return[name[parser]]
keyword[def] identifier[init_parser] (): literal[string] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[action] = literal[string] , identifier[version] = literal[string] + identifier[__version__] ) identifier[subparsers] = identifier[parser] . identifier[add_subparsers] ( identifier[title] = literal[string] ) identifier[parser_list] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_new] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_new] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_remove] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_remove] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_show] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_show] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_start] = identifier[subparsers] . identifier[add_parser] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_start] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] ) identifier[parser_start] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[help] = literal[string] ) keyword[return] identifier[parser]
def init_parser(): """ Initialize the arguments parser. """ parser = argparse.ArgumentParser(description='Automated development environment initialization') parser.add_argument('--version', action='version', version='%(prog)s ' + __version__) subparsers = parser.add_subparsers(title='commands') # List command parser_list = subparsers.add_parser('list', help='list all the available environment types') # New command parser_new = subparsers.add_parser('new', help='define a new environment type') parser_new.add_argument('environment', help='name for the environment') # Remove command parser_remove = subparsers.add_parser('remove', help='remove an environment type from the configuration file') parser_remove.add_argument('environment', help='name of the environment') # Show command parser_show = subparsers.add_parser('show', help='show the commands performed for a specific environment') parser_show.add_argument('environment', help='name of the environment') # Start command parser_start = subparsers.add_parser('start', help='start a new environment') parser_start.add_argument('environment', help='name of the environment') parser_start.add_argument('path', nargs='?', help='path in which to initialize the environment') return parser
def deseq2_size_factors(counts, meta, design): """ Get size factors for counts using DESeq2. Parameters ---------- counts : pandas.DataFrame Counts to pass to DESeq2. meta : pandas.DataFrame Pandas dataframe whose index matches the columns of counts. This is passed to DESeq2's colData. design : str Design like ~subject_id that will be passed to DESeq2. The design variables should match columns in meta. Returns ------- sf : pandas.Series Series whose index matches the columns of counts and whose values are the size factors from DESeq2. Divide each column by its size factor to obtain normalized counts. """ import rpy2.robjects as r from rpy2.robjects import pandas2ri pandas2ri.activate() r.r('suppressMessages(library(DESeq2))') r.globalenv['counts'] = counts r.globalenv['meta'] = meta r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, ' 'design={})'.format(design)) r.r('dds = estimateSizeFactors(dds)') r.r('sf = sizeFactors(dds)') sf = r.globalenv['sf'] return pd.Series(sf, index=counts.columns)
def function[deseq2_size_factors, parameter[counts, meta, design]]: constant[ Get size factors for counts using DESeq2. Parameters ---------- counts : pandas.DataFrame Counts to pass to DESeq2. meta : pandas.DataFrame Pandas dataframe whose index matches the columns of counts. This is passed to DESeq2's colData. design : str Design like ~subject_id that will be passed to DESeq2. The design variables should match columns in meta. Returns ------- sf : pandas.Series Series whose index matches the columns of counts and whose values are the size factors from DESeq2. Divide each column by its size factor to obtain normalized counts. ] import module[rpy2.robjects] as alias[r] from relative_module[rpy2.robjects] import module[pandas2ri] call[name[pandas2ri].activate, parameter[]] call[name[r].r, parameter[constant[suppressMessages(library(DESeq2))]]] call[name[r].globalenv][constant[counts]] assign[=] name[counts] call[name[r].globalenv][constant[meta]] assign[=] name[meta] call[name[r].r, parameter[call[constant[dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, design={})].format, parameter[name[design]]]]] call[name[r].r, parameter[constant[dds = estimateSizeFactors(dds)]]] call[name[r].r, parameter[constant[sf = sizeFactors(dds)]]] variable[sf] assign[=] call[name[r].globalenv][constant[sf]] return[call[name[pd].Series, parameter[name[sf]]]]
keyword[def] identifier[deseq2_size_factors] ( identifier[counts] , identifier[meta] , identifier[design] ): literal[string] keyword[import] identifier[rpy2] . identifier[robjects] keyword[as] identifier[r] keyword[from] identifier[rpy2] . identifier[robjects] keyword[import] identifier[pandas2ri] identifier[pandas2ri] . identifier[activate] () identifier[r] . identifier[r] ( literal[string] ) identifier[r] . identifier[globalenv] [ literal[string] ]= identifier[counts] identifier[r] . identifier[globalenv] [ literal[string] ]= identifier[meta] identifier[r] . identifier[r] ( literal[string] literal[string] . identifier[format] ( identifier[design] )) identifier[r] . identifier[r] ( literal[string] ) identifier[r] . identifier[r] ( literal[string] ) identifier[sf] = identifier[r] . identifier[globalenv] [ literal[string] ] keyword[return] identifier[pd] . identifier[Series] ( identifier[sf] , identifier[index] = identifier[counts] . identifier[columns] )
def deseq2_size_factors(counts, meta, design): """ Get size factors for counts using DESeq2. Parameters ---------- counts : pandas.DataFrame Counts to pass to DESeq2. meta : pandas.DataFrame Pandas dataframe whose index matches the columns of counts. This is passed to DESeq2's colData. design : str Design like ~subject_id that will be passed to DESeq2. The design variables should match columns in meta. Returns ------- sf : pandas.Series Series whose index matches the columns of counts and whose values are the size factors from DESeq2. Divide each column by its size factor to obtain normalized counts. """ import rpy2.robjects as r from rpy2.robjects import pandas2ri pandas2ri.activate() r.r('suppressMessages(library(DESeq2))') r.globalenv['counts'] = counts r.globalenv['meta'] = meta r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, design={})'.format(design)) r.r('dds = estimateSizeFactors(dds)') r.r('sf = sizeFactors(dds)') sf = r.globalenv['sf'] return pd.Series(sf, index=counts.columns)
def get(self, key, namespace=None): """Retrieve value for key.""" full_key = generate_uppercase_key(key, namespace) full_key = full_key.lower() logger.debug('Searching %s for %s', self, full_key) # Build a map of lowercase -> actual key obj_keys = { item.lower(): item for item in dir(self.obj) if not item.startswith('__') } if full_key in obj_keys: val = getattr(self.obj, obj_keys[full_key]) # If the value is None, then we're going to treat it as a non-valid # value. if val is not None: # This is goofy, but this allows people to specify arg parser # defaults, but do the right thing in Everett where everything # is a string until it's parsed. return str(val) return NO_VALUE
def function[get, parameter[self, key, namespace]]: constant[Retrieve value for key.] variable[full_key] assign[=] call[name[generate_uppercase_key], parameter[name[key], name[namespace]]] variable[full_key] assign[=] call[name[full_key].lower, parameter[]] call[name[logger].debug, parameter[constant[Searching %s for %s], name[self], name[full_key]]] variable[obj_keys] assign[=] <ast.DictComp object at 0x7da1b0c5bbb0> if compare[name[full_key] in name[obj_keys]] begin[:] variable[val] assign[=] call[name[getattr], parameter[name[self].obj, call[name[obj_keys]][name[full_key]]]] if compare[name[val] is_not constant[None]] begin[:] return[call[name[str], parameter[name[val]]]] return[name[NO_VALUE]]
keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[namespace] = keyword[None] ): literal[string] identifier[full_key] = identifier[generate_uppercase_key] ( identifier[key] , identifier[namespace] ) identifier[full_key] = identifier[full_key] . identifier[lower] () identifier[logger] . identifier[debug] ( literal[string] , identifier[self] , identifier[full_key] ) identifier[obj_keys] ={ identifier[item] . identifier[lower] (): identifier[item] keyword[for] identifier[item] keyword[in] identifier[dir] ( identifier[self] . identifier[obj] ) keyword[if] keyword[not] identifier[item] . identifier[startswith] ( literal[string] ) } keyword[if] identifier[full_key] keyword[in] identifier[obj_keys] : identifier[val] = identifier[getattr] ( identifier[self] . identifier[obj] , identifier[obj_keys] [ identifier[full_key] ]) keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[str] ( identifier[val] ) keyword[return] identifier[NO_VALUE]
def get(self, key, namespace=None): """Retrieve value for key.""" full_key = generate_uppercase_key(key, namespace) full_key = full_key.lower() logger.debug('Searching %s for %s', self, full_key) # Build a map of lowercase -> actual key obj_keys = {item.lower(): item for item in dir(self.obj) if not item.startswith('__')} if full_key in obj_keys: val = getattr(self.obj, obj_keys[full_key]) # If the value is None, then we're going to treat it as a non-valid # value. if val is not None: # This is goofy, but this allows people to specify arg parser # defaults, but do the right thing in Everett where everything # is a string until it's parsed. return str(val) # depends on [control=['if'], data=['val']] # depends on [control=['if'], data=['full_key', 'obj_keys']] return NO_VALUE
def save(self, path=None, fatal=True, logger=None, sort_keys=True, indent=2): """ :param str|None path: Save this serializable to file with 'path' (default: self._path) :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :param bool sort_keys: Sort keys :param int indent: Indentation to use """ path = path or getattr(self, "_path", None) if path: return save_json(self.to_dict(), path, fatal=fatal, logger=logger, sort_keys=sort_keys, indent=indent)
def function[save, parameter[self, path, fatal, logger, sort_keys, indent]]: constant[ :param str|None path: Save this serializable to file with 'path' (default: self._path) :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :param bool sort_keys: Sort keys :param int indent: Indentation to use ] variable[path] assign[=] <ast.BoolOp object at 0x7da1b24b2b00> if name[path] begin[:] return[call[name[save_json], parameter[call[name[self].to_dict, parameter[]], name[path]]]]
keyword[def] identifier[save] ( identifier[self] , identifier[path] = keyword[None] , identifier[fatal] = keyword[True] , identifier[logger] = keyword[None] , identifier[sort_keys] = keyword[True] , identifier[indent] = literal[int] ): literal[string] identifier[path] = identifier[path] keyword[or] identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[if] identifier[path] : keyword[return] identifier[save_json] ( identifier[self] . identifier[to_dict] (), identifier[path] , identifier[fatal] = identifier[fatal] , identifier[logger] = identifier[logger] , identifier[sort_keys] = identifier[sort_keys] , identifier[indent] = identifier[indent] )
def save(self, path=None, fatal=True, logger=None, sort_keys=True, indent=2): """ :param str|None path: Save this serializable to file with 'path' (default: self._path) :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :param bool sort_keys: Sort keys :param int indent: Indentation to use """ path = path or getattr(self, '_path', None) if path: return save_json(self.to_dict(), path, fatal=fatal, logger=logger, sort_keys=sort_keys, indent=indent) # depends on [control=['if'], data=[]]
def parse_formula(model_formula, parent_data, unscramble=False): """ Recursively parse a model formula by breaking it into additive atoms and tracking grouping symbol depth. Parameters ---------- model_formula: str Expression for the model formula, e.g. '(a + b)^^2 + dd1(c + (d + e)^3) + f' Note that any expressions to be expanded *must* be in parentheses, even if they include only a single variable (e.g., (x)^2, not x^2). parent_data: pandas DataFrame A tabulation of all values usable in the model formula. Each additive term in `model_formula` should correspond either to a variable in this data frame or to instructions for operating on a variable (for instance, computing temporal derivatives or exponential terms). Temporal derivative options: * d6(variable) for the 6th temporal derivative * dd6(variable) for all temporal derivatives up to the 6th * d4-6(variable) for the 4th through 6th temporal derivatives * 0 must be included in the temporal derivative range for the original term to be returned when temporal derivatives are computed. Exponential options: * (variable)^6 for the 6th power * (variable)^^6 for all powers up to the 6th * (variable)^4-6 for the 4th through 6th powers * 1 must be included in the powers range for the original term to be returned when exponential terms are computed. Temporal derivatives and exponential terms are computed for all terms in the grouping symbols that they adjoin. Returns ------- variables: list(str) A list of variables included in the model parsed from the provided formula. data: pandas DataFrame All values in the complete model. 
""" variables = {} data = {} expr_delimiter = 0 grouping_depth = 0 model_formula = _expand_shorthand(model_formula, parent_data.columns) for i, char in enumerate(model_formula): if char == '(': grouping_depth += 1 elif char == ')': grouping_depth -= 1 elif grouping_depth == 0 and char == '+': expression = model_formula[expr_delimiter:i].strip() variables[expression] = None data[expression] = None expr_delimiter = i + 1 expression = model_formula[expr_delimiter:].strip() variables[expression] = None data[expression] = None for expression in list(variables): if expression[0] == '(' and expression[-1] == ')': (variables[expression], data[expression]) = parse_formula(expression[1:-1], parent_data) else: (variables[expression], data[expression]) = parse_expression(expression, parent_data) variables = list(set(reduce((lambda x, y: x + y), variables.values()))) data = pd.concat((data.values()), axis=1) if unscramble: data = _unscramble_regressor_columns(parent_data, data) return variables, data
def function[parse_formula, parameter[model_formula, parent_data, unscramble]]: constant[ Recursively parse a model formula by breaking it into additive atoms and tracking grouping symbol depth. Parameters ---------- model_formula: str Expression for the model formula, e.g. '(a + b)^^2 + dd1(c + (d + e)^3) + f' Note that any expressions to be expanded *must* be in parentheses, even if they include only a single variable (e.g., (x)^2, not x^2). parent_data: pandas DataFrame A tabulation of all values usable in the model formula. Each additive term in `model_formula` should correspond either to a variable in this data frame or to instructions for operating on a variable (for instance, computing temporal derivatives or exponential terms). Temporal derivative options: * d6(variable) for the 6th temporal derivative * dd6(variable) for all temporal derivatives up to the 6th * d4-6(variable) for the 4th through 6th temporal derivatives * 0 must be included in the temporal derivative range for the original term to be returned when temporal derivatives are computed. Exponential options: * (variable)^6 for the 6th power * (variable)^^6 for all powers up to the 6th * (variable)^4-6 for the 4th through 6th powers * 1 must be included in the powers range for the original term to be returned when exponential terms are computed. Temporal derivatives and exponential terms are computed for all terms in the grouping symbols that they adjoin. Returns ------- variables: list(str) A list of variables included in the model parsed from the provided formula. data: pandas DataFrame All values in the complete model. 
] variable[variables] assign[=] dictionary[[], []] variable[data] assign[=] dictionary[[], []] variable[expr_delimiter] assign[=] constant[0] variable[grouping_depth] assign[=] constant[0] variable[model_formula] assign[=] call[name[_expand_shorthand], parameter[name[model_formula], name[parent_data].columns]] for taget[tuple[[<ast.Name object at 0x7da20c992800>, <ast.Name object at 0x7da20c992fb0>]]] in starred[call[name[enumerate], parameter[name[model_formula]]]] begin[:] if compare[name[char] equal[==] constant[(]] begin[:] <ast.AugAssign object at 0x7da20c991900> variable[expression] assign[=] call[call[name[model_formula]][<ast.Slice object at 0x7da20c991a80>].strip, parameter[]] call[name[variables]][name[expression]] assign[=] constant[None] call[name[data]][name[expression]] assign[=] constant[None] for taget[name[expression]] in starred[call[name[list], parameter[name[variables]]]] begin[:] if <ast.BoolOp object at 0x7da20c992dd0> begin[:] <ast.Tuple object at 0x7da20c990490> assign[=] call[name[parse_formula], parameter[call[name[expression]][<ast.Slice object at 0x7da20c9910f0>], name[parent_data]]] variable[variables] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[reduce], parameter[<ast.Lambda object at 0x7da18dc05420>, call[name[variables].values, parameter[]]]]]]]] variable[data] assign[=] call[name[pd].concat, parameter[call[name[data].values, parameter[]]]] if name[unscramble] begin[:] variable[data] assign[=] call[name[_unscramble_regressor_columns], parameter[name[parent_data], name[data]]] return[tuple[[<ast.Name object at 0x7da18dc05630>, <ast.Name object at 0x7da18dc046a0>]]]
keyword[def] identifier[parse_formula] ( identifier[model_formula] , identifier[parent_data] , identifier[unscramble] = keyword[False] ): literal[string] identifier[variables] ={} identifier[data] ={} identifier[expr_delimiter] = literal[int] identifier[grouping_depth] = literal[int] identifier[model_formula] = identifier[_expand_shorthand] ( identifier[model_formula] , identifier[parent_data] . identifier[columns] ) keyword[for] identifier[i] , identifier[char] keyword[in] identifier[enumerate] ( identifier[model_formula] ): keyword[if] identifier[char] == literal[string] : identifier[grouping_depth] += literal[int] keyword[elif] identifier[char] == literal[string] : identifier[grouping_depth] -= literal[int] keyword[elif] identifier[grouping_depth] == literal[int] keyword[and] identifier[char] == literal[string] : identifier[expression] = identifier[model_formula] [ identifier[expr_delimiter] : identifier[i] ]. identifier[strip] () identifier[variables] [ identifier[expression] ]= keyword[None] identifier[data] [ identifier[expression] ]= keyword[None] identifier[expr_delimiter] = identifier[i] + literal[int] identifier[expression] = identifier[model_formula] [ identifier[expr_delimiter] :]. 
identifier[strip] () identifier[variables] [ identifier[expression] ]= keyword[None] identifier[data] [ identifier[expression] ]= keyword[None] keyword[for] identifier[expression] keyword[in] identifier[list] ( identifier[variables] ): keyword[if] identifier[expression] [ literal[int] ]== literal[string] keyword[and] identifier[expression] [- literal[int] ]== literal[string] : ( identifier[variables] [ identifier[expression] ], identifier[data] [ identifier[expression] ])= identifier[parse_formula] ( identifier[expression] [ literal[int] :- literal[int] ], identifier[parent_data] ) keyword[else] : ( identifier[variables] [ identifier[expression] ], identifier[data] [ identifier[expression] ])= identifier[parse_expression] ( identifier[expression] , identifier[parent_data] ) identifier[variables] = identifier[list] ( identifier[set] ( identifier[reduce] (( keyword[lambda] identifier[x] , identifier[y] : identifier[x] + identifier[y] ), identifier[variables] . identifier[values] ()))) identifier[data] = identifier[pd] . identifier[concat] (( identifier[data] . identifier[values] ()), identifier[axis] = literal[int] ) keyword[if] identifier[unscramble] : identifier[data] = identifier[_unscramble_regressor_columns] ( identifier[parent_data] , identifier[data] ) keyword[return] identifier[variables] , identifier[data]
def parse_formula(model_formula, parent_data, unscramble=False): """ Recursively parse a model formula by breaking it into additive atoms and tracking grouping symbol depth. Parameters ---------- model_formula: str Expression for the model formula, e.g. '(a + b)^^2 + dd1(c + (d + e)^3) + f' Note that any expressions to be expanded *must* be in parentheses, even if they include only a single variable (e.g., (x)^2, not x^2). parent_data: pandas DataFrame A tabulation of all values usable in the model formula. Each additive term in `model_formula` should correspond either to a variable in this data frame or to instructions for operating on a variable (for instance, computing temporal derivatives or exponential terms). Temporal derivative options: * d6(variable) for the 6th temporal derivative * dd6(variable) for all temporal derivatives up to the 6th * d4-6(variable) for the 4th through 6th temporal derivatives * 0 must be included in the temporal derivative range for the original term to be returned when temporal derivatives are computed. Exponential options: * (variable)^6 for the 6th power * (variable)^^6 for all powers up to the 6th * (variable)^4-6 for the 4th through 6th powers * 1 must be included in the powers range for the original term to be returned when exponential terms are computed. Temporal derivatives and exponential terms are computed for all terms in the grouping symbols that they adjoin. Returns ------- variables: list(str) A list of variables included in the model parsed from the provided formula. data: pandas DataFrame All values in the complete model. 
""" variables = {} data = {} expr_delimiter = 0 grouping_depth = 0 model_formula = _expand_shorthand(model_formula, parent_data.columns) for (i, char) in enumerate(model_formula): if char == '(': grouping_depth += 1 # depends on [control=['if'], data=[]] elif char == ')': grouping_depth -= 1 # depends on [control=['if'], data=[]] elif grouping_depth == 0 and char == '+': expression = model_formula[expr_delimiter:i].strip() variables[expression] = None data[expression] = None expr_delimiter = i + 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] expression = model_formula[expr_delimiter:].strip() variables[expression] = None data[expression] = None for expression in list(variables): if expression[0] == '(' and expression[-1] == ')': (variables[expression], data[expression]) = parse_formula(expression[1:-1], parent_data) # depends on [control=['if'], data=[]] else: (variables[expression], data[expression]) = parse_expression(expression, parent_data) # depends on [control=['for'], data=['expression']] variables = list(set(reduce(lambda x, y: x + y, variables.values()))) data = pd.concat(data.values(), axis=1) if unscramble: data = _unscramble_regressor_columns(parent_data, data) # depends on [control=['if'], data=[]] return (variables, data)
def database(self, database_id, ddl_statements=(), pool=None): """Factory to create a database within this instance. :type database_id: str :param database_id: The ID of the instance. :type ddl_statements: list of string :param ddl_statements: (Optional) DDL statements, excluding the 'CREATE DATABSE' statement. :type pool: concrete subclass of :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. :param pool: (Optional) session pool to be used by database. :rtype: :class:`~google.cloud.spanner_v1.database.Database` :returns: a database owned by this instance. """ return Database(database_id, self, ddl_statements=ddl_statements, pool=pool)
def function[database, parameter[self, database_id, ddl_statements, pool]]: constant[Factory to create a database within this instance. :type database_id: str :param database_id: The ID of the instance. :type ddl_statements: list of string :param ddl_statements: (Optional) DDL statements, excluding the 'CREATE DATABSE' statement. :type pool: concrete subclass of :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. :param pool: (Optional) session pool to be used by database. :rtype: :class:`~google.cloud.spanner_v1.database.Database` :returns: a database owned by this instance. ] return[call[name[Database], parameter[name[database_id], name[self]]]]
keyword[def] identifier[database] ( identifier[self] , identifier[database_id] , identifier[ddl_statements] =(), identifier[pool] = keyword[None] ): literal[string] keyword[return] identifier[Database] ( identifier[database_id] , identifier[self] , identifier[ddl_statements] = identifier[ddl_statements] , identifier[pool] = identifier[pool] )
def database(self, database_id, ddl_statements=(), pool=None): """Factory to create a database within this instance. :type database_id: str :param database_id: The ID of the instance. :type ddl_statements: list of string :param ddl_statements: (Optional) DDL statements, excluding the 'CREATE DATABSE' statement. :type pool: concrete subclass of :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. :param pool: (Optional) session pool to be used by database. :rtype: :class:`~google.cloud.spanner_v1.database.Database` :returns: a database owned by this instance. """ return Database(database_id, self, ddl_statements=ddl_statements, pool=pool)
def get(self, client_id, code): """Get the Grant object with the given client ID and code :param client_id: ID of the client :param code: """ return self.query.filter_by(client_id=client_id, code=code).first()
def function[get, parameter[self, client_id, code]]: constant[Get the Grant object with the given client ID and code :param client_id: ID of the client :param code: ] return[call[call[name[self].query.filter_by, parameter[]].first, parameter[]]]
keyword[def] identifier[get] ( identifier[self] , identifier[client_id] , identifier[code] ): literal[string] keyword[return] identifier[self] . identifier[query] . identifier[filter_by] ( identifier[client_id] = identifier[client_id] , identifier[code] = identifier[code] ). identifier[first] ()
def get(self, client_id, code): """Get the Grant object with the given client ID and code :param client_id: ID of the client :param code: """ return self.query.filter_by(client_id=client_id, code=code).first()
def _check_digit(self): """ Calculate the check digit for ISBN-13. See https://en.wikipedia.org/wiki/International_Standard_Book_Number for calculation. """ weights = (1 if x % 2 == 0 else 3 for x in range(12)) body = ''.join([self.ean, self.group, self.registrant, self.publication]) remainder = sum(int(b) * w for b, w in zip(body, weights)) % 10 diff = 10 - remainder check_digit = 0 if diff == 10 else diff return str(check_digit)
def function[_check_digit, parameter[self]]: constant[ Calculate the check digit for ISBN-13. See https://en.wikipedia.org/wiki/International_Standard_Book_Number for calculation. ] variable[weights] assign[=] <ast.GeneratorExp object at 0x7da204345510> variable[body] assign[=] call[constant[].join, parameter[list[[<ast.Attribute object at 0x7da207f99030>, <ast.Attribute object at 0x7da207f98790>, <ast.Attribute object at 0x7da207f99ba0>, <ast.Attribute object at 0x7da207f9b6d0>]]]] variable[remainder] assign[=] binary_operation[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da207f991e0>]] <ast.Mod object at 0x7da2590d6920> constant[10]] variable[diff] assign[=] binary_operation[constant[10] - name[remainder]] variable[check_digit] assign[=] <ast.IfExp object at 0x7da207f98250> return[call[name[str], parameter[name[check_digit]]]]
keyword[def] identifier[_check_digit] ( identifier[self] ): literal[string] identifier[weights] =( literal[int] keyword[if] identifier[x] % literal[int] == literal[int] keyword[else] literal[int] keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] )) identifier[body] = literal[string] . identifier[join] ([ identifier[self] . identifier[ean] , identifier[self] . identifier[group] , identifier[self] . identifier[registrant] , identifier[self] . identifier[publication] ]) identifier[remainder] = identifier[sum] ( identifier[int] ( identifier[b] )* identifier[w] keyword[for] identifier[b] , identifier[w] keyword[in] identifier[zip] ( identifier[body] , identifier[weights] ))% literal[int] identifier[diff] = literal[int] - identifier[remainder] identifier[check_digit] = literal[int] keyword[if] identifier[diff] == literal[int] keyword[else] identifier[diff] keyword[return] identifier[str] ( identifier[check_digit] )
def _check_digit(self): """ Calculate the check digit for ISBN-13. See https://en.wikipedia.org/wiki/International_Standard_Book_Number for calculation. """ weights = (1 if x % 2 == 0 else 3 for x in range(12)) body = ''.join([self.ean, self.group, self.registrant, self.publication]) remainder = sum((int(b) * w for (b, w) in zip(body, weights))) % 10 diff = 10 - remainder check_digit = 0 if diff == 10 else diff return str(check_digit)
def add_kb(self, issuer, kb): """ Add a key bundle and bind it to an identifier :param issuer: Owner of the keys in the key bundle :param kb: A :py:class:`oidcmsg.key_bundle.KeyBundle` instance """ try: self.issuer_keys[issuer].append(kb) except KeyError: self.issuer_keys[issuer] = [kb]
def function[add_kb, parameter[self, issuer, kb]]: constant[ Add a key bundle and bind it to an identifier :param issuer: Owner of the keys in the key bundle :param kb: A :py:class:`oidcmsg.key_bundle.KeyBundle` instance ] <ast.Try object at 0x7da1b0548340>
keyword[def] identifier[add_kb] ( identifier[self] , identifier[issuer] , identifier[kb] ): literal[string] keyword[try] : identifier[self] . identifier[issuer_keys] [ identifier[issuer] ]. identifier[append] ( identifier[kb] ) keyword[except] identifier[KeyError] : identifier[self] . identifier[issuer_keys] [ identifier[issuer] ]=[ identifier[kb] ]
def add_kb(self, issuer, kb): """ Add a key bundle and bind it to an identifier :param issuer: Owner of the keys in the key bundle :param kb: A :py:class:`oidcmsg.key_bundle.KeyBundle` instance """ try: self.issuer_keys[issuer].append(kb) # depends on [control=['try'], data=[]] except KeyError: self.issuer_keys[issuer] = [kb] # depends on [control=['except'], data=[]]
def make_bigint_autoincrement_column(column_name: str, dialect: Dialect) -> Column: """ Returns an instance of :class:`Column` representing a :class:`BigInteger` ``AUTOINCREMENT`` column in the specified :class:`Dialect`. """ # noinspection PyUnresolvedReferences if dialect.name == SqlaDialectName.MSSQL: return Column(column_name, BigInteger, Sequence('dummy_name', start=1, increment=1)) else: # return Column(column_name, BigInteger, autoincrement=True) # noinspection PyUnresolvedReferences raise AssertionError( "SQLAlchemy doesn't support non-PK autoincrement fields yet for " "dialect {}".format(repr(dialect.name)))
def function[make_bigint_autoincrement_column, parameter[column_name, dialect]]: constant[ Returns an instance of :class:`Column` representing a :class:`BigInteger` ``AUTOINCREMENT`` column in the specified :class:`Dialect`. ] if compare[name[dialect].name equal[==] name[SqlaDialectName].MSSQL] begin[:] return[call[name[Column], parameter[name[column_name], name[BigInteger], call[name[Sequence], parameter[constant[dummy_name]]]]]]
keyword[def] identifier[make_bigint_autoincrement_column] ( identifier[column_name] : identifier[str] , identifier[dialect] : identifier[Dialect] )-> identifier[Column] : literal[string] keyword[if] identifier[dialect] . identifier[name] == identifier[SqlaDialectName] . identifier[MSSQL] : keyword[return] identifier[Column] ( identifier[column_name] , identifier[BigInteger] , identifier[Sequence] ( literal[string] , identifier[start] = literal[int] , identifier[increment] = literal[int] )) keyword[else] : keyword[raise] identifier[AssertionError] ( literal[string] literal[string] . identifier[format] ( identifier[repr] ( identifier[dialect] . identifier[name] )))
def make_bigint_autoincrement_column(column_name: str, dialect: Dialect) -> Column: """ Returns an instance of :class:`Column` representing a :class:`BigInteger` ``AUTOINCREMENT`` column in the specified :class:`Dialect`. """ # noinspection PyUnresolvedReferences if dialect.name == SqlaDialectName.MSSQL: return Column(column_name, BigInteger, Sequence('dummy_name', start=1, increment=1)) # depends on [control=['if'], data=[]] else: # return Column(column_name, BigInteger, autoincrement=True) # noinspection PyUnresolvedReferences raise AssertionError("SQLAlchemy doesn't support non-PK autoincrement fields yet for dialect {}".format(repr(dialect.name)))
def print_featurelist(feature_list): """ Print the feature_list in a human-readable form. Parameters ---------- feature_list : list feature objects """ input_features = sum(map(lambda n: n.get_dimension(), feature_list)) print("## Features (%i)" % input_features) print("```") for algorithm in feature_list: print("* %s" % str(algorithm)) print("```")
def function[print_featurelist, parameter[feature_list]]: constant[ Print the feature_list in a human-readable form. Parameters ---------- feature_list : list feature objects ] variable[input_features] assign[=] call[name[sum], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b28521a0>, name[feature_list]]]]] call[name[print], parameter[binary_operation[constant[## Features (%i)] <ast.Mod object at 0x7da2590d6920> name[input_features]]]] call[name[print], parameter[constant[```]]] for taget[name[algorithm]] in starred[name[feature_list]] begin[:] call[name[print], parameter[binary_operation[constant[* %s] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[algorithm]]]]]] call[name[print], parameter[constant[```]]]
keyword[def] identifier[print_featurelist] ( identifier[feature_list] ): literal[string] identifier[input_features] = identifier[sum] ( identifier[map] ( keyword[lambda] identifier[n] : identifier[n] . identifier[get_dimension] (), identifier[feature_list] )) identifier[print] ( literal[string] % identifier[input_features] ) identifier[print] ( literal[string] ) keyword[for] identifier[algorithm] keyword[in] identifier[feature_list] : identifier[print] ( literal[string] % identifier[str] ( identifier[algorithm] )) identifier[print] ( literal[string] )
def print_featurelist(feature_list): """ Print the feature_list in a human-readable form. Parameters ---------- feature_list : list feature objects """ input_features = sum(map(lambda n: n.get_dimension(), feature_list)) print('## Features (%i)' % input_features) print('```') for algorithm in feature_list: print('* %s' % str(algorithm)) # depends on [control=['for'], data=['algorithm']] print('```')
def _send_request(self, method, url, *args, **kwargs): """ Send HTTP request. :param str method: The HTTP method to use. :param str url: The URL to make the request to. :return: Deferred firing with the HTTP response. """ action = LOG_JWS_REQUEST(url=url) with action.context(): headers = kwargs.setdefault('headers', Headers()) headers.setRawHeaders(b'user-agent', [self._user_agent]) kwargs.setdefault('timeout', self.timeout) return ( DeferredContext( self._treq.request(method, url, *args, **kwargs)) .addCallback( tap(lambda r: action.add_success_fields( code=r.code, content_type=r.headers.getRawHeaders( b'content-type', [None])[0]))) .addActionFinish())
def function[_send_request, parameter[self, method, url]]: constant[ Send HTTP request. :param str method: The HTTP method to use. :param str url: The URL to make the request to. :return: Deferred firing with the HTTP response. ] variable[action] assign[=] call[name[LOG_JWS_REQUEST], parameter[]] with call[name[action].context, parameter[]] begin[:] variable[headers] assign[=] call[name[kwargs].setdefault, parameter[constant[headers], call[name[Headers], parameter[]]]] call[name[headers].setRawHeaders, parameter[constant[b'user-agent'], list[[<ast.Attribute object at 0x7da18c4ce290>]]]] call[name[kwargs].setdefault, parameter[constant[timeout], name[self].timeout]] return[call[call[call[name[DeferredContext], parameter[call[name[self]._treq.request, parameter[name[method], name[url], <ast.Starred object at 0x7da18eb54a90>]]]].addCallback, parameter[call[name[tap], parameter[<ast.Lambda object at 0x7da20c76fc70>]]]].addActionFinish, parameter[]]]
keyword[def] identifier[_send_request] ( identifier[self] , identifier[method] , identifier[url] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[action] = identifier[LOG_JWS_REQUEST] ( identifier[url] = identifier[url] ) keyword[with] identifier[action] . identifier[context] (): identifier[headers] = identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[Headers] ()) identifier[headers] . identifier[setRawHeaders] ( literal[string] ,[ identifier[self] . identifier[_user_agent] ]) identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[self] . identifier[timeout] ) keyword[return] ( identifier[DeferredContext] ( identifier[self] . identifier[_treq] . identifier[request] ( identifier[method] , identifier[url] ,* identifier[args] ,** identifier[kwargs] )) . identifier[addCallback] ( identifier[tap] ( keyword[lambda] identifier[r] : identifier[action] . identifier[add_success_fields] ( identifier[code] = identifier[r] . identifier[code] , identifier[content_type] = identifier[r] . identifier[headers] . identifier[getRawHeaders] ( literal[string] ,[ keyword[None] ])[ literal[int] ]))) . identifier[addActionFinish] ())
def _send_request(self, method, url, *args, **kwargs): """ Send HTTP request. :param str method: The HTTP method to use. :param str url: The URL to make the request to. :return: Deferred firing with the HTTP response. """ action = LOG_JWS_REQUEST(url=url) with action.context(): headers = kwargs.setdefault('headers', Headers()) headers.setRawHeaders(b'user-agent', [self._user_agent]) kwargs.setdefault('timeout', self.timeout) return DeferredContext(self._treq.request(method, url, *args, **kwargs)).addCallback(tap(lambda r: action.add_success_fields(code=r.code, content_type=r.headers.getRawHeaders(b'content-type', [None])[0]))).addActionFinish() # depends on [control=['with'], data=[]]
def plot(q: np.ndarray, bodies: List[str], plot_colors: Dict[str, str], sim_name: str, fname: Optional[str] = None): """ Plot the planetary orbits. Plot size limited to box of 10 AU around the sun. q is a Nx3B array. t indexes time points. 3B columns are (x, y, z) for the bodies in order. """ # Get N and number of dims N, dims = q.shape # Slices for x_slice = slice(0, dims, 3) y_slice = slice(1, dims, 3) # Convert all distances from meters to astronomical units (AU) plot_x = q[:, x_slice] / au2m plot_y = q[:, y_slice] / au2m # Set up chart title and scale fig, ax = plt.subplots(figsize=[12,12]) ax.set_title(f'Inner Planetary Orbits in 2018; Weekly from {sim_name}') ax.set_xlabel('x in J2000.0 Frame; Astronomical Units (au)') ax.set_ylabel('y in J2000.0 Frame; Astronomical Units (au)') # Scale and tick size a = 5.0 da = 1.0 ticks = np.arange(-a, a+da, da) # Set limits and ticks ax.set_xlim(-a, a) ax.set_ylim(-a, a) ax.set_xticks(ticks) ax.set_yticks(ticks) # Set marker sizes proportional to size of bodies radius_earth = radius_tbl['earth'] markersize_earth = 4.0 markersize_tbl = {body : cbrt(radius_tbl[body] / radius_earth) * markersize_earth for body in bodies} # Plot the orbit of each body for k, body in enumerate(bodies): ax.plot(plot_x[:, k], plot_y[:, k], label=body, color=plot_colors[body], linewidth=0, markersize = markersize_tbl[body], marker='o') # Legend and grid fig.legend(loc=7, bbox_to_anchor=(0.85, 0.5)) # ax.legend() ax.grid() # Save plot if a filename was provided if fname is not None: fig.savefig(fname, bbox_inches='tight') # Display plot plt.show()
def function[plot, parameter[q, bodies, plot_colors, sim_name, fname]]: constant[ Plot the planetary orbits. Plot size limited to box of 10 AU around the sun. q is a Nx3B array. t indexes time points. 3B columns are (x, y, z) for the bodies in order. ] <ast.Tuple object at 0x7da18fe90520> assign[=] name[q].shape variable[x_slice] assign[=] call[name[slice], parameter[constant[0], name[dims], constant[3]]] variable[y_slice] assign[=] call[name[slice], parameter[constant[1], name[dims], constant[3]]] variable[plot_x] assign[=] binary_operation[call[name[q]][tuple[[<ast.Slice object at 0x7da18fe91870>, <ast.Name object at 0x7da18fe92140>]]] / name[au2m]] variable[plot_y] assign[=] binary_operation[call[name[q]][tuple[[<ast.Slice object at 0x7da18fe929b0>, <ast.Name object at 0x7da18fe93340>]]] / name[au2m]] <ast.Tuple object at 0x7da18fe93100> assign[=] call[name[plt].subplots, parameter[]] call[name[ax].set_title, parameter[<ast.JoinedStr object at 0x7da18fe938b0>]] call[name[ax].set_xlabel, parameter[constant[x in J2000.0 Frame; Astronomical Units (au)]]] call[name[ax].set_ylabel, parameter[constant[y in J2000.0 Frame; Astronomical Units (au)]]] variable[a] assign[=] constant[5.0] variable[da] assign[=] constant[1.0] variable[ticks] assign[=] call[name[np].arange, parameter[<ast.UnaryOp object at 0x7da18fe91e40>, binary_operation[name[a] + name[da]], name[da]]] call[name[ax].set_xlim, parameter[<ast.UnaryOp object at 0x7da18fe90760>, name[a]]] call[name[ax].set_ylim, parameter[<ast.UnaryOp object at 0x7da18fe920e0>, name[a]]] call[name[ax].set_xticks, parameter[name[ticks]]] call[name[ax].set_yticks, parameter[name[ticks]]] variable[radius_earth] assign[=] call[name[radius_tbl]][constant[earth]] variable[markersize_earth] assign[=] constant[4.0] variable[markersize_tbl] assign[=] <ast.DictComp object at 0x7da18fe92c80> for taget[tuple[[<ast.Name object at 0x7da18fe93880>, <ast.Name object at 0x7da18fe901c0>]]] in starred[call[name[enumerate], 
parameter[name[bodies]]]] begin[:] call[name[ax].plot, parameter[call[name[plot_x]][tuple[[<ast.Slice object at 0x7da18fe916f0>, <ast.Name object at 0x7da18fe90df0>]]], call[name[plot_y]][tuple[[<ast.Slice object at 0x7da18fe924d0>, <ast.Name object at 0x7da18fe93b80>]]]]] call[name[fig].legend, parameter[]] call[name[ax].grid, parameter[]] if compare[name[fname] is_not constant[None]] begin[:] call[name[fig].savefig, parameter[name[fname]]] call[name[plt].show, parameter[]]
keyword[def] identifier[plot] ( identifier[q] : identifier[np] . identifier[ndarray] , identifier[bodies] : identifier[List] [ identifier[str] ], identifier[plot_colors] : identifier[Dict] [ identifier[str] , identifier[str] ], identifier[sim_name] : identifier[str] , identifier[fname] : identifier[Optional] [ identifier[str] ]= keyword[None] ): literal[string] identifier[N] , identifier[dims] = identifier[q] . identifier[shape] identifier[x_slice] = identifier[slice] ( literal[int] , identifier[dims] , literal[int] ) identifier[y_slice] = identifier[slice] ( literal[int] , identifier[dims] , literal[int] ) identifier[plot_x] = identifier[q] [:, identifier[x_slice] ]/ identifier[au2m] identifier[plot_y] = identifier[q] [:, identifier[y_slice] ]/ identifier[au2m] identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] ( identifier[figsize] =[ literal[int] , literal[int] ]) identifier[ax] . identifier[set_title] ( literal[string] ) identifier[ax] . identifier[set_xlabel] ( literal[string] ) identifier[ax] . identifier[set_ylabel] ( literal[string] ) identifier[a] = literal[int] identifier[da] = literal[int] identifier[ticks] = identifier[np] . identifier[arange] (- identifier[a] , identifier[a] + identifier[da] , identifier[da] ) identifier[ax] . identifier[set_xlim] (- identifier[a] , identifier[a] ) identifier[ax] . identifier[set_ylim] (- identifier[a] , identifier[a] ) identifier[ax] . identifier[set_xticks] ( identifier[ticks] ) identifier[ax] . 
identifier[set_yticks] ( identifier[ticks] ) identifier[radius_earth] = identifier[radius_tbl] [ literal[string] ] identifier[markersize_earth] = literal[int] identifier[markersize_tbl] ={ identifier[body] : identifier[cbrt] ( identifier[radius_tbl] [ identifier[body] ]/ identifier[radius_earth] )* identifier[markersize_earth] keyword[for] identifier[body] keyword[in] identifier[bodies] } keyword[for] identifier[k] , identifier[body] keyword[in] identifier[enumerate] ( identifier[bodies] ): identifier[ax] . identifier[plot] ( identifier[plot_x] [:, identifier[k] ], identifier[plot_y] [:, identifier[k] ], identifier[label] = identifier[body] , identifier[color] = identifier[plot_colors] [ identifier[body] ], identifier[linewidth] = literal[int] , identifier[markersize] = identifier[markersize_tbl] [ identifier[body] ], identifier[marker] = literal[string] ) identifier[fig] . identifier[legend] ( identifier[loc] = literal[int] , identifier[bbox_to_anchor] =( literal[int] , literal[int] )) identifier[ax] . identifier[grid] () keyword[if] identifier[fname] keyword[is] keyword[not] keyword[None] : identifier[fig] . identifier[savefig] ( identifier[fname] , identifier[bbox_inches] = literal[string] ) identifier[plt] . identifier[show] ()
def plot(q: np.ndarray, bodies: List[str], plot_colors: Dict[str, str], sim_name: str, fname: Optional[str]=None): """ Plot the planetary orbits. Plot size limited to box of 10 AU around the sun. q is a Nx3B array. t indexes time points. 3B columns are (x, y, z) for the bodies in order. """ # Get N and number of dims (N, dims) = q.shape # Slices for x_slice = slice(0, dims, 3) y_slice = slice(1, dims, 3) # Convert all distances from meters to astronomical units (AU) plot_x = q[:, x_slice] / au2m plot_y = q[:, y_slice] / au2m # Set up chart title and scale (fig, ax) = plt.subplots(figsize=[12, 12]) ax.set_title(f'Inner Planetary Orbits in 2018; Weekly from {sim_name}') ax.set_xlabel('x in J2000.0 Frame; Astronomical Units (au)') ax.set_ylabel('y in J2000.0 Frame; Astronomical Units (au)') # Scale and tick size a = 5.0 da = 1.0 ticks = np.arange(-a, a + da, da) # Set limits and ticks ax.set_xlim(-a, a) ax.set_ylim(-a, a) ax.set_xticks(ticks) ax.set_yticks(ticks) # Set marker sizes proportional to size of bodies radius_earth = radius_tbl['earth'] markersize_earth = 4.0 markersize_tbl = {body: cbrt(radius_tbl[body] / radius_earth) * markersize_earth for body in bodies} # Plot the orbit of each body for (k, body) in enumerate(bodies): ax.plot(plot_x[:, k], plot_y[:, k], label=body, color=plot_colors[body], linewidth=0, markersize=markersize_tbl[body], marker='o') # depends on [control=['for'], data=[]] # Legend and grid fig.legend(loc=7, bbox_to_anchor=(0.85, 0.5)) # ax.legend() ax.grid() # Save plot if a filename was provided if fname is not None: fig.savefig(fname, bbox_inches='tight') # depends on [control=['if'], data=['fname']] # Display plot plt.show()
def gapsplit(args): """ %prog gapsplit gffile > split.gff Read in the gff (normally generated by GMAP) and print it out after splitting each feature into one parent and multiple child features based on alignment information encoded in CIGAR string. """ p = OptionParser(gapsplit.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gffile, = args gff = Gff(gffile) for g in gff: if re.match("EST_match", g.type): """ hacky implementation: since the standard urlparse.parse_qsl() replaces all "+" symbols with spaces we will write a regex to check either for a "-" or a " " (space) """ match = re.search(r'\S+ (\d+) \d+ ([\s{1}\-])', g.attributes["Target"][0]) if match.group(2) == "-": strand = match.group(2) else: strand = "+" g.attributes["Target"][0] = " ".join(str(x) \ for x in [g.attributes["Target"][0].rstrip(), strand]) if g.strand == "?": g.strand = strand else: match = re.match(r'\S+ (\d+) \d+', g.attributes["Target"][0]) target_start = int(match.group(1)) re_cigar = re.compile(r'(\D+)(\d+)'); cigar = g.attributes["Gap"][0].split(" ") g.attributes["Gap"] = None parts = [] if g.strand == "+": for event in cigar: match = re_cigar.match(event) op, count = match.group(1), int(match.group(2)) if op in "IHS": target_start += count elif op in "DN": g.start += count elif op == "P": continue else: parts.append([g.start, g.start + count - 1, \ target_start, target_start + count - 1]) g.start += count target_start += count else: for event in cigar: match = re_cigar.match(event) op, count = match.group(1), int(match.group(2)) if op in "IHS": target_start += count elif op in "DN": g.end -= count elif op == "P": continue else: parts.append([g.end - count + 1, g.end, \ target_start, target_start + count - 1]) g.end -= count target_start += count g.update_attributes() print(g) parent = g.attributes["Name"][0] g.type = "match_part" g.attributes.clear() for part in parts: g.start, g.end = part[0], part[1] g.score, g.strand, g.phase = ".", 
g.strand, "." if re.match("EST", g.type): target_list = [parent, part[2], part[3], g.strand] else: target_list = [parent, part[2], part[3]] target = " ".join(str(x) for x in target_list) g.attributes["Parent"] = [parent] g.attributes["Target"] = [target] g.update_attributes() print(g)
def function[gapsplit, parameter[args]]: constant[ %prog gapsplit gffile > split.gff Read in the gff (normally generated by GMAP) and print it out after splitting each feature into one parent and multiple child features based on alignment information encoded in CIGAR string. ] variable[p] assign[=] call[name[OptionParser], parameter[name[gapsplit].__doc__]] <ast.Tuple object at 0x7da1b07498a0> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b0749ff0>]] <ast.Tuple object at 0x7da1b07491e0> assign[=] name[args] variable[gff] assign[=] call[name[Gff], parameter[name[gffile]]] for taget[name[g]] in starred[name[gff]] begin[:] if call[name[re].match, parameter[constant[EST_match], name[g].type]] begin[:] constant[ hacky implementation: since the standard urlparse.parse_qsl() replaces all "+" symbols with spaces we will write a regex to check either for a "-" or a " " (space) ] variable[match] assign[=] call[name[re].search, parameter[constant[\S+ (\d+) \d+ ([\s{1}\-])], call[call[name[g].attributes][constant[Target]]][constant[0]]]] if compare[call[name[match].group, parameter[constant[2]]] equal[==] constant[-]] begin[:] variable[strand] assign[=] call[name[match].group, parameter[constant[2]]] if compare[name[g].strand equal[==] constant[?]] begin[:] name[g].strand assign[=] name[strand] variable[target_start] assign[=] call[name[int], parameter[call[name[match].group, parameter[constant[1]]]]] variable[re_cigar] assign[=] call[name[re].compile, parameter[constant[(\D+)(\d+)]]] variable[cigar] assign[=] call[call[call[name[g].attributes][constant[Gap]]][constant[0]].split, parameter[constant[ ]]] call[name[g].attributes][constant[Gap]] assign[=] constant[None] variable[parts] assign[=] list[[]] if compare[name[g].strand equal[==] constant[+]] begin[:] for taget[name[event]] in starred[name[cigar]] begin[:] 
variable[match] assign[=] call[name[re_cigar].match, parameter[name[event]]] <ast.Tuple object at 0x7da1b08e8cd0> assign[=] tuple[[<ast.Call object at 0x7da1b08e8a00>, <ast.Call object at 0x7da1b08e8370>]] if compare[name[op] in constant[IHS]] begin[:] <ast.AugAssign object at 0x7da1b08e8dc0> call[name[g].update_attributes, parameter[]] call[name[print], parameter[name[g]]] variable[parent] assign[=] call[call[name[g].attributes][constant[Name]]][constant[0]] name[g].type assign[=] constant[match_part] call[name[g].attributes.clear, parameter[]] for taget[name[part]] in starred[name[parts]] begin[:] <ast.Tuple object at 0x7da1b08128c0> assign[=] tuple[[<ast.Subscript object at 0x7da1b0811f00>, <ast.Subscript object at 0x7da1b0810130>]] <ast.Tuple object at 0x7da1b0813e80> assign[=] tuple[[<ast.Constant object at 0x7da1b0810e50>, <ast.Attribute object at 0x7da1b08130d0>, <ast.Constant object at 0x7da1b0813a90>]] if call[name[re].match, parameter[constant[EST], name[g].type]] begin[:] variable[target_list] assign[=] list[[<ast.Name object at 0x7da1b0810af0>, <ast.Subscript object at 0x7da1b0813c70>, <ast.Subscript object at 0x7da1b0811810>, <ast.Attribute object at 0x7da1b0811f60>]] variable[target] assign[=] call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b0810250>]] call[name[g].attributes][constant[Parent]] assign[=] list[[<ast.Name object at 0x7da1b0812260>]] call[name[g].attributes][constant[Target]] assign[=] list[[<ast.Name object at 0x7da1b0812320>]] call[name[g].update_attributes, parameter[]] call[name[print], parameter[name[g]]]
keyword[def] identifier[gapsplit] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[gapsplit] . identifier[__doc__] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[gffile] ,= identifier[args] identifier[gff] = identifier[Gff] ( identifier[gffile] ) keyword[for] identifier[g] keyword[in] identifier[gff] : keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[g] . identifier[type] ): literal[string] identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[g] . identifier[attributes] [ literal[string] ][ literal[int] ]) keyword[if] identifier[match] . identifier[group] ( literal[int] )== literal[string] : identifier[strand] = identifier[match] . identifier[group] ( literal[int] ) keyword[else] : identifier[strand] = literal[string] identifier[g] . identifier[attributes] [ literal[string] ][ literal[int] ]= literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] [ identifier[g] . identifier[attributes] [ literal[string] ][ literal[int] ]. identifier[rstrip] (), identifier[strand] ]) keyword[if] identifier[g] . identifier[strand] == literal[string] : identifier[g] . identifier[strand] = identifier[strand] keyword[else] : identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[g] . identifier[attributes] [ literal[string] ][ literal[int] ]) identifier[target_start] = identifier[int] ( identifier[match] . identifier[group] ( literal[int] )) identifier[re_cigar] = identifier[re] . identifier[compile] ( literal[string] ); identifier[cigar] = identifier[g] . identifier[attributes] [ literal[string] ][ literal[int] ]. identifier[split] ( literal[string] ) identifier[g] . 
identifier[attributes] [ literal[string] ]= keyword[None] identifier[parts] =[] keyword[if] identifier[g] . identifier[strand] == literal[string] : keyword[for] identifier[event] keyword[in] identifier[cigar] : identifier[match] = identifier[re_cigar] . identifier[match] ( identifier[event] ) identifier[op] , identifier[count] = identifier[match] . identifier[group] ( literal[int] ), identifier[int] ( identifier[match] . identifier[group] ( literal[int] )) keyword[if] identifier[op] keyword[in] literal[string] : identifier[target_start] += identifier[count] keyword[elif] identifier[op] keyword[in] literal[string] : identifier[g] . identifier[start] += identifier[count] keyword[elif] identifier[op] == literal[string] : keyword[continue] keyword[else] : identifier[parts] . identifier[append] ([ identifier[g] . identifier[start] , identifier[g] . identifier[start] + identifier[count] - literal[int] , identifier[target_start] , identifier[target_start] + identifier[count] - literal[int] ]) identifier[g] . identifier[start] += identifier[count] identifier[target_start] += identifier[count] keyword[else] : keyword[for] identifier[event] keyword[in] identifier[cigar] : identifier[match] = identifier[re_cigar] . identifier[match] ( identifier[event] ) identifier[op] , identifier[count] = identifier[match] . identifier[group] ( literal[int] ), identifier[int] ( identifier[match] . identifier[group] ( literal[int] )) keyword[if] identifier[op] keyword[in] literal[string] : identifier[target_start] += identifier[count] keyword[elif] identifier[op] keyword[in] literal[string] : identifier[g] . identifier[end] -= identifier[count] keyword[elif] identifier[op] == literal[string] : keyword[continue] keyword[else] : identifier[parts] . identifier[append] ([ identifier[g] . identifier[end] - identifier[count] + literal[int] , identifier[g] . identifier[end] , identifier[target_start] , identifier[target_start] + identifier[count] - literal[int] ]) identifier[g] . 
identifier[end] -= identifier[count] identifier[target_start] += identifier[count] identifier[g] . identifier[update_attributes] () identifier[print] ( identifier[g] ) identifier[parent] = identifier[g] . identifier[attributes] [ literal[string] ][ literal[int] ] identifier[g] . identifier[type] = literal[string] identifier[g] . identifier[attributes] . identifier[clear] () keyword[for] identifier[part] keyword[in] identifier[parts] : identifier[g] . identifier[start] , identifier[g] . identifier[end] = identifier[part] [ literal[int] ], identifier[part] [ literal[int] ] identifier[g] . identifier[score] , identifier[g] . identifier[strand] , identifier[g] . identifier[phase] = literal[string] , identifier[g] . identifier[strand] , literal[string] keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[g] . identifier[type] ): identifier[target_list] =[ identifier[parent] , identifier[part] [ literal[int] ], identifier[part] [ literal[int] ], identifier[g] . identifier[strand] ] keyword[else] : identifier[target_list] =[ identifier[parent] , identifier[part] [ literal[int] ], identifier[part] [ literal[int] ]] identifier[target] = literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[target_list] ) identifier[g] . identifier[attributes] [ literal[string] ]=[ identifier[parent] ] identifier[g] . identifier[attributes] [ literal[string] ]=[ identifier[target] ] identifier[g] . identifier[update_attributes] () identifier[print] ( identifier[g] )
def gapsplit(args): """ %prog gapsplit gffile > split.gff Read in the gff (normally generated by GMAP) and print it out after splitting each feature into one parent and multiple child features based on alignment information encoded in CIGAR string. """ p = OptionParser(gapsplit.__doc__) (opts, args) = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] (gffile,) = args gff = Gff(gffile) for g in gff: if re.match('EST_match', g.type): '\n hacky implementation:\n since the standard urlparse.parse_qsl() replaces all "+" symbols with spaces\n we will write a regex to check either for a "-" or a " " (space)\n ' match = re.search('\\S+ (\\d+) \\d+ ([\\s{1}\\-])', g.attributes['Target'][0]) if match.group(2) == '-': strand = match.group(2) # depends on [control=['if'], data=[]] else: strand = '+' g.attributes['Target'][0] = ' '.join((str(x) for x in [g.attributes['Target'][0].rstrip(), strand])) if g.strand == '?': g.strand = strand # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: match = re.match('\\S+ (\\d+) \\d+', g.attributes['Target'][0]) target_start = int(match.group(1)) re_cigar = re.compile('(\\D+)(\\d+)') cigar = g.attributes['Gap'][0].split(' ') g.attributes['Gap'] = None parts = [] if g.strand == '+': for event in cigar: match = re_cigar.match(event) (op, count) = (match.group(1), int(match.group(2))) if op in 'IHS': target_start += count # depends on [control=['if'], data=[]] elif op in 'DN': g.start += count # depends on [control=['if'], data=[]] elif op == 'P': continue # depends on [control=['if'], data=[]] else: parts.append([g.start, g.start + count - 1, target_start, target_start + count - 1]) g.start += count target_start += count # depends on [control=['for'], data=['event']] # depends on [control=['if'], data=[]] else: for event in cigar: match = re_cigar.match(event) (op, count) = (match.group(1), int(match.group(2))) if op in 'IHS': target_start += count # 
depends on [control=['if'], data=[]] elif op in 'DN': g.end -= count # depends on [control=['if'], data=[]] elif op == 'P': continue # depends on [control=['if'], data=[]] else: parts.append([g.end - count + 1, g.end, target_start, target_start + count - 1]) g.end -= count target_start += count # depends on [control=['for'], data=['event']] g.update_attributes() print(g) parent = g.attributes['Name'][0] g.type = 'match_part' g.attributes.clear() for part in parts: (g.start, g.end) = (part[0], part[1]) (g.score, g.strand, g.phase) = ('.', g.strand, '.') if re.match('EST', g.type): target_list = [parent, part[2], part[3], g.strand] # depends on [control=['if'], data=[]] else: target_list = [parent, part[2], part[3]] target = ' '.join((str(x) for x in target_list)) g.attributes['Parent'] = [parent] g.attributes['Target'] = [target] g.update_attributes() print(g) # depends on [control=['for'], data=['part']] # depends on [control=['for'], data=['g']]
def pass_none(func): """ Wrap func so it's not called if its first param is None >>> print_text = pass_none(print) >>> print_text('text') text >>> print_text(None) """ @functools.wraps(func) def wrapper(param, *args, **kwargs): if param is not None: return func(param, *args, **kwargs) return wrapper
def function[pass_none, parameter[func]]: constant[ Wrap func so it's not called if its first param is None >>> print_text = pass_none(print) >>> print_text('text') text >>> print_text(None) ] def function[wrapper, parameter[param]]: if compare[name[param] is_not constant[None]] begin[:] return[call[name[func], parameter[name[param], <ast.Starred object at 0x7da20c76dcf0>]]] return[name[wrapper]]
keyword[def] identifier[pass_none] ( identifier[func] ): literal[string] @ identifier[functools] . identifier[wraps] ( identifier[func] ) keyword[def] identifier[wrapper] ( identifier[param] ,* identifier[args] ,** identifier[kwargs] ): keyword[if] identifier[param] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[func] ( identifier[param] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[wrapper]
def pass_none(func): """ Wrap func so it's not called if its first param is None >>> print_text = pass_none(print) >>> print_text('text') text >>> print_text(None) """ @functools.wraps(func) def wrapper(param, *args, **kwargs): if param is not None: return func(param, *args, **kwargs) # depends on [control=['if'], data=['param']] return wrapper
def _set_var_from_xml_text(self, xml, xmlpath, var): ''' Sets a object variable from the xml if it is there and passing it through a data conversion based on the variable datatype ''' xmle = xml.find(xmlpath) if xmle is not None: setattr(self, var, type_converter[ xmle.attrib.get('datatype', 'string') ]( xmle.text ))
def function[_set_var_from_xml_text, parameter[self, xml, xmlpath, var]]: constant[ Sets a object variable from the xml if it is there and passing it through a data conversion based on the variable datatype ] variable[xmle] assign[=] call[name[xml].find, parameter[name[xmlpath]]] if compare[name[xmle] is_not constant[None]] begin[:] call[name[setattr], parameter[name[self], name[var], call[call[name[type_converter]][call[name[xmle].attrib.get, parameter[constant[datatype], constant[string]]]], parameter[name[xmle].text]]]]
keyword[def] identifier[_set_var_from_xml_text] ( identifier[self] , identifier[xml] , identifier[xmlpath] , identifier[var] ): literal[string] identifier[xmle] = identifier[xml] . identifier[find] ( identifier[xmlpath] ) keyword[if] identifier[xmle] keyword[is] keyword[not] keyword[None] : identifier[setattr] ( identifier[self] , identifier[var] , identifier[type_converter] [ identifier[xmle] . identifier[attrib] . identifier[get] ( literal[string] , literal[string] )]( identifier[xmle] . identifier[text] ))
def _set_var_from_xml_text(self, xml, xmlpath, var): """ Sets a object variable from the xml if it is there and passing it through a data conversion based on the variable datatype """ xmle = xml.find(xmlpath) if xmle is not None: setattr(self, var, type_converter[xmle.attrib.get('datatype', 'string')](xmle.text)) # depends on [control=['if'], data=['xmle']]