docstring: string (lengths 52–499)
function: string (lengths 67–35.2k)
__index_level_0__: int64 (values 52.6k–1.16M)
Initialization method. Args: data (list of list of int/float): 2-dim array. entities (list): list of entities. categories (list): list of the categories (one per entity).
def __init__(self, data, entities=None, categories=None):
    self.data = data
    if entities is None:
        entities = self.default_entities()
    self.entities = entities
    if categories is None:
        categories = []
    self.categories = categories
    self.validate()
894,666
An internal helper to create a page for a single module. This will automatically generate the needed RST to document the module and save the module to its own page in its appropriate location. Args: mod (module): The single module to document as its own page showprivate (bool): A flag for whether or not to display private members Returns: str: The file name ready to be appended to a toctree
def _ProduceSingleContent(self, mod, showprivate=False, showinh=False):
    try:
        all = mod[1].__all__
    except AttributeError:
        raise RuntimeError('Module (%s) MUST have `__all__` defined.' % mod[1].__name__)
    try:
        name = mod[1].__displayname__
    except AttributeError:
        name = mod[0]
    try:
        category = mod[1].__category__
        self.__categories.setdefault(category, 0)
        self.__categories[category] += 1
    except AttributeError:
        pass
    feats = inspect.getmembers(mod[1])
    fname = 'content/' + mod[1].__name__.replace('.', '/').replace(' ', '-') + '.rst'
    feats = [f for f in feats if f[0] in all and (showprivate or not f[0][0:1] == '_')]
    with open(fname, 'w') as fid:
        fid.write(Classifier.GetModuleText(name, mod[1].__name__, showprivate=showprivate))
        for f in feats:
            # Check for a __displayname__
            if inspect.isclass(f[1]) or inspect.isfunction(f[1]):
                try:
                    featname = f[1].__displayname__
                except AttributeError:
                    featname = f[1].__name__
                try:
                    category = f[1].__category__
                    self.__categories.setdefault(category, 0)
                    self.__categories[category] += 1
                except AttributeError:
                    pass
            # Make the auto doc rst
            if inspect.isclass(f[1]):
                fid.write(Classifier.GetClassText(featname, '%s.%s' % (mod[1].__name__, f[1].__name__), showprivate=showprivate, showinh=showinh))
            elif inspect.isfunction(f[1]):
                fid.write(Classifier.GetFunctionText(featname, '%s.%s' % (mod[1].__name__, f[1].__name__)))
    fid.close()
    return '\n %s' % (fname.split('/')[-1])
894,689
An internal helper to generate all of the pages for a given package Args: package (module): The top-level package to document showprivate (bool): A flag for whether or not to display private members nested (bool): For internal use ONLY Returns: str: The file names ready to be appended to a top-level toctree
def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):
    def checkNoNested(mod):
        try:
            all = mod.__all__
        except AttributeError:
            return False
        mems = inspect.getmembers(mod, inspect.ismodule)
        mems = [m for m in mems if m[0] in mod.__all__]
        if len(mems) > 0:
            return False
        return True
    # Get package module members
    mods = inspect.getmembers(package, inspect.ismodule)
    # Split into modules and sub-packages
    nmods, pvt, npkgs = [], [], []
    for mod in mods:
        # Deal with private modules
        if checkNoNested(mod[1]):
            if mod[0][0] == '_':
                pvt.append(mod)
            else:
                nmods.append(mod)
        else:
            npkgs.append(mod)
    if showprivate:
        nmods += pvt
    # for each member that has a nested module
    # recurse and keep track of index files for that package
    files = []
    ignore = []
    for pkg in npkgs:
        pt = '%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[-1])
        if os.path.exists(pt):
            shutil.rmtree(pt)
        os.makedirs(pt)
        ignore += inspect.getmembers(pkg[1])
        f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)
        files.append(f.split(package.__name__.replace('.', '/') + '/')[1])
    if nested:
        try:
            name = package.__displayname__
        except AttributeError:
            name = package.__name__
        # Create index file here (`r` is presumably a reST template constant
        # defined elsewhere in the module; it was elided from this snippet)
        index = r % (name, '*' * len(name))
        # include sub packages first
        index += '\n '.join(files)
        # then include modules
        index += '\n ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh)
        findex = 'content/%s/index.rst' % (package.__name__.replace('.', '/'))
        # Write the file
        with open(findex, 'w') as f:
            if package.__doc__:
                f.write(package.__doc__)
            f.write(index)
        # return filename for index file at package level
        return '\n ' + findex
    # Not nested: return all files
    names = '\n %s/%s/' % (self.path, package.__name__.replace('.', '/'))
    nmods = [m for m in nmods if m not in ignore]
    return names.join(self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\n ') + files)
894,691
Generates all of the documentation for given packages and appends new toctrees to the index. All documentation pages will be under the set relative path. Args: packages (list(module)): A package or list of packages that contain submodules to document showprivate (bool): A flag for whether or not to display private members Returns: str: The new content to append to the index
def _DocPackageFromTop(self, packages, showprivate=False, showinh=False):
    appIndex = ''
    if not isinstance(packages, list):
        packages = [packages]
    if os.path.exists('content'):
        shutil.rmtree('content')
    os.makedirs('content')
    # `r` is presumably a reST template constant defined elsewhere in the module
    appIndex += r % ('API Index')
    # Iterate over each package and generate appropriate pages
    for i in range(len(packages)):
        # The package to document and its path
        package = packages[i]
        try:
            name = package.__displayname__
        except AttributeError:
            name = package.__name__
        # Make sure paths are ready
        path = 'content/%s' % package.__name__
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)
        # Check if there is top level documentation
        # if package.__doc__:
        # Get metadata
        meta = 'About %s\n%s\n' % (name, '=' * len('About ' + name))
        author = getattr(package, "__author__", None)
        license = getattr(package, "__license__", None)
        copyright = getattr(package, "__copyright__", None)
        version = getattr(package, "__version__", None)
        if author:
            meta += '\n* Author: %s' % author
        if license:
            meta += '\n* License: %s' % license
        if copyright:
            meta += '\n* Copyright: %s' % copyright
        if version:
            meta += '\n* Version: %s' % version
        about = '%s/%s' % (path, 'index.rst')
        this_toc = r % (name)
        this_toc += self._MakePackagePages(package, showprivate=showprivate, showinh=showinh)
        this_toc = this_toc.replace('%s/' % path, '')
        with open(about, 'w') as f:
            f.write('%s\n\n' % meta)
            if package.__doc__:
                f.write(package.__doc__)
            f.write(this_toc)
        appIndex += '\n %s' % about
    # Return the new content to append
    return appIndex
894,692
This is the high level API to use to generate documentation pages for any given package(s). Args: packages (list(module)): A list of packages that contain submodules to document index_base (str): The index page file name. This content will be appended showprivate (bool): A flag for whether or not to display private members
def DocumentPackages(self, packages, index_base=None, showprivate=False,
                     notify=True, showinh=False, intro_pages=None,
                     append_material=None, extra=None):
    if index_base is None:
        gram = ''
        if isinstance(packages, list) and len(packages) > 1:
            gram = 's'
            if len(packages) < 3:
                names = ' and '.join(['``%s``' % p.__name__ for p in packages])
            else:
                names = ['``%s``' % p.__name__ for p in packages]
                names[-1] = ' and %s' % names[-1]
                names = ', '.join(names)
        else:
            names = '``%s``' % packages.__name__
        index = SAMPLE_INDEX.format(names, gram)
    else:
        index = self.OpenIndex(index_base)
    app = self._DocPackageFromTop(packages, showprivate=showprivate, showinh=showinh)
    index += self._GenerateStaticsTable()
    index += ...  # (string constant elided in the source)
    if intro_pages is not None:
        if isinstance(intro_pages, str):
            intro_pages = [intro_pages]
        for page in intro_pages:
            index += ' {}\n'.format(page.strip())
        index += '\n'
    if append_material is not None:
        index += append_material
    index += app
    if extra is not None:
        index += extra
    if notify:
        index += ...  # (notification string constant elided in the source)
    self.WriteIndex(index)
    return None
894,694
Find the closest available size for the specified image type. Arguments: file_path (:py:class:`str`): The image file path. type_ (:py:class:`str`): The type of image to create a URL for, (``'poster'`` or ``'profile'``). target_size (:py:class:`int`): The size of image to aim for (used as either width or height).
def _create_image_url(self, file_path, type_, target_size):
    if self.image_config is None:
        logger.warning('no image configuration available')
        return
    return ''.join([
        self.image_config['secure_base_url'],
        self._image_size(self.image_config, type_, target_size),
        file_path,
    ])
894,925
Create a model instance Arguments: json (:py:class:`dict`): The parsed JSON data. image_config (:py:class:`dict`): The API image configuration data. Returns: :py:class:`BaseModel`: The model instance.
def from_json(cls, json, image_config=None):
    cls.image_config = image_config
    return cls(**{
        attr: json.get(attr if key is None else key)
        for attr, key in cls.JSON_MAPPING.items()
    })
894,926
Find the closest available size for specified image type. Arguments: image_config (:py:class:`dict`): The image config data. type_ (:py:class:`str`): The type of image to create a URL for, (``'poster'`` or ``'profile'``). target_size (:py:class:`int`): The size of image to aim for (used as either width or height).
def _image_size(image_config, type_, target_size):
    return min(
        image_config['{}_sizes'.format(type_)],
        key=lambda size: (abs(target_size - int(size[1:]))
                          if size.startswith('w') or size.startswith('h')
                          else 999),
    )
894,927
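A minimal usage sketch for _image_size, assuming an image_config shaped like TMDb's configuration payload (the sizes below are illustrative): sizes prefixed with 'w' or 'h' are compared by pixel count, anything else (e.g. 'original') falls through to the 999 penalty.

image_config = {'poster_sizes': ['w92', 'w185', 'w500', 'original']}
# abs(300 - 185) = 115 is the smallest distance, so 'w185' wins
print(_image_size(image_config, 'poster', 300))  # -> 'w185'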
Initialization method. Args: config_dict (dict): the configuration as a dictionary.
def __init__(self, config_dict=None):
    self.config_dict = deepcopy(config_dict)
    self.plugins = Config.load_installed_plugins()
    self.analysis_groups = []
    if not config_dict:
        return
    analysis = config_dict.get('analysis', {})
    if isinstance(analysis, dict):
        for group_key, group_def in analysis.items():
            try:
                self.analysis_groups.append(
                    self.inflate_analysis_group(group_key, group_def))
            except ValueError as e:
                logger.error(
                    'Error while inflating "%s" analysis group. '
                    'The group will not be added to the list. '
                    'Exception: %s.', group_key, e)
    else:
        raise ValueError('%s type is not supported for "analysis" key, '
                         'use dict only' % type(analysis))
895,033
Inflate a list of strings/dictionaries to a list of plugin instances. Args: plugin_list (list): a list of str/dict. inflate_plugin (method): the method to inflate the plugin. Returns: list: a plugin instances list. Raises: ValueError: when a dictionary item contains more than one key.
def inflate_plugin_list(plugin_list, inflate_plugin):
    plugins = []
    for plugin_def in plugin_list:
        if isinstance(plugin_def, str):
            try:
                plugins.append(inflate_plugin(plugin_def))
            except PluginNotFoundError as e:
                logger.error('Could not import plugin identified by %s. '
                             'Exception: %s.', plugin_def, e)
        elif isinstance(plugin_def, dict):
            if len(plugin_def) > 1:
                raise ValueError(
                    'When using a plugin list, each dictionary item '
                    'must contain only one key.')
            identifier = list(plugin_def.keys())[0]
            definition = plugin_def[identifier]
            try:
                plugins.append(inflate_plugin(identifier, definition))
            except PluginNotFoundError as e:
                logger.error('Could not import plugin identified by %s. '
                             'Inflate method: %s. Exception: %s.',
                             identifier, inflate_plugin, e)
    return plugins
895,038
Inflate a dict of dicts to a list of plugin instances. Args: plugin_dict (dict): a dict of dict. inflate_plugin (method): the method to inflate the plugin. Returns: list: a plugin instances list.
def inflate_plugin_dict(plugin_dict, inflate_plugin):
    plugins = []
    for identifier, definition in plugin_dict.items():
        try:
            plugins.append(inflate_plugin(identifier, definition))
        except PluginNotFoundError as e:
            logger.error('Could not import plugin identified by %s. '
                         'Exception: %s.', identifier, e)
    return plugins
895,039
Inflate a no-data checker from a basic definition. Args: identifier (str): the no-data checker identifier / name. definition (bool/dict): a boolean acting as "passes" or a full dict definition with "passes" and "allow_failure". Returns: Checker: a checker instance. Raises: ValueError: when the definition type is not bool or dict.
def inflate_nd_checker(identifier, definition):
    if isinstance(definition, bool):
        return Checker(name=identifier, passes=definition)
    elif isinstance(definition, dict):
        return Checker(definition.pop('name', identifier), **definition)
    else:
        raise ValueError('%s type is not supported for no-data checkers, '
                         'use bool or dict' % type(definition))
895,040
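A quick illustration of the two definition shapes inflate_nd_checker accepts, using a minimal hypothetical Checker stand-in (the real Checker class appears later in this collection):

class Checker:  # illustrative stub only
    def __init__(self, name=None, passes=None, allow_failure=False):
        self.name, self.passes, self.allow_failure = name, passes, allow_failure

ok = inflate_nd_checker('always-ok', True)  # bool form: passes=True
flaky = inflate_nd_checker('flaky', {'passes': False, 'allow_failure': True})  # dict form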
Return the plugin corresponding to the given identifier and type. Args: identifier (str): identifier of the plugin. cls (str): one of checker / provider. Returns: Checker/Provider: plugin class.
def get_plugin(self, identifier, cls=None):
    if ((cls is None or cls == 'provider')
            and identifier in self.available_providers):
        return self.available_providers[identifier]
    elif ((cls is None or cls == 'checker')
            and identifier in self.available_checkers):
        return self.available_checkers[identifier]
    return Config.load_local_plugin(identifier)
895,041
Inflate a plugin from its identifier, definition and class. Args: identifier (str): the plugin identifier. definition (dict): the kwargs to instantiate the plugin with. cls (str): "provider", "checker", or None. Returns: Provider/Checker: instance of plugin.
def inflate_plugin(self, identifier, definition=None, cls=None):
    cls = self.get_plugin(identifier, cls)
    # TODO: implement re-usability of plugins?
    # same instances shared across analyses (to avoid re-computing stuff)
    return cls(**definition or {})
895,044
Inflate multiple plugins based on a list/dict definition. Args: plugins_definition (list/dict): the plugins definitions. inflate_method (method): the method to inflate each plugin. Returns: list: a list of plugin instances. Raises: ValueError: when the definition type is not list or dict.
def inflate_plugins(self, plugins_definition, inflate_method):
    if isinstance(plugins_definition, list):
        return self.inflate_plugin_list(plugins_definition, inflate_method)
    elif isinstance(plugins_definition, dict):
        return self.inflate_plugin_dict(plugins_definition, inflate_method)
    else:
        raise ValueError('%s type is not supported for a plugin list, '
                         'use list or dict' % type(plugins_definition))
895,045
Get data from the TMDb API via :py:func:`aiohttp.get`. Notes: Updates configuration (if required) on successful requests. Arguments: url (:py:class:`str`): The endpoint URL and params. Returns: :py:class:`dict`: The parsed JSON result.
async def get_data(self, url):
    logger.debug('making request to %r', url)
    # NOTE: current aiohttp requires entering the session with `async with`;
    # the original used a plain `with` here.
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=self.headers) as response:
            body = json.loads((await response.read()).decode('utf-8'))
            if response.status == HTTPStatus.OK:
                if url != self.url_builder('configuration'):
                    await self._update_config()
                return body
            elif response.status == HTTPStatus.TOO_MANY_REQUESTS:
                timeout = self.calculate_timeout(
                    response.headers['Retry-After'],
                )
                logger.warning(
                    'Request limit exceeded, waiting %s seconds', timeout,
                )
                await asyncio.sleep(timeout)
                return await self.get_data(url)
            logger.warning(
                'request failed %s: %r',
                response.status,
                body.get('status_message', '<no message>')
            )
895,777
Retrieve movie data by search query. Arguments: query (:py:class:`str`): Query to search for. Returns: :py:class:`list`: Possible matches.
async def find_movie(self, query):
    params = OrderedDict([
        ('query', query),
        ('include_adult', False),
    ])
    url = self.url_builder('search/movie', {}, params)
    data = await self.get_data(url)
    if data is None:
        return
    return [
        Movie.from_json(item, self.config['data'].get('images'))
        for item in data.get('results', [])
    ]
895,778
Retrieve person data by search query. Arguments: query (:py:class:`str`): Query to search for. Returns: :py:class:`list`: Possible matches.
async def find_person(self, query):
    url = self.url_builder(
        'search/person',
        dict(),
        url_params=OrderedDict([
            ('query', query),
            ('include_adult', False),
        ]),
    )
    data = await self.get_data(url)
    if data is None:
        return
    return [
        Person.from_json(item, self.config['data'].get('images'))
        for item in data.get('results', [])
    ]
895,779
Retrieve movie data by ID. Arguments: id_ (:py:class:`int`): The movie's TMDb ID. Returns: :py:class:`~.Movie`: The requested movie.
async def get_movie(self, id_):
    url = self.url_builder(
        'movie/{movie_id}',
        dict(movie_id=id_),
        url_params=OrderedDict(append_to_response='credits'),
    )
    data = await self.get_data(url)
    if data is None:
        return
    return Movie.from_json(data, self.config['data'].get('images'))
895,780
Retrieve person data by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. Returns: :py:class:`~.Person`: The requested person.
async def get_person(self, id_):
    data = await self._get_person_json(
        id_,
        OrderedDict(append_to_response='movie_credits'),
    )
    return Person.from_json(data, self.config['data'].get('images'))
895,781
Retrieve raw person JSON by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. url_params (:py:class:`dict`): Any additional URL parameters. Returns: :py:class:`dict`: The JSON data.
async def _get_person_json(self, id_, url_params=None):
    url = self.url_builder(
        'person/{person_id}',
        dict(person_id=id_),
        url_params=url_params or OrderedDict(),
    )
    data = await self.get_data(url)
    return data
895,782
Randomly select a popular person. Notes: Requires at least two API calls. May require three API calls if the randomly-selected index isn't within the first page of required data. Arguments: limit (:py:class:`int`, optional): How many of the most popular people to make random choice from (defaults to top ``500``). Returns: :py:class:`~.Person`: A randomly-selected popular person.
async def get_random_popular_person(self, limit=500):
    index = random.randrange(limit)
    data = await self._get_popular_people_page()
    if data is None:
        return
    if index >= len(data['results']):
        # result is not on first page
        page, index = self._calculate_page_index(index, data)
        data = await self._get_popular_people_page(page)
        if data is None:
            return
    json_data = data['results'][index]
    details = await self._get_person_json(json_data['id'])
    details.update(**json_data)
    return Person.from_json(details, self.config['data'].get('images'))
895,783
Get a specific page of popular person data. Arguments: page (:py:class:`int`, optional): The page to get. Returns: :py:class:`dict`: The page data.
async def _get_popular_people_page(self, page=1):
    return await self.get_data(self.url_builder(
        'person/popular',
        url_params=OrderedDict(page=page),
    ))
895,784
Determine the location of a given index in paged data. Arguments: index (:py:class:`int`): The overall index. data (:py:class:`dict`): The first page of data. Returns: :py:class:`tuple`: The location of that index, in the format ``(page, index_in_page)``.
def _calculate_page_index(index, data):
    if index > data['total_results']:
        raise ValueError('index not in paged data')
    page_length = len(data['results'])
    return (index // page_length) + 1, (index % page_length) - 1
895,785
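For example, with 20 results per page an overall index of 27 lands on page 2; a small sketch of the arithmetic (note the in-page index is offset by -1, exactly as in the return statement above):

page_length = 20  # len(data['results'])
index = 27
page = (index // page_length) + 1          # 2
index_in_page = (index % page_length) - 1  # 6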
Commit the shared remote folder data into local config.xml file 1. Update the remote_folder path and label 2. Append the remote_folder to config folders list Args: remote_folder(folder): syncthing folder object local_path: existing local path
def acknowledge(self, **kwargs):
    device_id = kwargs['device_id']
    config = self.get_config()
    # Client - Client
    if 'r_folder_id' in kwargs:
        r_folder_id = kwargs['r_folder_id']
        remote_folder = syncthing_adt.Folder(
            id=r_folder_id,
            label=kwargs['label'],
            path=kwargs['local_path'],
            deviceID=self.get_device_id(),
            rescanIntervalS=kwargs['interval']
        )
        remote_folder.add_device(device_id)
        remote_folder = remote_folder.obj
    # Server - Client
    else:
        remote_folder = kwargs['folder_obj']
        remote_folder['path'] = kwargs['local_path']
        if kwargs['interval']:
            remote_folder['rescanIntervalS'] = kwargs['interval']
        r_folder_id = remote_folder['id']
    # Check syncthing config to make sure folder is not added
    if self.folder_exists({'path': kwargs['local_path']}, config):
        raise ValueError('This folder has already been added.')
    # Modify syncthing config
    config['folders'].append(remote_folder)
    config['label'] = kwargs['label']
    self.new_device(config=config, device_id=device_id)
    device = self.find_device(device_id, config)
    if device:
        device['name'] = kwargs['hostname']
    # Save folder data into kodrive config
    self.adapter.set_dir_config({
        'device_id': device_id,
        'api_key': kwargs['api_key'] if 'api_key' in kwargs else '',
        'label': kwargs['label'],
        'local_path': kwargs['local_path'],
        'is_shared': True,
        'server': kwargs['server'] if 'server' in kwargs else False,
        'host': kwargs['host'] if 'host' in kwargs else None,
        'remote_path': kwargs['remote_path'] if 'remote_path' in kwargs else '',
        'port': kwargs['port'] if 'port' in kwargs else None
    })
    self.set_config(config)
    self.restart()
895,875
Set level of logging for all loggers. Args: level (int): level of logging.
def set_level(level):
    Logger.level = level
    for logger in Logger.loggers.values():
        logger.setLevel(level)
895,966
Return a logger. Args: name (str): name to pass to the logging module. level (int): level of logging. fmt (str): format string. Returns: logging.Logger: logger from ``logging.getLogger``.
def get_logger(name, level=None, fmt=':%(lineno)d: %(message)s'):
    if name not in Logger.loggers:
        if Logger.level is None and level is None:
            Logger.level = level = logging.ERROR
        elif Logger.level is None:
            Logger.level = level
        elif level is None:
            level = Logger.level
        logger = logging.getLogger(name)
        logger_handler = logging.StreamHandler()
        logger_handler.setFormatter(LoggingFormatter(fmt=name + fmt))
        logger.addHandler(logger_handler)
        logger.setLevel(level)
        Logger.loggers[name] = logger
    return Logger.loggers[name]
895,967
Check if given file exists and is a regular file. Args: value (str): path to the file. Raises: argparse.ArgumentTypeError: if not valid. Returns: str: original value argument.
def valid_file(value):
    if not value:
        raise argparse.ArgumentTypeError("'' is not a valid file path")
    elif not os.path.exists(value):
        raise argparse.ArgumentTypeError(
            "%s is not a valid file path" % value)
    elif os.path.isdir(value):
        raise argparse.ArgumentTypeError(
            "%s is a directory, not a regular file" % value)
    return value
896,115
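Typical use is as an argparse type, so the ArgumentTypeError surfaces as a clean CLI error; a minimal sketch assuming valid_file is importable:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('config', type=valid_file)
# parser.parse_args(['settings.ini'])  # errors out if the path is missing or a directory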
Find the offsets in a byte code which are start of lines in the source. Generate pairs (offset, lineno) as described in Python/compile.c. Arguments: code: code object. Yields: Address and line number pairs.
def _findlinestarts(code):
    # Python 2: co_lnotab is a str, so each byte is decoded with ord()
    byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
    line_increments = [ord(c) for c in code.co_lnotab[1::2]]
    lastlineno = None
    lineno = code.co_firstlineno
    addr = 0
    for byte_incr, line_incr in zip(byte_increments, line_increments):
        if byte_incr:
            if lineno != lastlineno:
                yield (addr, lineno)
                lastlineno = lineno
            addr += byte_incr
        lineno += line_incr
    if lineno != lastlineno:
        yield (addr, lineno)
896,197
Check if matrix and its mediation matrix are compliant. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. complete_mediation_matrix (list of list of int): 2-dim array Returns: bool, str: True if compliant else False, messages
def matrices_compliance(dsm, complete_mediation_matrix):
    matrix = dsm.data
    rows_dep_matrix = len(matrix)
    cols_dep_matrix = len(matrix[0])
    rows_med_matrix = len(complete_mediation_matrix)
    cols_med_matrix = len(complete_mediation_matrix[0])
    if (rows_dep_matrix != rows_med_matrix or
            cols_dep_matrix != cols_med_matrix):
        raise DesignStructureMatrixError(
            'Matrices are NOT compliant '
            '(number of rows/columns not equal)')
    discrepancy_found = False
    message = []
    for i in range(0, rows_dep_matrix):
        for j in range(0, cols_dep_matrix):
            if ((complete_mediation_matrix[i][j] == 0 and
                    matrix[i][j] > 0) or
                    (complete_mediation_matrix[i][j] == 1 and
                     matrix[i][j] < 1)):
                discrepancy_found = True
                message.append(
                    'Untolerated dependency at %s:%s (%s:%s): '
                    '%s instead of %s' % (
                        i, j, dsm.entities[i], dsm.entities[j],
                        matrix[i][j], complete_mediation_matrix[i][j]))
    message = '\n'.join(message)
    return not discrepancy_found, message
896,234
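The per-cell rule is simple: a 0 in the mediation matrix forbids any dependency, while a 1 requires at least one. A self-contained sketch of that rule on plain integers, independent of the DSM class:

def cell_compliant(dep, mediation):
    # mediation == 0 -> dep must be 0; mediation == 1 -> dep must be >= 1
    return dep == 0 if mediation == 0 else dep >= 1

assert cell_compliant(0, 0) and cell_compliant(3, 1)
assert not cell_compliant(2, 0) and not cell_compliant(0, 1)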
Check if matrix and its mediation matrix are compliant. It means that number of dependencies for each (line, column) is either 0 if the mediation matrix (line, column) is 0, or >0 if the mediation matrix (line, column) is 1. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if compliant else False, messages
def check(self, dsm, **kwargs):
    # generate complete_mediation_matrix according to each category
    med_matrix = CompleteMediation.generate_mediation_matrix(dsm)
    return CompleteMediation.matrices_compliance(dsm, med_matrix)
896,235
Check economy of mechanism. As first abstraction, number of dependencies between two modules < 2 * the number of modules (dependencies to the framework are NOT considered). Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. simplicity_factor (int): simplicity factor. Returns: bool, str: True if economic else False, messages
def check(self, dsm, simplicity_factor=2, **kwargs):
    # economy_of_mechanism
    economy_of_mechanism = False
    message = ''
    data = dsm.data
    categories = dsm.categories
    dsm_size = dsm.size[0]
    if not categories:
        categories = ['appmodule'] * dsm_size
    dependency_number = 0
    # evaluate Matrix(data)
    for i in range(0, dsm_size):
        for j in range(0, dsm_size):
            if (categories[i] not in ('framework', 'corelib') and
                    categories[j] not in ('framework', 'corelib') and
                    data[i][j] > 0):
                dependency_number += 1
    # check comparison result
    if dependency_number < dsm_size * simplicity_factor:
        economy_of_mechanism = True
    else:
        message = ' '.join([
            'Number of dependencies (%s)' % dependency_number,
            '> number of rows (%s)' % dsm_size,
            '* simplicity factor (%s) = %s' % (
                simplicity_factor, dsm_size * simplicity_factor)])
    return economy_of_mechanism, message
896,236
Check least common mechanism. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. independence_factor (int): if the maximum dependencies for one module is inferior or equal to the DSM size divided by the independence factor, then this criterion is verified. Returns: bool, str: True if least common mechanism else False, messages
def check(self, dsm, independence_factor=5, **kwargs):
    # leastCommonMechanismMatrix
    least_common_mechanism = False
    message = ''
    # get the list of dependent modules for each module
    data = dsm.data
    categories = dsm.categories
    dsm_size = dsm.size[0]
    if not categories:
        categories = ['appmodule'] * dsm_size
    dependent_module_number = []
    # evaluate Matrix(data)
    for j in range(0, dsm_size):
        dependent_module_number.append(0)
        for i in range(0, dsm_size):
            if (categories[i] != 'framework' and
                    categories[j] != 'framework' and
                    data[i][j] > 0):
                dependent_module_number[j] += 1
    # except for the broker if any and libs, check that threshold is not
    # overlapped; indexes of brokers and app_libs are set to 0
    for index, item in enumerate(dsm.categories):
        if item == 'broker' or item == 'applib':
            dependent_module_number[index] = 0
    if max(dependent_module_number) <= dsm_size / independence_factor:
        least_common_mechanism = True
    else:
        maximum = max(dependent_module_number)
        message = (
            'Dependencies to %s (%s) > matrix size (%s) / '
            'independence factor (%s) = %s' % (
                dsm.entities[dependent_module_number.index(maximum)],
                maximum, dsm_size, independence_factor,
                dsm_size / independence_factor))
    return least_common_mechanism, message
896,237
Check layered architecture. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if layered architecture else False, messages
def check(self, dsm, **kwargs):
    layered_architecture = True
    messages = []
    categories = dsm.categories
    dsm_size = dsm.size[0]
    if not categories:
        categories = ['appmodule'] * dsm_size
    for i in range(0, dsm_size - 1):
        for j in range(i + 1, dsm_size):
            if (categories[i] != 'broker' and
                    categories[j] != 'broker' and
                    dsm.entities[i].split('.')[0] != dsm.entities[j].split('.')[0]):  # noqa
                if dsm.data[i][j] > 0:
                    layered_architecture = False
                    messages.append(
                        'Dependency from %s to %s breaks the '
                        'layered architecture.' % (
                            dsm.entities[i], dsm.entities[j]))
    return layered_architecture, '\n'.join(messages)
896,238
Check code clean. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if code clean else False, messages
def check(self, dsm, **kwargs):
    logger.debug('Entities = %s' % dsm.entities)
    messages = []
    code_clean = True
    threshold = kwargs.pop('threshold', 1)
    rows, _ = dsm.size
    for i in range(0, rows):
        if dsm.data[i][0] > threshold:
            messages.append(
                'Number of issues (%d) in module %s '
                '> threshold (%d)' % (
                    dsm.data[i][0], dsm.entities[i], threshold))
            code_clean = False
    return code_clean, '\n'.join(messages)
896,239
Run the analysis. Generate data from each provider, then check these data with every checker, and store the analysis results. Args: verbose (bool): whether to immediately print the results or not.
def run(self, verbose=True):
    self.results.clear()
    for analysis_group in self.config.analysis_groups:
        if analysis_group.providers:
            for provider in analysis_group.providers:
                logger.info('Run provider %s', provider.identifier)
                provider.run()
                for checker in analysis_group.checkers:
                    result = self._get_checker_result(
                        analysis_group, checker, provider)
                    self.results.append(result)
                    analysis_group.results.append(result)
                    if verbose:
                        result.print()
        else:
            for checker in analysis_group.checkers:
                result = self._get_checker_result(
                    analysis_group, checker, nd='no-data-')
                self.results.append(result)
                analysis_group.results.append(result)
                if verbose:
                    result.print()
896,485
Initialization method. Args: name (str): the group name. description (str): the group description. providers (list): the list of providers. checkers (list): the list of checkers.
def __init__(self, name=None, description=None, providers=None, checkers=None):
    self.name = name
    self.description = description
    self.providers = providers or []
    self.checkers = checkers or []
    self.results = []
896,488
Initialization method. Args: group (AnalysisGroup): parent group. provider (Provider): parent Provider. checker (Checker): parent Checker. code (int): constant from Checker class. messages (str): messages string.
def __init__(self, group, provider, checker, code, messages):
    self.group = group
    self.provider = provider
    self.checker = checker
    self.code = code
    self.messages = messages
896,489
Calculates the sum of the squared differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the squared differences between target and prediction
def squared_error(y, y_pred):
    y, y_pred = convert_assert(y, y_pred)
    return np.sum((y - y_pred) ** 2)
896,518
Calculates the sum of the absolute differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, sum of the absolute differences between target and prediction
def absolute_error(y, y_pred):
    y, y_pred = convert_assert(y, y_pred)
    # Sum of absolute differences; the original summed raw differences,
    # which lets positive and negative errors cancel out.
    return np.sum(np.abs(y - y_pred))
896,519
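Both metrics reduce to a single NumPy expression; a self-contained check of the arithmetic (bypassing convert_assert, and using the np.abs fix applied above):

import numpy as np

y = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 2.0, 2.0])
print(np.sum((y - y_pred) ** 2))   # 1.25, what squared_error computes
print(np.sum(np.abs(y - y_pred)))  # 1.5, what absolute_error computes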
Handles HTTP error codes for the given request Raises: AuthenticationError on the appropriate 4** errors ServerError if the response is not an ok (2**) Arguments: r -- The request result
def handleresult(self, r):
    if r.status_code >= 400 and r.status_code < 500:
        msg = r.json()
        raise AuthenticationError(str(msg["code"]) + ": " + msg["msg"] + " (" + msg["ref"] + ")")
    elif r.status_code > 300:
        err = None
        try:
            msg = r.json()
            err = ServerError(str(msg["code"]) + ": " + msg["msg"] + " (" + msg["ref"] + ")")
        except:
            raise ServerError(
                "Server returned error, but did not give a valid error message")
        raise err
    return r
896,581
Initialization method. Args: allow_failure (bool): still pass if failed or not. arguments (dict): arguments passed to the check method when run.
def __init__(self, name=None, description=None, hint=None,
             allow_failure=False, passes=None, arguments=None):
    if name:
        self.name = name
    if description:
        self.description = description
    if hint:
        self.hint = hint
    self.allow_failure = allow_failure
    self.passes = passes
    self.arguments = arguments or {}
    self.result = None
896,656
Run the check method and format the result for analysis. Args: data (DSM/DMM/MDM): DSM/DMM/MDM instance to check. Returns: tuple (int, str): status constant from Checker class and messages.
def run(self, data):
    result_type = namedtuple('Result', 'code messages')
    if self.passes is True:
        result = result_type(Checker.Code.PASSED, '')
    elif self.passes is False:
        if self.allow_failure:
            result = result_type(Checker.Code.IGNORED, '')
        else:
            result = result_type(Checker.Code.FAILED, '')
    else:
        try:
            result = self.check(data, **self.arguments)
            messages = ''
            if isinstance(result, tuple):
                result, messages = result
            if result not in Checker.Code:
                result = Checker.Code.PASSED if bool(result) else Checker.Code.FAILED
            if result == Checker.Code.FAILED and self.allow_failure:
                result = Checker.Code.IGNORED
            result = result_type(result, messages)
        except NotImplementedError:
            result = result_type(Checker.Code.NOT_IMPLEMENTED, '')
    self.result = result
896,657
Initialization method. Args: arguments (dict): arguments that will be used for get_data method.
def __init__(self, name=None, description=None, arguments=None):
    if name:
        self.name = name
    if description:
        self.description = description
    self.arguments = arguments or {}
    self.data = None
896,658
Return a pretty formatted string given some text. Args: description (str): string to format. wrap_at (int): maximum length of a line. indent (int): level of indentation. Returns: str: pretty formatted string.
def pretty_description(description, wrap_at=None, indent=0):
    if wrap_at is None or wrap_at < 0:
        width = console_width(default=79)
        if wrap_at is None:
            wrap_at = width
        else:
            wrap_at += width
    indent = ' ' * indent
    text_wrapper = textwrap.TextWrapper(
        width=wrap_at, replace_whitespace=False,
        initial_indent=indent, subsequent_indent=indent)
    new_desc = []
    for line in description.split('\n'):
        new_desc.append(line.replace('\n', '').strip())
    while not new_desc[0]:
        del new_desc[0]
    while not new_desc[-1]:
        del new_desc[-1]
    separators = [i for i, l in enumerate(new_desc) if not l]
    paragraphs = []
    if separators:
        start, end = 0, separators[0]
        paragraphs.append(new_desc[start:end])
        for i in range(len(separators) - 1):
            start = end + 1
            end = separators[i + 1]
            paragraphs.append(new_desc[start:end])
        paragraphs.append(new_desc[end + 1:])
        return '\n\n'.join(text_wrapper.fill(' '.join(p)) for p in paragraphs)
    return text_wrapper.fill(' '.join(new_desc))
896,706
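The paragraph handling above leans entirely on textwrap; a standalone sketch of the same fill-with-uniform-indent idea:

import textwrap

wrapper = textwrap.TextWrapper(width=30, initial_indent='  ', subsequent_indent='  ')
print(wrapper.fill('a long description that will be wrapped and indented uniformly'))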
Implement the get_data method from the Provider class. Parse CSV to return an instance of DSM. Args: file_path (str/fd): path or file descriptor. delimiter (str): character(s) used as delimiter for columns. categories_delimiter (str): character(s) used as delimiter for categories and keys (first column). Returns: DSM: instance of DSM.
def get_data(self, file_path=sys.stdin, delimiter=',', categories_delimiter=None):
    if file_path == sys.stdin:
        logger.info('Read data from standard input')
        lines = [line.replace('\n', '') for line in file_path]
    else:
        logger.info('Read data from file ' + file_path)
        with open(file_path) as file:
            lines = list(file)
    columns = lines[0].rstrip('\n').split(delimiter)[1:]
    categories = None
    if categories_delimiter:
        columns, categories = zip(*[c.split(categories_delimiter, 1)
                                    for c in columns])
    size = len(columns)
    data = [list(map(int, l.split(delimiter)[1:]))
            for l in lines[1:size + 1]]
    return DesignStructureMatrix(data, columns, categories)
896,717
Main fit method for SAR. Expects the dataframes to have row_id, col_id columns which are indexes, i.e. contain the sequential integer index of the original alphanumeric user and item IDs. Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default. Arguments: df (pySpark.DataFrame): input dataframe which contains the index of users and items.
def fit(
    self,
    df,
    similarity_type="jaccard",
    time_decay_coefficient=30,
    time_now=None,
    timedecay_formula=False,
    threshold=1,
):
    # threshold - items below this number get set to zero in cooccurrence counts
    assert threshold > 0
    df.createOrReplaceTempView("{prefix}df_train_input".format(**self.header))
    if timedecay_formula:
        # WARNING: previously we would take the last value in the training
        # dataframe and set it as a matrix U element for each user-item pair.
        # Now with time decay, we compute a sum over ratings given by a user
        # in the case when T=np.inf, so the user gets a cumulative sum of
        # ratings for a particular item and not the last rating.
        # Time Decay: do a group by on user-item pairs and apply the formula
        # for time decay there. Time T parameter is in days and input time is
        # in seconds, so we do dt/60/(T*24*60)=dt/(T*24*3600).
        # The following is the query which we want to run:
        query = self.f(
            ...,  # (SQL query string elided in the source)
            time_now=time_now,
            time_decay_coefficient=time_decay_coefficient,
        )
        # replace with time-decayed version
        df = self.spark.sql(query)
    else:
        # since SQL is case insensitive, this check needs to be performed similar
        if self.header['col_timestamp'].lower() in [s.name.lower() for s in df.schema]:
            # we need to de-duplicate items by using the latest item
            query = self.f(
                ...  # (SQL query string elided in the source)
            )
            df = self.spark.sql(query)
    df.createOrReplaceTempView(self.f("{prefix}df_train"))
    log.info("sarplus.fit 1/2: compute item cooccurrences...")
    # compute cooccurrence above minimum threshold
    query = self.f(
        ...,  # (SQL query string elided in the source)
        threshold=threshold,
    )
    item_cooccurrence = self.spark.sql(query)
    item_cooccurrence.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_cooccurrence")
    )
    # compute the diagonal used later for Jaccard and Lift
    if similarity_type == SIM_LIFT or similarity_type == SIM_JACCARD:
        item_marginal = self.spark.sql(
            self.f(
                "SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2"
            )
        )
        item_marginal.createOrReplaceTempView(self.f("{prefix}item_marginal"))
    if similarity_type == SIM_COOCCUR:
        self.item_similarity = item_cooccurrence
    elif similarity_type == SIM_JACCARD:
        query = self.f(
            ...  # (SQL query string elided in the source)
        )
        self.item_similarity = self.spark.sql(query)
    elif similarity_type == SIM_LIFT:
        query = self.f(
            ...  # (SQL query string elided in the source)
        )
        self.item_similarity = self.spark.sql(query)
    else:
        raise ValueError("Unknown similarity type: {0}".format(similarity_type))
    # store upper triangular
    log.info("sarplus.fit 2/2: compute similarity metric %s..." % similarity_type)
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_similarity_upper")
    )
    # expand upper triangular to full matrix
    query = self.f(
        ...  # (SQL query string elided in the source)
    )
    self.item_similarity = self.spark.sql(query)
    self.item_similarity.write.mode("overwrite").saveAsTable(
        self.f("{prefix}item_similarity")
    )
    # free space
    self.spark.sql(self.f("DROP TABLE {prefix}item_cooccurrence"))
    self.spark.sql(self.f("DROP TABLE {prefix}item_similarity_upper"))
    self.item_similarity = self.spark.table(self.f("{prefix}item_similarity"))
897,122
Prepare test set for C++ SAR prediction code. Find all items the test users have seen in the past. Arguments: test (pySpark.DataFrame): input dataframe which contains test users.
def get_user_affinity(self, test):
    test.createOrReplaceTempView(self.f("{prefix}df_test"))
    query = self.f(
        "SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}"
    )
    df_test_users = self.spark.sql(query)
    df_test_users.write.mode("overwrite").saveAsTable(
        self.f("{prefix}df_test_users")
    )
    query = self.f(
        ...  # (SQL query string elided in the source)
    )
    return self.spark.sql(query)
897,123
Recommend top K items for all users which are in the test set. Args: test: test Spark dataframe top_k: top n items to return remove_seen: remove items test users have already seen in the past from the recommended set.
def recommend_k_items_slow(self, test, top_k=10, remove_seen=True):
    # TODO: remove seen
    if remove_seen:
        raise ValueError("Not implemented")
    self.get_user_affinity(test)\
        .write.mode("overwrite")\
        .saveAsTable(self.f("{prefix}user_affinity"))
    # user_affinity * item_similarity
    # filter top-k
    query = self.f(
        ...,  # (SQL query string elided in the source)
        top_k=top_k,
    )
    return self.spark.sql(query)
897,125
Creates a new injector. All provided keys will be injectable. Arguments: parent -- Reserved name, used for sub-injectors.
def __init__(self, parent=None, **kwargs):
    self.___parent = parent
    self.___subs = []
    self.___args = kwargs
    self.___close_list = []
    self.___closed = False
    self.___initialized = set()
    for item in kwargs.values():
        self._record_closeable(item)
    if parent:
        parent.___subs.append(weakref.ref(self))
897,657
Fits the given model to the data and labels provided. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. Returns: -------- X, y : the validated inputs, converted to float32 arrays (base implementation)
def fit(self, X, y):
    X = np.array(X, dtype=np.float32)
    y = np.array(y, dtype=np.float32)
    assert X.shape[0] == y.shape[0]
    return X, y
897,788
Shortcut to `model.fit(X, y); return model.predict(X_)`. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. X_ : matrix, shape (m_samples, m_features) The samples which labels to predict. Returns: -------- y : vector, shape (m_samples,) The predicted labels.
def fit_predict(self, X, y, X_):
    self.fit(X, y)
    return self.predict(X_)
897,789
Removes a job from the job queue, or from being executed. Args: options (list of str, optional): A list of command line options for the condor_rm command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html. Defaults to an empty list. sub_job_num (int, optional): The number of the sub-job to remove rather than the whole cluster. Defaults to None.
def remove(self, options=[], sub_job_num=None):
    args = ['condor_rm']
    args.extend(options)
    job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
    args.append(job_id)
    out, err = self._execute(args)
    return out, err
897,863
Returns the contract data for a given contract Args: contract_name (str): Name of the contract to return. Returns: str, str: ABI and bytecode of the contract
def get_contract_data(self, contract_name):
    contract_data_path = self.output_dir + '/{0}.json'.format(contract_name)
    with open(contract_data_path, 'r') as contract_data_file:
        contract_data = json.load(contract_data_file)
    abi = contract_data['abi']
    bytecode = contract_data['evm']['bytecode']['object']
    return abi, bytecode
898,099
A shortcut for the 'set' method. Args: key (str): The name of the attribute to set. value (str): The value to assign to 'key'.
def __setattr__(self, key, value):
    if key in self.__dict__ or '_' + key in self.__dict__:
        object.__setattr__(self, key, value)
    else:
        self.set(key, value)
898,138
Wait for the job, or a sub-job, to complete. Args: options (list of str, optional): A list of command line options for the condor_wait command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html. Defaults to an empty list. sub_job_num (int, optional): The number of the sub-job to wait for rather than the whole cluster. Defaults to None.
def wait(self, options=[], sub_job_num=None):
    args = ['condor_wait']
    args.extend(options)
    job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
    if self._remote:
        abs_log_file = self.log_file
    else:
        abs_log_file = os.path.abspath(self.log_file)
    args.extend([abs_log_file, job_id])
    out, err = self._execute(args)
    return out, err
898,144
Set the value of an attribute in the submit description file. The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean). The Python values will be reformatted into strings based on the standards described in the HTCondor manual: http://research.cs.wisc.edu/htcondor/manual/current/condor_submit.html Args: attr (str): The name of the attribute to set. value (str): The value to assign to 'attr'.
def set(self, attr, value):
    def escape_new_syntax(value, double_quote_escape='"'):
        value = str(value)
        value = value.replace("'", "''")
        value = value.replace('"', '%s"' % double_quote_escape)
        if ' ' in value or '\t' in value:
            value = "'%s'" % value
        return value

    def escape_new_syntax_pre_post_script(value):
        return escape_new_syntax(value, '\\')

    def escape_remap(value):
        value = value.replace('=', '\=')
        value = value.replace(';', '\;')
        return value

    def join_function_template(join_string, escape_func):
        return lambda value: join_string.join([escape_func(i) for i in value])

    def quote_join_function_template(join_string, escape_func):
        return lambda value: join_function_template(join_string, escape_func)(value)

    # NOTE: the original keyed this entry as 'rempas', which could never match
    # attributes like 'transfer_output_remaps'; assumed to be a typo for 'remaps'.
    join_functions = {'remaps': quote_join_function_template('; ', escape_remap),
                      'arguments': quote_join_function_template(' ', escape_new_syntax),
                      'Arguments': quote_join_function_template(' ', escape_new_syntax_pre_post_script)}
    if value is False:
        value = 'false'
    elif value is True:
        value = 'true'
    elif isinstance(value, list) or isinstance(value, tuple):
        join_function = join_function_template(', ', str)
        for key in list(join_functions.keys()):
            if attr.endswith(key):
                join_function = join_functions[key]
        value = join_function(value)
    self.attributes[attr] = value
898,146
Recursively replaces references to other attributes with their value. Args: attribute (str): The name of the attribute to resolve. Returns: str: The resolved value of 'attribute'.
def _resolve_attribute(self, attribute):
    value = self.attributes[attribute]
    if not value:
        return None
    resolved_value = re.sub('\$\((.*?)\)', self._resolve_attribute_match, value)
    return resolved_value
898,151
Replaces a reference to an attribute with the value of the attribute. Args: match (re.match object): A match object containing a match to a reference to an attribute.
def _resolve_attribute_match(self, match):
    if match.group(1) == 'cluster':
        return str(self.cluster_id)
    return self.get(match.group(1), match.group(0))
898,152
Get bin edges from a ROOT hist axis. Note: Doesn't include over- or underflow bins! Args: axis (ROOT.TAxis): Axis from which the bin edges should be extracted. Returns: Array containing the bin edges.
def get_bin_edges_from_axis(axis) -> np.ndarray:
    # Don't include over- or underflow bins
    bins = range(1, axis.GetNbins() + 1)
    # Bin edges
    bin_edges = np.empty(len(bins) + 1)
    bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins]
    bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins())
    return bin_edges
898,254
MPDS API consumer constructor Args: api_key: (str) The MPDS API key, or None if the MPDS_KEY envvar is set endpoint: (str) MPDS API gateway URL Returns: None
def __init__(self, api_key=None, endpoint=None, dtype=None, verbose=None, debug=None):
    self.api_key = api_key if api_key else os.environ['MPDS_KEY']
    self.network = httplib2.Http()
    self.endpoint = endpoint or self.endpoint
    self.dtype = dtype or MPDSDataTypes.PEER_REVIEWED
    self.verbose = verbose if verbose is not None else self.verbose
    self.debug = debug or self.debug
898,426
Calculate the number of entries matching the keyword(s) specified Args: search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"}, documented at https://developer.mpds.io/#Categories phases: (list) Phase IDs, according to the MPDS distinct phases concept kwargs: just a mockup Returns: count (int)
def count_data(self, search, phases=None, **kwargs):
    result = self._request(search, phases=phases, pagesize=10)
    if result['error']:
        raise APIError(result['error'], result.get('code', 0))
    if result['npages'] > self.maxnpages:
        warnings.warn(
            "\r\nDataset is too big, you may risk to change maxnpages from %s to %s" %
            (self.maxnpages, int(math.ceil(result['count'] / self.pagesize)))
        )
    return result['count']
898,429
Download a single Landsat image archive over HTTP using basic authentication. Arguments: username - account user name password - account password download - the download URL name - archive file name without the .tgz extension, e.g. LT81360082013127LGN01 ZIP_DIR - directory in which to save the archive
def single_download(self, username, password, download, name, ZIP_DIR):
    try:
        request = urllib2.Request(download)
        base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
        result = urllib2.urlopen(request)
        data = result.read()
        result.close()
        try:
            f = open(ZIP_DIR + '/' + name + '.tgz', 'wb')
            f.write(data)
            f.close()
            return True
        except urllib2.HTTPError:
            return False
    except urllib2.HTTPError:
        return False
898,619
Check whether a Landsat download URL is accessible with the given credentials. On success, an empty placeholder .tgz file is created and True is returned. Arguments: username - account user name password - account password download - the download URL name - archive file name without the .tgz extension, e.g. LT81360082013127LGN01
def checkifDownloadExist(self, username, password, download, name):
    try:
        request = urllib2.Request(download)
        base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
        result = urllib2.urlopen(request)
        try:
            f = open(self.zip_dir + '/' + name + '.tgz', 'wb')
            f.close()
            return True
        except urllib2.HTTPError:
            return False
    except urllib2.HTTPError:
        return False
898,620
Create a new virtual environment. Args: python (str): The name or path of a python interpreter to use while creating the virtual environment. system_site (bool): Whether or not to use the system site packages within the virtual environment. Default is False. always_copy (bool): Whether or not to force copying instead of symlinking in the virtual environment. Default is False.
def create(self, python=None, system_site=False, always_copy=False):
    command = 'virtualenv'
    if python:
        command = '{0} --python={1}'.format(command, python)
    if system_site:
        command = '{0} --system-site-packages'.format(command)
    if always_copy:
        command = '{0} --always-copy'.format(command)
    command = '{0} {1}'.format(command, self.path)
    self._execute(command)
898,621
Merge two lists without duplicating items Args: list_a: list list_b: list Returns: New list with deduplicated items from list_a and list_b
def list_merge(list_a, list_b):
    # equivalent one-liner:
    # return list(collections.OrderedDict.fromkeys(list_a + list_b))
    result = []
    for item in list_a:
        if item not in result:
            result.append(item)
    for item in list_b:
        if item not in result:
            result.append(item)
    return result
898,695
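Order-preserving deduplication in action, assuming list_merge above is in scope (the commented-out OrderedDict one-liner gives the same result):

print(list_merge([1, 2, 3], [2, 4, 1]))  # -> [1, 2, 3, 4]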
Flatten the dictionary keys are separated by separator Arguments: dictionary {dict} -- The dictionary to be flattened. Keyword Arguments: separator {str} -- The separator to use (default is '.'). It will crush items with key conflicts. prefix {str} -- Used for recursive calls. Returns: dict -- The flattened dictionary.
def flatten(dictionary, separator='.', prefix=''):
    new_dict = {}
    for key, value in dictionary.items():
        new_key = prefix + separator + key if prefix else key
        # collections.abc.MutableMapping on Python 3.3+; the original used the
        # pre-3.10 alias collections.MutableMapping
        if isinstance(value, collections.abc.MutableMapping):
            new_dict.update(flatten(value, separator, new_key))
        elif isinstance(value, list):
            new_value = []
            for item in value:
                if isinstance(item, collections.abc.MutableMapping):
                    new_value.append(flatten(item, separator, new_key))
                else:
                    new_value.append(item)
            new_dict[new_key] = new_value
        else:
            new_dict[new_key] = value
    return new_dict
898,784
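Assuming flatten above is in scope, nested mappings collapse into dotted keys while lists are kept as lists (with any dicts inside them flattened in place); a small demo:

data = {'a': {'b': 1, 'c': [{'d': 2}, 3]}}
print(flatten(data))  # -> {'a.b': 1, 'a.c': [{'a.c.d': 2}, 3]}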
Adjust the virtual environment settings and optional move it. Args: source (str): Path to the existing virtual environment. destination (str): Desired path of the virtual environment. move (bool): Whether or not to actually move the files. Default False.
def relocate(source, destination, move=False):
    venv = api.VirtualEnvironment(source)
    if not move:
        venv.relocate(destination)
        return None
    venv.move(destination)
    return None
898,785
Calculate the moving average over an array. Algorithm from: https://stackoverflow.com/a/14314054 Args: arr (np.ndarray): Array over which to calculate the moving average. n (int): Number of elements over which to calculate the moving average. Default: 3 Returns: np.ndarray: Moving average calculated over n.
def moving_average(arr: np.ndarray, n: int = 3) -> np.ndarray:
    ret = np.cumsum(arr, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n
898,980
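A quick check of the cumulative-sum trick, assuming numpy is imported as np as in the function above:

print(moving_average(np.array([1, 2, 3, 4, 5]), n=3))  # -> [2. 3. 4.]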
Recursive ``getattr``. This can be used as a drop-in for the standard ``getattr(...)``. Credit to: https://stackoverflow.com/a/31174427 Args: obj: Object to retrieve the attribute from. attr: Name of the attribute, with each successive attribute separated by a ".". Returns: The requested attribute. (Same as ``getattr``). Raises: AttributeError: If the attribute was not found and no default was provided. (Same as ``getattr``).
def recursive_getattr(obj: Any, attr: str, *args) -> Any:
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return functools.reduce(_getattr, [obj] + attr.split('.'))
898,981
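A small usage sketch with SimpleNamespace standing in for an arbitrary object graph:

from types import SimpleNamespace

obj = SimpleNamespace(a=SimpleNamespace(b=42))
print(recursive_getattr(obj, 'a.b'))     # -> 42
print(recursive_getattr(obj, 'a.c', 0))  # -> 0 (falls back to the default, like getattr)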
Recursively retrieve an item from a nested dict. Credit to: https://stackoverflow.com/a/52260663 Args: d: Mapping of strings to objects. keys: Names of the keys under which the object is stored. Can also just be a single string. Returns: The object stored under the keys. Raises: KeyError: If one of the keys isn't found.
def recursive_getitem(d: Mapping[str, Any], keys: Union[str, Sequence[str]]) -> Any:
    # If only a string, then just return the item
    if isinstance(keys, str):
        return d[keys]
    else:
        return functools.reduce(operator.getitem, keys, d)
898,983
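A small usage sketch of recursive_getitem on a nested dict:

d = {'a': {'b': {'c': 1}}}
print(recursive_getitem(d, ['a', 'b', 'c']))  # -> 1
print(recursive_getitem(d, 'a'))              # -> {'b': {'c': 1}}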
Get a Histogram1D associated with the selected jet and track pt bins. This is often used to retrieve data for fitting. Args: observables (dict): The observables from which the hist should be retrieved. track_pt_bin (int): Track pt bin of the desired hist. jet_pt_bin (int): Jet pt bin of the desired hist. Returns: Histogram1D: Converted TH1 or uproot histogram. Raises: ValueError: If the requested observable couldn't be found.
def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D:
    for name, observable in observables.items():
        if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:
            return histogram.Histogram1D.from_existing_hist(observable.hist)
    # The f-string prefix was missing in the original, so the bin values were
    # never interpolated into the error message.
    raise ValueError(f"Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}")
898,984
Used to create YapconfItems from a specification dictionary. Args: specification (dict): The specification used to initialize ``YapconfSpec`` env_prefix (str): Prefix to add to environment names separator (str): Separator for nested items parent_names (list): Parents names of any given item Returns: A dictionary of names to YapconfItems
def from_specification(specification, env_prefix=None, separator='.', parent_names=None):
    items = {}
    for item_name, item_info in six.iteritems(specification):
        names = copy.copy(parent_names) if parent_names else []
        items[item_name] = _generate_item(item_name, item_info,
                                          env_prefix, separator, names)
    return items
899,243
Update our current default with the new_default. Args: new_default: New default to set. respect_none: Flag to determine if ``None`` is a valid value.
def update_default(self, new_default, respect_none=False):
    if new_default is not None:
        self.default = new_default
    elif new_default is None and respect_none:
        self.default = None
899,247
Add this item as an argument to the given parser. Args: parser (argparse.ArgumentParser): The parser to add this item to. bootstrap: Flag to indicate whether you only want to mark this item as required or not
def add_argument(self, parser, bootstrap=False):
    if self.cli_expose:
        args = self._get_argparse_names(parser.prefix_chars)
        kwargs = self._get_argparse_kwargs(bootstrap)
        parser.add_argument(*args, **kwargs)
899,249
Converts all 'Truthy' values to True and 'Falsy' values to False. Args: value: Value to convert label: Label of the config which this item was found. Returns: bool: True for truthy values, False for falsy values.
def convert_config_value(self, value, label):
    if isinstance(value, six.string_types):
        value = value.lower()
    if value in self.TRUTHY_VALUES:
        return True
    elif value in self.FALSY_VALUES:
        return False
    else:
        raise YapconfValueError("Cowardly refusing to interpret "
                                "config value as a boolean. Name: "
                                "{0}, Value: {1}"
                                .format(self.name, value))
899,265
Load an analysis configuration from a file. Args: yaml: YAML object to use in loading the configuration. filename: Filename of the YAML configuration file. Returns: dict-like object containing the loaded configuration
def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
    with open(filename, "r") as f:
        config = yaml.load(f)
    return config
899,319
Iterate over an analysis dictionary with selected attributes. Args: analysis_objects: Analysis objects dictionary. selections: Keyword arguments used to select attributes from the analysis dictionary. Yields: object: Matching analysis object.
def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any],
                                  **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]:
    for key_index, obj in analysis_objects.items():
        # If selections is empty, we return every object. If it's not empty,
        # then we only want to return objects which are selected through the selections.
        selected_obj = not selections or all(
            [getattr(key_index, selector) == selected_value
             for selector, selected_value in selections.items()])
        if selected_obj:
            yield key_index, obj
899,328
Save the CellDataFrame to an hdf5 file. Args: path (str): the path to save to key (str): the name of the location to save it to mode (str): write mode
def to_hdf(self,path,key,mode='a'): pd.DataFrame(self.serialize()).to_hdf(path,key,mode=mode,format='table',complib='zlib',complevel=9) f = h5py.File(path,'r+') f[key].attrs["microns_per_pixel"] = float(self.microns_per_pixel) if self.microns_per_pixel is not None else np.nan f.close()
899,441
Add mutually exclusive phenotypes to the scored calls Args: phenotypes (list): a list of phenotypes to add to scored calls; if None or not set, add them all overwrite (bool): if True allow the overwrite of a phenotype; if False, the phenotype must not exist in the scored calls Returns: CellDataFrame
def phenotypes_to_scored(self,phenotypes=None,overwrite=False):
    if not self.is_uniform(): raise ValueError("inconsistent phenotypes")
    if phenotypes is None: phenotypes = self.phenotypes
    elif isinstance(phenotypes,str): phenotypes = [phenotypes]
    def _post(binary,phenotype_label,phenotypes,overwrite):
        d = binary.copy()
        if len(set(phenotypes)&set(binary.keys())) > 0 and overwrite==False:
            raise ValueError("Error, phenotype already exists as a scored type")
        for label in phenotypes: d[label] = 0
        # "x == x" is False only for NaN, so this guards against cells with
        # no phenotype label before setting the matching call
        if phenotype_label == phenotype_label and phenotype_label in phenotypes:
            d[phenotype_label] = 1
        return d
    output = self.copy()
    output['scored_calls'] = output.apply(lambda x:
        _post(x['scored_calls'],x['phenotype_label'],phenotypes,overwrite)
    ,1)
    return output
899,442
Concatenate multiple CellDataFrames; throws an error if the microns_per_pixel is not uniform across the frames Args: array_like (list): a list of 1 or more CellDataFrames Returns: CellDataFrame
def concat(self,array_like): arr = list(array_like) if len(set([x.microns_per_pixel for x in arr])) != 1: raise ValueError("Multiple microns per pixel set") cdf = CellDataFrame(pd.concat([pd.DataFrame(x) for x in arr])) cdf.microns_per_pixel = arr[0].microns_per_pixel return cdf
899,443
Read a CellDataFrame from an hdf5 file. Args: path (str): the path to read from key (str): the name of the location to read from Returns: CellDataFrame
def read_hdf(cls,path,key=None): df = pd.read_hdf(path,key) df['scored_calls'] = df['scored_calls'].apply(lambda x: json.loads(x)) df['channel_values'] = df['channel_values'].apply(lambda x: json.loads(x)) df['regions'] = df['regions'].apply(lambda x: json.loads(x)) df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.loads(x)) df['neighbors'] = df['neighbors'].apply(lambda x: json.loads(x)) df['neighbors'] = df['neighbors'].apply(lambda x: np.nan if not isinstance(x,dict) else dict(zip([int(y) for y in x.keys()],x.values())) ) df['frame_shape'] = df['frame_shape'].apply(lambda x: tuple(json.loads(x))) df = cls(df) f = h5py.File(path,'r') mpp = f[key].attrs["microns_per_pixel"] if not np.isnan(mpp): df.microns_per_pixel = mpp f.close() return df
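A hedged round-trip sketch, assuming `cdf` is an existing CellDataFrame; the file name and key are illustrative:

# Serialize, including the microns_per_pixel attribute stored in the HDF5 attrs.
cdf.to_hdf('cells.h5', 'data', mode='w')
restored = CellDataFrame.read_hdf('cells.h5', 'data')
assert restored.microns_per_pixel == cdf.microns_per_pixel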
899,444
Use the segmented images to create per-image graphics Args: verbose (bool): output more details if true Returns: SegmentationImages: returns a class used to construct the image graphics
def segmentation_images(self,*args,**kwargs):
    if not self.db: raise ValueError("Need to set db")
    segs = SegmentationImages.read_cellframe(self,*args,**kwargs)
    segs.microns_per_pixel = self.microns_per_pixel
    return segs
899,448
Return a class that can be used to create honeycomb plots Args: subsets (list): list of SubsetLogic objects step_pixels (int): distance between hexagons max_distance_pixels (int): the distance from each point within which to calculate the quantity of the phenotype for that area Returns: Cartesian: returns a class that holds the layout of the points to plot.
def cartesian(self,subsets=None,step_pixels=100,max_distance_pixels=150,*args,**kwargs): n = Cartesian.read_cellframe(self,subsets=subsets,step_pixels=step_pixels,max_distance_pixels=max_distance_pixels,prune_neighbors=False,*args,**kwargs) if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions'] else: n.measured_regions = self.get_measured_regions() if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes'] else: n.measured_phenotypes = self.phenotypes n.microns_per_pixel = self.microns_per_pixel return n
899,451
Return a class that can be used to access count densities Args: measured_regions (pandas.DataFrame): Dataframe of regions that are being measured (defaults to all the regions) measured_phenotypes (list): List of phenotypes present (defaults to all the phenotypes) minimum_region_size_pixels (int): Minimum region size to calculate counts on in pixels (Default: 1) Returns: Counts: returns a class that holds the counts.
def counts(self,*args,**kwargs): n = Counts.read_cellframe(self,prune_neighbors=False) if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions'] else: n.measured_regions = self.get_measured_regions() if 'measured_phenotypes' in kwargs: n.measured_phenotypes = kwargs['measured_phenotypes'] else: n.measured_phenotypes = self.phenotypes n.microns_per_pixel = self.microns_per_pixel if 'minimum_region_size_pixels' in kwargs: n.minimum_region_size_pixels = kwargs['minimum_region_size_pixels'] else: n.minimum_region_size_pixels = 1 return n
899,452
Change the names of scored calls; the input dictionary maps names in {<current name>: <new name>} format, and each new name must not already exist Args: change (dict): a dictionary of current name keys and new name values Returns: CellDataFrame: The CellDataFrame modified.
def rename_scored_calls(self,change): output = self.copy() output['scored_calls'] = output.apply(lambda x: _dict_rename(x['scored_calls'],change) ,1) return output
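The `_dict_rename` helper is not shown in this snippet; a plausible reconstruction consistent with the docstring (rename keys, refusing to clobber an existing name) might look like this sketch:

def _dict_rename(d, change):
    # Hypothetical reconstruction: reject renames onto existing keys,
    # then rebuild the dict with the new names substituted in.
    for new_name in change.values():
        if new_name in d:
            raise ValueError("new name {} already exists".format(new_name))
    return {change.get(k, k): v for k, v in d.items()}

assert _dict_rename({'PD1+': 1, 'CD3+': 0}, {'PD1+': 'PD-1+'}) == {'PD-1+': 1, 'CD3+': 0}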
899,455
Take a name or list of scored call names and drop those from the scored calls Args: names (list): list of names to drop or a single string name to drop Returns: CellDataFrame: The CellDataFrame modified.
def drop_scored_calls(self,names): def _remove(calls,names): d = dict([(k,v) for k,v in calls.items() if k not in names]) return d if isinstance(names, str): names = [names] output = self.copy() output['scored_calls'] = output['scored_calls'].\ apply(lambda x: _remove(x,names)) return output
899,457
Rename one or more input phenotypes to a single output phenotype Args: input_phenotype_labels (str or list): A str name or list of names to combine output_phenotype_label (str): The name to change the phenotype names to verbose (bool): output more details Returns: CellDataFrame: The CellDataFrame modified.
def collapse_phenotypes(self,input_phenotype_labels,output_phenotype_label,verbose=True):
    if isinstance(input_phenotype_labels,str): input_phenotype_labels = [input_phenotype_labels]
    bad_phenotypes = set(input_phenotype_labels)-set(self.phenotypes)
    if len(bad_phenotypes) > 0: raise ValueError("Error phenotype(s) "+str(bad_phenotypes)+" are not in the data.")
    data = self.copy()
    if len(input_phenotype_labels) == 0: return data
    def _swap_in(d,inputs,output):
        # Get the keys we need to merge together
        overlap = set(d.keys()).intersection(inputs)
        # if there are none to merge we're done already
        if len(overlap) == 0: return d
        keepers = [(k,v) for k,v in d.items() if k not in inputs]
        # combine anything that's not a keeper, taking the max of the merged binary calls
        return dict(keepers+\
                    [(output,max([d[x] for x in overlap]))])
    data['phenotype_calls'] = data.apply(lambda x:
        _swap_in(x['phenotype_calls'],input_phenotype_labels,output_phenotype_label)
    ,1)
    def _set_label(d):
        vals = [k for k,v in d.items() if v==1]
        return np.nan if len(vals) == 0 else vals[0]
    data['phenotype_label'] = data.apply(lambda x: _set_label(x['phenotype_calls']),1)
    return data
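A standalone demo of the merge rule, using illustrative phenotype names; merged binary calls take the max, so the output is positive if any collapsed input was:

def swap_in(d, inputs, output):
    # Same logic as the closure above, extracted for demonstration.
    overlap = set(d) & set(inputs)
    if not overlap:
        return d
    merged = {k: v for k, v in d.items() if k not in inputs}
    merged[output] = max(d[x] for x in overlap)
    return merged

calls = {'T cell': 1, 'B cell': 0, 'Other': 0}
assert swap_in(calls, ['T cell', 'B cell'], 'Lymphocyte') == {'Other': 0, 'Lymphocyte': 1}

Note that combine_regions below follows the same pattern but sums the merged values, since region sizes are additive while binary calls are not.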
899,460
Combine/rename one or more input regions to a single output region Args: input_region_labels (str or list): A str name or list of names to combine output_region_label (str): The name to change the region names to verbose (bool): output more details Returns: CellDataFrame: The CellDataFrame modified.
def combine_regions(self,input_region_labels,output_region_label,verbose=True):
    if isinstance(input_region_labels,str): input_region_labels = [input_region_labels]
    bad_regions = set(input_region_labels)-set(self.regions)
    if len(bad_regions) > 0: raise ValueError("Error region(s) "+str(bad_regions)+" are not in the data.")
    data = self.copy()
    if len(input_region_labels) == 0: return data
    def _swap_in(d,inputs,output):
        # Get the keys we need to merge together
        overlap = set(d.keys()).intersection(inputs)
        # if there are none to merge we're done already
        if len(overlap) == 0: return d
        keepers = [(k,v) for k,v in d.items() if k not in inputs]
        # combine anything that's not a keeper, summing the merged region sizes
        return dict(keepers+\
                    [(output,sum([d[x] for x in overlap]))])
    data['regions'] = data.apply(lambda x:
        _swap_in(x['regions'],input_region_labels,output_region_label)
    ,1)
    data.loc[data['region_label'].isin(input_region_labels),'region_label'] = output_region_label
    return data
899,461
Convert binary phenotypes to mutually exclusive phenotypes. If none of the phenotypes are set, then phenotype_label becomes nan. If any of the phenotypes are multiply set then it throws a fatal error. Args: phenotypes (list): a list of scored_names to convert to phenotypes Returns: CellDataFrame
def scored_to_phenotype(self,phenotypes):
    def _apply_score(scored_calls,phenotypes):
        present = sorted(list(set(phenotypes)&set(scored_calls.keys())))
        total = sum([scored_calls[x] for x in present])
        if total > 1: raise ValueError("You can't extract phenotypes from scores if they are not mutually exclusive")
        if total == 0: return np.nan
        for label in present:
            if scored_calls[label] == 1: return label
        raise ValueError("Should have hit an exit criteria already")
    output = self.copy()
    output['phenotype_label'] = output.apply(lambda x: _apply_score(x['scored_calls'],phenotypes),1)
    # now update the phenotype calls to match the new mutually exclusive labels
    output['phenotype_calls'] = output.apply(lambda x:
        dict([(y,1 if x['phenotype_label']==y else 0) for y in phenotypes])
    ,1)
    return output
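A standalone demo of the mutual-exclusivity rule with illustrative marker names; at most one of the selected scores may be set, and no set scores yield NaN:

import numpy as np

def apply_score(scored_calls, phenotypes):
    # Same decision logic as the closure above, extracted for demonstration.
    present = sorted(set(phenotypes) & set(scored_calls))
    total = sum(scored_calls[x] for x in present)
    if total > 1:
        raise ValueError("scores are not mutually exclusive")
    if total == 0:
        return np.nan
    return next(label for label in present if scored_calls[label] == 1)

assert apply_score({'CD3+': 1, 'CD19+': 0}, ['CD3+', 'CD19+']) == 'CD3+'
assert np.isnan(apply_score({'CD3+': 0, 'CD19+': 0}, ['CD3+', 'CD19+']))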
899,464
Adds all items to the parser passed in. Args: parser (argparse.ArgumentParser): The parser to add all items to. bootstrap (bool): Flag to indicate whether only bootstrapped items should be marked as required on the command-line.
def add_arguments(self, parser, bootstrap=False):
    for item in self._get_items(bootstrap=False):
        item.add_argument(parser, bootstrap)
899,921
Find an item in the specification by fully qualified name. Args: fq_name (str): Fully-qualified name of the item. Returns: The item if it is in the specification. None otherwise
def find_item(self, fq_name): names = fq_name.split(self._separator) current = self._yapconf_items for name in names: if isinstance(current, (YapconfDictItem, YapconfListItem)): current = current.children if name not in current: return None current = current[name] return current
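The traversal is just a dotted-name walk over nested items; a self-contained sketch over plain dicts (standing in for the children of YapconfDictItem/YapconfListItem) shows the idea:

def find(tree, fq_name, separator='.'):
    # Descend one path component at a time, returning None on a miss.
    current = tree
    for name in fq_name.split(separator):
        if not isinstance(current, dict) or name not in current:
            return None
        current = current[name]
    return current

items = {'db': {'host': 'localhost', 'port': 5432}}
assert find(items, 'db.port') == 5432
assert find(items, 'db.missing') is None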
899,923
Get a particular item in the specification. Args: name (str): The name of the item to retrieve. bootstrap (bool): Only search bootstrap items Returns: YapconfItem: The item if it is found, None otherwise.
def get_item(self, name, bootstrap=False): for item in self._get_items(bootstrap): if item.name == name: return item return None
899,924
Update items defaults to the values in the new_defaults dict. Args: new_defaults (dict): A key-value pair of new defaults to be applied. respect_none (bool): Flag to indicate if ``None`` values should constitute an update to the default.
def update_defaults(self, new_defaults, respect_none=False): for key, value in six.iteritems(new_defaults): item = self.get_item(key) if item is None: raise YapconfItemNotFound("Cannot update default for {0}, " "there is no config item by the " "name of {1}".format(key, key), None) item.update_default(value, respect_none)
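A hedged usage sketch, assuming the yapconf package is installed; the spec key is illustrative:

from yapconf import YapconfSpec

spec = YapconfSpec({'db_host': {'type': 'str', 'default': 'localhost'}})
# Override the default declared in the specification.
spec.update_defaults({'db_host': 'db.example.com'})
assert spec.get_item('db_host').default == 'db.example.com'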
899,925
Create a new buffer of n chunks. Parameters: size: number of chunks chunk_size: size of each chunk ctype: string of the C type to use (defaults to float)
def __init__(self, size, chunk_size, ctype='float'): self.count = 0 # current number of chunks self.size = size # max number of chunks self.chunk_size = chunk_size # size of chunks self.ctype = ctype self.data = self._allocate(size) self.ctype_size = sizeof(self.data[0:1])
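The `_allocate` helper is not shown here; a plausible ctypes-based sketch of chunk allocation (names and layout hypothetical, one zero-initialized C array per chunk) would be:

from ctypes import c_float, sizeof

size, chunk_size = 8, 1024

# A guess at what _allocate returns: one zero-initialized C array per chunk.
data = [(c_float * chunk_size)() for _ in range(size)]

# Bytes occupied by one chunk.
chunk_bytes = sizeof(data[0])
print(chunk_bytes)  # 4096 on platforms where c_float is 4 bytes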
900,021
Retrieve the mean and median from a ROOT histogram. Note: These values are not so trivial to calculate without ROOT, as they are the bin values weighted by the bin content. Args: hist: Histogram from which the values will be extracted. Returns: mean, median of the histogram.
def _get_mean_and_median(hist: Hist) -> Tuple[float, float]: # Median # See: https://root-forum.cern.ch/t/median-of-histogram/7626/5 x = ctypes.c_double(0) q = ctypes.c_double(0.5) # Apparently needed to be safe(?) hist.ComputeIntegral() hist.GetQuantiles(1, x, q) mean = hist.GetMean() return (mean, x.value)
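For reference, a ROOT-free sketch of the same quantities from bin centers weighted by bin contents; the median here picks the first bin where the cumulative fraction reaches 0.5, whereas ROOT's GetQuantiles interpolates within the bin, so results can differ slightly:

import numpy as np

def mean_and_median(bin_centers, bin_contents):
    # Weighted mean over bin centers.
    mean = np.average(bin_centers, weights=bin_contents)
    # First bin at which the cumulative fraction reaches 0.5.
    cdf = np.cumsum(bin_contents) / np.sum(bin_contents)
    median = bin_centers[np.searchsorted(cdf, 0.5)]
    return mean, median

print(mean_and_median(np.array([0.5, 1.5, 2.5]), np.array([1, 2, 1])))  # (1.5, 1.5)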
900,131
Project the input histogram to the particle level axis. Args: hist: Histogram to check for outliers. outliers_removal_axis: Axis along which outliers removal will be performed. Usually the particle level axis. Returns: The histogram to check for outliers.
def _project_to_part_level(hist: Hist, outliers_removal_axis: OutliersRemovalAxis) -> Hist: # Setup the projector import ROOT if isinstance(hist, (ROOT.TH2, ROOT.TH3)): projection_information: Dict[str, Any] = {} output_object = _OutputObject(None) projector = projectors.HistProjector( observable_to_project_from = hist, output_observable = output_object, output_attribute_name = "output", projection_name_format = "outliers_removal_hist", projection_information = projection_information, ) # No additional_axis_cuts or projection_dependent_cut_axes # Projection axis projector.projection_axes.append( projectors.HistAxisRange( axis_type = outliers_removal_axis, axis_range_name = "outliers_removal_axis", min_val = projectors.HistAxisRange.apply_func_to_find_bin(None, 1), max_val = projectors.HistAxisRange.apply_func_to_find_bin(ROOT.TAxis.GetNbins), ) ) # Perform the actual projection and return the output. projector.project() return output_object.output # If we already have a 1D hist, just return that existing hist. return hist
900,132
Remove outliers from a given histogram. Args: hist: Histogram to check for outliers. outliers_start_index: Index in the truth axis where outliers begin. outliers_removal_axis: Axis along which outliers removal will be performed. Usually the particle level axis. Returns: None. The histogram is modified in place.
def _remove_outliers_from_hist(hist: Hist, outliers_start_index: int, outliers_removal_axis: OutliersRemovalAxis) -> None:
    # Use on TH1, TH2, and TH3 since we don't start removing immediately, but instead only after the limit
    if outliers_start_index > 0:
        #logger.debug("Removing outliers")
        # Check for values above which they should be removed by translating the global index
        x = ctypes.c_int(0)
        y = ctypes.c_int(0)
        z = ctypes.c_int(0)
        # Maps axis to values
        # This is kind of dumb, but it works.
        outliers_removal_axis_values: Dict[OutliersRemovalAxis, ctypes.c_int] = {
            projectors.TH1AxisType.x_axis: x,
            projectors.TH1AxisType.y_axis: y,
            projectors.TH1AxisType.z_axis: z,
        }
        for index in range(0, hist.GetNcells()):
            # Get the bin x, y, z from the global bin
            hist.GetBinXYZ(index, x, y, z)
            # Watch out for any problems
            if hist.GetBinContent(index) < hist.GetBinError(index):
                logger.warning(f"Bin content < error. Name: {hist.GetName()}, Bin content: {hist.GetBinContent(index)}, Bin error: {hist.GetBinError(index)}, index: {index}, ({x.value}, {y.value})")
            if outliers_removal_axis_values[outliers_removal_axis].value >= outliers_start_index:
                #logger.debug("Cutting for index {}. x bin {}. Cut index: {}".format(index, x, cutIndex))
                hist.SetBinContent(index, 0)
                hist.SetBinError(index, 0)
    else:
        logger.info(f"Hist {hist.GetName()} did not have any outliers to cut")
900,135
Create an empty SQLite database for library spectra. Example: >>> from msp2db.db import create_db >>> db_pth = 'library.db' >>> create_db(file_pth=db_pth) Args: file_pth (str): File path for SQLite database
def create_db(file_pth): conn = sqlite3.connect(file_pth) c = conn.cursor() c.execute('DROP TABLE IF EXISTS library_spectra_source') c.execute( ) c.execute('DROP TABLE IF EXISTS metab_compound') c.execute() c.execute('DROP TABLE IF EXISTS library_spectra_meta') c.execute( ) c.execute('DROP TABLE IF EXISTS library_spectra') c.execute( ) c.execute('DROP TABLE IF EXISTS library_spectra_annotation') c.execute( )
900,163
Get a connection to a SQL database. Can be used for a SQLite, MySQL or Django MySQL database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') If using "mysql", mysql.connector needs to be installed. If using "django_mysql", Django needs to be installed. Args: db_type (str): Type of database; can either be "sqlite", "mysql" or "django_mysql" db_pth (str): Path to the SQLite database file user (str): MySQL user password (str): MySQL password name (str): MySQL database name Returns: sql connection object
def get_connection(db_type, db_pth, user=None, password=None, name=None):
    if db_type == 'sqlite':
        print(db_pth)
        conn = sqlite3.connect(db_pth)
    elif db_type == 'mysql':
        import mysql.connector
        conn = mysql.connector.connect(user=user, password=password, database=name)
    elif db_type == 'django_mysql':
        from django.db import connection as conn
    else:
        # Raise rather than fall through with conn undefined
        raise ValueError('unsupported database type: {}, choices are "sqlite", "mysql" or "django_mysql"'.format(db_type))

    return conn
900,164