docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Initialization method. Args: data (list of list of int/float): 2-dim array. entities (list): list of entities. categories (list): list of the categories (one per entity).
def __init__(self, data, entities=None, categories=None): self.data = data if entities is None: entities = self.default_entities() self.entities = entities if categories is None: categories = [] self.categories = categories self.validate(...
894,666
An internal helper to create a page for a single module. This will automatically generate the needed RSF to document the module and save the module to its own page in its appropriate location. Args: mod (module): The single module to document as its own page showprivate ...
def _ProduceSingleContent(self, mod, showprivate=False, showinh=False): try: all = mod[1].__all__ except AttributeError: raise RuntimeError('Module (%s) MUST have `__all__` defined.' % mod[1].__name__) try: name = mod[1].__displayname__ except...
894,689
An internal helper to generate all of the pages for a given package Args: package (module): The top-level package to document showprivate (bool): A flag for whether or not to display private members nested (bool): Foor internal use ONLY Returns: str: The...
def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False): def checkNoNested(mod): try: all = mod.__all__ except AttributeError: return False mems = inspect.getmembers(mod, inspect.ismodule) mems...
894,691
Generates all of the documentation for given packages and appends new tocrees to the index. All documentation pages will be under the set relative path. Args: packages (list(module)): A package or list of packages that contain submodules to document showprivate (bool): A...
def _DocPackageFromTop(self, packages, showprivate=False, showinh=False): appIndex = '' if not isinstance(packages, list): packages = [packages] if os.path.exists('content'): shutil.rmtree('content') os.makedirs('content') appIndex += r % ('API ...
894,692
This is the high level API to use to generate documentation pages for any given package(s). Args: packages (list(module)): A list of packages that contain submodules to document index_base (str): The index page file name. This content will be appended showprivate (bool): A f...
def DocumentPackages(self, packages, index_base=None, showprivate=False, notify=True, showinh=False, intro_pages=None, append_material=None, extra=None): if index_base is None: gram = '' if isinstance(packages, list) and len(pack...
894,694
The the closest available size for specified image type. Arguments: file_path (:py:class:`str`): The image file path. type_ (:py:class:`str`): The type of image to create a URL for, (``'poster'`` or ``'profile'``). target_size (:py:class:`int`): The size of image to ai...
def _create_image_url(self, file_path, type_, target_size): if self.image_config is None: logger.warning('no image configuration available') return return ''.join([ self.image_config['secure_base_url'], self._image_size(self.image_config, type_, t...
894,925
Create a model instance Arguments: json (:py:class:`dict`): The parsed JSON data. image_config (:py:class:`dict`): The API image configuration data. Returns: :py:class:`BaseModel`: The model instance.
def from_json(cls, json, image_config=None): cls.image_config = image_config return cls(**{ attr: json.get(attr if key is None else key) for attr, key in cls.JSON_MAPPING.items() })
894,926
Find the closest available size for specified image type. Arguments: image_config (:py:class:`dict`): The image config data. type_ (:py:class:`str`): The type of image to create a URL for, (``'poster'`` or ``'profile'``). target_size (:py:class:`int`): The size of imag...
def _image_size(image_config, type_, target_size): return min( image_config['{}_sizes'.format(type_)], key=lambda size: (abs(target_size - int(size[1:])) if size.startswith('w') or size.startswith('h') else 999), ...
894,927
Initialization method. Args: config_dict (dict): the configuration as a dictionary.
def __init__(self, config_dict=None): self.config_dict = deepcopy(config_dict) self.plugins = Config.load_installed_plugins() self.analysis_groups = [] if not config_dict: return analysis = config_dict.get('analysis', {}) if isinstance(analysis, di...
895,033
Inflate a list of strings/dictionaries to a list of plugin instances. Args: plugin_list (list): a list of str/dict. inflate_plugin (method): the method to inflate the plugin. Returns: list: a plugin instances list. Raises: ValueError: when a dic...
def inflate_plugin_list(plugin_list, inflate_plugin): plugins = [] for plugin_def in plugin_list: if isinstance(plugin_def, str): try: plugins.append(inflate_plugin(plugin_def)) except PluginNotFoundError as e: ...
895,038
Inflate a list of strings/dictionaries to a list of plugin instances. Args: plugin_dict (dict): a dict of dict. inflate_plugin (method): the method to inflate the plugin. Returns: list: a plugin instances list.
def inflate_plugin_dict(plugin_dict, inflate_plugin): plugins = [] for identifier, definition in plugin_dict.items(): try: plugins.append(inflate_plugin(identifier, definition)) except PluginNotFoundError as e: logger.error('Could not impo...
895,039
Inflate a no-data checker from a basic definition. Args: identifier (str): the no-data checker identifier / name. definition (bool/dict): a boolean acting as "passes" or a full dict definition with "passes" and "allow_failure". Returns: Checker: a ch...
def inflate_nd_checker(identifier, definition): if isinstance(definition, bool): return Checker(name=identifier, passes=definition) elif isinstance(definition, dict): return Checker(definition.pop('name', identifier), **definition) else: raise ValueEr...
895,040
Return the plugin corresponding to the given identifier and type. Args: identifier (str): identifier of the plugin. cls (str): one of checker / provider. Returns: Checker/Provider: plugin class.
def get_plugin(self, identifier, cls=None): if ((cls is None or cls == 'provider') and identifier in self.available_providers): return self.available_providers[identifier] elif ((cls is None or cls == 'checker') and identifier in self.available_checke...
895,041
Inflate a plugin thanks to it's identifier, definition and class. Args: identifier (str): the plugin identifier. definition (dict): the kwargs to instantiate the plugin with. cls (str): "provider", "checker", or None. Returns: Provider/Checker: instance ...
def inflate_plugin(self, identifier, definition=None, cls=None): cls = self.get_plugin(identifier, cls) # TODO: implement re-usability of plugins? # same instances shared across analyses (to avoid re-computing stuff) return cls(**definition or {})
895,044
Inflate multiple plugins based on a list/dict definition. Args: plugins_definition (list/dict): the plugins definitions. inflate_method (method): the method to indlate each plugin. Returns: list: a list of plugin instances. Raises: ValueError: w...
def inflate_plugins(self, plugins_definition, inflate_method): if isinstance(plugins_definition, list): return self.inflate_plugin_list(plugins_definition, inflate_method) elif isinstance(plugins_definition, dict): return self.inflate_plugin_dict(plugins_definition, infl...
895,045
Get data from the TMDb API via :py:func:`aiohttp.get`. Notes: Updates configuration (if required) on successful requests. Arguments: url (:py:class:`str`): The endpoint URL and params. Returns: :py:class:`dict`: The parsed JSON result.
async def get_data(self, url): logger.debug('making request to %r', url) with aiohttp.ClientSession() as session: async with session.get(url, headers=self.headers) as response: body = json.loads((await response.read()).decode('utf-8')) if response.sta...
895,777
Retrieve movie data by search query. Arguments: query (:py:class:`str`): Query to search for. Returns: :py:class:`list`: Possible matches.
async def find_movie(self, query): params = OrderedDict([ ('query', query), ('include_adult', False), ]) url = self.url_builder('search/movie', {}, params) data = await self.get_data(url) if data is None: return return [ Movie....
895,778
Retrieve person data by search query. Arguments: query (:py:class:`str`): Query to search for. Returns: :py:class:`list`: Possible matches.
async def find_person(self, query): url = self.url_builder( 'search/person', dict(), url_params=OrderedDict([ ('query', query), ('include_adult', False) ]), ) data = await self.get_data(url) if data is None: ...
895,779
Retrieve movie data by ID. Arguments: id_ (:py:class:`int`): The movie's TMDb ID. Returns: :py:class:`~.Movie`: The requested movie.
async def get_movie(self, id_): url = self.url_builder( 'movie/{movie_id}', dict(movie_id=id_), url_params=OrderedDict(append_to_response='credits'), ) data = await self.get_data(url) if data is None: return return Movie.fr...
895,780
Retrieve person data by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. Returns: :py:class:`~.Person`: The requested person.
async def get_person(self, id_): data = await self._get_person_json( id_, OrderedDict(append_to_response='movie_credits') ) return Person.from_json(data, self.config['data'].get('images'))
895,781
Retrieve raw person JSON by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. url_params (:py:class:`dict`): Any additional URL parameters. Returns: :py:class:`dict`: The JSON data.
async def _get_person_json(self, id_, url_params=None): url = self.url_builder( 'person/{person_id}', dict(person_id=id_), url_params=url_params or OrderedDict(), ) data = await self.get_data(url) return data
895,782
Randomly select a popular person. Notes: Requires at least two API calls. May require three API calls if the randomly-selected index isn't within the first page of required data. Arguments: limit (:py:class:`int`, optional): How many of the most popu...
async def get_random_popular_person(self, limit=500): index = random.randrange(limit) data = await self._get_popular_people_page() if data is None: return if index >= len(data['results']): # result is not on first page page, index = self._calc...
895,783
Get a specific page of popular person data. Arguments: page (:py:class:`int`, optional): The page to get. Returns: :py:class:`dict`: The page data.
async def _get_popular_people_page(self, page=1): return await self.get_data(self.url_builder( 'person/popular', url_params=OrderedDict(page=page), ))
895,784
Determine the location of a given index in paged data. Arguments: index (:py:class:`int`): The overall index. data: (:py:class:`dict`) The first page of data. Returns: :py:class:`tuple`: The location of that index, in the format ``(page, index_in_page)``.
def _calculate_page_index(index, data): if index > data['total_results']: raise ValueError('index not in paged data') page_length = len(data['results']) return (index // page_length) + 1, (index % page_length) - 1
895,785
Commit the shared remote folder data into local config.xml file 1. Update the remote_folder path and label 2. Append the remote_folder to config folders list Args: remote_folder(folder): syncthing folder object local_path: existing local path
def acknowledge(self, **kwargs): device_id = kwargs['device_id'] config = self.get_config() # Client - Client if 'r_folder_id' in kwargs: r_folder_id = kwargs['r_folder_id'] remote_folder = syncthing_adt.Folder( id=r_folder_id, label=kwargs['label'], path=...
895,875
Set level of logging for all loggers. Args: level (int): level of logging.
def set_level(level): Logger.level = level for logger in Logger.loggers.values(): logger.setLevel(level)
895,966
Return a logger. Args: name (str): name to pass to the logging module. level (int): level of logging. fmt (str): format string. Returns: logging.Logger: logger from ``logging.getLogger``.
def get_logger(name, level=None, fmt=':%(lineno)d: %(message)s'): if name not in Logger.loggers: if Logger.level is None and level is None: Logger.level = level = logging.ERROR elif Logger.level is None: Logger.level = level elif level...
895,967
Check if given file exists and is a regular file. Args: value (str): path to the file. Raises: argparse.ArgumentTypeError: if not valid. Returns: str: original value argument.
def valid_file(value): if not value: raise argparse.ArgumentTypeError("'' is not a valid file path") elif not os.path.exists(value): raise argparse.ArgumentTypeError( "%s is not a valid file path" % value) elif os.path.isdir(value): raise argparse.ArgumentTypeError( ...
896,115
Find the offsets in a byte code which are start of lines in the source. Generate pairs (offset, lineno) as described in Python/compile.c. Arguments: code: code object. Yields: Address and line number pairs.
def _findlinestarts(code): byte_increments = [ord(c) for c in code.co_lnotab[0::2]] line_increments = [ord(c) for c in code.co_lnotab[1::2]] lastlineno = None lineno = code.co_firstlineno addr = 0 for byte_incr, line_incr in zip(byte_increments, line_increments): if byte_incr: if lineno != las...
896,197
Check if matrix and its mediation matrix are compliant. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. complete_mediation_matrix (list of list of int): 2-dim array Returns: bool: True if compliant, else False
def matrices_compliance(dsm, complete_mediation_matrix): matrix = dsm.data rows_dep_matrix = len(matrix) cols_dep_matrix = len(matrix[0]) rows_med_matrix = len(complete_mediation_matrix) cols_med_matrix = len(complete_mediation_matrix[0]) if (rows_dep_matrix != ...
896,234
Check if matrix and its mediation matrix are compliant. It means that number of dependencies for each (line, column) is either 0 if the mediation matrix (line, column) is 0, or >0 if the mediation matrix (line, column) is 1. Args: dsm (:class:`DesignStructureMatrix`): the D...
def check(self, dsm, **kwargs): # generate complete_mediation_matrix according to each category med_matrix = CompleteMediation.generate_mediation_matrix(dsm) return CompleteMediation.matrices_compliance(dsm, med_matrix)
896,235
Check economy of mechanism. As first abstraction, number of dependencies between two modules < 2 * the number of modules (dependencies to the framework are NOT considered). Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. simplicity_factor (int): si...
def check(self, dsm, simplicity_factor=2, **kwargs): # economy_of_mechanism economy_of_mechanism = False message = '' data = dsm.data categories = dsm.categories dsm_size = dsm.size[0] if not categories: categories = ['appmodule'] * dsm_size ...
896,236
Check least common mechanism. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. independence_factor (int): if the maximum dependencies for one module is inferior or equal to the DSM size divided by the independence factor, then this criterion ...
def check(self, dsm, independence_factor=5, **kwargs): # leastCommonMechanismMatrix least_common_mechanism = False message = '' # get the list of dependent modules for each module data = dsm.data categories = dsm.categories dsm_size = dsm.size[0] ...
896,237
Check layered architecture. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if layered architecture else False, messages
def check(self, dsm, **kwargs): layered_architecture = True messages = [] categories = dsm.categories dsm_size = dsm.size[0] if not categories: categories = ['appmodule'] * dsm_size for i in range(0, dsm_size - 1): for j in range(i + 1, ...
896,238
Check code clean. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if code clean else False, messages
def check(self, dsm, **kwargs): logger.debug('Entities = %s' % dsm.entities) messages = [] code_clean = True threshold = kwargs.pop('threshold', 1) rows, _ = dsm.size for i in range(0, rows): if dsm.data[i][0] > threshold: messages.app...
896,239
Run the analysis. Generate data from each provider, then check these data with every checker, and store the analysis results. Args: verbose (bool): whether to immediately print the results or not.
def run(self, verbose=True): self.results.clear() for analysis_group in self.config.analysis_groups: if analysis_group.providers: for provider in analysis_group.providers: logger.info('Run provider %s', provider.identifier) pr...
896,485
Initialization method. Args: name (str): the group name. description (str): the group description. providers (list): the list of providers. checkers (list): the list of checkers.
def __init__(self, name=None, description=None, providers=None, checkers=None): self.name = name self.description = description self.providers = providers or [] self.checkers = checkers or [] self.results = []
896,488
Initialization method. Args: group (AnalysisGroup): parent group. provider (Provider): parent Provider. checker (Checker): parent Checker. code (int): constant from Checker class. messages (str): messages string.
def __init__(self, group, provider, checker, code, messages): self.group = group self.provider = provider self.checker = checker self.code = code self.messages = messages
896,489
Calculates the sum of the squared differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the squar...
def squared_error(y, y_pred): y, y_pred = convert_assert(y, y_pred) return np.sum((y - y_pred) ** 2)
896,518
Calculates the sum of the differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, sum of the differences b...
def absolute_error(y, y_pred): y, y_pred = convert_assert(y, y_pred) return np.sum(y - y_pred)
896,519
Handles HTTP error codes for the given request Raises: AuthenticationError on the appropriate 4** errors ServerError if the response is not an ok (2**) Arguments: r -- The request result
def handleresult(self, r): if r.status_code >= 400 and r.status_code < 500: msg = r.json() raise AuthenticationError(str(msg["code"]) + ": " + msg["msg"] + " (" + msg["ref"] + ")") elif r.status_code > 300: err = None ...
896,581
Initialization method. Args: allow_failure (bool): still pass if failed or not. arguments (dict): arguments passed to the check method when run.
def __init__(self, name=None, description=None, hint=None, allow_failure=False, passes=None, arguments=None): if name: self.name = name if description: self.description = description if hint: self.hint = hint self.allow_failu...
896,656
Run the check method and format the result for analysis. Args: data (DSM/DMM/MDM): DSM/DMM/MDM instance to check. Returns: tuple (int, str): status constant from Checker class and messages.
def run(self, data): result_type = namedtuple('Result', 'code messages') if self.passes is True: result = result_type(Checker.Code.PASSED, '') elif self.passes is False: if self.allow_failure: result = result_type(Checker.Code.IGNORED, '') ...
896,657
Initialization method. Args: arguments (dict): arguments that will be used for get_data method.
def __init__(self, name=None, description=None, arguments=None): if name: self.name = name if description: self.description = description self.arguments = arguments or {} self.data = None
896,658
Return a pretty formatted string given some text. Args: description (str): string to format. wrap_at (int): maximum length of a line. indent (int): level of indentation. Returns: str: pretty formatted string.
def pretty_description(description, wrap_at=None, indent=0): if wrap_at is None or wrap_at < 0: width = console_width(default=79) if wrap_at is None: wrap_at = width else: wrap_at += width indent = ' ' * indent text_wrapper = textwrap.TextWrapper( ...
896,706
Implement get_dsm method from Provider class. Parse CSV to return an instance of DSM. Args: file_path (str/fd): path or file descriptor. delimiter (str): character(s) used as delimiter for columns. categories_delimiter (str): character(s) used as del...
def get_data(self, file_path=sys.stdin, delimiter=',', categories_delimiter=None): if file_path == sys.stdin: logger.info('Read data from standard input') lines = [line.replace('\n', '') for line in file_path] else: ...
896,717
Main fit method for SAR. Expects the dataframes to have row_id, col_id columns which are indexes, i.e. contain the sequential integer index of the original alphanumeric user and item IDs. Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default. Arg...
def fit( self, df, similarity_type="jaccard", time_decay_coefficient=30, time_now=None, timedecay_formula=False, threshold=1, ): # threshold - items below this number get set to zero in coocurrence counts assert threshold > 0 ...
897,122
Prepare test set for C++ SAR prediction code. Find all items the test users have seen in the past. Arguments: test (pySpark.DataFrame): input dataframe which contains test users.
def get_user_affinity(self, test): test.createOrReplaceTempView(self.f("{prefix}df_test")) query = self.f( "SELECT DISTINCT {col_user} FROM {prefix}df_test CLUSTER BY {col_user}" ) df_test_users = self.spark.sql(query) df_test_users.write.mode("overwrite")....
897,123
Recommend top K items for all users which are in the test set. Args: test: test Spark dataframe top_k: top n items to return remove_seen: remove items test users have already seen in the past from the recommended set.
def recommend_k_items_slow(self, test, top_k=10, remove_seen=True): # TODO: remove seen if remove_seen: raise ValueError("Not implemented") self.get_user_affinity(test)\ .write.mode("overwrite")\ .saveAsTable(self.f("{prefix}user_affinity")) ...
897,125
Creates a new injector. All provided keys will be injectable. Arguments: parent -- Reserved name, used for sub-injectors.
def __init__(self, parent=None, **kwargs): self.___parent = parent self.___subs = [] self.___args = kwargs self.___close_list = [] self.___closed = False self.___initialized = set() for item in kwargs.values(): self._record_closeable(item) ...
897,657
Fits the given model to the data and labels provided. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. Returns: -------- self : insta...
def fit(self, X, y): X = np.array(X, dtype=np.float32) y = np.array(y, dtype=np.float32) assert X.shape[0] == y.shape[0] return X, y
897,788
Shortcut to `model.fit(X, y); return model.predict(X_)`. Parameters: ----------- X : matrix, shape (n_samples, n_features) The samples, the train data. y : vector, shape (n_samples,) The target labels. X_ : matrix, shape (m_samples, m_features) ...
def fit_predict(self, X, y, X_): self.fit(X, y) return self.predict(X_)
897,789
Removes a job from the job queue, or from being executed. Args: options (list of str, optional): A list of command line options for the condor_rm command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_rm.html. Defaults t...
def remove(self, options=[], sub_job_num=None): args = ['condor_rm'] args.extend(options) job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id) args.append(job_id) out, err = self._execute(args) return out,err
897,863
Returns the contract data for a given contract Args: contract_name (str): Name of the contract to return. Returns: str, str: ABI and bytecode of the contract
def get_contract_data(self, contract_name): contract_data_path = self.output_dir + '/{0}.json'.format(contract_name) with open(contract_data_path, 'r') as contract_data_file: contract_data = json.load(contract_data_file) abi = contract_data['abi'] bytecode = contra...
898,099
A shortcut for the 'set' method. Args: key (str): The name of the attribute to set. value (str): The value to assign to 'key'.
def __setattr__(self, key, value): if key in self.__dict__ or '_' + key in self.__dict__: object.__setattr__(self, key, value) else: self.set(key, value)
898,138
Wait for the job, or a sub-job to complete. Args: options (list of str, optional): A list of command line options for the condor_wait command. For details on valid options see: http://research.cs.wisc.edu/htcondor/manual/current/condor_wait.html. Defaults to an empty...
def wait(self, options=[], sub_job_num=None): args = ['condor_wait'] args.extend(options) job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id) if self._remote: abs_log_file = self.log_file else: abs_log_file =...
898,144
Set the value of an attribute in the submit description file. The value can be passed in as a Python type (i.e. a list, a tuple or a Python boolean). The Python values will be reformatted into strings based on the standards described in the HTCondor manual: http://research.cs.wisc.edu/htcondor/...
def set(self, attr, value): def escape_new_syntax(value, double_quote_escape='"'): value = str(value) value = value.replace("'", "''") value = value.replace('"', '%s"' % double_quote_escape) if ' ' in value or '\t' in value: value = "'%s'...
898,146
Recursively replaces references to other attributes with their value. Args: attribute (str): The name of the attribute to resolve. Returns: str: The resolved value of 'attribute'.
def _resolve_attribute(self, attribute): value = self.attributes[attribute] if not value: return None resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value) return resolved_value
898,151
Replaces a reference to an attribute with the value of the attribute. Args: match (re.match object): A match object containing a match to a reference to an attribute.
def _resolve_attribute_match(self, match): if match.group(1) == 'cluster': return str(self.cluster_id) return self.get(match.group(1), match.group(0))
898,152
Get bin edges from a ROOT hist axis. Note: Doesn't include over- or underflow bins! Args: axis (ROOT.TAxis): Axis from which the bin edges should be extracted. Returns: Array containing the bin edges.
def get_bin_edges_from_axis(axis) -> np.ndarray: # Don't include over- or underflow bins bins = range(1, axis.GetNbins() + 1) # Bin edges bin_edges = np.empty(len(bins) + 1) bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins] bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins()) return...
898,254
MPDS API consumer constructor Args: api_key: (str) The MPDS API key, or None if the MPDS_KEY envvar is set endpoint: (str) MPDS API gateway URL Returns: None
def __init__(self, api_key=None, endpoint=None, dtype=None, verbose=None, debug=None): self.api_key = api_key if api_key else os.environ['MPDS_KEY'] self.network = httplib2.Http() self.endpoint = endpoint or self.endpoint self.dtype = dtype or MPDSDataTypes.PEER_REVIEWED ...
898,426
Calculate the number of entries matching the keyword(s) specified Args: search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"}, documented at https://developer.mpds.io/#Categories phases: (list) Phase IDs, according to the MPDS distinct phases concept ...
def count_data(self, search, phases=None, **kwargs): result = self._request(search, phases=phases, pagesize=10) if result['error']: raise APIError(result['error'], result.get('code', 0)) if result['npages'] > self.maxnpages: warnings.warn( "\r\n...
898,429
Download single image from Landsat on Google Storage Arguments: row - string in this format xxx, e.g. 003 path - string in this format xxx, e.g. 003 name - zip file name without .tar.bz e.g. LT81360082013127LGN01 sat_type - e.g. L7, L8, ...
def single_download(self,username,password,download ,name,ZIP_DIR): try: request = urllib2.Request(download) base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '') request.add_header("Authorization", "Basic %s" % base64string) ...
898,619
Download single image from Landsat on Google Storage Arguments: row - string in this format xxx, e.g. 003 path - string in this format xxx, e.g. 003 name - zip file name without .tar.bz e.g. LT81360082013127LGN01 sat_type - e.g. L7, L8, ...
def checkifDownloadExist(self,username,password,download , name): try: request = urllib2.Request(download) base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '') request.add_header("Authorization", "Basic %s" % base64string) ...
898,620
Create a new virtual environment. Args: python (str): The name or path of a python interpreter to use while creating the virtual environment. system_site (bool): Whether or not use use the system site packages within the virtual environment. Default is Fa...
def create(self, python=None, system_site=False, always_copy=False): command = 'virtualenv' if python: command = '{0} --python={1}'.format(command, python) if system_site: command = '{0} --system-site-packages'.format(command) if always_copy: ...
898,621
Merge two lists without duplicating items Args: list_a: list list_b: list Returns: New list with deduplicated items from list_a and list_b
def list_merge(list_a, list_b): #return list(collections.OrderedDict.fromkeys(list_a + list_b)) #result = list(list_b) result = [] for item in list_a: if not item in result: result.append(item) for item in list_b: if not item in result: result.append(item...
898,695
Flatten the dictionary keys are separated by separator Arguments: dictionary {dict} -- The dictionary to be flattened. Keyword Arguments: separator {str} -- The separator to use (default is '.'). It will crush items with key conflicts. prefix {str} -- Used for recursive calls. ...
def flatten(dictionary, separator='.', prefix=''): new_dict = {} for key, value in dictionary.items(): new_key = prefix + separator + key if prefix else key if isinstance(value, collections.MutableMapping): new_dict.update(flatten(value, separator, new_key)) elif isinst...
898,784
Adjust the virtual environment settings and optional move it. Args: source (str): Path to the existing virtual environment. destination (str): Desired path of the virtual environment. move (bool): Whether or not to actually move the files. Default False.
def relocate(source, destination, move=False): venv = api.VirtualEnvironment(source) if not move: venv.relocate(destination) return None venv.move(destination) return None
898,785
Calculate the moving overage over an array. Algorithm from: https://stackoverflow.com/a/14314054 Args: arr (np.ndarray): Array over which to calculate the moving average. n (int): Number of elements over which to calculate the moving average. Default: 3 Returns: np.ndarray: Moving ...
def moving_average(arr: np.ndarray, n: int = 3) -> np.ndarray: ret = np.cumsum(arr, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n
898,980
Recursive ``getattar``. This can be used as a drop in for the standard ``getattr(...)``. Credit to: https://stackoverflow.com/a/31174427 Args: obj: Object to retrieve the attribute from. attr: Name of the attribute, with each successive attribute separated by a ".". Returns: Th...
def recursive_getattr(obj: Any, attr: str, *args) -> Any: def _getattr(obj, attr): return getattr(obj, attr, *args) return functools.reduce(_getattr, [obj] + attr.split('.'))
898,981
Recursively retrieve an item from a nested dict. Credit to: https://stackoverflow.com/a/52260663 Args: d: Mapping of strings to objects. keys: Names of the keys under which the object is stored. Can also just be a single string. Returns: The object stored under the keys. Raises...
def recursive_getitem(d: Mapping[str, Any], keys: Union[str, Sequence[str]]) -> Any: # If only a string, then just just return the item if isinstance(keys, str): return d[keys] else: return functools.reduce(operator.getitem, keys, d)
898,983
Get a Histogram1D associated with the selected jet and track pt bins. This is often used to retrieve data for fitting. Args: observables (dict): The observables from which the hist should be retrieved. track_pt_bin (int): Track pt bin of the desired hist. jet_ptbin (int): Jet pt bin of...
def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D: for name, observable in observables.items(): if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin: return histogram.Histogram1D.from_existing_hist(observable....
898,984
Used to create YapconfItems from a specification dictionary. Args: specification (dict): The specification used to initialize ``YapconfSpec`` env_prefix (str): Prefix to add to environment names separator (str): Separator for nested items parent_names (list): Parents nam...
def from_specification(specification, env_prefix=None, separator='.', parent_names=None): items = {} for item_name, item_info in six.iteritems(specification): names = copy.copy(parent_names) if parent_names else [] items[item_name] = _generate_item(item_name, ...
899,243
Update our current default with the new_default. Args: new_default: New default to set. respect_none: Flag to determine if ``None`` is a valid value.
def update_default(self, new_default, respect_none=False):
    """Update our current default with the new_default.

    Args:
        new_default: New default to set.
        respect_none: Flag to determine if ``None`` is a valid value.
    """
    # A non-None value always wins; None is only applied when respect_none
    # says it is meaningful. (Equivalent to the original two-branch form.)
    if new_default is not None or respect_none:
        self.default = new_default
899,247
Add this item as an argument to the given parser. Args: parser (argparse.ArgumentParser): The parser to add this item to. bootstrap: Flag to indicate whether you only want to mark this item as required or not
def add_argument(self, parser, bootstrap=False):
    """Add this item as an argument to the given parser.

    Args:
        parser (argparse.ArgumentParser): The parser to add this item to.
        bootstrap: Flag to indicate whether you only want to mark this item
            as required or not.
    """
    # Items not exposed on the CLI are silently skipped.
    if not self.cli_expose:
        return
    names = self._get_argparse_names(parser.prefix_chars)
    options = self._get_argparse_kwargs(bootstrap)
    parser.add_argument(*names, **options)
899,249
Converts all 'Truthy' values to True and 'Falsy' values to False. Args: value: Value to convert label: Label of the config which this item was found. Returns:
def convert_config_value(self, value, label): if isinstance(value, six.string_types): value = value.lower() if value in self.TRUTHY_VALUES: return True elif value in self.FALSY_VALUES: return False else: raise YapconfValueError("C...
899,265
Load an analysis configuration from a file. Args: yaml: YAML object to use in loading the configuration. filename: Filename of the YAML configuration file. Returns: dict-like object containing the loaded configuration
def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
    """Load an analysis configuration from a file.

    Args:
        yaml: YAML object to use in loading the configuration.
        filename: Filename of the YAML configuration file.
    Returns:
        dict-like object containing the loaded configuration.
    """
    with open(filename, "r") as f:
        return yaml.load(f)
899,319
Iterate over an analysis dictionary with selected attributes. Args: analysis_objects: Analysis objects dictionary. selections: Keyword arguments used to select attributes from the analysis dictionary. Yields: object: Matching analysis object.
def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any], **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]: for key_index, obj in analysis_objects.items(): # If selections is empty, we return every object. If it's not empty, then we only want to return # objects which ...
899,328
Save the CellDataFrame to an hdf5 file. Args: path (str): the path to save to key (str): the name of the location to save it to mode (str): write mode
def to_hdf(self,path,key,mode='a'): pd.DataFrame(self.serialize()).to_hdf(path,key,mode=mode,format='table',complib='zlib',complevel=9) f = h5py.File(path,'r+') f[key].attrs["microns_per_pixel"] = float(self.microns_per_pixel) if self.microns_per_pixel is not None else np.nan f....
899,441
Add mutually exclusive phenotypes to the scored calls Args: phenotypes (list): a list of phenotypes to add to scored calls. if none or not set, add them all overwrite (bool): if True allow the overwrite of a phenotype, if False, the phenotype must not exist in the scored calls ...
def phenotypes_to_scored(self,phenotypes=None,overwrite=False): if not self.is_uniform(): raise ValueError("inconsistent phenotypes") if phenotypes is None: phenotypes = self.phenotypes elif isinstance(phenotypes,str): phenotypes = [phenotypes] def _post...
899,442
Concatenate multiple CellDataFrames. Throws an error if the microns_per_pixel is not uniform across the frames Args: array_like (list): a list of CellDataFrames with 1 or more CellDataFrames Returns: CellDataFrame
def concat(self,array_like): arr = list(array_like) if len(set([x.microns_per_pixel for x in arr])) != 1: raise ValueError("Multiple microns per pixel set") cdf = CellDataFrame(pd.concat([pd.DataFrame(x) for x in arr])) cdf.microns_per_pixel = arr[0].microns_per_pixe...
899,443
Read a CellDataFrame from an hdf5 file. Args: path (str): the path to read from key (str): the name of the location to read from Returns: CellDataFrame
def read_hdf(cls,path,key=None): df = pd.read_hdf(path,key) df['scored_calls'] = df['scored_calls'].apply(lambda x: json.loads(x)) df['channel_values'] = df['channel_values'].apply(lambda x: json.loads(x)) df['regions'] = df['regions'].apply(lambda x: json.loads(x)) df['...
899,444
Use the segmented images to create per-image graphics Args: verbose (bool): output more details if true Returns: SegmentationImages: returns a class used to construct the image graphics
def segmentation_images(self, *args, **kwargs):
    """Use the segmented images to create per-image graphics.

    Args:
        verbose (bool): output more details if true

    Returns:
        SegmentationImages: returns a class used to construct the image graphics

    Raises:
        ValueError: If the db has not been set on this CellDataFrame.
    """
    if not self.db:
        raise ValueError("Need to set db")
    segs = SegmentationImages.read_cellframe(self, *args, **kwargs)
    # BUG FIX: the original assigned ``segs.microns_per_pixel`` to itself, a
    # no-op. Propagate the calibration from this frame instead — presumably
    # the intent, matching how ``concat`` copies microns_per_pixel onto its
    # result.
    segs.microns_per_pixel = self.microns_per_pixel
    return segs
899,448
Return a class that can be used to create honeycomb plots Args: subsets (list): list of SubsetLogic objects step_pixels (int): distance between hexagons max_distance_pixels (int): the distance from each point by which to calculate the quantity of the phenotype for that area ...
def cartesian(self,subsets=None,step_pixels=100,max_distance_pixels=150,*args,**kwargs): n = Cartesian.read_cellframe(self,subsets=subsets,step_pixels=step_pixels,max_distance_pixels=max_distance_pixels,prune_neighbors=False,*args,**kwargs) if 'measured_regions' in kwargs: n.measured_regions = ...
899,451
Return a class that can be used to access count densities Args: measured_regions (pandas.DataFrame): Dataframe of regions that are being measured (defaults to all the regions) measured_phenotypes (list): List of phenotypes present (defaults to all the phenotypes) minimum_reg...
def counts(self,*args,**kwargs): n = Counts.read_cellframe(self,prune_neighbors=False) if 'measured_regions' in kwargs: n.measured_regions = kwargs['measured_regions'] else: n.measured_regions = self.get_measured_regions() if 'measured_phenotypes' in kwargs: n.measured_phenotype...
899,452
Change the names of scored call names, input dictionary change with {<current name>:<new name>} format, new name must not already exist Args: change (dict): a dictionary of current name keys and new name values Returns: CellDataFrame: The CellDataFrame modified.
def rename_scored_calls(self, change):
    """Change the names of scored calls.

    The input dictionary uses ``{<current name>: <new name>}`` format; a new
    name must not already exist.

    Args:
        change (dict): a dictionary of current name keys and new name values

    Returns:
        CellDataFrame: The CellDataFrame modified.
    """
    renamed = self.copy()
    renamed['scored_calls'] = renamed.apply(
        lambda row: _dict_rename(row['scored_calls'], change), 1)
    return renamed
899,455
Take a name or list of scored call names and drop those from the scored calls Args: names (list): list of names to drop or a single string name to drop Returns: CellDataFrame: The CellDataFrame modified.
def drop_scored_calls(self,names): def _remove(calls,names): d = dict([(k,v) for k,v in calls.items() if k not in names]) return d if isinstance(names, str): names = [names] output = self.copy() output['scored_calls'] = output['scored_calls']....
899,457
Rename one or more input phenotypes to a single output phenotype Args: input_phenotype_labels (list): A str name or list of names to combine output_phenotype_label (list): A str name to change the phenotype names to verbose (bool): output more details Returns: ...
def collapse_phenotypes(self,input_phenotype_labels,output_phenotype_label,verbose=True): if isinstance(input_phenotype_labels,str): input_phenotype_labels = [input_phenotype_labels] bad_phenotypes = set(input_phenotype_labels)-set(self.phenotypes) if len(bad_phenotypes) > 0: raise Valu...
899,460
Combine/rename one or more input regions to a single output region Args: input_region_labels (list): A str name or list of names to combine output_region_label (list): A str name to change the phenotype names to verbose (bool): output more details Returns: ...
def combine_regions(self,input_region_labels,output_region_label,verbose=True): if isinstance(input_region_labels,str): input_region_labels = [input_region_labels] bad_regions = set(input_region_labels)-set(self.regions) if len(bad_regions) > 0: raise ValueError("Error regions(s) "+str(...
899,461
Convert binary phenotypes to mutually exclusive phenotypes. If none of the phenotypes are set, then phenotype_label becomes nan If any of the phenotypes are multiply set then it throws a fatal error. Args: phenotypes (list): a list of scored_names to convert to phenotypes ...
def scored_to_phenotype(self,phenotypes): def _apply_score(scored_calls,phenotypes): present = sorted(list(set(phenotypes)&set(scored_calls.keys()))) total = sum([scored_calls[x] for x in present]) if total > 1: raise ValueError("You cant extract phe...
899,464
Adds all items to the parser passed in. Args: parser (argparse.ArgumentParser): The parser to add all items to. bootstrap (bool): Flag to indicate whether you only want to mark bootstrapped items as required on the command-line.
def add_arguments(self, parser, bootstrap=False):
    """Adds all items to the parser passed in.

    Args:
        parser (argparse.ArgumentParser): The parser to add all items to.
        bootstrap (bool): Flag to indicate whether you only want to mark
            bootstrapped items as required on the command-line.
    """
    # Use a plain loop: the original built a throwaway list purely for the
    # side effect of each add_argument call.
    # NOTE(review): items are always fetched with bootstrap=False here while
    # ``bootstrap`` is forwarded to each item — confirm this asymmetry is
    # intended (behavior preserved from the original).
    for item in self._get_items(bootstrap=False):
        item.add_argument(parser, bootstrap)
899,921
Find an item in the specification by fully qualified name. Args: fq_name (str): Fully-qualified name of the item. Returns: The item if it is in the specification. None otherwise
def find_item(self, fq_name): names = fq_name.split(self._separator) current = self._yapconf_items for name in names: if isinstance(current, (YapconfDictItem, YapconfListItem)): current = current.children if name not in current: r...
899,923
Get a particular item in the specification. Args: name (str): The name of the item to retrieve. bootstrap (bool): Only search bootstrap items Returns (YapconfItem): A YapconfItem if it is found, None otherwise.
def get_item(self, name, bootstrap=False):
    """Get a particular item in the specification.

    Args:
        name (str): The name of the item to retrieve.
        bootstrap (bool): Only search bootstrap items.

    Returns:
        YapconfItem: A YapconfItem if it is found, None otherwise.
    """
    matches = (item for item in self._get_items(bootstrap) if item.name == name)
    return next(matches, None)
899,924
Update items defaults to the values in the new_defaults dict. Args: new_defaults (dict): A key-value pair of new defaults to be applied. respect_none (bool): Flag to indicate if ``None`` values should constitute an update to the default.
def update_defaults(self, new_defaults, respect_none=False): for key, value in six.iteritems(new_defaults): item = self.get_item(key) if item is None: raise YapconfItemNotFound("Cannot update default for {0}, " "there is ...
899,925
Create a new buffer of n chunks. Parameters: size: number of chunks chunk_size: size of each chunk ctype: string of the C type to use (defaults to float)
def __init__(self, size, chunk_size, ctype='float'): self.count = 0 # current number of chunks self.size = size # max number of chunks self.chunk_size = chunk_size # size of chunks self.ctype = ctype self.data = self._allocate(size) s...
900,021
Retrieve the mean and median from a ROOT histogram. Note: These values are not so trivial to calculate without ROOT, as they are the bin values weighted by the bin content. Args: hist: Histogram from which the values will be extract. Returns: mean, median of the histogram.
def _get_mean_and_median(hist: Hist) -> Tuple[float, float]: # Median # See: https://root-forum.cern.ch/t/median-of-histogram/7626/5 x = ctypes.c_double(0) q = ctypes.c_double(0.5) # Apparently needed to be safe(?) hist.ComputeIntegral() hist.GetQuantiles(1, x, q) mean = hist.GetMe...
900,131
Project the input histogram to the particle level axis. Args: hist: Histogram to check for outliers. outliers_removal_axis: Axis along which outliers removal will be performed. Usually the particle level axis. Returns: The histogram to check for outliers.
def _project_to_part_level(hist: Hist, outliers_removal_axis: OutliersRemovalAxis) -> Hist: # Setup the projector import ROOT if isinstance(hist, (ROOT.TH2, ROOT.TH3)): projection_information: Dict[str, Any] = {} output_object = _OutputObject(None) projector = projectors.HistPro...
900,132
Remove outliers from a given histogram. Args: hist: Histogram to check for outliers. outliers_start_index: Index in the truth axis where outliers begin. outliers_removal_axis: Axis along which outliers removal will be performed. Usually the particle level axis. Returns: ...
def _remove_outliers_from_hist(hist: Hist, outliers_start_index: int, outliers_removal_axis: OutliersRemovalAxis) -> None: # Use on TH1, TH2, and TH3 since we don't start removing immediately, but instead only after the limit if outliers_start_index > 0: #logger.debug("Removing outliers") #...
900,135
Create an empty SQLite database for library spectra. Example: >>> from msp2db.db import create_db >>> db_pth = 'library.db' >>> create_db(file_pth=db_pth) Args: file_pth (str): File path for SQLite database
def create_db(file_pth): conn = sqlite3.connect(file_pth) c = conn.cursor() c.execute('DROP TABLE IF EXISTS library_spectra_source') c.execute( ) c.execute('DROP TABLE IF EXISTS metab_compound') c.execute() c.execute('DROP TABLE IF EXISTS library_spectra_meta') c.ex...
900,163
Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') If using "mysql" mysql.connector needs to be installed. If using "django_mysql" Django needs to be...
def get_connection(db_type, db_pth, user=None, password=None, name=None): if db_type == 'sqlite': print(db_pth) conn = sqlite3.connect(db_pth) elif db_type == 'mysql': import mysql.connector conn = mysql.connector.connect(user=user, password=password, database=name) elif...
900,164