def estimator_values_df(run_list, estimator_list, **kwargs):
    """Get a dataframe of estimator values.

    NB when parallelised the results will not be produced in order (so
    results from some run number will not necessarily correspond to that
    number run in run_list).

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs, optional
        Name of each func in estimator_list.
    parallel: bool, optional
        Whether or not to parallelise - see parallel_utils.parallel_apply.
    save_name: str or None, optional
        See nestcheck.io_utils.save_load_result.
    save: bool, optional
        See nestcheck.io_utils.save_load_result.
    load: bool, optional
        See nestcheck.io_utils.save_load_result.
    overwrite_existing: bool, optional
        See nestcheck.io_utils.save_load_result.

    Returns
    -------
    df: pandas DataFrame
        Results table showing calculation values and diagnostics. Rows
        show different runs. Columns have titles given by estimator_names
        and show results for the different functions in estimator_list.
    """
    estimator_names = kwargs.pop(
        'estimator_names',
        ['est_' + str(i) for i in range(len(estimator_list))])
    parallel = kwargs.pop('parallel', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    values_list = pu.parallel_apply(
        nestcheck.ns_run_utils.run_estimators, run_list,
        func_args=(estimator_list,), parallel=parallel)
    df = pd.DataFrame(np.stack(values_list, axis=0))
    df.columns = estimator_names
    df.index.name = 'run'
    return df
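A minimal usage sketch (not from the source): it assumes the estimator functions nestcheck.estimators.logz and nestcheck.estimators.param_mean, and a run_list already loaded elsewhere (e.g. via nestcheck.data_processing.batch_process_data).

import nestcheck.estimators as e

# run_list: list of nested sampling run dicts, loaded elsewhere
df = estimator_values_df(
    run_list,
    estimator_list=[e.logz, e.param_mean],
    estimator_names=['logz', 'theta1_mean'],
    parallel=False)  # serial execution keeps rows in run_list order
print(df)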
def error_values_summary(error_values, **summary_df_kwargs):
    """Get summary statistics about calculation errors, including
    estimated implementation errors.

    Parameters
    ----------
    error_values: pandas DataFrame
        Of the format output by run_list_error_values (see its docstring
        for more details).
    summary_df_kwargs: dict, optional
        See pandas_functions.summary_df docstring for more details.

    Returns
    -------
    df: pandas DataFrame
        Table showing means and standard deviations of results and
        diagnostics for the different runs. Also contains estimated
        numerical uncertainties on results.
    """
    df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
    # get implementation stds
    imp_std, imp_std_unc, imp_frac, imp_frac_unc = \
        nestcheck.error_analysis.implementation_std(
            df.loc[('values std', 'value')],
            df.loc[('values std', 'uncertainty')],
            df.loc[('bootstrap std mean', 'value')],
            df.loc[('bootstrap std mean', 'uncertainty')])
    df.loc[('implementation std', 'value'), df.columns] = imp_std
    df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
    df.loc[('implementation std frac', 'value'), :] = imp_frac
    df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
    # Get implementation RMSEs (calculated using the values RMSE instead of
    # values std)
    if 'values rmse' in set(df.index.get_level_values('calculation type')):
        imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \
            nestcheck.error_analysis.implementation_std(
                df.loc[('values rmse', 'value')],
                df.loc[('values rmse', 'uncertainty')],
                df.loc[('bootstrap std mean', 'value')],
                df.loc[('bootstrap std mean', 'uncertainty')])
        df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
        df.loc[('implementation rmse', 'uncertainty'), df.columns] = \
            imp_rmse_unc
        df.loc[('implementation rmse frac', 'value'), :] = imp_frac
        df.loc[('implementation rmse frac', 'uncertainty'), :] = \
            imp_frac_unc
    # Return only the calculation types we are interested in, in order
    calcs_to_keep = ['true values', 'values mean', 'values std',
                     'values rmse', 'bootstrap std mean',
                     'implementation std', 'implementation std frac',
                     'implementation rmse', 'implementation rmse frac',
                     'thread ks pvalue mean', 'bootstrap ks distance mean',
                     'bootstrap energy distance mean',
                     'bootstrap earth mover distance mean']
    df = pd.concat([df.xs(calc, level='calculation type', drop_level=False)
                    for calc in calcs_to_keep if calc in
                    df.index.get_level_values('calculation type')])
    return df
def run_list_error_summary(run_list, estimator_list, estimator_names,
                           n_simulate, **kwargs):
    """Wrapper which runs run_list_error_values then applies
    error_values_summary to the resulting dataframe. See the docstrings
    for those two functions for more details and for descriptions of
    parameters and output.
    """
    true_values = kwargs.pop('true_values', None)
    include_true_values = kwargs.pop('include_true_values', False)
    include_rmse = kwargs.pop('include_rmse', False)
    error_values = run_list_error_values(run_list, estimator_list,
                                         estimator_names, n_simulate,
                                         **kwargs)
    return error_values_summary(error_values, true_values=true_values,
                                include_true_values=include_true_values,
                                include_rmse=include_rmse)
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
                 **kwargs):
    """Computes a data frame of bootstrap resampled values.

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs
        Name of each func in estimator_list.
    n_simulate: int
        Number of bootstrap replications to use on each run.
    kwargs:
        Kwargs to pass to parallel_apply.

    Returns
    -------
    bs_values_df: pandas data frame
        Columns represent estimators and rows represent runs.
        Each cell contains a 1d array of bootstrap resampled values for
        the run and estimator.
    """
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
    assert len(estimator_list) == len(estimator_names), (
        'len(estimator_list) = {0} != len(estimator_names) = {1}'
        .format(len(estimator_list), len(estimator_names)))
    bs_values_list = pu.parallel_apply(
        nestcheck.error_analysis.run_bootstrap_values, run_list,
        func_args=(estimator_list,),
        func_kwargs={'n_simulate': n_simulate},
        tqdm_kwargs=tqdm_kwargs, **kwargs)
    df = pd.DataFrame()
    for i, name in enumerate(estimator_names):
        df[name] = [arr[i, :] for arr in bs_values_list]
    # Check there are the correct number of bootstrap replications in each
    # cell
    for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
        assert vals_shape == (n_simulate,), (
            'Should be n_simulate=' + str(n_simulate) + ' values in '
            + 'each cell. The cell contains array with shape '
            + str(vals_shape))
    return df
def thread_values_df(run_list, estimator_list, estimator_names, **kwargs):
    """Calculates estimator values for the constituent threads of the
    input runs.

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs
        Name of each func in estimator_list.
    kwargs:
        Kwargs to pass to parallel_apply.

    Returns
    -------
    df: pandas data frame
        Columns represent estimators and rows represent runs.
        Each cell contains a 1d numpy array with length equal to the
        number of threads in the run, containing the results from
        evaluating the estimator on each thread.
    """
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})
    assert len(estimator_list) == len(estimator_names), (
        'len(estimator_list) = {0} != len(estimator_names) = {1}'
        .format(len(estimator_list), len(estimator_names)))
    # get thread results
    thread_vals_arrays = pu.parallel_apply(
        nestcheck.error_analysis.run_thread_values, run_list,
        func_args=(estimator_list,), tqdm_kwargs=tqdm_kwargs, **kwargs)
    df = pd.DataFrame()
    for i, name in enumerate(estimator_names):
        df[name] = [arr[i, :] for arr in thread_vals_arrays]
    # Check there are the correct number of thread values in each cell
    for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
        assert vals_shape == (run_list[0]['thread_min_max'].shape[0],), \
            ('Should be nlive=' + str(run_list[0]['thread_min_max'].shape[0])
             + ' values in each cell. The cell contains array with shape '
             + str(vals_shape))
    return df
def pairwise_dists_on_cols(df_in, earth_mover_dist=True, energy_dist=True):
    """Computes pairwise statistical distance measures.

    Parameters
    ----------
    df_in: pandas data frame
        Columns represent estimators and rows represent runs.
        Each data frame element is an array of values which are used as
        samples in the distance measures.
    earth_mover_dist: bool, optional
        Passed to error_analysis.pairwise_distances.
    energy_dist: bool, optional
        Passed to error_analysis.pairwise_distances.

    Returns
    -------
    df: pandas data frame
        Contains the distance values for each pair.
    """
    df = pd.DataFrame()
    for col in df_in.columns:
        df[col] = nestcheck.error_analysis.pairwise_distances(
            df_in[col].values, earth_mover_dist=earth_mover_dist,
            energy_dist=energy_dist)
    return df
def _backtick_columns(cols):
    """
    Quote the column names
    """
    def bt(s):
        b = '' if s == '*' or not s else '`'
        return [_ for _ in [b + (s or '') + b] if _]

    formatted = []
    for c in cols:
        if c[0] == '#':
            formatted.append(c[1:])
        elif c.startswith('(') and c.endswith(')'):
            # WHERE (column_a, column_b) IN ((1,10), (1,20))
            formatted.append(c)
        else:
            # backtick the former part when it meets the first dot, and
            # then all the rest
            formatted.append('.'.join(bt(c.split('.')[0])
                                      + bt('.'.join(c.split('.')[1:]))))
    return ', '.join(formatted)
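Illustrative expected outputs for _backtick_columns (a sketch, not from the source; shown as plain calls although the library uses it as an internal helper):

# _backtick_columns(['*'])           -> '*'
# _backtick_columns(['id', 'name'])  -> '`id`, `name`'
# _backtick_columns(['t.user_id'])   -> '`t`.`user_id`'
# _backtick_columns(['#uuid()'])     -> 'uuid()'   ('#' marks raw SQL)
# _backtick_columns(['(a, b)'])      -> '(a, b)'   (tuple syntax passed through)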
def _value_parser(self, value, columnname=False, placeholder='%s'):
    """
    Input: {'c1': 'v', 'c2': None, '#c3': 'uuid()'}
    Output:
    ('%s, %s, uuid()', [None, 'v'])  # insert; columnname=False
    ('`c2` = %s, `c1` = %s, `c3` = uuid()', [None, 'v'])  # update; columnname=True
    No need to transform NULL value since it's supported in execute()
    """
    if not isinstance(value, dict):
        raise TypeError('Input value should be a dictionary')

    q = []
    a = []
    for k, v in value.items():
        if k[0] == '#':  # SQL function call (key prefixed with '#'), used verbatim
            q.append(' = '.join([self._backtick(k[1:]), str(v)]) if columnname else v)
        else:
            q.append(' = '.join([self._backtick(k), placeholder]) if columnname else placeholder)
            a.append(v)
    return ', '.join(q), tuple(a)
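A hedged usage sketch (db is an instance of this helper class; the row contents are made up):

row = {'name': 'alice', 'login_count': 3, '#updated_at': 'NOW()'}
q, args = db._value_parser(row, columnname=True)
# q    -> "`name` = %s, `login_count` = %s, `updated_at` = NOW()"
# args -> ('alice', 3)
# On Python 3.7+ the clause order follows the dict's insertion order.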
def _by_columns(self, columns):
    """
    Allow select.group and select.order to accept either a string or a list
    """
    return columns if self.isstr(columns) else self._backtick_columns(columns)
def select(self, table, columns=None, join=None, where=None, group=None,
           having=None, order=None, limit=None, iterator=False, fetch=True):
    """
    :type table: string
    :type columns: list
    :type join: dict
    :param join: {'[>]table1(t1)': {'user.id': 't1.user_id'}} ->
                 "LEFT JOIN table AS t1 ON user.id = t1.user_id"
    :type where: dict
    :type group: string|list
    :type having: string
    :type order: string|list
    :type limit: int|list  # TODO: change to offset
    :param limit: The max row number for this query. If it contains an
                  offset, limit must be a list like [offset, limit]
    :param iterator: Whether to output the result in a generator. It always
                     returns a generator if the cursor is SSCursor or
                     SSDictCursor, regardless of whether iterator is True
                     or False.
    :type fetch: bool
    """
    if not columns:
        columns = ['*']

    where_q, _args = self._where_parser(where)

    # TODO: support multiple table
    _sql = ''.join(['SELECT ', self._backtick_columns(columns),
                    ' FROM ',
                    self._tablename_parser(table)['formatted_tablename'],
                    self._join_parser(join),
                    where_q,
                    (' GROUP BY ' + self._by_columns(group)) if group else '',
                    (' HAVING ' + having) if having else '',
                    (' ORDER BY ' + self._by_columns(order)) if order else '',
                    self._limit_parser(limit), ';'])

    if self.debug:
        return self.cur.mogrify(_sql, _args)

    execute_result = self.cur.execute(_sql, _args)

    if not fetch:
        return execute_result

    if self.cursorclass in (pymysql.cursors.SSCursor,
                            pymysql.cursors.SSDictCursor):
        return self.cur

    if iterator:
        return self._yield_result()

    return self.cur.fetchall()
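A hedged usage sketch of select() (db is an instance; the table, column, and join names are made up):

rows = db.select(
    'user',
    columns=['user.id', 'user.name', 't1.city'],
    join={'[>]address(t1)': {'user.id': 't1.user_id'}},  # LEFT JOIN address AS t1
    where={'user.active': 1},
    order='user.id DESC',
    limit=[0, 10])  # offset 0, at most 10 rows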
def select_page(self, limit, offset=0, **kwargs):
    """
    :type limit: int
    :param limit: The max row number for each page
    :type offset: int
    :param offset: The starting position of the page
    :return: generator yielding one page of results at a time
    """
    start = offset
    while True:
        result = self.select(limit=[start, limit], **kwargs)
        start += limit
        if result:
            yield result
        else:
            break
        if self.debug:
            break
def get(self, table, column, join=None, where=None, insert=False,
        ifnone=None):
    """
    A simplified version of select, for getting the first result in one
    column only. A common case of using this method is getting an id.

    :type table: string
    :type column: str
    :type join: dict
    :type where: dict
    :type insert: bool
    :param insert: If insert==True, insert the input condition if there's
                   no result and return the id of the new row.
    :type ifnone: string
    :param ifnone: When ifnone is a non-empty string, raise an error if the
                   query returns an empty result. The insert parameter does
                   not work in this mode.
    """
    select_result = self.select(table=table, columns=[column], join=join,
                                where=where, limit=1)

    if self.debug:
        return select_result

    result = select_result[0] if select_result else None

    if result:
        return result[0 if self.cursorclass is pymysql.cursors.Cursor
                      else column]

    if ifnone:
        raise ValueError(ifnone)

    if insert:
        if any([isinstance(d, dict) for d in where.values()]):
            raise ValueError("The where parameter in get() doesn't support "
                             "nested condition with insert==True.")
        return self.insert(table=table, value=where)

    return None
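A hedged usage sketch of get() (db is an instance; the table and column names are made up):

# Fetch an id by a unique column; with insert=True, insert the row and
# return the new id if no match exists
user_id = db.get('user', 'id',
                 where={'email': 'alice@example.com'},
                 insert=True)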
def insert(self, table, value, ignore=False, commit=True):
    """
    Insert a dict into db.

    :type table: string
    :type value: dict
    :type ignore: bool
    :type commit: bool
    :return: int. The row id of the insert.
    """
    value_q, _args = self._value_parser(value, columnname=False)

    _sql = ''.join(['INSERT', ' IGNORE' if ignore else '', ' INTO ',
                    self._backtick(table),
                    ' (', self._backtick_columns(value), ') VALUES (',
                    value_q, ');'])

    if self.debug:
        return self.cur.mogrify(_sql, _args)

    self.cur.execute(_sql, _args)
    if commit:
        self.conn.commit()
    return self.cur.lastrowid
def upsert(self, table, value, update_columns=None, commit=True):
    """
    :type table: string
    :type value: dict
    :type update_columns: list
    :param update_columns: specify the columns which will be updated if
                           the record exists
    :type commit: bool
    """
    if not isinstance(value, dict):
        raise TypeError('Input value should be a dictionary')

    if not update_columns:
        update_columns = value.keys()

    value_q, _args = self._value_parser(value, columnname=False)

    _sql = ''.join(['INSERT INTO ', self._backtick(table),
                    ' (', self._backtick_columns(value), ') VALUES ',
                    '(', value_q, ') ',
                    'ON DUPLICATE KEY UPDATE ',
                    ', '.join(['='.join([k, 'VALUES(' + k + ')'])
                               for k in update_columns]), ';'])

    if self.debug:
        return self.cur.mogrify(_sql, _args)

    self.cur.execute(_sql, _args)
    if commit:
        self.conn.commit()
    return self.cur.lastrowid
def insertmany(self, table, columns, value, ignore=False, commit=True):
    """
    Insert multiple records within one query.

    :type table: string
    :type columns: list
    :type value: list|tuple
    :param value: Doesn't support MySQL functions
    :param value: Example: [(value1_column1, value1_column2,), ]
    :type ignore: bool
    :type commit: bool
    :return: int. The row id of the LAST insert only.
    """
    if not isinstance(value, (list, tuple)):
        raise TypeError('Input value should be a list or tuple')

    # Cannot add semicolon here, otherwise it will not pass the
    # Cursor.executemany validation
    _sql = ''.join(['INSERT', ' IGNORE' if ignore else '', ' INTO ',
                    self._backtick(table),
                    ' (', self._backtick_columns(columns), ') VALUES (',
                    ', '.join(['%s'] * len(columns)), ')'])
    _args = tuple(value)

    # For insertmany, the base queries for executemany and printing are
    # different
    _sql_full = ''.join(['INSERT', ' IGNORE' if ignore else '', ' INTO ',
                         self._backtick(table),
                         ' (', self._backtick_columns(columns),
                         ') VALUES ',
                         ', '.join([''.join(['(', ', '.join(['%s'] * len(columns)), ')'])] * len(_args)),
                         ';'])
    _args_flattened = [item for sublist in _args for item in sublist]

    if self.debug:
        return self.cur.mogrify(_sql_full, _args_flattened)

    self.cur.executemany(_sql, _args)
    if commit:
        self.conn.commit()
    return self.cur.lastrowid
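A hedged usage sketch of insertmany() (db is an instance; the table and data are made up):

db.insertmany(
    'score',
    columns=['student_id', 'points'],
    value=[(1, 90), (2, 85), (3, 78)])  # one tuple per row; no MySQL functions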
def update(self, table, value, where, join=None, commit=True):
    """
    :type table: string
    :type value: dict
    :type where: dict
    :type join: dict
    :type commit: bool
    """
    value_q, _value_args = self._value_parser(value, columnname=True)
    where_q, _where_args = self._where_parser(where)

    _sql = ''.join(['UPDATE ',
                    self._tablename_parser(table)['formatted_tablename'],
                    self._join_parser(join),
                    ' SET ', value_q, where_q, ';'])
    _args = _value_args + _where_args

    if self.debug:
        return self.cur.mogrify(_sql, _args)

    result = self.cur.execute(_sql, _args)
    if commit:
        self.commit()
    return result
def delete(self, table, where=None, commit=True):
    """
    :type table: string
    :type where: dict
    :type commit: bool
    """
    where_q, _args = self._where_parser(where)
    alias = self._tablename_parser(table)['alias']
    _sql = ''.join(['DELETE ', alias + ' ' if alias else '', 'FROM ',
                    self._tablename_parser(table)['formatted_tablename'],
                    where_q, ';'])

    if self.debug:
        return self.cur.mogrify(_sql, _args)

    result = self.cur.execute(_sql, _args)
    if commit:
        self.commit()
    return result
def get_whitespace(txt):
    """
    Returns a tuple containing the whitespace to the left and right of a
    string as its two elements
    """
    # if the entire parameter is whitespace
    rall = re.search(r'^([\s])+$', txt)
    if rall:
        tmp = txt.split('\n', 1)
        if len(tmp) == 2:
            return (tmp[0], '\n' + tmp[1])  # left, right
        else:
            return ('', tmp[0])  # left, right

    left = ''
    # find whitespace to the left of the parameter
    rlm = re.search(r'^([\s])+', txt)
    if rlm:
        left = rlm.group(0)

    right = ''
    # find whitespace to the right of the parameter
    rrm = re.search(r'([\s])+$', txt)
    if rrm:
        right = rrm.group(0)

    return (left, right)
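A runnable illustration of get_whitespace (the inputs are made up):

print(get_whitespace('  foo '))  # ('  ', ' ')
print(get_whitespace('bar'))     # ('', '')
print(get_whitespace(' \n '))    # (' ', '\n ') - all-whitespace input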
def find_whitespace_pattern(self):
    """
    Try to find a whitespace pattern in the existing parameters to be
    applied to a newly added parameter
    """
    name_ws = []
    value_ws = []
    for entry in self._entries:
        name_ws.append(get_whitespace(entry.name))
        if entry.value != '':
            value_ws.append(get_whitespace(entry._value))  # _value is unstripped

    if len(value_ws) >= 1:
        value_ws = most_common(value_ws)
    else:
        value_ws = ('', ' ')

    if len(name_ws) >= 1:
        name_ws = most_common(name_ws)
    else:
        name_ws = (' ', '')

    return name_ws, value_ws
def _path_for_file(self, project_name, date):
    """
    Generate the path on disk for a specified project and date.

    :param project_name: the PyPI project name for the data
    :type project_name: str
    :param date: the date for the data
    :type date: datetime.datetime
    :return: path for where to store this data on disk
    :rtype: str
    """
    return os.path.join(
        self.cache_path,
        '%s_%s.json' % (project_name, date.strftime('%Y%m%d'))
    )
def get(self, project, date):
    """
    Get the cache data for a specified project for the specified date.
    Returns None if the data cannot be found in the cache.

    :param project: PyPI project name to get data for
    :type project: str
    :param date: date to get data for
    :type date: datetime.datetime
    :return: dict of per-date data for project
    :rtype: :py:obj:`dict` or ``None``
    """
    fpath = self._path_for_file(project, date)
    logger.debug('Cache GET project=%s date=%s - path=%s',
                 project, date.strftime('%Y-%m-%d'), fpath)
    try:
        with open(fpath, 'r') as fh:
            data = json.loads(fh.read())
    except Exception:
        logger.debug('Error getting from cache for project=%s date=%s',
                     project, date.strftime('%Y-%m-%d'))
        return None
    data['cache_metadata']['date'] = datetime.strptime(
        data['cache_metadata']['date'], '%Y%m%d'
    )
    data['cache_metadata']['updated'] = datetime.fromtimestamp(
        data['cache_metadata']['updated']
    )
    return data
def set(self, project, date, data, data_ts):
    """
    Set the cache data for a specified project for the specified date.

    :param project: project name to set data for
    :type project: str
    :param date: date to set data for
    :type date: datetime.datetime
    :param data: data to cache
    :type data: dict
    :param data_ts: maximum timestamp in the BigQuery data table
    :type data_ts: int
    """
    data['cache_metadata'] = {
        'project': project,
        'date': date.strftime('%Y%m%d'),
        'updated': time.time(),
        'version': VERSION,
        'data_ts': data_ts
    }
    fpath = self._path_for_file(project, date)
    logger.debug('Cache SET project=%s date=%s - path=%s',
                 project, date.strftime('%Y-%m-%d'), fpath)
    with open(fpath, 'w') as fh:
        fh.write(json.dumps(data))
def get_dates_for_project(self, project):
    """
    Return a list of the dates we have in cache for the specified project,
    sorted in ascending date order.

    :param project: project name
    :type project: str
    :return: list of datetime.datetime objects
    :rtype: ``list`` of :py:class:`datetime.datetime`
    """
    file_re = re.compile(r'^%s_([0-9]{8})\.json$' % project)
    all_dates = []
    for f in os.listdir(self.cache_path):
        if not os.path.isfile(os.path.join(self.cache_path, f)):
            continue
        m = file_re.match(f)
        if m is None:
            continue
        all_dates.append(datetime.strptime(m.group(1), '%Y%m%d'))
    return sorted(all_dates)
def parse_args(argv):
    """
    Use Argparse to parse command-line arguments.

    :param argv: list of arguments to parse (``sys.argv[1:]``)
    :type argv: ``list``
    :return: parsed arguments
    :rtype: :py:class:`argparse.Namespace`
    """
    p = argparse.ArgumentParser(
        description='pypi-download-stats - Calculate detailed download '
                    'stats and generate HTML and badges for PyPI packages '
                    '- <%s>' % PROJECT_URL,
        prog='pypi-download-stats'
    )
    p.add_argument('-V', '--version', action='version',
                   version='%(prog)s ' + VERSION)
    p.add_argument('-v', '--verbose', dest='verbose', action='count',
                   default=0,
                   help='verbose output. specify twice for debug-level '
                        'output.')
    m = p.add_mutually_exclusive_group()
    m.add_argument('-Q', '--no-query', dest='query', action='store_false',
                   default=True,
                   help='do not query; just generate output from cached '
                        'data')
    m.add_argument('-G', '--no-generate', dest='generate',
                   action='store_false', default=True,
                   help='do not generate output; just query data and cache '
                        'results')
    p.add_argument('-o', '--out-dir', dest='out_dir', action='store',
                   type=str, default='./pypi-stats',
                   help='output directory (default: ./pypi-stats)')
    p.add_argument('-p', '--project-id', dest='project_id', action='store',
                   type=str, default=None,
                   help='ProjectID for your Google Cloud user, if not using '
                        'service account credentials JSON file')
    # @TODO this is tied to the DiskDataCache class
    p.add_argument('-c', '--cache-dir', dest='cache_dir', action='store',
                   type=str, default='./pypi-stats-cache',
                   help='stats cache directory (default: '
                        './pypi-stats-cache)')
    p.add_argument('-B', '--backfill-num-days', dest='backfill_days',
                   type=int, action='store', default=7,
                   help='number of days of historical data to backfill, if '
                        'missing (default: 7). Note this may incur BigQuery '
                        'charges. Set to -1 to backfill all available '
                        'history.')
    g = p.add_mutually_exclusive_group()
    g.add_argument('-P', '--project', dest='PROJECT', action='append',
                   type=str,
                   help='project name to query/generate stats for (can be '
                        'specified more than once; this will reduce query '
                        'cost for multiple projects)')
    g.add_argument('-U', '--user', dest='user', action='store', type=str,
                   help='Run for all PyPI projects owned by the specified '
                        'user.')
    args = p.parse_args(argv)
    return args
def set_log_level_format(level, format):
    """
    Set logger level and format.

    :param level: logging level; see the :py:mod:`logging` constants.
    :type level: int
    :param format: logging formatter format string
    :type format: str
    """
    formatter = logging.Formatter(fmt=format)
    logger.handlers[0].setFormatter(formatter)
    logger.setLevel(level)
def _pypi_get_projects_for_user(username):
    """
    Given the username of a PyPI user, return a list of all of the user's
    projects from the XMLRPC interface.

    See: https://wiki.python.org/moin/PyPIXmlRpc

    :param username: PyPI username
    :type username: str
    :return: list of string project names
    :rtype: ``list``
    """
    client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
    pkgs = client.user_packages(username)  # returns [role, package]
    return [x[1] for x in pkgs]
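A hedged usage sketch (the username is a placeholder; note that xmlrpclib is the Python 2 module name, spelled xmlrpc.client in Python 3):

projects = _pypi_get_projects_for_user('example-user')
print(projects)  # e.g. ['some-project', 'another-project']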
def main(args=None):
    """
    Main entry point
    """
    # parse args
    if args is None:
        args = parse_args(sys.argv[1:])

    # set logging level
    if args.verbose > 1:
        set_log_debug()
    elif args.verbose == 1:
        set_log_info()

    outpath = os.path.abspath(os.path.expanduser(args.out_dir))
    cachepath = os.path.abspath(os.path.expanduser(args.cache_dir))
    cache = DiskDataCache(cache_path=cachepath)
    if args.user:
        args.PROJECT = _pypi_get_projects_for_user(args.user)
    if args.query:
        DataQuery(args.project_id, args.PROJECT, cache).run_queries(
            backfill_num_days=args.backfill_days)
    else:
        logger.warning('Query disabled by command-line flag; operating on '
                       'cached data only.')
    if not args.generate:
        logger.warning('Output generation disabled by command-line flag; '
                       'exiting now.')
        raise SystemExit(0)
    for proj in args.PROJECT:
        logger.info('Generating output for: %s', proj)
        stats = ProjectStats(proj, cache)
        outdir = os.path.join(outpath, proj)
        OutputGenerator(proj, stats, outdir).generate()
def generate_graph(self):
    """
    Generate the graph; return a 2-tuple of strings, script to place in
    the head of the HTML document and div content for the graph itself.

    :return: 2-tuple (script, div)
    :rtype: tuple
    """
    logger.debug('Generating graph for %s', self._graph_id)
    # tools to use
    tools = [
        PanTool(),
        BoxZoomTool(),
        WheelZoomTool(),
        SaveTool(),
        ResetTool(),
        ResizeTool()
    ]

    # generate the stacked area graph
    try:
        g = Area(
            self._data, x='Date', y=self._y_series_names,
            title=self._title, stack=True, xlabel='Date',
            ylabel='Downloads', tools=tools,
            # note the width and height will be set by JavaScript
            plot_height=400, plot_width=800,
            toolbar_location='above', legend=False
        )
    except Exception as ex:
        logger.error("Error generating %s graph", self._graph_id)
        logger.error("Data: %s", self._data)
        logger.error("y=%s", self._y_series_names)
        raise ex

    lines = []
    legend_parts = []
    # add a line at the top of each Patch (stacked area) for hovertool
    for renderer in g.select(GlyphRenderer):
        if not isinstance(renderer.glyph, Patches):
            continue
        series_name = renderer.data_source.data['series'][0]
        logger.debug('Adding line for Patches %s (series: %s)', renderer,
                     series_name)
        line = self._line_for_patches(self._data, g, renderer, series_name)
        if line is not None:
            lines.append(line)
            legend_parts.append((series_name, [line]))

    # add the Hovertool, specifying only our line glyphs
    g.add_tools(
        HoverTool(
            tooltips=[
                (self._y_name, '@SeriesName'),
                ('Date', '@FmtDate'),
                ('Downloads', '@Downloads'),
            ],
            renderers=lines,
            line_policy='nearest'
        )
    )
    # legend outside chart area
    legend = Legend(legends=legend_parts, location=(0, 0))
    g.add_layout(legend, 'right')
    return components(g)
def _line_for_patches(self, data, chart, renderer, series_name):
    """
    Add a line along the top edge of a Patch in a stacked Area Chart;
    return the new Glyph for addition to HoverTool.

    :param data: original data for the graph
    :type data: dict
    :param chart: Chart to add the line to
    :type chart: bokeh.charts.Chart
    :param renderer: GlyphRenderer containing one Patches glyph, to draw
      the line for
    :type renderer: bokeh.models.renderers.GlyphRenderer
    :param series_name: the data series name this Patches represents
    :type series_name: str
    :return: GlyphRenderer for a Line at the top edge of this Patch
    :rtype: bokeh.models.renderers.GlyphRenderer
    """
    # @TODO this method needs a major refactor
    # get the original x and y values, and color
    xvals = deepcopy(renderer.data_source.data['x_values'][0])
    yvals = deepcopy(renderer.data_source.data['y_values'][0])
    line_color = renderer.glyph.fill_color

    # save original values for logging if needed
    orig_xvals = [x for x in xvals]
    orig_yvals = [y for y in yvals]

    # get a list of the values
    new_xvals = [x for x in xvals]
    new_yvals = [y for y in yvals]

    # so when a Patch is made, the first point is (0,0); trash it
    xvals = new_xvals[1:]
    yvals = new_yvals[1:]

    # then, we can tell the last point in the "top" line because it will
    # be followed by a point with the same x value and a y value of 0.
    last_idx = None
    for idx, val in enumerate(xvals):
        if yvals[idx+1] == 0 and xvals[idx+1] == xvals[idx]:
            last_idx = idx
            break

    if last_idx is None:
        logger.error('Unable to find top line of patch (x_values=%s '
                     'y_values=%s)', orig_xvals, orig_yvals)
        return None

    # truncate our values to just what makes up the top line
    xvals = xvals[:last_idx+1]
    yvals = yvals[:last_idx+1]

    # Currently (bokeh 0.12.1) HoverTool won't show the tooltip for the
    # last point in our line. As a hack for this, add a point with the
    # same Y value and an X slightly before it.
    lastx = xvals[-1]
    xvals[-1] = lastx - 1000  # 1000 nanoseconds
    xvals.append(lastx)
    yvals.append(yvals[-1])

    # get the actual download counts from the original data
    download_counts = [
        data[series_name][y] for y in range(0, len(yvals) - 1)
    ]
    download_counts.append(download_counts[-1])

    # create the data for the new overlay line
    data2 = {
        'x': xvals,  # Date/x values are numpy.datetime64
        'y': yvals,
        # the following are hacks for data that we want in the HoverTool
        # tooltip
        'SeriesName': [series_name for _ in yvals],
        # formatted date
        'FmtDate': [self.datetime64_to_formatted_date(x) for x in xvals],
        # to show the exact value, not where the pointer is
        'Downloads': download_counts
    }

    # set the formatted date for our hacked second-to-last point to the
    # same value as the last point
    data2['FmtDate'][-2] = data2['FmtDate'][-1]

    # create the ColumnDataSource, then the line for it, then the Glyph
    line_ds = ColumnDataSource(data2)
    line = Line(x='x', y='y', line_color=line_color)
    lineglyph = chart.add_glyph(line_ds, line)
    return lineglyph
def _get_cache_dates(self):
    """
    Get a list of dates (:py:class:`datetime.datetime`) present in cache,
    beginning with the longest contiguous set of dates that isn't missing
    more than one date in series.

    :return: list of datetime objects for contiguous dates in cache
    :rtype: ``list``
    """
    all_dates = self.cache.get_dates_for_project(self.project_name)
    dates = []
    last_date = None
    for val in sorted(all_dates):
        if last_date is None:
            last_date = val
            continue
        if val - last_date > timedelta(hours=48):
            # reset dates to start from here
            logger.warning("Last cache date was %s, current date is %s; "
                           "delta is too large. Starting cache date series "
                           "at current date.", last_date, val)
            dates = []
        last_date = val
        dates.append(val)
    # find the first download record, and only look at dates after that
    for idx, cache_date in enumerate(dates):
        data = self._cache_get(cache_date)
        if not self._is_empty_cache_record(data):
            logger.debug("First cache date with data: %s", cache_date)
            return dates[idx:]
    return dates
def _is_empty_cache_record(self, rec):
    """
    Return True if the specified cache record has no data, False otherwise.

    :param rec: cache record returned by :py:meth:`~._cache_get`
    :type rec: dict
    :return: True if record is empty, False otherwise
    :rtype: bool
    """
    # these are taken from DataQuery.query_one_table()
    for k in [
        'by_version',
        'by_file_type',
        'by_installer',
        'by_implementation',
        'by_system',
        'by_distro',
        'by_country'
    ]:
        if k in rec and len(rec[k]) > 0:
            return False
    return True
def _cache_get(self, date):
    """
    Return cache data for the specified day; cache locally in this class.

    :param date: date to get data for
    :type date: datetime.datetime
    :return: cache data for date
    :rtype: dict
    """
    if date in self.cache_data:
        logger.debug('Using class-cached data for date %s',
                     date.strftime('%Y-%m-%d'))
        return self.cache_data[date]
    logger.debug('Getting data from cache for date %s',
                 date.strftime('%Y-%m-%d'))
    data = self.cache.get(self.project_name, date)
    self.cache_data[date] = data
    return data
def _compound_column_value(k1, k2):
    """
    Like :py:meth:`~._column_value` but collapses two unknowns into one.

    :param k1: first (top-level) value
    :param k2: second (bottom-level) value
    :return: display key
    :rtype: str
    """
    k1 = ProjectStats._column_value(k1)
    k2 = ProjectStats._column_value(k2)
    if k1 == 'unknown' and k2 == 'unknown':
        return 'unknown'
    return '%s %s' % (k1, k2)
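An illustrative behaviour sketch (assuming _column_value maps None/empty values to 'unknown', which is implied by the collapsing logic above):

# _compound_column_value('pip', '9.0')  -> 'pip 9.0'
# _compound_column_value(None, None)    -> 'unknown' (two unknowns collapsed)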
def _shorten_version(ver, num_components=2):
    """
    If ``ver`` is a dot-separated string with at least (num_components + 1)
    components, return only the first ``num_components`` of them. Else
    return the original string.

    :param ver: version string
    :type ver: str
    :return: shortened (e.g. major, minor) version
    :rtype: str
    """
    parts = ver.split('.')
    if len(parts) <= num_components:
        return ver
    return '.'.join(parts[:num_components])
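A runnable illustration of _shorten_version (called without self here for brevity; in the class it appears to be used as a static helper):

print(_shorten_version('9.0.1'))                      # '9.0'
print(_shorten_version('9.0'))                        # '9.0' (unchanged)
print(_shorten_version('16.04.1', num_components=1))  # '16'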
def per_version_data(self):
    """
    Return download data by version.

    :return: dict of cache data; keys are datetime objects, values are
      dict of version (str) to count (int)
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        if len(data['by_version']) == 0:
            data['by_version'] = {'other': 0}
        ret[cache_date] = data['by_version']
    return ret
def per_file_type_data(self):
    """
    Return download data by file type.

    :return: dict of cache data; keys are datetime objects, values are
      dict of file type (str) to count (int)
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        if len(data['by_file_type']) == 0:
            data['by_file_type'] = {'other': 0}
        ret[cache_date] = data['by_file_type']
    return ret
def per_installer_data(self):
    """
    Return download data by installer name and version.

    :return: dict of cache data; keys are datetime objects, values are
      dict of installer name/version (str) to count (int).
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        ret[cache_date] = {}
        for inst_name, inst_data in data['by_installer'].items():
            for inst_ver, count in inst_data.items():
                k = self._compound_column_value(
                    inst_name,
                    self._shorten_version(inst_ver)
                )
                ret[cache_date][k] = count
        if len(ret[cache_date]) == 0:
            ret[cache_date]['unknown'] = 0
    return ret
def per_implementation_data(self):
    """
    Return download data by python implementation name and version.

    :return: dict of cache data; keys are datetime objects, values are
      dict of implementation name/version (str) to count (int).
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        ret[cache_date] = {}
        for impl_name, impl_data in data['by_implementation'].items():
            for impl_ver, count in impl_data.items():
                k = self._compound_column_value(
                    impl_name,
                    self._shorten_version(impl_ver)
                )
                ret[cache_date][k] = count
        if len(ret[cache_date]) == 0:
            ret[cache_date]['unknown'] = 0
    return ret
def per_system_data(self):
    """
    Return download data by system.

    :return: dict of cache data; keys are datetime objects, values are
      dict of system (str) to count (int)
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        ret[cache_date] = {
            self._column_value(x): data['by_system'][x]
            for x in data['by_system']
        }
        if len(ret[cache_date]) == 0:
            ret[cache_date]['unknown'] = 0
    return ret
def per_country_data(self):
    """
    Return download data by country.

    :return: dict of cache data; keys are datetime objects, values are
      dict of country (str) to count (int)
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        ret[cache_date] = {}
        for cc, count in data['by_country'].items():
            k = '%s (%s)' % (self._alpha2_to_country(cc), cc)
            ret[cache_date][k] = count
        if len(ret[cache_date]) == 0:
            ret[cache_date]['unknown'] = 0
    return ret
def per_distro_data(self):
    """
    Return download data by distro name and version.

    :return: dict of cache data; keys are datetime objects, values are
      dict of distro name/version (str) to count (int).
    :rtype: dict
    """
    ret = {}
    for cache_date in self.cache_dates:
        data = self._cache_get(cache_date)
        ret[cache_date] = {}
        for distro_name, distro_data in data['by_distro'].items():
            if distro_name.lower() == 'red hat enterprise linux server':
                distro_name = 'RHEL'
            for distro_ver, count in distro_data.items():
                ver = self._shorten_version(distro_ver, num_components=1)
                if distro_name.lower() == 'os x':
                    ver = self._shorten_version(distro_ver,
                                                num_components=2)
                k = self._compound_column_value(distro_name, ver)
                ret[cache_date][k] = count
        if len(ret[cache_date]) == 0:
            ret[cache_date]['unknown'] = 0
    return ret
def downloads_per_day(self):
    """
    Return the number of downloads per day, averaged over the past 7 days
    of data.

    :return: average number of downloads per day
    :rtype: int
    """
    count, num_days = self._downloads_for_num_days(7)
    res = ceil(count / num_days)
    logger.debug("Downloads per day = (%d / %d) = %d", count, num_days,
                 res)
    return res
def downloads_per_week(self):
    """
    Return the number of downloads in the last 7 days.

    :return: number of downloads in the last 7 days; if we have less than
      7 days of data, returns None.
    :rtype: int
    """
    if len(self.cache_dates) < 7:
        logger.error("Only have %d days of data; cannot calculate "
                     "downloads per week", len(self.cache_dates))
        return None
    count, _ = self._downloads_for_num_days(7)
    logger.debug("Downloads per week = %d", count)
    return count
def _downloads_for_num_days(self, num_days):
    """
    Given a number of days of historical data to look at (starting with
    today and working backwards), return the total number of downloads for
    that time range, and the number of days of data we had (in cases where
    we had less data than requested).

    :param num_days: number of days of data to look at
    :type num_days: int
    :return: 2-tuple of (download total, number of days of data)
    :rtype: tuple
    """
    logger.debug("Getting download total for last %d days", num_days)
    dates = self.cache_dates
    logger.debug("Cache has %d days of data", len(dates))
    if len(dates) > num_days:
        dates = dates[(-1 * num_days):]
        logger.debug("Looking at last %d days of data", len(dates))
    dl_sum = 0
    for cache_date in dates:
        data = self._cache_get(cache_date)
        dl_sum += sum(data['by_version'].values())
    logger.debug("Sum of download counts: %d", dl_sum)
    return dl_sum, len(dates)
def _get_project_id(self):
    """
    Get our projectId from the ``GOOGLE_APPLICATION_CREDENTIALS`` creds
    JSON file.

    :return: project ID
    :rtype: str
    """
    fpath = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
    if fpath is None:
        raise Exception('ERROR: No project ID specified, and '
                        'GOOGLE_APPLICATION_CREDENTIALS env var is not set')
    fpath = os.path.abspath(os.path.expanduser(fpath))
    logger.debug('Reading credentials file at %s to get project_id', fpath)
    with open(fpath, 'r') as fh:
        cred_data = json.loads(fh.read())
    return cred_data['project_id']
def _get_bigquery_service(self):
    """
    Connect to the BigQuery service.

    Calling ``GoogleCredentials.get_application_default`` requires that
    you either be running in the Google Cloud, or have the
    ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the
    path to a credentials JSON file.

    :return: authenticated BigQuery service connection object
    :rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
google-api-python-client/docs/epy/googleapiclient.discovery.\
Resource-class.html>`_
    """
    logger.debug('Getting Google Credentials')
    credentials = GoogleCredentials.get_application_default()
    logger.debug('Building BigQuery service instance')
    bigquery_service = build('bigquery', 'v2', credentials=credentials)
    return bigquery_service
def _get_download_table_ids(self):
    """
    Get a list of PyPI downloads table (sharded per day) IDs.

    :return: list of table names (strings)
    :rtype: ``list``
    """
    all_table_names = []  # matching per-date table names
    logger.info('Querying for all tables in dataset')
    tables = self.service.tables()
    request = tables.list(projectId=self._PROJECT_ID,
                          datasetId=self._DATASET_ID)
    while request is not None:
        response = request.execute()
        # if the number of results is evenly divisible by the page size,
        # we may end up with a last response that has no 'tables' key,
        # and is empty.
        if 'tables' not in response:
            response['tables'] = []
        for table in response['tables']:
            if table['type'] != 'TABLE':
                logger.debug('Skipping %s (type=%s)',
                             table['tableReference']['tableId'],
                             table['type'])
                continue
            if not self._table_re.match(
                    table['tableReference']['tableId']):
                logger.debug('Skipping table with non-matching name: %s',
                             table['tableReference']['tableId'])
                continue
            all_table_names.append(table['tableReference']['tableId'])
        request = tables.list_next(previous_request=request,
                                   previous_response=response)
    return sorted(all_table_names)
def _datetime_for_table_name(self, table_name):
    """
    Return a :py:class:`datetime.datetime` object for the date of the data
    in the specified table name.

    :param table_name: name of the table
    :type table_name: str
    :return: datetime that the table holds data for
    :rtype: datetime.datetime
    """
    m = self._table_re.match(table_name)
    dt = datetime.strptime(m.group(1), '%Y%m%d')
    return dt
def _run_query(self, query):
    """
    Run one query against BigQuery and return the result.

    :param query: the query to run
    :type query: str
    :return: list of per-row response dicts (key => value)
    :rtype: ``list``
    """
    query_request = self.service.jobs()
    logger.debug('Running query: %s', query)
    start = datetime.now()
    resp = query_request.query(
        projectId=self.project_id,
        body={'query': query}
    ).execute()
    duration = datetime.now() - start
    logger.debug('Query response (in %s): %s', duration, resp)
    if not resp['jobComplete']:
        logger.error('Error: query reported job not complete!')
    if int(resp['totalRows']) == 0:
        return []
    if int(resp['totalRows']) != len(resp['rows']):
        logger.error('Error: query reported %s total rows, but only '
                     'returned %d', resp['totalRows'], len(resp['rows']))
    data = []
    fields = [f['name'] for f in resp['schema']['fields']]
    for row in resp['rows']:
        d = {}
        for idx, val in enumerate(row['f']):
            d[fields[idx]] = val['v']
        data.append(d)
    return data
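The BigQuery v2 REST response nests cell values as rows -> 'f' -> 'v'; a toy illustration of the flattening loop above (the response content is made up):

resp = {
    'schema': {'fields': [{'name': 'file_project'}, {'name': 'dl_count'}]},
    'rows': [{'f': [{'v': 'requests'}, {'v': '1234'}]}],
}
fields = [f['name'] for f in resp['schema']['fields']]
data = [{fields[i]: cell['v'] for i, cell in enumerate(row['f'])}
        for row in resp['rows']]
print(data)  # [{'file_project': 'requests', 'dl_count': '1234'}]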
def _get_newest_ts_in_table(self, table_name):
    """
    Return the timestamp for the newest record in the given table.

    :param table_name: name of the table to query
    :type table_name: str
    :return: timestamp of newest row in table
    :rtype: int
    """
    logger.debug('Querying for newest timestamp in table %s', table_name)
    q = "SELECT TIMESTAMP_TO_SEC(MAX(timestamp)) AS max_ts %s;" % (
        self._from_for_table(table_name)
    )
    res = self._run_query(q)
    ts = int(res[0]['max_ts'])
    logger.debug('Newest timestamp in table %s: %s', table_name, ts)
    return ts
def _query_by_installer(self, table_name):
    """
    Query for download data broken down by installer, for one day.

    :param table_name: table name to query against
    :type table_name: str
    :return: dict of download information by installer; keys are project
      name, values are a dict of installer names to dicts of installer
      version to download count.
    :rtype: dict
    """
    logger.info('Querying for downloads by installer in table %s',
                table_name)
    q = "SELECT file.project, details.installer.name, " \
        "details.installer.version, COUNT(*) as dl_count " \
        "%s " \
        "%s " \
        "GROUP BY file.project, details.installer.name, " \
        "details.installer.version;" % (
            self._from_for_table(table_name),
            self._where_for_projects
        )
    res = self._run_query(q)
    result = self._dict_for_projects()
    # iterate through results
    for row in res:
        # pointer to the per-project result dict
        proj = result[row['file_project']]
        # grab the installer name and version
        iname = row['details_installer_name']
        iver = row['details_installer_version']
        if iname not in proj:
            proj[iname] = {}
        if iver not in proj[iname]:
            proj[iname][iver] = 0
        proj[iname][iver] += int(row['dl_count'])
    return result
def _query_by_system(self, table_name):
    """
    Query for download data broken down by system, for one day.

    :param table_name: table name to query against
    :type table_name: str
    :return: dict of download information by system; keys are project
      name, values are a dict of system names to download count.
    :rtype: dict
    """
    logger.info('Querying for downloads by system in table %s', table_name)
    q = "SELECT file.project, details.system.name, COUNT(*) as dl_count " \
        "%s " \
        "%s " \
        "GROUP BY file.project, details.system.name;" % (
            self._from_for_table(table_name),
            self._where_for_projects
        )
    res = self._run_query(q)
    result = self._dict_for_projects()
    for row in res:
        system = row['details_system_name']
        result[row['file_project']][system] = int(row['dl_count'])
    return result
def _query_by_distro(self, table_name):
    """
    Query for download data broken down by OS distribution, for one day.

    :param table_name: table name to query against
    :type table_name: str
    :return: dict of download information by distro; keys are project
      name, values are a dict of distro names to dicts of distro version
      to download count.
    :rtype: dict
    """
    logger.info('Querying for downloads by distro in table %s', table_name)
    q = "SELECT file.project, details.distro.name, " \
        "details.distro.version, COUNT(*) as dl_count " \
        "%s " \
        "%s " \
        "GROUP BY file.project, details.distro.name, " \
        "details.distro.version;" % (
            self._from_for_table(table_name),
            self._where_for_projects
        )
    res = self._run_query(q)
    result = self._dict_for_projects()
    # iterate through results
    for row in res:
        # pointer to the per-project result dict
        proj = result[row['file_project']]
        # grab the distro name and version
        dname = row['details_distro_name']
        dver = row['details_distro_version']
        if dname not in proj:
            proj[dname] = {}
        if dver not in proj[dname]:
            proj[dname][dver] = 0
        proj[dname][dver] += int(row['dl_count'])
    return result
Run all queries for the given table name (date) and update the cache.

:param table_name: table name to query against
:type table_name: str

def query_one_table(self, table_name):
    """
    Run all queries for the given table name (date) and update the cache.

    :param table_name: table name to query against
    :type table_name: str
    """
    table_date = self._datetime_for_table_name(table_name)
    logger.info('Running all queries for date table: %s (%s)', table_name,
                table_date.strftime('%Y-%m-%d'))
    final = self._dict_for_projects()
    try:
        data_timestamp = self._get_newest_ts_in_table(table_name)
    except HttpError as exc:
        try:
            content = json.loads(exc.content.decode('utf-8'))
            if content['error']['message'].startswith('Not found: Table'):
                logger.error("Table %s not found; no data for that day",
                             table_name)
                return
        except Exception:
            # error content was not parseable; fall through and re-raise
            # the original HttpError
            pass
        raise exc
    # data queries
    # note - ProjectStats._is_empty_cache_record() needs to know keys
    for name, func in {
        'by_version': self._query_by_version,
        'by_file_type': self._query_by_file_type,
        'by_installer': self._query_by_installer,
        'by_implementation': self._query_by_implementation,
        'by_system': self._query_by_system,
        'by_distro': self._query_by_distro,
        'by_country': self._query_by_country_code
    }.items():
        tmp = func(table_name)
        for proj_name in tmp:
            final[proj_name][name] = tmp[proj_name]
    # add to cache
    for proj_name in final:
        self.cache.set(proj_name, table_date, final[proj_name],
                       data_timestamp)
Return True if we have cached data for all projects for the specified datetime. Return False otherwise. :param dt: datetime to find cache for :type dt: datetime.datetime :return: True if we have cache for all projects for this date, False otherwise :rtype: bool def _have_cache_for_date(self, dt): """ Return True if we have cached data for all projects for the specified datetime. Return False otherwise. :param dt: datetime to find cache for :type dt: datetime.datetime :return: True if we have cache for all projects for this date, False otherwise :rtype: bool """ for p in self.projects: if self.cache.get(p, dt) is None: return False return True
Backfill historical data for days that are missing. :param num_days: number of days of historical data to backfill, if missing :type num_days: int :param available_table_names: names of available per-date tables :type available_table_names: ``list`` def backfill_history(self, num_days, available_table_names): """ Backfill historical data for days that are missing. :param num_days: number of days of historical data to backfill, if missing :type num_days: int :param available_table_names: names of available per-date tables :type available_table_names: ``list`` """ if num_days == -1: # skip the first date, under the assumption that data may be # incomplete logger.info('Backfilling all available history') start_table = available_table_names[1] else: logger.info('Backfilling %d days of history', num_days) start_table = available_table_names[-1 * num_days] start_date = self._datetime_for_table_name(start_table) end_table = available_table_names[-3] end_date = self._datetime_for_table_name(end_table) logger.debug( 'Backfilling history from %s (%s) to %s (%s)', start_table, start_date.strftime('%Y-%m-%d'), end_table, end_date.strftime('%Y-%m-%d') ) for days in range((end_date - start_date).days + 1): backfill_dt = start_date + timedelta(days=days) if self._have_cache_for_date(backfill_dt): logger.info('Cache present for all projects for %s; skipping', backfill_dt.strftime('%Y-%m-%d')) continue backfill_table = self._table_name_for_datetime(backfill_dt) logger.info('Backfilling %s (%s)', backfill_table, backfill_dt.strftime('%Y-%m-%d')) self.query_one_table(backfill_table)
Run the data queries for the specified projects. :param backfill_num_days: number of days of historical data to backfill, if missing :type backfill_num_days: int def run_queries(self, backfill_num_days=7): """ Run the data queries for the specified projects. :param backfill_num_days: number of days of historical data to backfill, if missing :type backfill_num_days: int """ available_tables = self._get_download_table_ids() logger.debug('Found %d available download tables: %s', len(available_tables), available_tables) today_table = available_tables[-1] yesterday_table = available_tables[-2] self.query_one_table(today_table) self.query_one_table(yesterday_table) self.backfill_history(backfill_num_days, available_tables)
Given a dict of data such as those in :py:class:`~.ProjectStats`
attributes, made up of :py:class:`datetime.datetime` keys and values of
dicts of column keys to counts, return a list of the distinct column
keys in sorted order.

:param data: data dict as returned by ProjectStats attributes
:type data: dict
:return: sorted list of distinct keys
:rtype: ``list``

def filter_data_columns(data):
    """
    Given a dict of data such as those in :py:class:`~.ProjectStats`
    attributes, made up of :py:class:`datetime.datetime` keys and values of
    dicts of column keys to counts, return a list of the distinct column
    keys in sorted order.

    :param data: data dict as returned by ProjectStats attributes
    :type data: dict
    :return: sorted list of distinct keys
    :rtype: ``list``
    """
    keys = set()
    for dt, d in data.items():
        for k in d:
            keys.add(k)
    return sorted(keys)
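A quick usage sketch with hypothetical data, showing that only the distinct inner keys survive, sorted:

.. code:: python

    from datetime import datetime

    data = {
        datetime(2016, 8, 1): {'pip': 100, 'bandersnatch': 5},
        datetime(2016, 8, 2): {'pip': 120, 'devpi': 2},
    }
    filter_data_columns(data)
    # ['bandersnatch', 'devpi', 'pip']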
Generate the HTML for the specified graphs.

:return: rendered HTML for the report page
:rtype: str

def _generate_html(self):
    """
    Generate the HTML for the specified graphs.

    :return: rendered HTML for the report page
    :rtype: str
    """
    logger.debug('Generating templated HTML')
    env = Environment(
        loader=PackageLoader('pypi_download_stats', 'templates'),
        extensions=['jinja2.ext.loopcontrols'])
    env.filters['format_date_long'] = filter_format_date_long
    env.filters['format_date_ymd'] = filter_format_date_ymd
    env.filters['data_columns'] = filter_data_columns
    template = env.get_template('base.html')
    logger.debug('Rendering template')
    html = template.render(
        project=self.project_name,
        cache_date=self._stats.as_of_datetime,
        user=getuser(),
        host=platform_node(),
        version=VERSION,
        proj_url=PROJECT_URL,
        graphs=self._graphs,
        graph_keys=self.GRAPH_KEYS,
        resources=Resources(mode='inline').render(),
        badges=self._badges
    )
    logger.debug('Template rendered')
    return html
Take a dictionary of data, as returned by the :py:class:`~.ProjectStats` per_*_data properties, return a 2-tuple of data dict and x labels list usable by bokeh.charts. :param data: data dict from :py:class:`~.ProjectStats` property :type data: dict :return: 2-tuple of data dict, x labels list :rtype: tuple def _data_dict_to_bokeh_chart_data(self, data): """ Take a dictionary of data, as returned by the :py:class:`~.ProjectStats` per_*_data properties, return a 2-tuple of data dict and x labels list usable by bokeh.charts. :param data: data dict from :py:class:`~.ProjectStats` property :type data: dict :return: 2-tuple of data dict, x labels list :rtype: tuple """ labels = [] # find all the data keys keys = set() for date in data: for k in data[date]: keys.add(k) # final output dict out_data = {} for k in keys: out_data[k] = [] # transform the data; deal with sparse data for data_date, data_dict in sorted(data.items()): labels.append(data_date) for k in out_data: if k in data_dict: out_data[k].append(data_dict[k]) else: out_data[k].append(0) return out_data, labels
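To make the sparse-data handling concrete, here is a hypothetical input and the output the method produces for it (assuming access to an instance of the surrounding class):

.. code:: python

    from datetime import datetime

    data = {
        datetime(2016, 8, 1): {'2.7': 10},
        datetime(2016, 8, 2): {'2.7': 12, '3.5': 3},
    }
    out_data, labels = self._data_dict_to_bokeh_chart_data(data)
    # out_data == {'2.7': [10, 12], '3.5': [0, 3]}  (missing dates filled with 0)
    # labels == [datetime(2016, 8, 1), datetime(2016, 8, 2)]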
Find the per-day average of each series in the data over the last 7
days; drop all but the top 10.

:param data: original graph data
:type data: dict
:return: dict containing only the top 10 series, based on average over
  the last 7 days.
:rtype: dict

def _limit_data(self, data):
    """
    Find the per-day average of each series in the data over the last 7
    days; drop all but the top 10.

    :param data: original graph data
    :type data: dict
    :return: dict containing only the top 10 series, based on average over
      the last 7 days.
    :rtype: dict
    """
    if len(data.keys()) <= 10:
        logger.debug("Data has 10 or fewer keys; not limiting")
        return data
    # average last 7 days of each series
    avgs = {}
    for k in data:
        if len(data[k]) <= 7:
            vals = data[k]
        else:
            vals = data[k][-7:]
        avgs[k] = sum(vals) / len(vals)
    # hold state
    final_data = {}  # final data dict
    other = []  # values for dropped/'other' series
    count = 0  # iteration counter
    # iterate the sorted averages; either drop or keep
    for k in sorted(avgs, key=avgs.get, reverse=True):
        if count < 10:
            final_data[k] = data[k]
            logger.debug("Keeping data series %s (average over last 7 "
                         "days of data: %d)", k, avgs[k])
        else:
            logger.debug("Adding data series %s to 'other' (average over "
                         "last 7 days of data: %d)", k, avgs[k])
            other.append(data[k])
        count += 1
    # sum up the other data and add to final
    final_data['other'] = [sum(series) for series in zip(*other)]
    return final_data
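A small sketch of the limiting behavior with hypothetical series (again assuming access to an instance of the surrounding class): twelve series whose averages are 0 through 11, so the two lowest are merged into 'other':

.. code:: python

    data = {'s%d' % i: [i] * 7 for i in range(12)}  # series sN averages N/day
    limited = self._limit_data(data)
    sorted(limited)
    # ['other', 's10', 's11', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9']
    limited['other']
    # [1, 1, 1, 1, 1, 1, 1]  element-wise sum of the dropped s0 and s1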
Generate a downloads graph; append it to ``self._graphs``. :param name: HTML name of the graph, also used in ``self.GRAPH_KEYS`` :type name: str :param title: human-readable title for the graph :type title: str :param stats_data: data dict from ``self._stats`` :type stats_data: dict :param y_name: Y axis metric name :type y_name: str def _generate_graph(self, name, title, stats_data, y_name): """ Generate a downloads graph; append it to ``self._graphs``. :param name: HTML name of the graph, also used in ``self.GRAPH_KEYS`` :type name: str :param title: human-readable title for the graph :type title: str :param stats_data: data dict from ``self._stats`` :type stats_data: dict :param y_name: Y axis metric name :type y_name: str """ logger.debug('Generating chart data for %s graph', name) orig_data, labels = self._data_dict_to_bokeh_chart_data(stats_data) data = self._limit_data(orig_data) logger.debug('Generating %s graph', name) script, div = FancyAreaGraph( name, '%s %s' % (self.project_name, title), data, labels, y_name).generate_graph() logger.debug('%s graph generated', name) self._graphs[name] = { 'title': title, 'script': script, 'div': div, 'raw_data': stats_data }
Generate download badges. Append them to ``self._badges``. def _generate_badges(self): """ Generate download badges. Append them to ``self._badges``. """ daycount = self._stats.downloads_per_day day = self._generate_badge('Downloads', '%d/day' % daycount) self._badges['per-day'] = day weekcount = self._stats.downloads_per_week if weekcount is None: # we don't have enough data for week (or month) return week = self._generate_badge('Downloads', '%d/week' % weekcount) self._badges['per-week'] = week monthcount = self._stats.downloads_per_month if monthcount is None: # we don't have enough data for month return month = self._generate_badge('Downloads', '%d/month' % monthcount) self._badges['per-month'] = month
Generate SVG for one badge via shields.io.

:param subject: subject; left-hand side of badge
:type subject: str
:param status: status; right-hand side of badge
:type status: str
:return: badge SVG
:rtype: str

def _generate_badge(self, subject, status):
    """
    Generate SVG for one badge via shields.io.

    :param subject: subject; left-hand side of badge
    :type subject: str
    :param status: status; right-hand side of badge
    :type status: str
    :return: badge SVG
    :rtype: str
    """
    url = 'https://img.shields.io/badge/%s-%s-brightgreen.svg' \
          '?style=flat&maxAge=3600' % (subject, status)
    logger.debug("Getting badge for %s => %s (%s)", subject, status, url)
    res = requests.get(url)
    if res.status_code != 200:
        raise Exception("Error: got status {0} for shields.io badge: "
                        "{1}".format(res.status_code, res.text))
    logger.debug('Got %d character response from shields.io',
                 len(res.text))
    return res.text
Generate all output types and write to disk. def generate(self): """ Generate all output types and write to disk. """ logger.info('Generating graphs') self._generate_graph( 'by-version', 'Downloads by Version', self._stats.per_version_data, 'Version' ) self._generate_graph( 'by-file-type', 'Downloads by File Type', self._stats.per_file_type_data, 'File Type' ) self._generate_graph( 'by-installer', 'Downloads by Installer', self._stats.per_installer_data, 'Installer' ) self._generate_graph( 'by-implementation', 'Downloads by Python Implementation/Version', self._stats.per_implementation_data, 'Implementation/Version' ) self._generate_graph( 'by-system', 'Downloads by System Type', self._stats.per_system_data, 'System' ) self._generate_graph( 'by-country', 'Downloads by Country', self._stats.per_country_data, 'Country' ) self._generate_graph( 'by-distro', 'Downloads by Distro', self._stats.per_distro_data, 'Distro' ) self._generate_badges() logger.info('Generating HTML') html = self._generate_html() html_path = os.path.join(self.output_dir, 'index.html') with open(html_path, 'wb') as fh: fh.write(html.encode('utf-8')) logger.info('HTML report written to %s', html_path) logger.info('Writing SVG badges') for name, svg in self._badges.items(): path = os.path.join(self.output_dir, '%s.svg' % name) with open(path, 'w') as fh: fh.write(svg) logger.info('%s badge written to: %s', name, path)
Replaces format style phrases (listed in the dt_exps dictionary)
with this datetime instance's information.

.. code :: python

    reusables.datetime_format("Hey, it's {month-full} already!")
    "Hey, it's March already!"

:param desired_format: string to add datetime details to
:param datetime_instance: datetime.datetime instance, defaults to 'now'
:param args: additional args to pass to str.format
:param kwargs: additional kwargs to pass to str format
:return: formatted string

def datetime_format(desired_format, datetime_instance=None, *args, **kwargs):
    """
    Replaces format style phrases (listed in the dt_exps dictionary)
    with this datetime instance's information.

    .. code :: python

        reusables.datetime_format("Hey, it's {month-full} already!")
        "Hey, it's March already!"

    :param desired_format: string to add datetime details to
    :param datetime_instance: datetime.datetime instance, defaults to 'now'
    :param args: additional args to pass to str.format
    :param kwargs: additional kwargs to pass to str format
    :return: formatted string
    """
    for strf, exp in datetime_regex.datetime.format.items():
        desired_format = exp.sub(strf, desired_format)
    if not datetime_instance:
        datetime_instance = now()
    return datetime_instance.strftime(desired_format.format(*args, **kwargs))
Create a DateTime object from an ISO string

.. code :: python

    reusables.datetime_from_iso('2017-03-10T12:56:55.031863')
    datetime.datetime(2017, 3, 10, 12, 56, 55, 31863)

:param iso_string: string of an ISO datetime
:return: DateTime object

def datetime_from_iso(iso_string):
    """
    Create a DateTime object from an ISO string

    .. code :: python

        reusables.datetime_from_iso('2017-03-10T12:56:55.031863')
        datetime.datetime(2017, 3, 10, 12, 56, 55, 31863)

    :param iso_string: string of an ISO datetime
    :return: DateTime object
    """
    try:
        assert datetime_regex.datetime.datetime.match(iso_string).groups()[0]
    except (ValueError, AssertionError, IndexError, AttributeError):
        raise TypeError("String is not in ISO format")
    try:
        return datetime.datetime.strptime(iso_string,
                                          "%Y-%m-%dT%H:%M:%S.%f")
    except ValueError:
        return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
Get a current DateTime object. By default the time is local.

.. code:: python

    reusables.now()
    # DateTime(2016, 12, 8, 22, 5, 2, 517000)

    reusables.now().format("It's {24-hour}:{min}")
    # "It's 22:05"

:param utc: bool, default False, UTC time not local
:param tz: TimeZone as specified by the datetime module (ignored if
  utc is True)
:return: reusables.DateTime

def now(utc=False, tz=None):
    """
    Get a current DateTime object. By default the time is local.

    .. code:: python

        reusables.now()
        # DateTime(2016, 12, 8, 22, 5, 2, 517000)

        reusables.now().format("It's {24-hour}:{min}")
        # "It's 22:05"

    :param utc: bool, default False, UTC time not local
    :param tz: TimeZone as specified by the datetime module (ignored if
      utc is True)
    :return: reusables.DateTime
    """
    return datetime.datetime.utcnow() if utc else datetime.datetime.now(tz=tz)
Cross platform compatible subprocess with CompletedProcess return.

No formatting or encoding is performed on the output of subprocess, so
its output will appear the same on each version / interpreter as before.

.. code:: python

    reusables.run('echo "hello world!', shell=True)
    # CPython 3.6
    # CompletedProcess(args='echo "hello world!', returncode=0,
    # stdout=b'"hello world!\\r\\n', stderr=b'')
    #
    # PyPy 5.4 (Python 2.7.10)
    # CompletedProcess(args='echo "hello world!', returncode=0L,
    # stdout='"hello world!\\r\\n')

Timeout is only usable on Python 3, as it was not implemented before
then; a NotImplementedError will be raised if it is specified on a 2.x
version of Python.

:param command: command to run, str if shell=True otherwise must be list
:param input: data to send to `communicate`
:param stdout: PIPE or None
:param stderr: PIPE or None
:param timeout: max time to wait for command to complete
:param copy_local_env: Use all current ENV vars in the subprocess as well
:param kwargs: additional arguments to pass to Popen
:return: CompletedProcess class

def run(command, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        timeout=None, copy_local_env=False, **kwargs):
    """
    Cross platform compatible subprocess with CompletedProcess return.

    No formatting or encoding is performed on the output of subprocess, so
    its output will appear the same on each version / interpreter as before.

    .. code:: python

        reusables.run('echo "hello world!', shell=True)
        # CPython 3.6
        # CompletedProcess(args='echo "hello world!', returncode=0,
        # stdout=b'"hello world!\\r\\n', stderr=b'')
        #
        # PyPy 5.4 (Python 2.7.10)
        # CompletedProcess(args='echo "hello world!', returncode=0L,
        # stdout='"hello world!\\r\\n')

    Timeout is only usable on Python 3, as it was not implemented before
    then; a NotImplementedError will be raised if it is specified on a 2.x
    version of Python.
:param command: command to run, str if shell=True otherwise must be list
:param input: data to send to `communicate`
:param stdout: PIPE or None
:param stderr: PIPE or None
:param timeout: max time to wait for command to complete
:param copy_local_env: Use all current ENV vars in the subprocess as well
:param kwargs: additional arguments to pass to Popen
:return: CompletedProcess class
"""
if copy_local_env:
    # Copy local env first and overwrite with anything manually specified;
    # pop 'env' out of kwargs so it is not passed to subprocess twice
    env = os.environ.copy()
    env.update(kwargs.pop('env', {}))
else:
    env = kwargs.pop('env', None)

if sys.version_info >= (3, 5):
    return subprocess.run(command, input=input, stdout=stdout,
                          stderr=stderr, timeout=timeout, env=env,
                          **kwargs)

# Created here instead of root level as it should never need to be
# manually created or referenced
class CompletedProcess(object):
    """A backwards compatible near clone of subprocess.CompletedProcess"""

    def __init__(self, args, returncode, stdout=None, stderr=None):
        self.args = args
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

    def __repr__(self):
        args = ['args={0!r}'.format(self.args),
                'returncode={0!r}'.format(self.returncode),
                'stdout={0!r}'.format(self.stdout) if self.stdout else '',
                'stderr={0!r}'.format(self.stderr) if self.stderr else '']
        return "{0}({1})".format(type(self).__name__,
                                 ', '.join(filter(None, args)))

    def check_returncode(self):
        if self.returncode:
            if python_version < (2, 7):
                raise subprocess.CalledProcessError(self.returncode,
                                                    self.args)
            raise subprocess.CalledProcessError(self.returncode,
                                                self.args,
                                                self.stdout)

proc = subprocess.Popen(command, stdout=stdout, stderr=stderr,
                        env=env, **kwargs)
if PY3:
    out, err = proc.communicate(input=input, timeout=timeout)
else:
    if timeout:
        raise NotImplementedError("Timeout is only available on Python 3")
    out, err = proc.communicate(input=input)
return CompletedProcess(command, proc.returncode, out, err)
Run a function against each item of an iterable in a threaded or
multiprocessing pool.

.. code: python

    def func(a):
        return a + a

    reusables.run_in_pool(func, [1, 2, 3, 4, 5])
    # [2, 4, 6, 8, 10]

:param target: function to run
:param iterable: positional arg to pass to function
:param threaded: Threaded if True multiprocessed if False
:param processes: Number of workers
:param asynchronous: will do map_async if True
:param target_kwargs: Keyword arguments to set on the function as a partial
:return: pool results

def run_in_pool(target, iterable, threaded=True, processes=4,
                asynchronous=False, target_kwargs=None):
    """
    Run a function against each item of an iterable in a threaded or
    multiprocessing pool.

    .. code: python

        def func(a):
            return a + a

        reusables.run_in_pool(func, [1, 2, 3, 4, 5])
        # [2, 4, 6, 8, 10]

    :param target: function to run
    :param iterable: positional arg to pass to function
    :param threaded: Threaded if True multiprocessed if False
    :param processes: Number of workers
    :param asynchronous: will do map_async if True
    :param target_kwargs: Keyword arguments to set on the function as a partial
    :return: pool results
    """
    my_pool = pool.ThreadPool if threaded else pool.Pool

    if target_kwargs:
        target = partial(target, **target_kwargs)

    p = my_pool(processes)

    try:
        results = (p.map_async(target, iterable) if asynchronous
                   else p.map(target, iterable))
    finally:
        p.close()
        p.join()

    return results
View a dictionary as a tree. def tree_view(dictionary, level=0, sep="| "): """ View a dictionary as a tree. """ return "".join(["{0}{1}\n{2}".format(sep * level, k, tree_view(v, level + 1, sep=sep) if isinstance(v, dict) else "") for k, v in dictionary.items()])
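For example (dict ordering follows insertion order on modern Pythons):

.. code:: python

    print(tree_view({'a': {'b': {}, 'c': {}}, 'd': 1}))
    # a
    # | b
    # | c
    # d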
Turn the Namespace and sub Namespaces back into a native python dictionary. :param in_dict: Do not use, for self recursion :return: python dictionary of this Namespace def to_dict(self, in_dict=None): """ Turn the Namespace and sub Namespaces back into a native python dictionary. :param in_dict: Do not use, for self recursion :return: python dictionary of this Namespace """ in_dict = in_dict if in_dict else self out_dict = dict() for k, v in in_dict.items(): if isinstance(v, Namespace): v = v.to_dict() out_dict[k] = v return out_dict
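A minimal sketch, assuming the surrounding Namespace class accepts keyword arguments the way a dict does:

.. code:: python

    ns = Namespace(name='example', details=Namespace(version='1.0'))
    ns.to_dict()
    # {'name': 'example', 'details': {'version': '1.0'}}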
Return value of key as a list

:param item: key of value to transform
:param default: value to return if item does not exist
:param spliter: character to split str on
:param strip: strip whitespace (and surrounding brackets) from items
:param mod: function to map against list
:return: list of items

def list(self, item, default=None, spliter=",", strip=True, mod=None):
    """
    Return value of key as a list

    :param item: key of value to transform
    :param default: value to return if item does not exist
    :param spliter: character to split str on
    :param strip: strip whitespace (and surrounding brackets) from items
    :param mod: function to map against list
    :return: list of items
    """
    try:
        item = self.__getattr__(item)
    except AttributeError as err:
        if default is not None:
            return default
        raise err
    if strip:
        item = item.lstrip("[").rstrip("]")
    out = [x.strip() if strip else x for x in item.split(spliter)]
    if mod:
        return list(map(mod, out))
    return out
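A usage sketch; the config object and its raw string value here are hypothetical:

.. code:: python

    config = ConfigNamespace(ports="[8080, 8081, 8082]")
    config.list("ports", mod=int)
    # [8080, 8081, 8082]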
Download a given URL to either file or memory

:param url: Full url (with protocol) of path to download
:param save_to_file: boolean if it should be saved to file or not
:param save_dir: location of saved file, default is current working dir
:param filename: filename to save as
:param block_size: download chunk size
:param overwrite: overwrite file if it already exists
:param quiet: boolean to turn off logging for function
:return: save location (or content if not saved to file); False if the
  target file already exists and overwrite is not set

def download(url, save_to_file=True, save_dir=".", filename=None,
             block_size=64000, overwrite=False, quiet=False):
    """
    Download a given URL to either file or memory

    :param url: Full url (with protocol) of path to download
    :param save_to_file: boolean if it should be saved to file or not
    :param save_dir: location of saved file, default is current working dir
    :param filename: filename to save as
    :param block_size: download chunk size
    :param overwrite: overwrite file if it already exists
    :param quiet: boolean to turn off logging for function
    :return: save location (or content if not saved to file); False if the
      target file already exists and overwrite is not set
    """
    if save_to_file:
        if not filename:
            filename = safe_filename(url.split('/')[-1])
        if not filename:
            filename = "downloaded_at_{}.file".format(time.time())
        save_location = os.path.abspath(os.path.join(save_dir, filename))
        if os.path.exists(save_location) and not overwrite:
            logger.error("File {0} already exists".format(save_location))
            return False
    else:
        save_location = "memory"

    try:
        request = urlopen(url)
    except ValueError as err:
        if not quiet and "unknown url type" in str(err):
            logger.error("Please make sure URL is formatted correctly and"
                         " starts with http:// or other protocol")
        raise err
    except Exception as err:
        if not quiet:
            logger.error("Could not download {0} - {1}".format(url, err))
        raise err

    try:
        kb_size = int(request.headers["Content-Length"]) / 1024
    except Exception as err:
        if not quiet:
            logger.debug("Could not determine file size - {0}".format(err))
        file_size = "(unknown size)"
    else:
        file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999
                                           else (kb_size / 1024, "MB"))

    if not quiet:
        logger.info("Downloading {0} {1} to {2}".format(url, file_size,
                                                        save_location))

    if save_to_file:
        with open(save_location, "wb") as f:
            while True:
                buffer = request.read(block_size)
                if not buffer:
                    break
                f.write(buffer)
        return save_location
    else:
        return request.read()
Provide a list of IP addresses, uses `socket.getaddrinfo` .. code:: python reusables.url_to_ips("example.com", ipv6=True) # ['2606:2800:220:1:248:1893:25c8:1946'] :param url: hostname to resolve to IP addresses :param port: port to send to getaddrinfo :param ipv6: Return IPv6 address if True, otherwise IPv4 :param connect_type: defaults to STREAM connection, can be 0 for all :param proto: defaults to TCP, can be 0 for all :param flags: additional flags to pass :return: list of resolved IPs def url_to_ips(url, port=None, ipv6=False, connect_type=socket.SOCK_STREAM, proto=socket.IPPROTO_TCP, flags=0): """ Provide a list of IP addresses, uses `socket.getaddrinfo` .. code:: python reusables.url_to_ips("example.com", ipv6=True) # ['2606:2800:220:1:248:1893:25c8:1946'] :param url: hostname to resolve to IP addresses :param port: port to send to getaddrinfo :param ipv6: Return IPv6 address if True, otherwise IPv4 :param connect_type: defaults to STREAM connection, can be 0 for all :param proto: defaults to TCP, can be 0 for all :param flags: additional flags to pass :return: list of resolved IPs """ try: results = socket.getaddrinfo(url, port, (socket.AF_INET if not ipv6 else socket.AF_INET6), connect_type, proto, flags) except socket.gaierror: logger.exception("Could not resolve hostname") return [] return list(set([result[-1][0] for result in results]))
Resolve a hostname based off an IP address. This is very limited and will probably not return any results if it is a shared IP address or an address with improperly setup DNS records. .. code:: python reusables.ip_to_url('93.184.216.34') # example.com # None reusables.ip_to_url('8.8.8.8') # 'google-public-dns-a.google.com' :param ip_addr: IP address to resolve to hostname :return: string of hostname or None def ip_to_url(ip_addr): """ Resolve a hostname based off an IP address. This is very limited and will probably not return any results if it is a shared IP address or an address with improperly setup DNS records. .. code:: python reusables.ip_to_url('93.184.216.34') # example.com # None reusables.ip_to_url('8.8.8.8') # 'google-public-dns-a.google.com' :param ip_addr: IP address to resolve to hostname :return: string of hostname or None """ try: return socket.gethostbyaddr(ip_addr)[0] except (socket.gaierror, socket.herror): logger.exception("Could not resolve hostname")
Create a background thread for httpd and serve 'forever'

def start(self):
    """Create a background thread for httpd and serve 'forever'"""
    self._process = threading.Thread(target=self._background_runner)
    self._process.start()
Returns a configured stream handler to add to a logger.

:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use
:return: stream handler

def get_stream_handler(stream=sys.stderr, level=logging.INFO,
                       log_format=log_formats.easy_read):
    """
    Returns a configured stream handler to add to a logger.

    :param stream: which stream to use, defaults to sys.stderr
    :param level: logging level to set handler at
    :param log_format: formatter to use
    :return: stream handler
    """
    sh = logging.StreamHandler(stream)
    sh.setLevel(level)
    sh.setFormatter(logging.Formatter(log_format))
    return sh
Set up a file handler to add to a logger. :param file_path: file to write the log to, defaults to out.log :param level: logging level to set handler at :param log_format: formatter to use :param handler: logging handler to use, defaults to FileHandler :param handler_kwargs: options to pass to the handler :return: handler def get_file_handler(file_path="out.log", level=logging.INFO, log_format=log_formats.easy_read, handler=logging.FileHandler, **handler_kwargs): """ Set up a file handler to add to a logger. :param file_path: file to write the log to, defaults to out.log :param level: logging level to set handler at :param log_format: formatter to use :param handler: logging handler to use, defaults to FileHandler :param handler_kwargs: options to pass to the handler :return: handler """ fh = handler(file_path, **handler_kwargs) fh.setLevel(level) fh.setFormatter(logging.Formatter(log_format)) return fh
Grabs the specified logger and adds wanted handlers to it. Will default to adding a stream handler. :param module_name: logger name to use :param level: logging level to set logger at :param stream: stream to log to, or None :param file_path: file path to log to, or None :param log_format: format to set the handlers to use :param suppress_warning: add a NullHandler if no other handler is specified :return: configured logger def setup_logger(module_name=None, level=logging.INFO, stream=sys.stderr, file_path=None, log_format=log_formats.easy_read, suppress_warning=True): """ Grabs the specified logger and adds wanted handlers to it. Will default to adding a stream handler. :param module_name: logger name to use :param level: logging level to set logger at :param stream: stream to log to, or None :param file_path: file path to log to, or None :param log_format: format to set the handlers to use :param suppress_warning: add a NullHandler if no other handler is specified :return: configured logger """ new_logger = logging.getLogger(module_name) if stream: new_logger.addHandler(get_stream_handler(stream, level, log_format)) elif not file_path and suppress_warning and not new_logger.handlers: new_logger.addHandler(logging.NullHandler()) if file_path: new_logger.addHandler(get_file_handler(file_path, level, log_format)) if level > 0: new_logger.setLevel(level) return new_logger
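A quick usage sketch; the logger name and file path are arbitrary:

.. code:: python

    import logging

    # stream handler to stderr plus a file handler, both at DEBUG
    log = setup_logger('my_app', level=logging.DEBUG,
                       file_path='my_app.log')
    log.info('App started')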
Adds a newly created stream handler to the specified logger

:param logger: logging name or object to modify, defaults to root logger
:param stream: which stream to use, defaults to sys.stderr
:param level: logging level to set handler at
:param log_format: formatter to use

def add_stream_handler(logger=None, stream=sys.stderr, level=logging.INFO,
                       log_format=log_formats.easy_read):
    """
    Adds a newly created stream handler to the specified logger

    :param logger: logging name or object to modify, defaults to root logger
    :param stream: which stream to use, defaults to sys.stderr
    :param level: logging level to set handler at
    :param log_format: formatter to use
    """
    if not isinstance(logger, logging.Logger):
        logger = logging.getLogger(logger)

    logger.addHandler(get_stream_handler(stream, level, log_format))
Adds a newly created file handler to the specified logger

:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: formatter to use

def add_file_handler(logger=None, file_path="out.log", level=logging.INFO,
                     log_format=log_formats.easy_read):
    """
    Adds a newly created file handler to the specified logger

    :param logger: logging name or object to modify, defaults to root logger
    :param file_path: path to file to log to
    :param level: logging level to set handler at
    :param log_format: formatter to use
    """
    if not isinstance(logger, logging.Logger):
        logger = logging.getLogger(logger)

    logger.addHandler(get_file_handler(file_path, level, log_format))
Adds a rotating file handler to the specified logger. :param logger: logging name or object to modify, defaults to root logger :param file_path: path to file to log to :param level: logging level to set handler at :param log_format: log formatter :param max_bytes: Max file size in bytes before rotating :param backup_count: Number of backup files :param handler_kwargs: options to pass to the handler def add_rotating_file_handler(logger=None, file_path="out.log", level=logging.INFO, log_format=log_formats.easy_read, max_bytes=10*sizes.mb, backup_count=5, **handler_kwargs): """ Adds a rotating file handler to the specified logger. :param logger: logging name or object to modify, defaults to root logger :param file_path: path to file to log to :param level: logging level to set handler at :param log_format: log formatter :param max_bytes: Max file size in bytes before rotating :param backup_count: Number of backup files :param handler_kwargs: options to pass to the handler """ if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) logger.addHandler(get_file_handler(file_path, level, log_format, handler=RotatingFileHandler, maxBytes=max_bytes, backupCount=backup_count, **handler_kwargs))
Adds a timed rotating file handler to the specified logger. Defaults to
weekly rotation, with 5 backups.

:param logger: logging name or object to modify, defaults to root logger
:param file_path: path to file to log to
:param level: logging level to set handler at
:param log_format: log formatter
:param when: type of interval to rotate on, as accepted by
  TimedRotatingFileHandler (e.g. 's', 'h', 'd', 'w0'-'w6', 'midnight')
:param interval: number of `when` units between rotations
:param backup_count: Number of backup files
:param handler_kwargs: options to pass to the handler

def add_timed_rotating_file_handler(logger=None, file_path="out.log",
                                    level=logging.INFO,
                                    log_format=log_formats.easy_read,
                                    when='w0', interval=1, backup_count=5,
                                    **handler_kwargs):
    """
    Adds a timed rotating file handler to the specified logger. Defaults to
    weekly rotation, with 5 backups.

    :param logger: logging name or object to modify, defaults to root logger
    :param file_path: path to file to log to
    :param level: logging level to set handler at
    :param log_format: log formatter
    :param when: type of interval to rotate on, as accepted by
      TimedRotatingFileHandler (e.g. 's', 'h', 'd', 'w0'-'w6', 'midnight')
    :param interval: number of `when` units between rotations
    :param backup_count: Number of backup files
    :param handler_kwargs: options to pass to the handler
    """
    if not isinstance(logger, logging.Logger):
        logger = logging.getLogger(logger)

    logger.addHandler(get_file_handler(file_path, level, log_format,
                                       handler=TimedRotatingFileHandler,
                                       when=when, interval=interval,
                                       backupCount=backup_count,
                                       **handler_kwargs))
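For example, to rotate at midnight and keep a week of backups (the logger name and path here are arbitrary):

.. code:: python

    add_timed_rotating_file_handler('my_app', file_path='my_app.log',
                                    when='midnight', backup_count=7)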
Remove only stream handlers from the specified logger :param logger: logging name or object to modify, defaults to root logger def remove_stream_handlers(logger=None): """ Remove only stream handlers from the specified logger :param logger: logging name or object to modify, defaults to root logger """ if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) new_handlers = [] for handler in logger.handlers: # FileHandler is a subclass of StreamHandler so # 'if not a StreamHandler' does not work if (isinstance(handler, logging.FileHandler) or isinstance(handler, logging.NullHandler) or (isinstance(handler, logging.Handler) and not isinstance(handler, logging.StreamHandler))): new_handlers.append(handler) logger.handlers = new_handlers
Remove only file handlers from the specified logger. Will go through and close each handler for safety. :param logger: logging name or object to modify, defaults to root logger def remove_file_handlers(logger=None): """ Remove only file handlers from the specified logger. Will go through and close each handler for safety. :param logger: logging name or object to modify, defaults to root logger """ if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) new_handlers = [] for handler in logger.handlers: if isinstance(handler, logging.FileHandler): handler.close() else: new_handlers.append(handler) logger.handlers = new_handlers
Safely remove all handlers from the logger :param logger: logging name or object to modify, defaults to root logger def remove_all_handlers(logger=None): """ Safely remove all handlers from the logger :param logger: logging name or object to modify, defaults to root logger """ if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) remove_file_handlers(logger) logger.handlers = []
Go through the logger and handlers and update their levels to the one specified. :param logger: logging name or object to modify, defaults to root logger :param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error) def change_logger_levels(logger=None, level=logging.DEBUG): """ Go through the logger and handlers and update their levels to the one specified. :param logger: logging name or object to modify, defaults to root logger :param level: logging level to set at (10=Debug, 20=Info, 30=Warn, 40=Error) """ if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) logger.setLevel(level) for handler in logger.handlers: handler.level = level
Find the names of all loggers currently registered :param hide_children: only return top level logger names :param hide_reusables: hide the reusables loggers :return: list of logger names def get_registered_loggers(hide_children=False, hide_reusables=False): """ Find the names of all loggers currently registered :param hide_children: only return top level logger names :param hide_reusables: hide the reusables loggers :return: list of logger names """ return [logger for logger in logging.Logger.manager.loggerDict.keys() if not (hide_reusables and "reusables" in logger) and not (hide_children and "." in logger)]
Wrapper. Makes sure the function's return value has not been returned
before; if it has, the function is run again with the same inputs.

.. code: python

    import reusables
    import random

    @reusables.unique(max_retries=100)
    def poor_uuid():
        return random.randint(0, 10)

    print([poor_uuid() for _ in range(10)])
    # [8, 9, 6, 3, 0, 7, 2, 5, 4, 10]

    print([poor_uuid() for _ in range(100)])
    # Exception: No result was unique

Message format options: {func} {args} {kwargs}

:param max_retries: int of number of retries to attempt before failing
:param wait: float of seconds to wait between each try, defaults to 0
:param exception: Exception type to raise
:param error_text: text of the exception
:param alt_return: if specified, an exception is not raised on failure,
  instead the provided value of any type will be returned

def unique(max_retries=10, wait=0, alt_return="-no_alt_return-",
           exception=Exception, error_text=None):
    """
    Wrapper. Makes sure the function's return value has not been returned
    before; if it has, the function is run again with the same inputs.

    .. code: python

        import reusables
        import random

        @reusables.unique(max_retries=100)
        def poor_uuid():
            return random.randint(0, 10)

        print([poor_uuid() for _ in range(10)])
        # [8, 9, 6, 3, 0, 7, 2, 5, 4, 10]

        print([poor_uuid() for _ in range(100)])
        # Exception: No result was unique

    Message format options: {func} {args} {kwargs}

    :param max_retries: int of number of retries to attempt before failing
    :param wait: float of seconds to wait between each try, defaults to 0
    :param exception: Exception type to raise
    :param error_text: text of the exception
    :param alt_return: if specified, an exception is not raised on failure,
      instead the provided value of any type will be returned
    """
    def func_wrap(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            msg = (error_text if error_text else
                   "No result was unique for function '{func}'")
            if not error_text:
                msg = _add_args(msg, *args, **kwargs)
            for i in range(max_retries):
                value = func(*args, **kwargs)
                if value not in unique_cache[func.__name__]:
                    unique_cache[func.__name__].append(value)
                    return value
                if wait:
                    time.sleep(wait)
            else:
                if alt_return != "-no_alt_return-":
                    return alt_return
                raise exception(msg.format(func=func.__name__,
                                           args=args, kwargs=kwargs))
        return wrapper
    return func_wrap
Wrapper. Simple wrapper to make sure a function is only run once at a time. .. code: python import reusables import time def func_one(_): time.sleep(5) @reusables.lock_it() def func_two(_): time.sleep(5) @reusables.time_it(message="test_1 took {0:.2f} seconds") def test_1(): reusables.run_in_pool(func_one, (1, 2, 3), threaded=True) @reusables.time_it(message="test_2 took {0:.2f} seconds") def test_2(): reusables.run_in_pool(func_two, (1, 2, 3), threaded=True) test_1() test_2() # test_1 took 5.04 seconds # test_2 took 15.07 seconds :param lock: Which lock to use, uses unique default def lock_it(lock=g_lock): """ Wrapper. Simple wrapper to make sure a function is only run once at a time. .. code: python import reusables import time def func_one(_): time.sleep(5) @reusables.lock_it() def func_two(_): time.sleep(5) @reusables.time_it(message="test_1 took {0:.2f} seconds") def test_1(): reusables.run_in_pool(func_one, (1, 2, 3), threaded=True) @reusables.time_it(message="test_2 took {0:.2f} seconds") def test_2(): reusables.run_in_pool(func_two, (1, 2, 3), threaded=True) test_1() test_2() # test_1 took 5.04 seconds # test_2 took 15.07 seconds :param lock: Which lock to use, uses unique default """ def func_wrapper(func): @wraps(func) def wrapper(*args, **kwargs): with lock: return func(*args, **kwargs) return wrapper return func_wrapper
Wrapper. Times the execution of the function and prints the result. If
log is true, make sure to set the logging level of 'reusables' to INFO
level or lower.

.. code:: python

    import time
    import reusables

    reusables.add_stream_handler('reusables')

    @reusables.time_it(log=True, message="{seconds:.2f} seconds")
    def test_time(length):
        time.sleep(length)
        return "slept {0}".format(length)

    result = test_time(5)
    # 2016-11-09 16:59:39,935 - reusables.wrappers  INFO     5.01 seconds

    print(result)
    # slept 5

Message format options: {func} {seconds} {args} {kwargs}

:param log: log as INFO level instead of printing
:param message: string to format with total time as the only input
:param append: list to append the timing result to

def time_it(log=None, message=None, append=None):
    """
    Wrapper. Times the execution of the function and prints the result. If
    log is true, make sure to set the logging level of 'reusables' to INFO
    level or lower.

    .. code:: python

        import time
        import reusables

        reusables.add_stream_handler('reusables')

        @reusables.time_it(log=True, message="{seconds:.2f} seconds")
        def test_time(length):
            time.sleep(length)
            return "slept {0}".format(length)

        result = test_time(5)
        # 2016-11-09 16:59:39,935 - reusables.wrappers  INFO     5.01 seconds

        print(result)
        # slept 5

    Message format options: {func} {seconds} {args} {kwargs}

    :param log: log as INFO level instead of printing
    :param message: string to format with total time as the only input
    :param append: list to append the timing result to
    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Can't use nonlocal in 2.x
            msg = (message if message else
                   "Function '{func}' took a total of {seconds} seconds")
            if not message:
                msg = _add_args(msg, *args, **kwargs)

            time_func = (time.perf_counter if python_version >= (3, 3)
                         else time.time)
            start_time = time_func()
            try:
                return func(*args, **kwargs)
            finally:
                total_time = time_func() - start_time
                time_string = msg.format(func=func.__name__,
                                         seconds=total_time,
                                         args=args, kwargs=kwargs)
                if log:
                    my_logger = logging.getLogger(log) if isinstance(log, str)\
                        else logger
                    my_logger.info(time_string)
                else:
                    print(time_string)
                if isinstance(append, list):
                    append.append(total_time)
        return wrapper
    return func_wrapper
Wrapper. Instead of returning the result of the function, add it to a
queue.

.. code: python

    import reusables
    import queue

    my_queue = queue.Queue()

    @reusables.queue_it(my_queue)
    def func(a):
        return a

    func(10)

    print(my_queue.get())
    # 10

:param queue: Queue to add result into
:param put_args: additional keyword arguments to pass to `queue.put`

def queue_it(queue=g_queue, **put_args):
    """
    Wrapper. Instead of returning the result of the function, add it to a
    queue.

    .. code: python

        import reusables
        import queue

        my_queue = queue.Queue()

        @reusables.queue_it(my_queue)
        def func(a):
            return a

        func(10)

        print(my_queue.get())
        # 10

    :param queue: Queue to add result into
    :param put_args: additional keyword arguments to pass to `queue.put`
    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            queue.put(func(*args, **kwargs), **put_args)
        return wrapper
    return func_wrapper
Wrapper. Log the traceback of any exceptions raised, then re-raise the
original exception.

.. code :: python

    @reusables.log_exception()
    def test():
        raise Exception("Bad")

    # 2016-12-26 12:38:01,381 - reusables   ERROR  Exception in test - Bad
    # Traceback (most recent call last):
    #     File "<input>", line 1, in <module>
    #     File "reusables\wrappers.py", line 200, in wrapper
    #         raise err
    # Exception: Bad

Message format options: {func} {err} {args} {kwargs}

:param exceptions: types of exceptions to catch
:param log: log name to use
:param message: message to use in log
:param level: logging level
:param show_traceback: include full traceback or just error message

def log_exception(log="reusables", message=None, exceptions=(Exception, ),
                  level=logging.ERROR, show_traceback=True):
    """
    Wrapper. Log the traceback of any exceptions raised, then re-raise the
    original exception.

    .. code :: python

        @reusables.log_exception()
        def test():
            raise Exception("Bad")

        # 2016-12-26 12:38:01,381 - reusables   ERROR  Exception in test - Bad
        # Traceback (most recent call last):
        #     File "<input>", line 1, in <module>
        #     File "reusables\wrappers.py", line 200, in wrapper
        #         raise err
        # Exception: Bad

    Message format options: {func} {err} {args} {kwargs}

    :param exceptions: types of exceptions to catch
    :param log: log name to use
    :param message: message to use in log
    :param level: logging level
    :param show_traceback: include full traceback or just error message
    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            msg = message if message else "Exception in '{func}': {err}"
            if not message:
                msg = _add_args(msg, *args, **kwargs)

            try:
                return func(*args, **kwargs)
            except exceptions as err:
                my_logger = (logging.getLogger(log) if isinstance(log, str)
                             else log)
                my_logger.log(level, msg.format(func=func.__name__,
                                                err=str(err),
                                                args=args, kwargs=kwargs),
                              exc_info=show_traceback)
                raise err
        return wrapper
    return func_wrapper
If the function encounters an exception, catch it, and return the
specified default or send it to a handler function instead.

.. code :: python

    def handle_error(exception, func, *args, **kwargs):
        print(f"{func.__name__} raised {exception} when called with {args}")

    @reusables.catch_it(handler=handle_error)
    def will_raise(message="Hello"):
        raise Exception(message)

:param exceptions: tuple of exceptions to catch
:param default: what to return if the exception is caught
:param handler: function to send exception, func, *args and **kwargs

def catch_it(exceptions=(Exception, ), default=None, handler=None):
    """
    If the function encounters an exception, catch it, and return the
    specified default or send it to a handler function instead.

    .. code :: python

        def handle_error(exception, func, *args, **kwargs):
            print(f"{func.__name__} raised {exception} when called with {args}")

        @reusables.catch_it(handler=handle_error)
        def will_raise(message="Hello"):
            raise Exception(message)

    :param exceptions: tuple of exceptions to catch
    :param default: what to return if the exception is caught
    :param handler: function to send exception, func, *args and **kwargs
    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exceptions as err:
                if handler:
                    return handler(err, func, *args, **kwargs)
                return default
        return wrapper
    return func_wrapper
Retry a function if an exception is raised, or if `handler` returns
False for the function's output.

Message format options: {func} {args} {kwargs}

:param exceptions: tuple of exceptions to catch
:param tries: number of tries to retry the function
:param wait: time to wait between executions in seconds
:param handler: function to check if output is valid, must return bool
:param raised_exception: default is ReusablesError
:param raised_message: message to pass to raised exception

def retry_it(exceptions=(Exception, ), tries=10, wait=0, handler=None,
             raised_exception=ReusablesError, raised_message=None):
    """
    Retry a function if an exception is raised, or if `handler` returns
    False for the function's output.

    Message format options: {func} {args} {kwargs}

    :param exceptions: tuple of exceptions to catch
    :param tries: number of tries to retry the function
    :param wait: time to wait between executions in seconds
    :param handler: function to check if output is valid, must return bool
    :param raised_exception: default is ReusablesError
    :param raised_message: message to pass to raised exception
    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            msg = (raised_message if raised_message else
                   "Max retries exceeded for function '{func}'")
            if not raised_message:
                msg = _add_args(msg, *args, **kwargs)

            def retry():
                # re-enter the decorator with one fewer try, passing every
                # option through so retries behave consistently
                if wait:
                    time.sleep(wait)
                return retry_it(exceptions=exceptions, tries=tries - 1,
                                wait=wait, handler=handler,
                                raised_exception=raised_exception,
                                raised_message=raised_message
                                )(func)(*args, **kwargs)

            def give_up():
                exc = raised_exception(msg.format(func=func.__name__,
                                                  args=args, kwargs=kwargs))
                exc.__cause__ = None
                raise exc

            try:
                result = func(*args, **kwargs)
            except exceptions:
                # compare against 0 (not plain truthiness) so the counter
                # terminates instead of recursing past zero
                if tries > 0:
                    return retry()
                if raised_exception:
                    give_up()
            else:
                if handler and not handler(result):
                    if tries > 0:
                        return retry()
                    if raised_exception:
                        give_up()
                return result
        return wrapper
    return func_wrapper
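A minimal sketch of the decorator in use; the flaky function is hypothetical:

.. code:: python

    import random
    import reusables

    @reusables.retry_it(exceptions=(ValueError,), tries=5, wait=0.1)
    def flaky():
        if random.random() < 0.5:
            raise ValueError("transient failure")
        return "ok"

    flaky()  # retried up to 5 times; raises ReusablesError if all fail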
Automatically detect archive type and extract all files to specified path. .. code:: python import os os.listdir(".") # ['test_structure.zip'] reusables.extract("test_structure.zip") os.listdir(".") # [ 'test_structure', 'test_structure.zip'] :param archive_file: path to file to extract :param path: location to extract to :param delete_on_success: Will delete the original archive if set to True :param enable_rar: include the rarfile import and extract :return: path to extracted files def extract(archive_file, path=".", delete_on_success=False, enable_rar=False): """ Automatically detect archive type and extract all files to specified path. .. code:: python import os os.listdir(".") # ['test_structure.zip'] reusables.extract("test_structure.zip") os.listdir(".") # [ 'test_structure', 'test_structure.zip'] :param archive_file: path to file to extract :param path: location to extract to :param delete_on_success: Will delete the original archive if set to True :param enable_rar: include the rarfile import and extract :return: path to extracted files """ if not os.path.exists(archive_file) or not os.path.getsize(archive_file): logger.error("File {0} unextractable".format(archive_file)) raise OSError("File does not exist or has zero size") arch = None if zipfile.is_zipfile(archive_file): logger.debug("File {0} detected as a zip file".format(archive_file)) arch = zipfile.ZipFile(archive_file) elif tarfile.is_tarfile(archive_file): logger.debug("File {0} detected as a tar file".format(archive_file)) arch = tarfile.open(archive_file) elif enable_rar: import rarfile if rarfile.is_rarfile(archive_file): logger.debug("File {0} detected as " "a rar file".format(archive_file)) arch = rarfile.RarFile(archive_file) if not arch: raise TypeError("File is not a known archive") logger.debug("Extracting files to {0}".format(path)) try: arch.extractall(path=path) finally: arch.close() if delete_on_success: logger.debug("Archive {0} will now be deleted".format(archive_file)) os.unlink(archive_file) return os.path.abspath(path)
Archive a list of files (or files inside a folder), can choose between

- zip
- tar
- gz (tar.gz, tgz)
- bz2 (tar.bz2)

.. code:: python

    reusables.archive(['reusables', '.travis.yml'],
                      name="my_archive.bz2")
    # 'C:\\Users\\Me\\Reusables\\my_archive.bz2'

:param files_to_archive: list of files and folders to archive
:param name: path and name of archive file
:param archive_type: auto-detects unless specified
:param overwrite: overwrite if archive exists
:param store: zipfile only, True will not compress files
:param depth: specify max depth for folders
:param err_non_exist: raise error if provided file does not exist
:param allow_zip_64: must be enabled for zip files larger than 2GB
:param tarfile_kwargs: extra args to pass to tarfile.open
:return: path to created archive

def archive(files_to_archive, name="archive.zip", archive_type=None,
            overwrite=False, store=False, depth=None, err_non_exist=True,
            allow_zip_64=True, **tarfile_kwargs):
    """
    Archive a list of files (or files inside a folder), can choose between

    - zip
    - tar
    - gz (tar.gz, tgz)
    - bz2 (tar.bz2)

    .. code:: python

        reusables.archive(['reusables', '.travis.yml'],
                          name="my_archive.bz2")
        # 'C:\\Users\\Me\\Reusables\\my_archive.bz2'

    :param files_to_archive: list of files and folders to archive
    :param name: path and name of archive file
    :param archive_type: auto-detects unless specified
    :param overwrite: overwrite if archive exists
    :param store: zipfile only, True will not compress files
    :param depth: specify max depth for folders
    :param err_non_exist: raise error if provided file does not exist
    :param allow_zip_64: must be enabled for zip files larger than 2GB
    :param tarfile_kwargs: extra args to pass to tarfile.open
    :return: path to created archive
    """
    if not isinstance(files_to_archive, (list, tuple)):
        files_to_archive = [files_to_archive]

    if not archive_type:
        if name.lower().endswith("zip"):
            archive_type = "zip"
        elif name.lower().endswith("gz"):
            archive_type = "gz"
        elif name.lower().endswith("z2"):
            archive_type = "bz2"
        elif name.lower().endswith("tar"):
            archive_type = "tar"
        else:
            err_msg = ("Could not determine archive "
                       "type based off {0}".format(name))
            logger.error(err_msg)
            raise ValueError(err_msg)
        logger.debug("{0} file detected for {1}".format(archive_type, name))
    elif archive_type not in ("tar", "gz", "bz2", "zip"):
        err_msg = ("archive_type must be zip, tar, gz,"
                   " or bz2, was {0}".format(archive_type))
        logger.error(err_msg)
        raise ValueError(err_msg)

    if not overwrite and os.path.exists(name):
        err_msg = "File {0} exists and overwrite not specified".format(name)
        logger.error(err_msg)
        raise OSError(err_msg)

    if archive_type == "zip":
        arch = zipfile.ZipFile(name, 'w',
                               zipfile.ZIP_STORED if store
                               else zipfile.ZIP_DEFLATED,
                               allowZip64=allow_zip_64)
        write = arch.write
    elif archive_type in ("tar", "gz", "bz2"):
        mode = archive_type if archive_type != "tar" else ""
        arch = tarfile.open(name, 'w:{0}'.format(mode), **tarfile_kwargs)
        write = arch.add
    else:
        raise ValueError("archive_type must be zip, tar, gz, or bz2")

    try:
        for file_path in files_to_archive:
            if os.path.isfile(file_path):
                write(file_path)
            elif os.path.isdir(file_path):
                for nf in find_files(file_path, abspath=False, depth=depth):
                    write(nf)
            elif err_non_exist:
                # the path is neither a file nor a directory
                raise OSError("File {0} does not exist".format(file_path))
    except (Exception, KeyboardInterrupt) as err:
        logger.exception("Could not archive {0}".format(files_to_archive))
        try:
            arch.close()
        finally:
            os.unlink(name)
        raise err
    else:
        arch.close()

    return os.path.abspath(name)
Save a matrix (list of lists) to a file as a CSV .. code:: python my_list = [["Name", "Location"], ["Chris", "South Pole"], ["Harry", "Depth of Winter"], ["Bob", "Skull"]] reusables.list_to_csv(my_list, "example.csv") example.csv .. code:: csv "Name","Location" "Chris","South Pole" "Harry","Depth of Winter" "Bob","Skull" :param my_list: list of lists to save to CSV :param csv_file: File to save data to def list_to_csv(my_list, csv_file): """ Save a matrix (list of lists) to a file as a CSV .. code:: python my_list = [["Name", "Location"], ["Chris", "South Pole"], ["Harry", "Depth of Winter"], ["Bob", "Skull"]] reusables.list_to_csv(my_list, "example.csv") example.csv .. code:: csv "Name","Location" "Chris","South Pole" "Harry","Depth of Winter" "Bob","Skull" :param my_list: list of lists to save to CSV :param csv_file: File to save data to """ if PY3: csv_handler = open(csv_file, 'w', newline='') else: csv_handler = open(csv_file, 'wb') try: writer = csv.writer(csv_handler, delimiter=',', quoting=csv.QUOTE_ALL) writer.writerows(my_list) finally: csv_handler.close()
Open and transform a CSV file into a matrix (list of lists). .. code:: python reusables.csv_to_list("example.csv") # [['Name', 'Location'], # ['Chris', 'South Pole'], # ['Harry', 'Depth of Winter'], # ['Bob', 'Skull']] :param csv_file: Path to CSV file as str :return: list def csv_to_list(csv_file): """ Open and transform a CSV file into a matrix (list of lists). .. code:: python reusables.csv_to_list("example.csv") # [['Name', 'Location'], # ['Chris', 'South Pole'], # ['Harry', 'Depth of Winter'], # ['Bob', 'Skull']] :param csv_file: Path to CSV file as str :return: list """ with open(csv_file, 'r' if PY3 else 'rb') as f: return list(csv.reader(f))