Get multi-line string parameter from ``configparser`` ``.INI`` file, as a
list of strings (one per line, ignoring blank lines).

Args:
    config: :class:`ConfigParser` object
    section: section name within config file
    param: name of parameter within section
    default: default value

Returns:
    parameter value, or default
def get_config_parameter_multiline(config: ConfigParser,
                                   section: str,
                                   param: str,
                                   default: List[str]) -> List[str]:
    try:
        multiline = config.get(section, param)
        lines = [x.strip() for x in multiline.splitlines()]
        return [line for line in lines if line]
    except (TypeError, ValueError, NoOptionError):
        log.warning(
            "Configuration variable {} not found or improper in section "
            "[{}]; using default of {!r}",
            param, section, default)
        return default
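A usage sketch (not from the source; the ``[server]`` section and ``hosts``
parameter are hypothetical):

from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[server]
hosts =
    alpha.example.com
    beta.example.com
""")
# Blank lines in the multi-line value are ignored:
print(get_config_parameter_multiline(config, "server", "hosts", default=[]))
# ['alpha.example.com', 'beta.example.com']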
Wait for up to ``timeout_s`` for the child process to finish.

Args:
    timeout_s: maximum time to wait, or ``None`` to wait forever

Returns:
    process return code; or ``0`` if it wasn't running, or ``1`` if it
    managed to exit without a return code

Raises:
    subprocess.TimeoutExpired: if the process continues to run
def wait(self, timeout_s: float = None) -> int:
    if not self.running:
        return 0
    retcode = self.process.wait(timeout=timeout_s)
    # We won't get further unless the process has stopped.
    if retcode is None:
        self.error("Subprocess finished, but return code was None")
        retcode = 1  # we're promising to return an int
    elif retcode == 0:
        self.info("Subprocess finished cleanly (return code 0).")
    else:
        self.error(
            "Subprocess finished, but FAILED (return code {}). "
            "Logs were: {} (stdout), {} (stderr)".format(
                retcode,
                self.details.logfile_out,
                self.details.logfile_err))
    self.running = False
    return retcode
A test service. Writes to a file occasionally, so you can see it's
running.

Args:
    filename: file to write data to periodically
    period_ms: period, in milliseconds
def test_service(self,
                 filename: str = TEST_FILENAME,
                 period_ms: int = TEST_PERIOD_MS) -> None:
    # A test service. This works! (As long as you can write to the file.)

    def write(msg):
        f.write('{}: {}\n'.format(arrow.now(), msg))
        f.flush()

    self.info("Starting test service; writing data periodically to "
              "{}".format(filename))
    f = open(filename, 'a')  # open for append
    write('STARTING')
    retcode = None
    # If the stop event hasn't been fired, keep looping.
    while retcode != win32event.WAIT_OBJECT_0:
        write('Test data; will now wait {} ms'.format(period_ms))
        # Block for a while, and listen for a stop event.
        retcode = win32event.WaitForSingleObject(self.h_stop_event,
                                                 period_ms)
    write('SHUTTING DOWN')
    f.close()
    self.info("Test service FINISHED.")
Adds an information dictionary to an exception.

See
http://stackoverflow.com/questions/9157210/how-do-i-raise-the-same-exception-with-a-custom-message-in-python

Args:
    err: the exception to be modified
    info: the information to add
def add_info_to_exception(err: Exception, info: Dict) -> None:  # noqa
    if not err.args:
        err.args = ('', )
    err.args += (info, )
Apply a low-pass filter to the data.

Args:
    data: time series of the data
    sampling_freq_hz: sampling frequency :math:`f_s`, in Hz (or other
        consistent units)
    cutoff_freq_hz: filter cutoff frequency in Hz (or other consistent
        units)
    numtaps: number of filter taps

Returns:
    filtered data

Note: number of filter taps = filter order + 1.
def lowpass_filter(data: FLOATS_TYPE,
                   sampling_freq_hz: float,
                   cutoff_freq_hz: float,
                   numtaps: int) -> FLOATS_TYPE:
    coeffs = firwin(
        numtaps=numtaps,
        cutoff=normalized_frequency(cutoff_freq_hz, sampling_freq_hz),
        pass_zero=True
    )  # coefficients of a finite impulse response (FIR) filter using window method  # noqa
    filtered_data = lfilter(b=coeffs, a=1.0, x=data)
    return filtered_data
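A brief demonstration (an assumption-laden sketch, not from the source: it
presumes that ``normalized_frequency`` divides by the Nyquist frequency, as
the ``firwin`` call above implies):

import numpy as np

fs = 1000.0  # sampling frequency, Hz (hypothetical)
t = np.arange(0, 1, 1 / fs)
# A 5 Hz signal contaminated with 50 Hz interference:
data = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 50 * t)
# A 20 Hz cutoff should keep the 5 Hz component and suppress the 50 Hz one:
smoothed = lowpass_filter(data, sampling_freq_hz=fs, cutoff_freq_hz=20.0,
                          numtaps=101)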
Design and use a notch (band reject) filter to filter the data.

Args:
    data: time series of the data
    sampling_freq_hz: sampling frequency :math:`f_s`, in Hz (or other
        consistent units)
    notch_freq_hz: notch frequency, in Hz (or other consistent units)
    quality_factor: notch filter quality factor, :math:`Q`

Returns:
    filtered data
def notch_filter(data: FLOATS_TYPE,
                 sampling_freq_hz: float,
                 notch_freq_hz: float,
                 quality_factor: float) -> FLOATS_TYPE:
    b, a = iirnotch(
        w0=normalized_frequency(notch_freq_hz, sampling_freq_hz),
        Q=quality_factor
    )
    filtered_data = lfilter(b=b, a=a, x=data)
    return filtered_data
Fire up multiple processes, and wait for them to finish.

Args:
    args_list: command arguments for each process
    die_on_failure: see :func:`wait_for_processes`
def run_multiple_processes(args_list: List[List[str]],
                           die_on_failure: bool = True) -> None:
    for procargs in args_list:
        start_process(procargs)
    # Wait for them all to finish
    wait_for_processes(die_on_failure=die_on_failure)
Command to produce an :class:`InsertOnDuplicate` object.

Args:
    tablename: name of the table
    values: values to ``INSERT``
    inline: as per
        http://docs.sqlalchemy.org/en/latest/core/dml.html#sqlalchemy.sql.expression.insert
    kwargs: additional parameters

Returns:
    an :class:`InsertOnDuplicate` object
def insert_on_duplicate(tablename: str,
                        values: Any = None,
                        inline: bool = False,
                        **kwargs):  # noqa
    return InsertOnDuplicate(tablename, values, inline=inline, **kwargs)
Amalgamate multiple CSV/TSV/similar files into one.

Args:
    filenames: list of filenames to process
    outfile: file-like object to write output to
    input_dialect: dialect of input files, as passed to ``csv.reader``
    output_dialect: dialect to write, as passed to ``csv.writer``
    debug: be verbose?
    headers: do the files have header lines?
def merge_csv(filenames: List[str],
              outfile: TextIO = sys.stdout,
              input_dialect: str = 'excel',
              output_dialect: str = 'excel',
              debug: bool = False,
              headers: bool = True) -> None:
    writer = csv.writer(outfile, dialect=output_dialect)
    written_header = False
    header_items = []  # type: List[str]
    for filename in filenames:
        log.info("Processing file " + repr(filename))
        with open(filename, 'r') as f:
            reader = csv.reader(f, dialect=input_dialect)
            if headers:
                if not written_header:
                    header_items = next(reader)
                    if debug:
                        log.debug("Header row: {!r}", header_items)
                    writer.writerow(header_items)
                    written_header = True
                else:
                    new_headers = next(reader)
                    if new_headers != header_items:
                        raise ValueError(
                            "Header line in file {filename} doesn't match - "
                            "it was {new} but previous was {old}".format(
                                filename=repr(filename),
                                new=repr(new_headers),
                                old=repr(header_items),
                            ))
                    if debug:
                        log.debug("Header row matches previous")
            else:
                if debug:
                    log.debug("No headers in use")
            for row in reader:
                if debug:
                    log.debug("Data row: {!r}", row)
                writer.writerow(row)
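A usage sketch (hypothetical filenames; both inputs must share an identical
header row, or ``ValueError`` is raised):

with open("merged.csv", "w", newline="") as out:
    merge_csv(["jan.csv", "feb.csv"], outfile=out)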
r"""
Standard logistic function.

.. math::

    y = \frac{1}{1 + e^{-k (x - \theta)}}

Args:
    x: :math:`x`
    k: :math:`k`
    theta: :math:`\theta`

Returns:
    :math:`y`
def logistic(x: Union[float, np.ndarray],
             k: float,
             theta: float) -> Optional[float]:
    # https://www.sharelatex.com/learn/List_of_Greek_letters_and_math_symbols
    if x is None or k is None or theta is None:
        return None
    # noinspection PyUnresolvedReferences
    return 1 / (1 + np.exp(-k * (x - theta)))
r"""
Inverse standard logistic function:

.. math::

    x = \frac{\ln\left( \frac{1}{y} - 1 \right)}{-k} + \theta

Args:
    y: :math:`y`
    k: :math:`k`
    theta: :math:`\theta`

Returns:
    :math:`x`
def inv_logistic(y: Union[float, np.ndarray],
                 k: float,
                 theta: float) -> Optional[float]:
    if y is None or k is None or theta is None:
        return None
    # noinspection PyUnresolvedReferences
    return (np.log((1 / y) - 1) / -k) + theta
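A quick round-trip check, since inv_logistic() is the inverse of logistic()
(a sketch with arbitrary constants):

import numpy as np

k, theta = 2.0, 1.0
x = 0.25
y = logistic(x, k, theta)  # 1 / (1 + exp(-2 * (0.25 - 1)))
assert np.isclose(inv_logistic(y, k, theta), x)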
Convert nose-style test reports to UnitTH-style test reports by splitting
modules into separate XML files.

Args:
    in_file_nose (:obj:`str`): path to nose-style test report
    out_dir_unitth (:obj:`str`): directory in which to save UnitTH-style
        test reports
def run(in_file_nose, out_dir_unitth):
    suites = Converter.read_nose(in_file_nose)
    Converter.write_unitth(suites, out_dir_unitth)
Parse nose-style test reports into a `dict`.

Args:
    in_file (:obj:`str`): path to nose-style test report

Returns:
    :obj:`dict`: dictionary of test suites
def read_nose(in_file):
    suites = {}
    doc_xml = minidom.parse(in_file)
    suite_xml = doc_xml.getElementsByTagName("testsuite")[0]
    for case_xml in suite_xml.getElementsByTagName('testcase'):
        classname = case_xml.getAttribute('classname')
        if classname not in suites:
            suites[classname] = []
        case = {
            'name': case_xml.getAttribute('name'),
            'time': float(case_xml.getAttribute('time')),
        }
        skipped_xml = case_xml.getElementsByTagName('skipped')
        if skipped_xml:
            if skipped_xml[0].hasAttribute('type'):
                type_ = skipped_xml[0].getAttribute('type')
            else:
                type_ = ''
            case['skipped'] = {
                'type': type_,
                'message': skipped_xml[0].getAttribute('message'),
                'text': "".join([child.nodeValue
                                 for child in skipped_xml[0].childNodes]),
            }
        failure_xml = case_xml.getElementsByTagName('failure')
        if failure_xml:
            if failure_xml[0].hasAttribute('type'):
                type_ = failure_xml[0].getAttribute('type')
            else:
                type_ = ''
            case['failure'] = {
                'type': type_,
                'message': failure_xml[0].getAttribute('message'),
                'text': "".join([child.nodeValue
                                 for child in failure_xml[0].childNodes]),
            }
        error_xml = case_xml.getElementsByTagName('error')
        if error_xml:
            if error_xml[0].hasAttribute('type'):
                type_ = error_xml[0].getAttribute('type')
            else:
                type_ = ''
            case['error'] = {
                'type': type_,
                'message': error_xml[0].getAttribute('message'),
                'text': "".join([child.nodeValue
                                 for child in error_xml[0].childNodes]),
            }
        suites[classname].append(case)
    return suites
Write UnitTH-style test reports.

Args:
    suites (:obj:`dict`): dictionary of test suites
    out_dir (:obj:`str`): path to save UnitTH-style test reports
def write_unitth(suites, out_dir):
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    for classname, cases in suites.items():
        doc_xml = minidom.Document()

        suite_xml = doc_xml.createElement('testsuite')
        suite_xml.setAttribute('name', classname)
        suite_xml.setAttribute('tests', str(len(cases)))
        suite_xml.setAttribute('errors',
                               str(sum('error' in case for case in cases)))
        suite_xml.setAttribute('failures',
                               str(sum('failure' in case for case in cases)))
        suite_xml.setAttribute('skipped',
                               str(sum('skipped' in case for case in cases)))
        suite_xml.setAttribute('time', '{:.3f}'.format(
            sum(case['time'] for case in cases)))
        doc_xml.appendChild(suite_xml)

        for case in cases:
            case_xml = doc_xml.createElement('testcase')
            case_xml.setAttribute('classname', classname)
            case_xml.setAttribute('name', case['name'])
            case_xml.setAttribute('time', '{:.3f}'.format(case['time']))
            suite_xml.appendChild(case_xml)

            if 'skipped' in case:
                skipped_xml = doc_xml.createElement('skipped')
                skipped_xml.setAttribute('type', case['skipped']['type'])
                skipped_xml.setAttribute('message',
                                         case['skipped']['message'])
                case_xml.appendChild(skipped_xml)
                skipped_text_xml = doc_xml.createCDATASection(
                    case['skipped']['text'])
                skipped_xml.appendChild(skipped_text_xml)

            if 'failure' in case:
                failure_xml = doc_xml.createElement('failure')
                failure_xml.setAttribute('type', case['failure']['type'])
                failure_xml.setAttribute('message',
                                         case['failure']['message'])
                case_xml.appendChild(failure_xml)
                failure_text_xml = doc_xml.createCDATASection(
                    case['failure']['text'])
                failure_xml.appendChild(failure_text_xml)

            if 'error' in case:
                error_xml = doc_xml.createElement('error')
                error_xml.setAttribute('type', case['error']['type'])
                error_xml.setAttribute('message', case['error']['message'])
                case_xml.appendChild(error_xml)
                error_text_xml = doc_xml.createCDATASection(
                    case['error']['text'])
                error_xml.appendChild(error_text_xml)

        with open(os.path.join(out_dir, '{}.xml'.format(classname)),
                  'w') as output:
            doc_xml.writexml(output, encoding='utf-8', addindent='', newl="")

        doc_xml.unlink()
Returns results and column names from a query.

Args:
    session: SQLAlchemy :class:`Session`, :class:`Engine`, or
        :class:`Connection` object
    sql: raw SQL to execute

Returns:
    ``(rows, fieldnames)`` where ``rows`` is the usual set of results and
    ``fieldnames`` are the names of the result columns/fields
def get_rows_fieldnames_from_raw_sql(
        session: Union[Session, Engine, Connection],
        sql: str) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]:
    result = session.execute(sql)  # type: ResultProxy
    fieldnames = result.keys()
    rows = result.fetchall()
    return rows, fieldnames
Returns the result of ``COUNT(*)`` from the specified table (with
additional ``WHERE`` criteria if desired).

Args:
    session: SQLAlchemy :class:`Session`, :class:`Engine`, or
        :class:`Connection` object
    tablename: name of the table
    criteria: optional SQLAlchemy "where" criteria

Returns:
    a scalar
def count_star(session: Union[Session, Engine, Connection],
               tablename: str,
               *criteria: Any) -> int:
    # Works if you pass a connection or a session or an engine; all have
    # the execute() method.
    query = select([func.count()]).select_from(table(tablename))
    for criterion in criteria:
        query = query.where(criterion)
    return session.execute(query).scalar()
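A usage sketch (assumes SQLAlchemy 1.x, matching the ``select([...])`` style
above; the table and data are hypothetical):

from sqlalchemy import column, create_engine

engine = create_engine("sqlite://")  # in-memory scratch database
engine.execute("CREATE TABLE person (id INTEGER, age INTEGER)")
engine.execute("INSERT INTO person VALUES (1, 30), (2, 40)")
print(count_star(engine, "person"))                      # 2
print(count_star(engine, "person", column("age") > 35))  # 1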
Returns a list of the first values in each row returned by a ``SELECT``
query.

A Core version of this sort of thing:
http://xion.io/post/code/sqlalchemy-query-values.html

Args:
    session: SQLAlchemy :class:`Session` object
    select_statement: SQLAlchemy :class:`Select` object

Returns:
    a list of the first value of each result row
def fetch_all_first_values(session: Session,
                           select_statement: Select) -> List[Any]:
    rows = session.execute(select_statement)  # type: ResultProxy
    try:
        return [row[0] for row in rows]
    except ValueError as e:
        raise MultipleResultsFound(str(e))
If we're running under SQL Server, disable constraint checking for the
specified table while the resource is held.

Args:
    session: SQLAlchemy :class:`Session`
    tablename: table name

See
https://stackoverflow.com/questions/123558/sql-server-2005-t-sql-to-temporarily-disable-a-trigger
@contextmanager  # assumed decorator: the yields below imply a context manager
def if_sqlserver_disable_constraints(session: SqlASession,
                                     tablename: str) -> None:  # noqa
    engine = get_engine_from_session(session)
    if is_sqlserver(engine):
        quoted_tablename = quote_identifier(tablename, engine)
        session.execute(
            "ALTER TABLE {} NOCHECK CONSTRAINT all".format(
                quoted_tablename))
        yield
        session.execute(
            "ALTER TABLE {} WITH CHECK CHECK CONSTRAINT all".format(
                quoted_tablename))
    else:
        yield
If we're running under SQL Server, disable triggers AND constraints for
the specified table while the resource is held.

Args:
    session: SQLAlchemy :class:`Session`
    tablename: table name
@contextmanager  # assumed decorator: the yield below implies a context manager
def if_sqlserver_disable_constraints_triggers(session: SqlASession,
                                              tablename: str) -> None:
    with if_sqlserver_disable_constraints(session, tablename):
        with if_sqlserver_disable_triggers(session, tablename):
            yield
Ask Alembic what its head revision is (i.e. where the Python code would
like the database to be at).

Arguments:
    alembic_config_filename: config filename
    alembic_base_dir: directory to start in, so relative paths in the
        config file work.
    version_table: table name for Alembic versions
def get_head_revision_from_alembic(
        alembic_config_filename: str,
        alembic_base_dir: str = None,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str:
    if alembic_base_dir is None:
        alembic_base_dir = os.path.dirname(alembic_config_filename)
    os.chdir(alembic_base_dir)  # so the directory in the config file works
    config = Config(alembic_config_filename)
    script = ScriptDirectory.from_config(config)
    with EnvironmentContext(config,
                            script,
                            version_table=version_table):
        return script.get_current_head()
Ask the database what its current revision is.

Arguments:
    database_url: SQLAlchemy URL for the database
    version_table: table name for Alembic versions
def get_current_revision(
        database_url: str,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> str:
    engine = create_engine(database_url)
    conn = engine.connect()
    opts = {'version_table': version_table}
    mig_context = MigrationContext.configure(conn, opts=opts)
    return mig_context.get_current_revision()
Returns a tuple of ``(current_revision, head_revision)``; see
:func:`get_current_revision` and :func:`get_head_revision_from_alembic`.

Arguments:
    database_url: SQLAlchemy URL for the database
    alembic_config_filename: config filename
    alembic_base_dir: directory to start in, so relative paths in the
        config file work.
    version_table: table name for Alembic versions
def get_current_and_head_revision(
        database_url: str,
        alembic_config_filename: str,
        alembic_base_dir: str = None,
        version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE) -> Tuple[str, str]:
    # Where we want to be
    head_revision = get_head_revision_from_alembic(
        alembic_config_filename=alembic_config_filename,
        alembic_base_dir=alembic_base_dir,
        version_table=version_table
    )
    log.info("Intended database version: {}", head_revision)
    # Where we are
    current_revision = get_current_revision(
        database_url=database_url,
        version_table=version_table
    )
    log.info("Current database version: {}", current_revision)
    # Are we where we want to be?
    return current_revision, head_revision
Takes a command-line command, executes it, and returns its ``stdout``
output.

Args:
    command: command string

Returns:
    output from the command as ``bytes``
def get_external_command_output(command: str) -> bytes:
    args = shlex.split(command)
    ret = subprocess.check_output(args)  # this needs Python 2.7 or higher
    return ret
Get the output from a piped series of commands.

Args:
    commands: sequence of command strings
    stdinput: optional ``stdin`` data to feed into the start of the pipe

Returns:
    ``stdout`` from the end of the pipe
def get_pipe_series_output(commands: Sequence[str],
                           stdinput: BinaryIO = None) -> bytes:
    processes = []  # type: List[subprocess.Popen]
    for i in range(len(commands)):
        if i == 0:  # first process
            processes.append(
                subprocess.Popen(
                    shlex.split(commands[i]),
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE
                )
            )
        else:  # subsequent ones
            processes.append(
                subprocess.Popen(
                    shlex.split(commands[i]),
                    stdin=processes[i - 1].stdout,
                    stdout=subprocess.PIPE
                )
            )
    return processes[-1].communicate(stdinput)[0]
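For example (assuming standard Unix tools are on the PATH):

# Roughly equivalent to the shell pipeline: ls | grep py | sort
output = get_pipe_series_output(["ls", "grep py", "sort"])
print(output.decode())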
Launches a file using the operating system's standard launcher.

Args:
    filename: file to launch
    raise_if_fails: raise any exceptions from
        ``subprocess.call(["xdg-open", filename])`` (Linux) or
        ``os.startfile(filename)`` (otherwise)? If not, exceptions are
        suppressed.
def launch_external_file(filename: str, raise_if_fails: bool = False) -> None:
    log.info("Launching external file: {!r}", filename)
    try:
        if sys.platform.startswith('linux'):
            cmdargs = ["xdg-open", filename]
            subprocess.call(cmdargs)
        else:
            # noinspection PyUnresolvedReferences
            os.startfile(filename)
    except Exception as e:
        log.critical("Error launching {!r}: error was {}.\n\n{}",
                     filename, str(e), traceback.format_exc())
        if raise_if_fails:
            raise
Kills a tree of processes, starting with the parent. Slightly modified
from
https://stackoverflow.com/questions/1230669/subprocess-deleting-child-processes-in-windows.

Args:
    pid: process ID of the parent
    including_parent: kill the parent too?
    timeout_s: timeout to wait for processes to close

Returns:
    tuple: ``(gone, still_alive)``, where both are sets of
    :class:`psutil.Process` objects
def kill_proc_tree(pid: int,
                   including_parent: bool = True,
                   timeout_s: float = 5) \
        -> Tuple[Set[psutil.Process], Set[psutil.Process]]:  # noqa
    parent = psutil.Process(pid)
    to_kill = parent.children(recursive=True)  # type: List[psutil.Process]
    if including_parent:
        to_kill.append(parent)
    for proc in to_kill:
        proc.kill()  # SIGKILL
    gone, still_alive = psutil.wait_procs(to_kill, timeout=timeout_s)
    return gone, still_alive
Generates file-like objects from a list of filenames.

Args:
    filenames: iterable of filenames

Yields:
    each file as a :class:`TextIO` object
def gen_textfiles_from_filenames(
        filenames: Iterable[str]) -> Generator[TextIO, None, None]:
    for filename in filenames:
        with open(filename) as f:
            yield f
Generates lines from file-like objects.

Args:
    files: iterable of :class:`TextIO` objects

Yields:
    each line of all the files
def gen_lines_from_textfiles(
        files: Iterable[TextIO]) -> Generator[str, None, None]:
    for file in files:
        for line in file:
            yield line
Generates lines from binary files. Strips out newlines.

Args:
    files: iterable of :class:`BinaryIO` file-like objects
    encoding: encoding to use

Yields:
    each line of all the files
def gen_lines_from_binary_files(
        files: Iterable[BinaryIO],
        encoding: str = UTF8) -> Generator[str, None, None]:
    for file in files:
        for byteline in file:
            line = byteline.decode(encoding).strip()
            yield line
Splits lines with ``splitter`` and yields a specified part by index.

Args:
    lines: iterable of strings
    part_index: index of part to yield
    splitter: string to split the lines on

Yields:
    the specified part for each line
def gen_part_from_line(lines: Iterable[str],
                       part_index: int,
                       splitter: str = None) -> Generator[str, None, None]:
    for line in lines:
        parts = line.split(splitter)
        yield parts[part_index]
r"""
Yields the *n*\ th part of each thing in ``iterables``.

Args:
    iterables: iterable of anything
    part_index: part index

Yields:
    ``item[part_index] for item in iterable``
def gen_part_from_iterables(iterables: Iterable[Any],
                            part_index: int) -> Generator[Any, None, None]:
    # RST: make part of word bold/italic:
    # https://stackoverflow.com/questions/12771480/part-of-a-word-bold-in-restructuredtext  # noqa
    for iterable in iterables:
        yield iterable[part_index]
Iterate through binary file-like objects that are CSV files in a
specified encoding. Yield each row.

Args:
    csv_files: iterable of :class:`BinaryIO` objects
    encoding: encoding to use
    skip_header: skip the header (first) row of each file?
    csv_reader_kwargs: arguments to pass to :func:`csv.reader`

Yields:
    rows from the files
def gen_rows_from_csv_binfiles(
        csv_files: Iterable[BinaryIO],
        encoding: str = UTF8,
        skip_header: bool = False,
        **csv_reader_kwargs) -> Generator[Iterable[str], None, None]:
    dialect = csv_reader_kwargs.pop('dialect', None)
    for csv_file_bin in csv_files:
        # noinspection PyTypeChecker
        csv_file = io.TextIOWrapper(csv_file_bin, encoding=encoding)
        thisfile_dialect = dialect
        if thisfile_dialect is None:
            thisfile_dialect = csv.Sniffer().sniff(csv_file.read(1024))
            csv_file.seek(0)
        reader = csv.reader(csv_file, dialect=thisfile_dialect,
                            **csv_reader_kwargs)
        first = True
        for row in reader:
            if first:
                first = False
                if skip_header:
                    continue
            yield row
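A usage sketch (hypothetical file; the dialect is sniffed from the first
1024 bytes unless one is supplied):

with open("data.csv", "rb") as binfile:
    for row in gen_rows_from_csv_binfiles([binfile], skip_header=True):
        print(row)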
Replaces text in a file.

Args:
    filename: filename to process (modifying it in place)
    text_from: original text to replace
    text_to: replacement text
def replace_in_file(filename: str, text_from: str, text_to: str) -> None:
    log.info("Amending {}: {} -> {}",
             filename, repr(text_from), repr(text_to))
    with open(filename) as infile:
        contents = infile.read()
    contents = contents.replace(text_from, text_to)
    with open(filename, 'w') as outfile:
        outfile.write(contents)
Replaces multiple from/to string pairs within a single file.

Args:
    filename: filename to process (modifying it in place)
    replacements: list of ``(from_text, to_text)`` tuples
def replace_multiple_in_file(filename: str,
                             replacements: List[Tuple[str, str]]) -> None:
    with open(filename) as infile:
        contents = infile.read()
    for text_from, text_to in replacements:
        log.info("Amending {}: {} -> {}",
                 filename, repr(text_from), repr(text_to))
        contents = contents.replace(text_from, text_to)
    with open(filename, 'w') as outfile:
        outfile.write(contents)
Converts a file (in place) from UNIX to Windows line endings, or the
reverse.

Args:
    filename: filename to modify (in place)
    to_unix: convert Windows (CR LF) to UNIX (LF)
    to_windows: convert UNIX (LF) to Windows (CR LF)
def convert_line_endings(filename: str,
                         to_unix: bool = False,
                         to_windows: bool = False) -> None:
    assert to_unix != to_windows
    with open(filename, "rb") as f:
        contents = f.read()
    windows_eol = b"\r\n"  # CR LF
    unix_eol = b"\n"  # LF
    if to_unix:
        log.info("Converting from Windows to UNIX line endings: {!r}",
                 filename)
        src = windows_eol
        dst = unix_eol
    else:  # to_windows
        log.info("Converting from UNIX to Windows line endings: {!r}",
                 filename)
        src = unix_eol
        dst = windows_eol
        # This check belongs to the to-Windows direction only; otherwise a
        # file with Windows endings could never be converted to UNIX ones.
        if windows_eol in contents:
            log.info("... already contains at least one Windows line "
                     "ending; probably converted before; skipping")
            return
    contents = contents.replace(src, dst)
    with open(filename, "wb") as f:
        f.write(contents)
Detects whether a line is present within a file.

Args:
    filename: file to check
    line: line to search for (as an exact match)
def is_line_in_file(filename: str, line: str) -> bool:
    assert "\n" not in line
    with open(filename, "r") as file:
        for fileline in file:
            # Strip the trailing newline before comparing: lines read from
            # a file keep their "\n", but ``line`` never contains one.
            if fileline.rstrip("\n") == line:
                return True
        return False
Adds a line (at the end) if it's not already in the file somewhere.

Args:
    filename: filename to modify (in place)
    line: line to append (which must not have a newline in)
def add_line_if_absent(filename: str, line: str) -> None:
    assert "\n" not in line
    if not is_line_in_file(filename, line):
        log.info("Appending line {!r} to file {!r}", line, filename)
        with open(filename, "a") as file:
            file.write(line + "\n")  # append with a terminating newline
De-duplicate files within one or more directories. Remove files that are
identical to ones already considered.

Args:
    directories: list of directories to process
    recursive: process subdirectories (recursively)?
    dummy_run: say what it'll do, but don't do it
def deduplicate(directories: List[str], recursive: bool,
                dummy_run: bool) -> None:
    # -------------------------------------------------------------------------
    # Catalogue files by their size
    # -------------------------------------------------------------------------
    files_by_size = {}  # type: Dict[int, List[str]]  # maps size to list of filenames  # noqa
    num_considered = 0
    for filename in gen_filenames(directories, recursive=recursive):
        if not os.path.isfile(filename):
            continue
        size = os.stat(filename)[stat.ST_SIZE]
        a = files_by_size.setdefault(size, [])
        a.append(filename)
        num_considered += 1
    log.debug("files_by_size =\n{}", pformat(files_by_size))

    # -------------------------------------------------------------------------
    # By size, look for duplicates using a hash of the first part only
    # -------------------------------------------------------------------------
    log.info("Finding potential duplicates...")
    potential_duplicate_sets = []
    potential_count = 0
    sizes = list(files_by_size.keys())
    sizes.sort()
    for k in sizes:
        files_of_this_size = files_by_size[k]
        out_files = []  # type: List[str]
        # ... list of all files having >1 file per hash, for this size
        hashes = {}  # type: Dict[str, Union[bool, str]]
        # ... key is a hash; value is either True or a filename
        if len(files_of_this_size) == 1:
            continue
        log.info("Testing {} files of size {}...",
                 len(files_of_this_size), k)
        for filename in files_of_this_size:
            if not os.path.isfile(filename):
                continue
            log.debug("Quick-scanning file: {}", filename)
            with open(filename, 'rb') as fd:
                hasher = md5()
                hasher.update(fd.read(INITIAL_HASH_SIZE))
                hash_value = hasher.digest()
            if hash_value in hashes:
                # We have discovered the SECOND OR SUBSEQUENT hash match.
                first_file_or_true = hashes[hash_value]
                if first_file_or_true is not True:
                    # We have discovered the SECOND file;
                    # first_file_or_true contains the name of the FIRST.
                    out_files.append(first_file_or_true)
                    hashes[hash_value] = True
                out_files.append(filename)
            else:
                # We have discovered the FIRST file with this hash.
                hashes[hash_value] = filename
        if out_files:
            potential_duplicate_sets.append(out_files)
            potential_count = potential_count + len(out_files)
    del files_by_size
    log.info("Found {} sets of potential duplicates, based on hashing the "
             "first {} bytes of each...", potential_count, INITIAL_HASH_SIZE)
    log.debug("potential_duplicate_sets =\n{}",
              pformat(potential_duplicate_sets))

    # -------------------------------------------------------------------------
    # Within each set, check for duplicates using a hash of the entire file
    # -------------------------------------------------------------------------
    log.info("Scanning for real duplicates...")
    num_scanned = 0
    num_to_scan = sum(len(one_set) for one_set in potential_duplicate_sets)
    duplicate_sets = []  # type: List[List[str]]
    for one_set in potential_duplicate_sets:
        out_files = []  # type: List[str]
        hashes = {}
        for filename in one_set:
            num_scanned += 1
            log.info("Scanning file [{}/{}]: {}",
                     num_scanned, num_to_scan, filename)
            with open(filename, 'rb') as fd:
                hasher = md5()
                while True:
                    r = fd.read(MAIN_READ_CHUNK_SIZE)
                    if len(r) == 0:
                        break
                    hasher.update(r)
            hash_value = hasher.digest()
            if hash_value in hashes:
                if not out_files:
                    out_files.append(hashes[hash_value])
                out_files.append(filename)
            else:
                hashes[hash_value] = filename
        if len(out_files):
            duplicate_sets.append(out_files)
    log.debug("duplicate_sets = \n{}", pformat(duplicate_sets))

    num_originals = 0
    num_deleted = 0
    for d in duplicate_sets:
        print("Original is: {}".format(d[0]))
        num_originals += 1
        for f in d[1:]:
            if dummy_run:
                print("Would delete: {}".format(f))
            else:
                print("Deleting: {}".format(f))
                os.remove(f)
            num_deleted += 1
        print()
    num_unique = num_considered - (num_originals + num_deleted)
    print(
        "{action} {d} duplicates, leaving {o} originals (and {u} unique "
        "files not touched; {c} files considered in total)".format(
            action="Would delete" if dummy_run else "Deleted",
            d=num_deleted,
            o=num_originals,
            u=num_unique,
            c=num_considered
        )
    )
Context manager to add a file output stream to our logging system.

Args:
    tee_file: file-like object to write to
    loglevel: log level (e.g. ``logging.DEBUG``) to use for this stream
@contextmanager  # assumed decorator: the docstring and yield imply one
def tee_log(tee_file: TextIO, loglevel: int) -> None:
    handler = get_monochrome_handler(stream=tee_file)
    handler.setLevel(loglevel)
    rootlogger = logging.getLogger()
    rootlogger.addHandler(handler)
    # Tee the main stdout/stderr as required.
    with TeeContextManager(tee_file, capture_stdout=True):
        with TeeContextManager(tee_file, capture_stderr=True):
            try:
                yield
            except Exception:
                # We catch this so that the exception also goes to the log.
                exc_type, exc_value, exc_traceback = sys.exc_info()
                lines = traceback.format_exception(exc_type, exc_value,
                                                   exc_traceback)
                log.critical("\n" + "".join(lines))
                raise
Returns a copy of the dictionary ``d`` with its keys renamed according to
``mapping``.

Args:
    d: the starting dictionary
    mapping: a dictionary of the format ``{old_key_name: new_key_name}``

Returns:
    a new dictionary

Keys that are not in ``mapping`` are left unchanged. The input parameters
are not modified.
def rename_keys(d: Dict[str, Any], mapping: Dict[str, str]) -> Dict[str, Any]:
    result = {}  # type: Dict[str, Any]
    for k, v in d.items():
        if k in mapping:
            k = mapping[k]
        result[k] = v
    return result
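For example:

d = {"DOB": "2000-01-01", "name": "Alice"}
print(rename_keys(d, {"DOB": "date_of_birth"}))
# {'date_of_birth': '2000-01-01', 'name': 'Alice'}; d itself is unchanged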
Renames, IN PLACE, the keys in ``d`` according to the mapping in
``renames``.

Args:
    d: a dictionary to modify
    renames: a dictionary of the format ``{old_key_name: new_key_name}``

See
https://stackoverflow.com/questions/4406501/change-the-name-of-a-key-in-dictionary.
def rename_keys_in_dict(d: Dict[str, Any],
                        renames: Dict[str, str]) -> None:  # noqa
    for old_key, new_key in renames.items():
        if new_key == old_key:
            continue
        if old_key in d:
            if new_key in d:
                raise ValueError(
                    "rename_keys_in_dict: renaming {} -> {} but new key "
                    "already exists".format(repr(old_key), repr(new_key)))
            d[new_key] = d.pop(old_key)
Deletes keys from a dictionary, in place.

Args:
    d: dictionary to modify
    keys_to_delete: if any keys are present in this list, they are
        deleted...
    keys_to_keep: ... unless they are present in this list.
def delete_keys(d: Dict[Any, Any],
                keys_to_delete: List[Any],
                keys_to_keep: List[Any]) -> None:
    for k in keys_to_delete:
        if k in d and k not in keys_to_keep:
            del d[k]
Manually set the ``timing`` parameter, and optionally reset the timers.

Args:
    timing: should we be timing?
    reset: reset the timers?
def set_timing(self, timing: bool, reset: bool = False) -> None:
    self._timing = timing
    if reset:
        self.reset()
Start a named timer.

Args:
    name: name of the timer
    increment_count: increment the start count for this timer
def start(self, name: str, increment_count: bool = True) -> None:
    if not self._timing:
        return
    now = get_now_utc_pendulum()

    # If we were already timing something else, pause that.
    if self._stack:
        last = self._stack[-1]
        self._totaldurations[last] += now - self._starttimes[last]

    # Start timing our new thing
    if name not in self._starttimes:
        self._totaldurations[name] = datetime.timedelta()
        self._count[name] = 0
    self._starttimes[name] = now
    if increment_count:
        self._count[name] += 1
    self._stack.append(name)
Stop a named timer.

Args:
    name: timer to stop
def stop(self, name: str) -> None:
    if not self._timing:
        return
    now = get_now_utc_pendulum()

    # Validity check
    if not self._stack:
        raise AssertionError("MultiTimer.stop() when nothing running")
    if self._stack[-1] != name:
        raise AssertionError(
            "MultiTimer.stop({}) when {} is running".format(
                repr(name), repr(self._stack[-1])))

    # Finish what we were asked to
    self._totaldurations[name] += now - self._starttimes[name]
    self._stack.pop()

    # Now, if we were timing something else before we started "name",
    # resume...
    if self._stack:
        last = self._stack[-1]
        self._starttimes[last] = now
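A usage sketch for the start()/stop() pair (hypothetical: assumes an
already-constructed MultiTimer named ``timer`` with timing enabled, which is
not shown in this section; timers nest, and only the top of the stack runs):

timer.start("outer")
timer.start("inner")  # pauses "outer" while "inner" runs
timer.stop("inner")   # resumes timing "outer"
timer.stop("outer")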
Pings a host, using OS tools.

Args:
    hostname: host name or IP address
    timeout_s: timeout in seconds

Returns:
    was the ping successful?
def ping(hostname: str, timeout_s: int = 5) -> bool:
    if sys.platform == "win32":
        timeout_ms = timeout_s * 1000
        args = [
            "ping",
            hostname,
            "-n", "1",  # ping count
            "-w", str(timeout_ms),  # timeout
        ]
    elif sys.platform.startswith('linux'):
        args = [
            "ping",
            hostname,
            "-c", "1",  # ping count
            "-w", str(timeout_s),  # timeout
        ]
    else:
        raise AssertionError("Don't know how to ping on this operating "
                             "system")
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    retcode = proc.returncode
    return retcode == 0
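For example:

if ping("127.0.0.1", timeout_s=2):
    print("Localhost is up")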
Downloads a URL to a file.

Args:
    url: URL to download from
    filename: file to save to
    skip_cert_verify: skip SSL certificate check?
def download(url: str, filename: str, skip_cert_verify: bool = True) -> None:
    log.info("Downloading from {} to {}", url, filename)
    # urllib.request.urlretrieve(url, filename)
    # ... sometimes fails (e.g. downloading
    # https://www.openssl.org/source/openssl-1.1.0g.tar.gz under Windows) with:
    # ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777)  # noqa
    # ... due to this certificate root problem (probably because OpenSSL
    # [used by Python] doesn't play entirely by the same rules as others?):
    # https://stackoverflow.com/questions/27804710
    # So:
    ctx = ssl.create_default_context()  # type: ssl.SSLContext
    if skip_cert_verify:
        log.debug("Skipping SSL certificate check for " + url)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    with urllib.request.urlopen(url, context=ctx) as u, \
            open(filename, 'wb') as f:  # noqa
        f.write(u.read())
Generate binary files from a series of URLs (one per URL).

Args:
    urls: iterable of URLs
    on_disk: if ``True``, yields files that are on disk (permitting
        random access); if ``False``, yields in-memory files (which will
        not permit random access)
    show_info: show progress to the log?

Yields:
    files, each of type :class:`BinaryIO`
def gen_binary_files_from_urls(
        urls: Iterable[str],
        on_disk: bool = False,
        show_info: bool = True) -> Generator[BinaryIO, None, None]:
    for url in urls:
        if on_disk:
            # Necessary for e.g. zip processing (random access)
            with tempfile.TemporaryDirectory() as tmpdir:
                filename = os.path.join(tmpdir, "tempfile")
                download(url=url, filename=filename)
                with open(filename, 'rb') as f:
                    yield f
        else:
            if show_info:
                log.info("Reading from URL: {}", url)
            with urllib.request.urlopen(url) as f:
                yield f
        if show_info:
            log.info("... finished reading from URL: {}", url)
Returns a list produced by applying :func:`multiple_replace` to every
string in ``stringlist``.

Args:
    stringlist: list of source strings
    replacedict: dictionary mapping "original" to "replacement" strings

Returns:
    list of final strings
def replace_in_list(stringlist: Iterable[str],
                    replacedict: Dict[str, str]) -> List[str]:
    newlist = []
    for fromstring in stringlist:
        newlist.append(multiple_replace(fromstring, replacedict))
    return newlist
Determines whether a drug, passed as an instance of :class:`.Drug`,
matches the specified criteria.

Args:
    drug: a :class:`.Drug` instance
    criteria: ``name=value`` pairs to match against the attributes of
        the :class:`Drug` class. For example, you can include keyword
        arguments like ``antidepressant=True``.
def drug_matches_criteria(drug: Drug, **criteria: bool) -> bool:
    for attribute, value in criteria.items():
        if getattr(drug, attribute) != value:
            return False
    return True
Returns SQL like

.. code-block:: sql

    (column_name LIKE '%drugname1%' OR column_name LIKE '%drugname2%')

for the drug names that this Drug object knows about.

Args:
    column_name: column name, pre-escaped if necessary

Returns:
    SQL fragment as above
def sql_column_like_drug(self, column_name: str) -> str:
    clauses = [
        "{col} LIKE {fragment}".format(
            col=column_name,
            fragment=sql_string_literal(f))
        for f in self.sql_like_fragments
    ]
    return "({})".format(" OR ".join(clauses))
Runs self-tests.

Args:
    test_expr: include tests of expressions (which can be slow).
def test(cls, test_expr: bool = True) -> None:
    cls.test_dialect_specific_1()
    cls.test_identifiers()
    if test_expr:
        cls.test_expr()
    cls.test_sql_core()
    cls.test_dialect_specific_2()
Gets a monochrome log handler using a standard format.

Args:
    extranames: additional names to append to the logger's name
    with_process_id: include the process ID in the logger's name?
    with_thread_id: include the thread ID in the logger's name?
    stream: ``TextIO`` stream to send log output to

Returns:
    the :class:`logging.StreamHandler`
def get_monochrome_handler(
        extranames: List[str] = None,
        with_process_id: bool = False,
        with_thread_id: bool = False,
        stream: TextIO = None) -> logging.StreamHandler:
    fmt = "%(asctime)s.%(msecs)03d"
    if with_process_id or with_thread_id:
        procinfo = []  # type: List[str]
        if with_process_id:
            procinfo.append("p%(process)d")
        if with_thread_id:
            procinfo.append("t%(thread)d")
        fmt += " [{}]".format(".".join(procinfo))
    extras = ":" + ":".join(extranames) if extranames else ""
    fmt += " %(name)s{extras}:%(levelname)s: ".format(extras=extras)
    fmt += "%(message)s"
    f = logging.Formatter(fmt, datefmt=LOG_DATEFMT, style='%')
    h = logging.StreamHandler(stream)
    h.setFormatter(f)
    return h
Gets a colour log handler using a standard format.

Args:
    extranames: additional names to append to the logger's name
    with_process_id: include the process ID in the logger's name?
    with_thread_id: include the thread ID in the logger's name?
    stream: ``TextIO`` stream to send log output to

Returns:
    the :class:`logging.StreamHandler`
def get_colour_handler(extranames: List[str] = None,
                       with_process_id: bool = False,
                       with_thread_id: bool = False,
                       stream: TextIO = None) -> logging.StreamHandler:
    fmt = "%(white)s%(asctime)s.%(msecs)03d"  # this is dim white = grey
    if with_process_id or with_thread_id:
        procinfo = []  # type: List[str]
        if with_process_id:
            procinfo.append("p%(process)d")
        if with_thread_id:
            procinfo.append("t%(thread)d")
        fmt += " [{}]".format(".".join(procinfo))
    extras = ":" + ":".join(extranames) if extranames else ""
    fmt += " %(name)s{extras}:%(levelname)s: ".format(extras=extras)
    fmt += "%(reset)s%(log_color)s%(message)s"
    cf = ColoredFormatter(fmt,
                          datefmt=LOG_DATEFMT,
                          reset=True,
                          log_colors=LOG_COLORS,
                          secondary_log_colors={},
                          style='%')
    ch = logging.StreamHandler(stream)
    ch.setFormatter(cf)
    return ch
Quick function to set up the root logger for colour.

Should ONLY be called from the ``if __name__ == 'main'`` script; see
https://docs.python.org/3.4/howto/logging.html#library-config.

Args:
    level: log level to set
    with_process_id: include the process ID in the logger's name?
    with_thread_id: include the thread ID in the logger's name?
def main_only_quicksetup_rootlogger(level: int = logging.DEBUG,
                                    with_process_id: bool = False,
                                    with_thread_id: bool = False) -> None:
    # Nasty. Only call from "if __name__ == '__main__'" clauses!
    rootlogger = logging.getLogger()
    configure_logger_for_colour(rootlogger, level, remove_existing=True,
                                with_process_id=with_process_id,
                                with_thread_id=with_thread_id)
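A usage sketch:

import logging

if __name__ == '__main__':
    main_only_quicksetup_rootlogger(level=logging.INFO)
    logging.getLogger(__name__).info("Colour logging configured")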
Remove all handlers from a logger.

Args:
    logger: logger to modify
def remove_all_logger_handlers(logger: logging.Logger) -> None:
    while logger.handlers:
        h = logger.handlers[0]
        logger.removeHandler(h)
Create a new formatter and apply it to the logger.

:func:`logging.basicConfig` won't reset the formatter if another module
has called it, so always set the formatter like this.

Args:
    logger: logger to modify
    fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`
    datefmt: passed to the ``datefmt=`` argument of
        :class:`logging.Formatter`
def reset_logformat(logger: logging.Logger,
                    fmt: str,
                    datefmt: str = '%Y-%m-%d %H:%M:%S') -> None:
    handler = logging.StreamHandler()
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    handler.setFormatter(formatter)
    remove_all_logger_handlers(logger)
    logger.addHandler(handler)
    logger.propagate = False
Apply a simple time-stamped log format to an existing logger, and set
its loglevel to either ``logging.DEBUG`` or ``logging.INFO``.

Args:
    logger: logger to modify
    extraname: additional name to append to the logger's name
    level: log level to set
def reset_logformat_timestamped(logger: logging.Logger,
                                extraname: str = "",
                                level: int = logging.INFO) -> None:
    namebit = extraname + ":" if extraname else ""
    fmt = ("%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:" + namebit +
           "%(message)s")
    reset_logformat(logger, fmt=fmt)
    logger.setLevel(level)
Applies a preconfigured datetime/colour scheme to ALL loggers.

Should ONLY be called from the ``if __name__ == 'main'`` script; see
https://docs.python.org/3.4/howto/logging.html#library-config.

Generally MORE SENSIBLE just to apply a handler to the root logger.

Args:
    remove_existing: remove existing handlers from logger first?
def configure_all_loggers_for_colour(remove_existing: bool = True) -> None:
    handler = get_colour_handler()
    apply_handler_to_all_logs(handler, remove_existing=remove_existing)
Applies a handler to the root log, optionally removing existing handlers.

Should ONLY be called from the ``if __name__ == 'main'`` script; see
https://docs.python.org/3.4/howto/logging.html#library-config.

Args:
    handler: the handler to apply
    remove_existing: remove existing handlers from logger first?
def apply_handler_to_root_log(handler: logging.Handler,
                              remove_existing: bool = False) -> None:
    rootlog = logging.getLogger()
    if remove_existing:
        rootlog.handlers = []
    rootlog.addHandler(handler)
Applies a handler to all logs, optionally removing existing handlers.

Should ONLY be called from the ``if __name__ == 'main'`` script; see
https://docs.python.org/3.4/howto/logging.html#library-config.

Generally MORE SENSIBLE just to apply a handler to the root logger.

Args:
    handler: the handler to apply
    remove_existing: remove existing handlers from logger first?
def apply_handler_to_all_logs(handler: logging.Handler,
                              remove_existing: bool = False) -> None:
    # noinspection PyUnresolvedReferences
    for name, obj in logging.Logger.manager.loggerDict.items():
        if remove_existing:
            obj.handlers = []  # http://stackoverflow.com/questions/7484454
        obj.addHandler(handler)
Copy all currently configured logs to the specified file.

Should ONLY be called from the ``if __name__ == 'main'`` script; see
https://docs.python.org/3.4/howto/logging.html#library-config.

Args:
    filename: file to send log output to
    fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`
    datefmt: passed to the ``datefmt=`` argument of
        :class:`logging.Formatter`
def copy_all_logs_to_file(filename: str,
                          fmt: str = LOG_FORMAT,
                          datefmt: str = LOG_DATEFMT) -> None:
    fh = logging.FileHandler(filename)  # default file mode is 'a' for append
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    fh.setFormatter(formatter)
    apply_handler_to_all_logs(fh)
Set a log level for a log and all its handlers.

Args:
    log: log to modify
    level: log level to set
def set_level_for_logger_and_its_handlers(log: logging.Logger,
                                          level: int) -> None:
    log.setLevel(level)
    for h in log.handlers:  # type: logging.Handler
        h.setLevel(level)
r"""
Args:
    append_br: append ``<br>`` to each line?
    replace_nl_with_br: replace ``\n`` with ``<br>`` in messages?

See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py
def __init__(self, append_br: bool = False,
             replace_nl_with_br: bool = True) -> None:
    super().__init__(
        fmt='%(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        style='%'
    )
    self.append_br = append_br
    self.replace_nl_with_br = replace_nl_with_br
Returns the SQL column type used to make very large text columns for a
given dialect.

Args:
    dialect: a SQLAlchemy :class:`Dialect`

Returns:
    the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
    and 'NVARCHAR(MAX)' for SQL Server.
def giant_text_sqltype(dialect: Dialect) -> str:
    if dialect.name == SqlaDialectName.SQLSERVER:
        return 'NVARCHAR(MAX)'
    elif dialect.name == SqlaDialectName.MYSQL:
        return 'LONGTEXT'
    else:
        raise ValueError("Unknown dialect: {}".format(dialect.name))
Convert a signed integer to its "two's complement" representation.

Args:
    val: signed integer
    n_bits: number of bits (which must reflect a whole number of bytes)

Returns:
    unsigned integer: two's complement version
def signed_to_twos_comp(val: int, n_bits: int) -> int:
    assert n_bits % 8 == 0, "Must specify a whole number of bytes"
    n_bytes = n_bits // 8
    b = val.to_bytes(n_bytes, byteorder=sys.byteorder, signed=True)
    return int.from_bytes(b, byteorder=sys.byteorder, signed=False)
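Worked examples (the value is re-encoded via its byte representation):

print(hex(signed_to_twos_comp(-1, 16)))   # 0xffff
print(hex(signed_to_twos_comp(-128, 8)))  # 0x80
print(signed_to_twos_comp(5, 8))          # 5 (non-negative values unchanged)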
Converts an 8-byte sequence to a long integer.

Args:
    bytesdata: 8 consecutive bytes, as a ``bytes`` object, in
        little-endian format (least significant byte [LSB] first)

Returns:
    integer
def bytes_to_long(bytesdata: bytes) -> int:
    assert len(bytesdata) == 8
    return sum(b << (k * 8) for k, b in enumerate(bytesdata))
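For example:

print(bytes_to_long(bytes([1, 0, 0, 0, 0, 0, 0, 0])))  # 1 (LSB first)
print(bytes_to_long((258).to_bytes(8, "little")))      # 258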
Pure 32-bit Python implementation of MurmurHash3; see
http://stackoverflow.com/questions/13305290/is-there-a-pure-python-implementation-of-murmurhash.

Args:
    data: data to hash
    seed: seed

Returns:
    integer hash
def murmur3_x86_32(data: Union[bytes, bytearray], seed: int = 0) -> int:  # noqa
    c1 = 0xcc9e2d51
    c2 = 0x1b873593

    length = len(data)
    h1 = seed
    rounded_end = (length & 0xfffffffc)  # round down to 4-byte block
    for i in range(0, rounded_end, 4):
        # little-endian load order
        # RNC: removed ord() calls
        k1 = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8) | \
             ((data[i + 2] & 0xff) << 16) | (data[i + 3] << 24)
        k1 *= c1
        k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17)  # ROTL32(k1, 15)
        k1 *= c2
        h1 ^= k1
        h1 = (h1 << 13) | ((h1 & 0xffffffff) >> 19)  # ROTL32(h1, 13)
        h1 = h1 * 5 + 0xe6546b64

    # tail
    k1 = 0
    val = length & 0x03
    if val == 3:
        k1 = (data[rounded_end + 2] & 0xff) << 16
    # fallthrough
    if val in (2, 3):
        k1 |= (data[rounded_end + 1] & 0xff) << 8
    # fallthrough
    if val in (1, 2, 3):
        k1 |= data[rounded_end] & 0xff
        k1 *= c1
        k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17)  # ROTL32(k1, 15)
        k1 *= c2
        h1 ^= k1

    # finalization
    h1 ^= length

    # fmix(h1)
    h1 ^= ((h1 & 0xffffffff) >> 16)
    h1 *= 0x85ebca6b
    h1 ^= ((h1 & 0xffffffff) >> 13)
    h1 *= 0xc2b2ae35
    h1 ^= ((h1 & 0xffffffff) >> 16)

    return h1 & 0xffffffff
Pure 64-bit Python implementation of MurmurHash3; see
http://stackoverflow.com/questions/13305290/is-there-a-pure-python-implementation-of-murmurhash
(plus RNC bugfixes).

Args:
    data: data to hash
    seed: seed

Returns:
    integer hash
def murmur3_64(data: Union[bytes, bytearray], seed: int = 19820125) -> int:  # noqa
    m = 0xc6a4a7935bd1e995
    r = 47

    mask = 2 ** 64 - 1

    length = len(data)

    h = seed ^ ((m * length) & mask)

    offset = (length // 8) * 8
    # RNC: was /, but for Python 3 that gives float; brackets added for clarity
    for ll in range(0, offset, 8):
        k = bytes_to_long(data[ll:ll + 8])
        k = (k * m) & mask
        k ^= (k >> r) & mask
        k = (k * m) & mask
        h = (h ^ k)
        h = (h * m) & mask

    l = length & 7

    if l >= 7:
        h = (h ^ (data[offset + 6] << 48))
    if l >= 6:
        h = (h ^ (data[offset + 5] << 40))
    if l >= 5:
        h = (h ^ (data[offset + 4] << 32))
    if l >= 4:
        h = (h ^ (data[offset + 3] << 24))
    if l >= 3:
        h = (h ^ (data[offset + 2] << 16))
    if l >= 2:
        h = (h ^ (data[offset + 1] << 8))
    if l >= 1:
        h = (h ^ data[offset])
        h = (h * m) & mask

    h ^= (h >> r) & mask
    h = (h * m) & mask
    h ^= (h >> r) & mask

    return h
Implements 128-bit murmur3 hash for x64, as per ``pymmh3``, with some
bugfixes.

Args:
    key: data to hash
    seed: seed

Returns:
    integer hash
def pymmh3_hash128_x64(key: Union[bytes, bytearray], seed: int) -> int:
    def fmix(k):
        k ^= k >> 33
        k = (k * 0xff51afd7ed558ccd) & 0xFFFFFFFFFFFFFFFF
        k ^= k >> 33
        k = (k * 0xc4ceb9fe1a85ec53) & 0xFFFFFFFFFFFFFFFF
        k ^= k >> 33
        return k

    length = len(key)
    nblocks = int(length / 16)

    h1 = seed
    h2 = seed

    c1 = 0x87c37b91114253d5
    c2 = 0x4cf5ad432745937f

    # body
    for block_start in range(0, nblocks * 8, 8):
        # ??? big endian?
        k1 = (
            key[2 * block_start + 7] << 56 |
            key[2 * block_start + 6] << 48 |
            key[2 * block_start + 5] << 40 |
            key[2 * block_start + 4] << 32 |
            key[2 * block_start + 3] << 24 |
            key[2 * block_start + 2] << 16 |
            key[2 * block_start + 1] << 8 |
            key[2 * block_start + 0]
        )
        k2 = (
            key[2 * block_start + 15] << 56 |
            key[2 * block_start + 14] << 48 |
            key[2 * block_start + 13] << 40 |
            key[2 * block_start + 12] << 32 |
            key[2 * block_start + 11] << 24 |
            key[2 * block_start + 10] << 16 |
            key[2 * block_start + 9] << 8 |
            key[2 * block_start + 8]
        )

        k1 = (c1 * k1) & 0xFFFFFFFFFFFFFFFF
        k1 = (k1 << 31 | k1 >> 33) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
        k1 = (c2 * k1) & 0xFFFFFFFFFFFFFFFF
        h1 ^= k1

        h1 = (h1 << 27 | h1 >> 37) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
        h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
        h1 = (h1 * 5 + 0x52dce729) & 0xFFFFFFFFFFFFFFFF

        k2 = (c2 * k2) & 0xFFFFFFFFFFFFFFFF
        k2 = (k2 << 33 | k2 >> 31) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
        k2 = (c1 * k2) & 0xFFFFFFFFFFFFFFFF
        h2 ^= k2

        h2 = (h2 << 31 | h2 >> 33) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
        h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
        h2 = (h2 * 5 + 0x38495ab5) & 0xFFFFFFFFFFFFFFFF

    # tail
    tail_index = nblocks * 16
    k1 = 0
    k2 = 0
    tail_size = length & 15

    if tail_size >= 15:
        k2 ^= key[tail_index + 14] << 48
    if tail_size >= 14:
        k2 ^= key[tail_index + 13] << 40
    if tail_size >= 13:
        k2 ^= key[tail_index + 12] << 32
    if tail_size >= 12:
        k2 ^= key[tail_index + 11] << 24
    if tail_size >= 11:
        k2 ^= key[tail_index + 10] << 16
    if tail_size >= 10:
        k2 ^= key[tail_index + 9] << 8
    if tail_size >= 9:
        k2 ^= key[tail_index + 8]

    if tail_size > 8:
        k2 = (k2 * c2) & 0xFFFFFFFFFFFFFFFF
        k2 = (k2 << 33 | k2 >> 31) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
        k2 = (k2 * c1) & 0xFFFFFFFFFFFFFFFF
        h2 ^= k2

    if tail_size >= 8:
        k1 ^= key[tail_index + 7] << 56
    if tail_size >= 7:
        k1 ^= key[tail_index + 6] << 48
    if tail_size >= 6:
        k1 ^= key[tail_index + 5] << 40
    if tail_size >= 5:
        k1 ^= key[tail_index + 4] << 32
    if tail_size >= 4:
        k1 ^= key[tail_index + 3] << 24
    if tail_size >= 3:
        k1 ^= key[tail_index + 2] << 16
    if tail_size >= 2:
        k1 ^= key[tail_index + 1] << 8
    if tail_size >= 1:
        k1 ^= key[tail_index + 0]

    if tail_size > 0:
        k1 = (k1 * c1) & 0xFFFFFFFFFFFFFFFF
        k1 = (k1 << 31 | k1 >> 33) & 0xFFFFFFFFFFFFFFFF  # inlined ROTL64
        k1 = (k1 * c2) & 0xFFFFFFFFFFFFFFFF
        h1 ^= k1

    # finalization
    h1 ^= length
    h2 ^= length

    h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
    h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF

    h1 = fmix(h1)
    h2 = fmix(h2)

    h1 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF
    h2 = (h1 + h2) & 0xFFFFFFFFFFFFFFFF

    return h2 << 64 | h1
Implements 128-bit murmur3 hash for x86, as per ``pymmh3``, with some
bugfixes.

Args:
    key: data to hash
    seed: seed

Returns:
    integer hash
def pymmh3_hash128_x86(key: Union[bytes, bytearray], seed: int) -> int:
    def fmix(h):
        h ^= h >> 16
        h = (h * 0x85ebca6b) & 0xFFFFFFFF
        h ^= h >> 13
        h = (h * 0xc2b2ae35) & 0xFFFFFFFF
        h ^= h >> 16
        return h

    length = len(key)
    nblocks = int(length / 16)

    h1 = seed
    h2 = seed
    h3 = seed
    h4 = seed

    c1 = 0x239b961b
    c2 = 0xab0e9789
    c3 = 0x38b34ae5
    c4 = 0xa1e38b93

    # body
    for block_start in range(0, nblocks * 16, 16):
        k1 = (
            key[block_start + 3] << 24 |
            key[block_start + 2] << 16 |
            key[block_start + 1] << 8 |
            key[block_start + 0]
        )
        k2 = (
            key[block_start + 7] << 24 |
            key[block_start + 6] << 16 |
            key[block_start + 5] << 8 |
            key[block_start + 4]
        )
        k3 = (
            key[block_start + 11] << 24 |
            key[block_start + 10] << 16 |
            key[block_start + 9] << 8 |
            key[block_start + 8]
        )
        k4 = (
            key[block_start + 15] << 24 |
            key[block_start + 14] << 16 |
            key[block_start + 13] << 8 |
            key[block_start + 12]
        )

        k1 = (c1 * k1) & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  # inlined ROTL32
        k1 = (c2 * k1) & 0xFFFFFFFF
        h1 ^= k1

        h1 = (h1 << 19 | h1 >> 13) & 0xFFFFFFFF  # inlined ROTL32
        h1 = (h1 + h2) & 0xFFFFFFFF
        h1 = (h1 * 5 + 0x561ccd1b) & 0xFFFFFFFF

        k2 = (c2 * k2) & 0xFFFFFFFF
        k2 = (k2 << 16 | k2 >> 16) & 0xFFFFFFFF  # inlined ROTL32
        k2 = (c3 * k2) & 0xFFFFFFFF
        h2 ^= k2

        h2 = (h2 << 17 | h2 >> 15) & 0xFFFFFFFF  # inlined ROTL32
        h2 = (h2 + h3) & 0xFFFFFFFF
        h2 = (h2 * 5 + 0x0bcaa747) & 0xFFFFFFFF

        k3 = (c3 * k3) & 0xFFFFFFFF
        k3 = (k3 << 17 | k3 >> 15) & 0xFFFFFFFF  # inlined ROTL32
        k3 = (c4 * k3) & 0xFFFFFFFF
        h3 ^= k3

        h3 = (h3 << 15 | h3 >> 17) & 0xFFFFFFFF  # inlined ROTL32
        h3 = (h3 + h4) & 0xFFFFFFFF
        h3 = (h3 * 5 + 0x96cd1c35) & 0xFFFFFFFF

        k4 = (c4 * k4) & 0xFFFFFFFF
        k4 = (k4 << 18 | k4 >> 14) & 0xFFFFFFFF  # inlined ROTL32
        k4 = (c1 * k4) & 0xFFFFFFFF
        h4 ^= k4

        h4 = (h4 << 13 | h4 >> 19) & 0xFFFFFFFF  # inlined ROTL32
        h4 = (h1 + h4) & 0xFFFFFFFF
        h4 = (h4 * 5 + 0x32ac3b17) & 0xFFFFFFFF

    # tail
    tail_index = nblocks * 16
    k1 = 0
    k2 = 0
    k3 = 0
    k4 = 0
    tail_size = length & 15

    if tail_size >= 15:
        k4 ^= key[tail_index + 14] << 16
    if tail_size >= 14:
        k4 ^= key[tail_index + 13] << 8
    if tail_size >= 13:
        k4 ^= key[tail_index + 12]

    if tail_size > 12:
        k4 = (k4 * c4) & 0xFFFFFFFF
        k4 = (k4 << 18 | k4 >> 14) & 0xFFFFFFFF  # inlined ROTL32
        k4 = (k4 * c1) & 0xFFFFFFFF
        h4 ^= k4

    if tail_size >= 12:
        k3 ^= key[tail_index + 11] << 24
    if tail_size >= 11:
        k3 ^= key[tail_index + 10] << 16
    if tail_size >= 10:
        k3 ^= key[tail_index + 9] << 8
    if tail_size >= 9:
        k3 ^= key[tail_index + 8]

    if tail_size > 8:
        k3 = (k3 * c3) & 0xFFFFFFFF
        k3 = (k3 << 17 | k3 >> 15) & 0xFFFFFFFF  # inlined ROTL32
        k3 = (k3 * c4) & 0xFFFFFFFF
        h3 ^= k3

    if tail_size >= 8:
        k2 ^= key[tail_index + 7] << 24
    if tail_size >= 7:
        k2 ^= key[tail_index + 6] << 16
    if tail_size >= 6:
        k2 ^= key[tail_index + 5] << 8
    if tail_size >= 5:
        k2 ^= key[tail_index + 4]

    if tail_size > 4:
        k2 = (k2 * c2) & 0xFFFFFFFF
        k2 = (k2 << 16 | k2 >> 16) & 0xFFFFFFFF  # inlined ROTL32
        k2 = (k2 * c3) & 0xFFFFFFFF
        h2 ^= k2

    if tail_size >= 4:
        k1 ^= key[tail_index + 3] << 24
    if tail_size >= 3:
        k1 ^= key[tail_index + 2] << 16
    if tail_size >= 2:
        k1 ^= key[tail_index + 1] << 8
    if tail_size >= 1:
        k1 ^= key[tail_index + 0]

    if tail_size > 0:
        k1 = (k1 * c1) & 0xFFFFFFFF
        k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF  # inlined ROTL32
        k1 = (k1 * c2) & 0xFFFFFFFF
        h1 ^= k1

    # finalization
    h1 ^= length
    h2 ^= length
    h3 ^= length
    h4 ^= length

    h1 = (h1 + h2) & 0xFFFFFFFF
    h1 = (h1 + h3) & 0xFFFFFFFF
    h1 = (h1 + h4) & 0xFFFFFFFF
    h2 = (h1 + h2) & 0xFFFFFFFF
    h3 = (h1 + h3) & 0xFFFFFFFF
    h4 = (h1 + h4) & 0xFFFFFFFF

    h1 = fmix(h1)
    h2 = fmix(h2)
    h3 = fmix(h3)
    h4 = fmix(h4)

    h1 = (h1 + h2) & 0xFFFFFFFF
    h1 = (h1 + h3) & 0xFFFFFFFF
    h1 = (h1 + h4) & 0xFFFFFFFF
    h2 = (h1 + h2) & 0xFFFFFFFF
    h3 = (h1 + h3) & 0xFFFFFFFF
    h4 = (h1 + h4) & 0xFFFFFFFF

    return h4 << 96 | h3 << 64 | h2 << 32 | h1
Implements 128-bit murmur3 hash, as per ``pymmh3``.

Args:
    key: data to hash
    seed: seed
    x64arch: is a 64-bit architecture available?

Returns:
    integer hash
def pymmh3_hash128(key: Union[bytes, bytearray], seed: int = 0,
                   x64arch: bool = True) -> int:
    if x64arch:
        return pymmh3_hash128_x64(key, seed)
    else:
        return pymmh3_hash128_x86(key, seed)
Implements 64-bit murmur3 hash, as per ``pymmh3``. Returns a tuple.

Args:
    key: data to hash
    seed: seed
    x64arch: is a 64-bit architecture available?

Returns:
    tuple: tuple of integers, ``(signed_val1, signed_val2)``
def pymmh3_hash64(key: Union[bytes, bytearray], seed: int = 0,
                  x64arch: bool = True) -> Tuple[int, int]:
    hash_128 = pymmh3_hash128(key, seed, x64arch)

    unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF  # low half
    if unsigned_val1 & 0x8000000000000000 == 0:
        signed_val1 = unsigned_val1
    else:
        signed_val1 = -((unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1)

    unsigned_val2 = (hash_128 >> 64) & 0xFFFFFFFFFFFFFFFF  # high half
    if unsigned_val2 & 0x8000000000000000 == 0:
        signed_val2 = unsigned_val2
    else:
        signed_val2 = -((unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1)

    return signed_val1, signed_val2
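A small sanity-check sketch: the hash is deterministic for a given seed, and
both halves should fit in a signed 64-bit integer:

low, high = pymmh3_hash64(b"hello, world", seed=0)
assert (low, high) == pymmh3_hash64(b"hello, world", seed=0)
assert -2 ** 63 <= low < 2 ** 63 and -2 ** 63 <= high < 2 ** 63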
730,514
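A usage sketch for the 64-bit variant (assuming the functions above are in scope):

# Usage sketch: the two signed 64-bit halves of the 128-bit hash, in the
# same (low, high) order that the C mmh3.hash64() uses.
low64, high64 = pymmh3_hash64(b"hello, world", seed=0)
assert -2**63 <= low64 < 2**63 and -2**63 <= high64 < 2**63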
Checks the pure Python implementation of 32-bit murmur3 against the
``mmh3`` C-based module.

Args:
    data: data to hash
    seed: seed

Raises:
    AssertionError: if the two calculations don't match
def compare_python_to_reference_murmur3_32(data: Any, seed: int = 0) -> None:
    assert mmh3, "Need mmh3 module"
    c_data = to_str(data)
    c_signed = mmh3.hash(c_data, seed=seed)  # 32 bit
    py_data = to_bytes(c_data)
    py_unsigned = murmur3_x86_32(py_data, seed=seed)
    py_signed = twos_comp_to_signed(py_unsigned, n_bits=32)
    preamble = "Hashing {data} with MurmurHash3/32-bit/seed={seed}".format(
        data=repr(data), seed=seed)
    if c_signed == py_signed:
        print(preamble + " -> {result}: OK".format(result=c_signed))
    else:
        raise AssertionError(
            preamble + "; mmh3 says "
            "{c_data} -> {c_signed}, Python version says {py_data} -> "
            "{py_unsigned} = {py_signed}".format(
                c_data=repr(c_data), c_signed=c_signed,
                py_data=repr(py_data), py_unsigned=py_unsigned,
                py_signed=py_signed))
730,515
Checks the pure Python implementation of 64-bit murmur3 against the
``mmh3`` C-based module.

Args:
    data: data to hash
    seed: seed

Raises:
    AssertionError: if the two calculations don't match
def compare_python_to_reference_murmur3_64(data: Any, seed: int = 0) -> None:
    assert mmh3, "Need mmh3 module"
    c_data = to_str(data)
    c_signed_low, c_signed_high = mmh3.hash64(c_data, seed=seed,
                                              x64arch=IS_64_BIT)
    py_data = to_bytes(c_data)
    py_signed_low, py_signed_high = pymmh3_hash64(py_data, seed=seed)
    preamble = "Hashing {data} with MurmurHash3/64-bit values from 128-bit " \
               "hash/seed={seed}".format(data=repr(data), seed=seed)
    if c_signed_low == py_signed_low and c_signed_high == py_signed_high:
        print(preamble + " -> (low={low}, high={high}): OK".format(
            low=c_signed_low, high=c_signed_high))
    else:
        raise AssertionError(
            preamble +
            "; mmh3 says {c_data} -> (low={c_low}, high={c_high}), Python "
            "version says {py_data} -> (low={py_low}, high={py_high})".format(
                c_data=repr(c_data), c_low=c_signed_low,
                c_high=c_signed_high, py_data=repr(py_data),
                py_low=py_signed_low, py_high=py_signed_high))
730,516
Non-cryptographic, deterministic, fast hash.

Args:
    data: data to hash
    seed: seed

Returns:
    signed 32-bit integer
def hash32(data: Any, seed=0) -> int:
    with MultiTimerContext(timer, TIMING_HASH):
        c_data = to_str(data)
        if mmh3:
            return mmh3.hash(c_data, seed=seed)
        py_data = to_bytes(c_data)
        py_unsigned = murmur3_x86_32(py_data, seed=seed)
        return twos_comp_to_signed(py_unsigned, n_bits=32)
730,517
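A usage sketch, assuming the module context above (``timer``, ``TIMING_HASH``, and the fallback hash functions) is available; the key string is illustrative:

# Usage sketch: unlike Python's built-in hash() under PYTHONHASHSEED
# randomization, the result is stable across processes and runs.
h = hash32("example_key_12345")
assert -2**31 <= h < 2**31  # signed 32-bit range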
Non-cryptographic, deterministic, fast hash.

Args:
    data: data to hash
    seed: seed

Returns:
    signed 64-bit integer
def hash64(data: Any, seed: int = 0) -> int:
    c_data = to_str(data)
    if mmh3:
        # Hash the converted string, not the raw input, so the C and
        # pure-Python paths agree.
        c_signed_low, _ = mmh3.hash64(c_data, seed=seed, x64arch=IS_64_BIT)
        return c_signed_low
    py_data = to_bytes(c_data)
    py_signed_low, _ = pymmh3_hash64(py_data, seed=seed)
    return py_signed_low
730,518
Returns the mean of a list of numbers.

Args:
    values: values to mean, ignoring any values that are ``None``

Returns:
    the mean, or ``None`` if :math:`n = 0`
def mean(values: Sequence[Union[int, float, None]]) -> Optional[float]:
    total = 0.0  # starting with "0.0" causes automatic conversion to float
    n = 0
    for x in values:
        if x is not None:
            total += x
            n += 1
    return total / n if n > 0 else None
730,527
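A short worked example of the ``None``-skipping behaviour (values are illustrative):

assert mean([1, 2, None, 3]) == 2.0  # None is ignored: (1 + 2 + 3) / 3
assert mean([]) is None              # n == 0 -> None, not ZeroDivisionError
assert mean([None, None]) is None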
Returns the logit (log odds) of its input probability.

.. math::

    \alpha = logit(p) = log(p / (1 - p))

Args:
    p: :math:`p`

Returns:
    :math:`\alpha`, or ``None`` if ``p`` is not in the range [0, 1].
def safe_logit(p: Union[float, int]) -> Optional[float]:
    if p > 1 or p < 0:
        return None  # logit is undefined outside [0, 1]
    if p == 1:
        return float("inf")
    if p == 0:
        return float("-inf")
    return math.log(p / (1 - p))
730,528
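A short worked example of the boundary handling:

assert safe_logit(0.5) == 0.0         # log(0.5 / 0.5) == log(1) == 0
assert safe_logit(1) == float("inf")  # boundaries map to +/- infinity
assert safe_logit(-0.1) is None       # outside [0, 1] -> None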
Finds the SQLAlchemy dialect in use.

Args:
    mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
        :class:`Dialect` object

Returns:
    the SQLAlchemy :class:`Dialect` being used
def get_dialect(mixed: Union[SQLCompiler, Engine, Dialect]) -> Dialect:
    if isinstance(mixed, Dialect):
        return mixed
    elif isinstance(mixed, Engine):
        return mixed.dialect
    elif isinstance(mixed, SQLCompiler):
        return mixed.dialect
    else:
        raise ValueError("get_dialect: 'mixed' parameter of wrong type")
730,541
Finds the name of the SQLAlchemy dialect in use.

Args:
    mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
        :class:`Dialect` object

Returns:
    the SQLAlchemy dialect name being used
def get_dialect_name(mixed: Union[SQLCompiler, Engine, Dialect]) -> str:
    dialect = get_dialect(mixed)
    # noinspection PyUnresolvedReferences
    return dialect.name
730,542
Returns the SQLAlchemy :class:`IdentifierPreparer` in use for the dialect
being used.

Args:
    mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
        :class:`Dialect` object

Returns:
    an :class:`IdentifierPreparer`
def get_preparer(mixed: Union[SQLCompiler, Engine,
                              Dialect]) -> IdentifierPreparer:
    dialect = get_dialect(mixed)
    # noinspection PyUnresolvedReferences
    return dialect.preparer(dialect)
730,543
Converts an SQL identifier to a quoted version, via the SQL dialect in
use.

Args:
    identifier: the identifier to be quoted
    mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
        :class:`Dialect` object

Returns:
    the quoted identifier
def quote_identifier(identifier: str,
                     mixed: Union[SQLCompiler, Engine, Dialect]) -> str:
    # See also http://sqlalchemy-utils.readthedocs.io/en/latest/_modules/sqlalchemy_utils/functions/orm.html  # noqa
    return get_preparer(mixed).quote(identifier)
730,544
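A usage sketch, assuming SQLAlchemy is installed (the in-memory SQLite engine is an illustrative choice; quoting style varies by dialect, e.g. backticks for MySQL, double quotes for ANSI dialects):

from sqlalchemy import create_engine

engine = create_engine("sqlite://")
# "select" is a reserved word, so the preparer should quote it:
print(quote_identifier("select", engine))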
Converts a TSV line into sequential key/value pairs as a dictionary. For
example,

.. code-block:: none

    field1\tvalue1\tfield2\tvalue2

becomes

.. code-block:: none

    {"field1": "value1", "field2": "value2"}

Args:
    line: the line
    key_lower: should the keys be forced to lower case?
def tsv_pairs_to_dict(line: str, key_lower: bool = True) -> Dict[str, str]:
    items = line.split("\t")
    d = {}  # type: Dict[str, str]
    for chunk in chunks(items, 2):
        if len(chunk) < 2:
            log.warning("Bad chunk, not of length 2: {!r}", chunk)
            continue
        key = chunk[0]
        value = unescape_tabs_newlines(chunk[1])
        if key_lower:
            key = key.lower()
        d[key] = value
    return d
730,555
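A usage sketch (assuming the module's ``chunks()`` and ``unescape_tabs_newlines()`` helpers are in scope, as the function itself requires):

line = "Forename\tAlice\tSurname\tSmith"
print(tsv_pairs_to_dict(line))
# expected, given key_lower=True: {'forename': 'Alice', 'surname': 'Smith'}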
Import all submodules of a module, recursively, including subpackages.

Args:
    package: package (name or actual module)
    base_package_for_relative_import: path to prepend?
    recursive: import submodules too?

Returns:
    dict: mapping from full module name to module
def import_submodules(package: Union[str, ModuleType],
                      base_package_for_relative_import: str = None,
                      recursive: bool = True) -> Dict[str, ModuleType]:
    # http://stackoverflow.com/questions/3365740/how-to-import-all-submodules
    if isinstance(package, str):
        package = importlib.import_module(package,
                                          base_package_for_relative_import)
    results = {}
    for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        full_name = package.__name__ + '.' + name
        log.debug("importing: {}", full_name)
        results[full_name] = importlib.import_module(full_name)
        if recursive and is_pkg:
            results.update(import_submodules(full_name))
    return results
730,556
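A usage sketch; the standard-library ``json`` package is just an illustrative target with a few importable submodules:

modules = import_submodules("json")
print(sorted(modules))  # e.g. json.decoder, json.encoder, json.tool, ...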
Performs a :func:`shutil.which` command using the PATH from the specified
environment.

Reason: when you use ``run([executable, ...], env)`` and therefore
``subprocess.run([executable, ...], env=env)``, the PATH that's searched
for ``executable`` is the parent's, not the new child's -- so you have to
find the executable manually.

Args:
    executable: executable to find
    env: environment to fetch the PATH variable from
def which_with_envpath(executable: str, env: Dict[str, str]) -> str:
    oldpath = os.environ.get("PATH", "")
    os.environ["PATH"] = env.get("PATH", "")  # default guards against env lacking PATH
    which = shutil.which(executable)
    os.environ["PATH"] = oldpath
    return which
730,560
Performs a recursive ``chown``.

Args:
    path: path to walk down
    user: user name or ID
    group: group name or ID

As per http://stackoverflow.com/questions/2853723
def chown_r(path: str, user: str, group: str) -> None:
    for root, dirs, files in os.walk(path):
        for x in dirs:
            shutil.chown(os.path.join(root, x), user, group)
        for x in files:
            shutil.chown(os.path.join(root, x), user, group)
730,573
Recursive ``chmod``.

Args:
    root: directory to walk down
    permission: permission flags, e.g. ``stat.S_IWUSR``
def chmod_r(root: str, permission: int) -> None:
    os.chmod(root, permission)
    for dirpath, dirnames, filenames in os.walk(root):
        for d in dirnames:
            os.chmod(os.path.join(dirpath, d), permission)
        for f in filenames:
            os.chmod(os.path.join(dirpath, f), permission)
730,574
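A usage sketch (the path is illustrative). Note that the same permission bits are applied to files and directories alike, so include the directory-traversal (execute) bit explicitly if directories must stay readable:

import stat

chmod_r("/tmp/example_tree", stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)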
From a starting list of files and/or directories, generates filenames of
all files in the list, and (if ``recursive`` is set) all files within
directories in the list.

Args:
    starting_filenames: files and/or directories
    recursive: walk down any directories in the starting list,
        recursively?

Yields:
    each filename
def gen_filenames(starting_filenames: List[str],
                  recursive: bool) -> Generator[str, None, None]:
    for base_filename in starting_filenames:
        if os.path.isfile(base_filename):
            yield os.path.abspath(base_filename)
        elif os.path.isdir(base_filename) and recursive:
            for dirpath, dirnames, filenames in os.walk(base_filename):
                for fname in filenames:
                    yield os.path.abspath(os.path.join(dirpath, fname))
730,577
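A usage sketch:

# Expand a mixed list of files and directories into absolute file paths.
for path in gen_filenames(["."], recursive=True):
    print(path)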
Checks if a file is locked by opening it in append mode. (If no exception
is thrown in that situation, then the file is not locked.)

Args:
    filepath: file to check

Returns:
    tuple: ``(exists, locked)``

See https://www.calazan.com/how-to-check-if-a-file-is-locked-in-python/.
def exists_locked(filepath: str) -> Tuple[bool, bool]:
    exists = False
    locked = None
    file_object = None
    if os.path.exists(filepath):
        exists = True
        locked = True
        try:
            buffer_size = 8
            # Open the file in append mode with a small buffer; this raises
            # IOError if another process holds a lock on the file.
            file_object = open(filepath, 'a', buffer_size)
            if file_object:
                locked = False  # exists and not locked
        except IOError:
            pass
        finally:
            if file_object:
                file_object.close()
    return exists, locked
730,578
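A usage sketch distinguishing the three outcomes (the path is illustrative; note that a permission error on open also reports as "locked"):

exists, locked = exists_locked("/tmp/example.lock")
if not exists:
    print("absent")
elif locked:
    print("present but locked (or unwritable)")
else:
    print("present and writable")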
Asks MySQL for its variables and status.

Args:
    mysql: ``mysql`` executable filename
    host: host name
    port: TCP/IP port number
    user: username

Returns:
    dictionary of MySQL variables/values
def get_mysql_vars(mysql: str,
                   host: str,
                   port: int,
                   user: str) -> Dict[str, str]:
    cmdargs = [
        mysql,
        "-h", host,
        "-P", str(port),
        "-e", "SHOW VARIABLES; SHOW STATUS",
        "-u", user,
        "-p"  # prompt for password
    ]
    log.info("Connecting to MySQL with user: {}", user)
    log.debug(cmdargs)
    process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE)
    out, err = process.communicate()
    lines = out.decode("utf8").splitlines()
    mysqlvars = {}
    for line in lines:
        var, val = line.split("\t")
        mysqlvars[var] = val
    return mysqlvars
730,597
For "hits": prints either the ``.zip`` filename, or the ``.zip`` filename
and the inner filename.

Args:
    zipfilename: filename of the ``.zip`` file
    contentsfilename: filename of the inner file
    show_inner_file: if ``True``, show both; if ``False``, show just the
        ``.zip`` filename
def report_hit_filename(zipfilename: str, contentsfilename: str,
                        show_inner_file: bool) -> None:
    if show_inner_file:
        print("{} [{}]".format(zipfilename, contentsfilename))
    else:
        print(zipfilename)
730,601
Prints a line from a file, with the ``.zip`` filename and optionally also
the inner filename.

Args:
    zipfilename: filename of the ``.zip`` file
    contentsfilename: filename of the inner file
    line: the line from the inner file
    show_inner_file: if ``True``, show both filenames; if ``False``, show
        just the ``.zip`` filename
def report_line(zipfilename: str, contentsfilename: str, line: str,
                show_inner_file: bool) -> None:
    if show_inner_file:
        print("{} [{}]: {}".format(zipfilename, contentsfilename, line))
    else:
        print("{}: {}".format(zipfilename, line))
730,602
Validates the form.

Args:
    controls: an iterable of ``(key, value)`` tuples
    subcontrol:

Returns:
    a Colander ``appstruct``

Raises:
    ValidationFailure: on failure
def validate(self,
             controls: Iterable[Tuple[str, str]],
             subcontrol: str = None) -> Any:
    try:
        return super().validate(controls, subcontrol)
    except ValidationFailure as e:
        if DEBUG_FORM_VALIDATION:
            log.warning("Validation failure: {!r}; {}",
                        e, self._get_form_errors())
        self._show_hidden_widgets_for_fields_with_errors(self)
        raise
730,680
Converts something to a :class:`pendulum.DateTime`.

Args:
    x: something that may be coercible to a datetime
    assume_local: if ``True``, assume local timezone; if ``False``, assume
        UTC

Returns:
    a :class:`pendulum.DateTime`, or ``None``.

Raises:
    pendulum.parsing.exceptions.ParserError: if a string fails to parse
    ValueError: if no conversion possible
def coerce_to_pendulum(x: PotentialDatetimeType,
                       assume_local: bool = False) -> Optional[DateTime]:
    if not x:  # None and blank string
        return None
    if isinstance(x, DateTime):
        return x
    tz = get_tz_local() if assume_local else get_tz_utc()
    if isinstance(x, datetime.datetime):
        return pendulum.instance(x, tz=tz)  # (*)
    elif isinstance(x, datetime.date):
        # BEWARE: datetime subclasses date. The order is crucial here.
        # Can also use: type(x) is datetime.date
        # noinspection PyUnresolvedReferences
        midnight = DateTime.min.time()
        dt = DateTime.combine(x, midnight)
        return pendulum.instance(dt, tz=tz)  # (*)
    elif isinstance(x, str):
        return pendulum.parse(x, tz=tz)  # (*)  # may raise
    else:
        raise ValueError("Don't know how to convert to DateTime: "
                         "{!r}".format(x))
730,729
Converts something to a :class:`pendulum.Date`.

Args:
    x: something that may be coercible to a date
    assume_local: if ``True``, assume local timezone; if ``False``, assume
        UTC

Returns:
    a :class:`pendulum.Date`, or ``None``.

Raises:
    pendulum.parsing.exceptions.ParserError: if a string fails to parse
    ValueError: if no conversion possible
def coerce_to_pendulum_date(x: PotentialDatetimeType,
                            assume_local: bool = False) -> Optional[Date]:
    p = coerce_to_pendulum(x, assume_local=assume_local)
    return None if p is None else p.date()
730,730
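A usage sketch (assuming pendulum is installed): strings, dates, and datetimes all coerce to a ``pendulum.Date``, while falsy input gives ``None``:

import datetime

print(coerce_to_pendulum_date("2020-03-01"))                       # 2020-03-01
print(coerce_to_pendulum_date(datetime.datetime(2020, 3, 1, 23)))  # 2020-03-01
print(coerce_to_pendulum_date(None))                               # None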
Calculates the time between two dates/times expressed as strings.

Args:
    start: start date/time
    end: end date/time
    default: string value to return in case either of the inputs is
        ``None``

Returns:
    a string that is one of

    .. code-block:: none

        'hh:mm'
        '-hh:mm'
        default
def get_duration_h_m(start: Union[str, DateTime],
                     end: Union[str, DateTime],
                     default: str = "N/A") -> str:
    start = coerce_to_pendulum(start)
    end = coerce_to_pendulum(end)
    if start is None or end is None:
        return default
    duration = end - start
    total_minutes = duration.in_minutes()
    # Format as [-]h:mm. Taking abs() first sidesteps the oddities of
    # divmod with negative numbers (e.g. divmod(-90, 60) == (-2, 30)),
    # which would otherwise mangle durations such as -1:30.
    sign = "-" if total_minutes < 0 else ""
    hours, minutes = divmod(abs(total_minutes), 60)
    return "{}{}:{:02d}".format(sign, hours, minutes)
730,739
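A usage sketch:

print(get_duration_h_m("2020-01-01T09:00", "2020-01-01T17:30"))  # 8:30
print(get_duration_h_m("2020-01-01T09:00", None))                # N/A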
Age (in whole years) at a particular date, or ``default``.

Args:
    dob: date of birth
    when: date/time at which to calculate age
    default: value to return if either input is ``None``

Returns:
    age in whole years (rounded down), or ``default``
def get_age(dob: PotentialDatetimeType,
            when: PotentialDatetimeType,
            default: str = "") -> Union[int, str]:
    dob = coerce_to_pendulum_date(dob)
    when = coerce_to_pendulum_date(when)
    if dob is None or when is None:
        return default
    return (when - dob).years
730,740
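A usage sketch of the rounding-down behaviour (dates illustrative):

print(get_age("1980-06-15", "2020-06-14"))  # 39: birthday not yet reached
print(get_age("1980-06-15", "2020-06-15"))  # 40
print(get_age(None, "2020-06-15"))          # "" (the default)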
Validates an integer as an NHS number.

Args:
    n: NHS number

Returns:
    valid?

Checksum details are at
http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp
def is_valid_nhs_number(n: int) -> bool:
    if not isinstance(n, int):
        log.debug("is_valid_nhs_number: parameter was not of integer type")
        return False

    s = str(n)
    # Not 10 digits long?
    if len(s) != 10:
        log.debug("is_valid_nhs_number: not 10 digits")
        return False

    main_digits = [int(s[i]) for i in range(9)]
    actual_check_digit = int(s[9])  # tenth digit
    expected_check_digit = nhs_check_digit(main_digits)
    if expected_check_digit == 10:
        log.debug("is_valid_nhs_number: calculated check digit invalid")
        return False
    if expected_check_digit != actual_check_digit:
        log.debug("is_valid_nhs_number: check digit mismatch")
        return False
    # Hooray!
    return True
730,776
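The function above delegates to ``nhs_check_digit()``, defined elsewhere in the module. A minimal sketch of the Modulus 11 algorithm it is presumed to implement, per the NHS data dictionary reference above (this sketch and its name are hypothetical, not the module's code):

from typing import List

def nhs_check_digit_sketch(main_digits: List[int]) -> int:
    # Weight the nine digits 10, 9, ..., 2; sum; take the remainder mod 11;
    # subtract from 11. A result of 11 maps to 0; a result of 10 means no
    # valid NHS number starts with these nine digits.
    total = sum(d * (10 - i) for i, d in enumerate(main_digits))
    check = 11 - (total % 11)
    return 0 if check == 11 else check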
Generate all rows from a cursor.

Args:
    cursor: the cursor
    arraysize: split fetches into chunks of this many records

Yields:
    each row
def genrows(cursor: Cursor, arraysize: int = 1000) \
        -> Generator[List[Any], None, None]:
    # http://code.activestate.com/recipes/137270-use-generators-for-fetching-large-db-record-sets/  # noqa
    while True:
        results = cursor.fetchmany(arraysize)
        if not results:
            break
        for result in results:
            yield result
730,893
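A usage sketch with a DB-API cursor; sqlite3 is chosen as an illustrative driver:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("SELECT 1 UNION ALL SELECT 2")
for row in genrows(cur, arraysize=500):
    print(row)
conn.close()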