func_code_string
stringlengths
52
1.94M
func_documentation_string
stringlengths
1
47.2k
def _connect(self): # Attempt to get a cached connection from the connection pool try: connection = self._pool_manager.get(self.pid, self) LOGGER.debug("Re-using connection for %s", self.pid) except pool.NoIdleConnectionsError: if self._pool_manager.i...
Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError
def _get_cursor(self, connection, name=None): cursor = connection.cursor(name=name, cursor_factory=self._cursor_factory) if name is not None: cursor.scrollable = True cursor.withhold = True return cursor
Return a cursor for the given cursor_factory. Specify a name to use server-side cursors. :param connection: The connection to create a cursor on :type connection: psycopg2.extensions.connection :param str name: A cursor name for a server side cursor :rtype: psycopg2.extensions.c...
def _incr_exceptions(self): self._pool_manager.get_connection(self.pid, self._conn).exceptions += 1
Increment the number of exceptions for the current connection.
def _incr_executions(self): self._pool_manager.get_connection(self.pid, self._conn).executions += 1
Increment the number of executions for the current connection.
def _register_unicode(connection):
    """Register the UNICODE and UNICODEARRAY types on *connection* so
    string results are returned as unicode.

    :type connection: psycopg2.extensions.connection
    :param connection: Where to register things

    """
    for type_object in (psycopg2.extensions.UNICODE,
                        psycopg2.extensions.UNICODEARRAY):
        psycopg2.extensions.register_type(type_object, connection)
Register the cursor to be able to receive Unicode string. :type connection: psycopg2.extensions.connection :param connection: Where to register things
def _status(self):
    """Return the current connection status as an integer value.

    The status should match one of the ``queries.Session`` status
    constants (INTRANS, PREPARED, READY, SETUP).

    :rtype: int

    """
    # Report an open-but-idle transaction (STATUS_BEGIN) as READY.
    if self._conn.status == psycopg2.extensions.STATUS_BEGIN:
        return self.READY
    return self._conn.status
Return the current connection status as an integer value. The status should match one of the following constants: - queries.Session.INTRANS: Connection established, in transaction - queries.Session.PREPARED: Prepared for second phase of transaction - queries.Session.READY: Connected, n...
def as_dict(self):
    """Return a single row result as a dictionary.

    If the results contain multiple rows, a :py:class:`ValueError`
    will be raised.

    :return: dict
    :raises: ValueError

    """
    if not self.cursor.rowcount:
        return {}
    self._rewind()
    if self.cursor.rowcount != 1:
        raise ValueError('More than one row')
    return dict(self.cursor.fetchone())
Return a single row result as a dictionary. If the results contain multiple rows, a :py:class:`ValueError` will be raised. :return: dict :raises: ValueError
def items(self):
    """Return all of the rows that are in the result set.

    :rtype: list

    """
    rows = self.cursor
    if not rows.rowcount:
        return []
    # Rewind to the first row before fetching everything.
    rows.scroll(0, 'absolute')
    return rows.fetchall()
Return all of the rows that are in the result set. :rtype: list
def get_current_user():
    """Return the current username for the logged in user

    :rtype: str

    """
    # Fall back to getpass on platforms without the pwd module.
    if pwd is None:
        return getpass.getuser()
    try:
        return pwd.getpwuid(os.getuid())[0]
    except KeyError as error:
        # NOTE(review): implicitly returns None after logging; callers
        # appear to tolerate a missing username -- confirm before changing.
        LOGGER.error('Could not get logged-in user: %s', error)
Return the current username for the logged in user :rtype: str
def uri(host='localhost', port=5432, dbname='postgres', user='postgres',
        password=None):
    """Return a PostgreSQL connection URI for the specified values.

    :param str host: Host to connect to
    :param int port: Port to connect on
    :param str dbname: The database name
    :param str user: User to connect as
    :param str password: The password to use, None for no password
    :return str: The PostgreSQL connection URI

    """
    netloc = '{}:{}'.format(host, port) if port else host
    if password:
        return 'postgresql://{}:{}@{}/{}'.format(user, password, netloc,
                                                 dbname)
    return 'postgresql://{}@{}/{}'.format(user, netloc, dbname)
Return a PostgreSQL connection URI for the specified values. :param str host: Host to connect to :param int port: Port to connect on :param str dbname: The database name :param str user: User to connect as :param str password: The password to use, None for no password :return str: The PostgreSQ...
def uri_to_kwargs(uri): parsed = urlparse(uri) default_user = get_current_user() password = unquote(parsed.password) if parsed.password else None kwargs = {'host': parsed.hostname, 'port': parsed.port, 'dbname': parsed.path[1:] or default_user, 'user': pars...
Return a URI as kwargs for connecting to PostgreSQL with psycopg2, applying default values for non-specified areas of the URI. :param str uri: The connection URI :rtype: dict
def urlparse(url): value = 'http%s' % url[5:] if url[:5] == 'postgresql' else url parsed = _urlparse.urlparse(value) path, query = parsed.path, parsed.query hostname = parsed.hostname if parsed.hostname else '' return PARSED(parsed.scheme.replace('http', 'postgresql'), parsed....
Parse the URL in a Python2/3 independent fashion. :param str url: The URL to parse :rtype: Parsed
def free(self):
    """Release the results and connection lock from the TornadoSession
    object.

    This **must** be called after you finish processing the results from
    a query or callproc, or the connection will not be returned to the
    pool.

    """
    # Mark freed first so any concurrent check sees the released state.
    self._freed = True
    self._cleanup(self.cursor, self._fd)
Release the results and connection lock from the TornadoSession object. This **must** be called after you finish processing the results from :py:meth:`TornadoSession.query <queries.TornadoSession.query>` or :py:meth:`TornadoSession.callproc <queries.TornadoSession.callproc>` or the conne...
def _ensure_pool_exists(self): if self.pid not in self._pool_manager: self._pool_manager.create(self.pid, self._pool_idle_ttl, self._pool_max_size, self._ioloop.time)
Create the pool in the pool manager if it does not exist.
def _connect(self): future = concurrent.Future() # Attempt to get a cached connection from the connection pool try: connection = self._pool_manager.get(self.pid, self) self._connections[connection.fileno()] = connection future.set_result(connection) ...
Connect to PostgreSQL, either by reusing a connection from the pool if possible, or by creating the new connection. :rtype: psycopg2.extensions.connection :raises: pool.NoIdleConnectionsError
def _create_connection(self, future): LOGGER.debug('Creating a new connection for %s', self.pid) # Create a new PostgreSQL connection kwargs = utils.uri_to_kwargs(self._uri) try: connection = self._psycopg2_connect(kwargs) except (psycopg2.Error, OSError, soc...
Create a new PostgreSQL connection :param tornado.concurrent.Future future: future for new conn result
def _execute(self, method, query, parameters=None): future = concurrent.Future() def on_connected(cf): if cf.exception(): future.set_exception(cf.exception()) return # Get the psycopg2 connection object and cursor ...
Issue a query asynchronously on the server, mogrifying the parameters against the sql statement and yielding the results as a :py:class:`Results <queries.tornado_session.Results>` object. This function reduces duplicate code for callproc and query by getting the class attribute for the ...
def _exec_cleanup(self, cursor, fd): LOGGER.debug('Closing cursor and cleaning %s', fd) try: cursor.close() except (psycopg2.Error, psycopg2.Warning) as error: LOGGER.debug('Error closing the cursor: %s', error) self._cleanup_fd(fd) # If the clean...
Close the cursor, remove any references to the fd in internal state and remove the fd from the ioloop. :param psycopg2.extensions.cursor cursor: The cursor to close :param int fd: The connection file descriptor
def _cleanup_fd(self, fd, close=False): self._ioloop.remove_handler(fd) if fd in self._connections: try: self._pool_manager.free(self.pid, self._connections[fd]) except pool.ConnectionNotFoundError: pass if close: ...
Ensure the socket is removed from the IOLoop, the connection stack, and futures stack. :param int fd: The fd number to clean up
def _incr_exceptions(self, conn): self._pool_manager.get_connection(self.pid, conn).exceptions += 1
Increment the number of exceptions for the current connection. :param psycopg2.extensions.connection conn: the psycopg2 connection
def _incr_executions(self, conn): self._pool_manager.get_connection(self.pid, conn).executions += 1
Increment the number of executions for the current connection. :param psycopg2.extensions.connection conn: the psycopg2 connection
def _on_io_events(self, fd=None, _events=None): if fd not in self._connections: LOGGER.warning('Received IO event for non-existing connection') return self._poll_connection(fd)
Invoked by Tornado's IOLoop when there are events for the fd :param int fd: The file descriptor for the event :param int _events: The events raised
def _poll_connection(self, fd): try: state = self._connections[fd].poll() except (OSError, socket.error) as error: self._ioloop.remove_handler(fd) if fd in self._futures and not self._futures[fd].done(): self._futures[fd].set_exception( ...
Check with psycopg2 to see what action to take. If the state is POLL_OK, we should have a pending callback for that fd. :param int fd: The socket fd for the postgresql connection
def main(): import codecs setuptools.setup( name='wcwidth', version='0.1.7', description=("Measures number of Terminal column cells " "of wide-character codes"), long_description=codecs.open( os.path.join(HERE, 'README.rst'), 'r', 'utf8').rea...
Setup.py entry point.
def _do_readme_update(self): import codecs import glob # read in, data_in = codecs.open( os.path.join(HERE, 'README.rst'), 'r', 'utf8').read() # search for beginning and end positions, pos_begin = data_in.find(self.README_PATCH_FROM) assert po...
Patch README.rst to reflect the data files used in release.
def _do_east_asian(self): self._do_retrieve(self.EAW_URL, self.EAW_IN) (version, date, values) = self._parse_east_asian( fname=self.EAW_IN, properties=(u'W', u'F',) ) table = self._make_table(values) self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', ...
Fetch and update east-asian tables.
def _do_zero_width(self): self._do_retrieve(self.UCD_URL, self.UCD_IN) (version, date, values) = self._parse_category( fname=self.UCD_IN, categories=('Me', 'Mn',) ) table = self._make_table(values) self._do_write(self.ZERO_OUT, 'ZERO_WIDTH', versi...
Fetch and update zero width tables.
def _make_table(values): import collections table = collections.deque() start, end = values[0], values[0] for num, value in enumerate(values): if num == 0: table.append((value, value,)) continue start, end = table.pop() ...
Return a tuple of lookup tables for given values.
def _do_retrieve(url, fname): folder = os.path.dirname(fname) if not os.path.exists(folder): os.makedirs(folder) print("{}/ created.".format(folder)) if not os.path.exists(fname): with open(fname, 'wb') as fout: print("retrieving {}."....
Retrieve given url to target filepath fname.
def _parse_east_asian(fname, properties=(u'W', u'F',)): version, date, values = None, None, [] print("parsing {} ..".format(fname)) for line in open(fname, 'rb'): uline = line.decode('utf-8') if version is None: version = uline.split(None, 1)[1].r...
Parse unicode east-asian width tables.
def _parse_category(fname, categories): version, date, values = None, None, [] print("parsing {} ..".format(fname)) for line in open(fname, 'rb'): uline = line.decode('utf-8') if version is None: version = uline.split(None, 1)[1].rstrip() ...
Parse unicode category tables.
def _do_write(fname, variable, version, date, table): # pylint: disable=R0914 # Too many local variables (19/15) (col 4) print("writing {} ..".format(fname)) import unicodedata import datetime import string utc_now = datetime.datetime.utcnow() ...
Write combining tables to filesystem as python code.
def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local): ucp = (ucs.encode('unicode_escape')[2:] .decode('ascii') .upper() .lstrip('0')) url = "http://codepoints.net/U+{}".format(ucp) name = unicodedata.name(ucs) return (u"libc,ours={},{} [--o{}o--] name={} val={} {}" ...
Return string report of combining character differences. :param ucs: unicode point. :type ucs: unicode :param wcwidth_libc: libc-wcwidth's reported character length. :type wcwidth_libc: int :param wcwidth_local: wcwidth's reported character length. :type wcwidth_local: int :rtype: unicode
def main(using_locale=('en_US', 'UTF-8',)): all_ucs = (ucs for ucs in [unichr(val) for val in range(sys.maxunicode)] if is_named(ucs) and isnt_combining(ucs)) libc_name = ctypes.util.find_library('c') if not libc_name: raise ImportError("Can't find C library.") ...
Program entry point. Load the entire Unicode table into memory, excluding those that: - are not named (func unicodedata.name returns empty string), - are combining characters. Using ``locale``, for each unicode character string compare libc's wcwidth with local wcwidth.wcwidth() function;...
def validate_args(opts): if opts['--wide'] is None: opts['--wide'] = 2 else: assert opts['--wide'] in ("1", "2"), opts['--wide'] if opts['--alignment'] is None: opts['--alignment'] = 'left' else: assert opts['--alignment'] in ('left', 'right'), opts['--alignment'] ...
Validate and return options provided by docopt parsing.
def main(opts): term = Terminal() style = Style() # if the terminal supports colors, use a Style instance with some # standout colors (magenta, cyan). if term.number_of_colors: style = Style(attr_major=term.magenta, attr_minor=term.bright_cyan, ...
Program entry point.
def hint_width(self):
    """Width of a column segment."""
    delim_len = len(self.style.delimiter)
    # delimiter + glyph cell(s) + delimiter + space + codepoint text
    # (UCS_PRINTLEN + 2 for the "U+" prefix) + space + name field.
    return (delim_len + self.wide + delim_len +
            len(u' ') + (UCS_PRINTLEN + 2) + len(u' ') +
            self.style.name_len)
Width of a column segment.
def head_item(self): delimiter = self.style.attr_minor(self.style.delimiter) hint = self.style.header_hint * self.wide heading = (u'{delimiter}{hint}{delimiter}' .format(delimiter=delimiter, hint=hint)) alignment = lambda *args: ( self.term.rjust(*...
Text of a single column heading.
def msg_intro(self):
    """Introductory message displayed above the heading."""
    delim = self.style.attr_minor(self.style.delimiter)
    message = self.intro_msg_fmt.format(delim=delim).rstrip()
    return self.term.center(message)
Introductory message displayed above the heading.
def num_columns(self):
    """Number of columns displayed."""
    if not self.term.is_a_tty:
        # Non-interactive output gets a single column.
        return 1
    return self.term.width // self.hint_width
Number of columns displayed.
def on_resize(self, *args): # pylint: disable=W0613 # Unused argument 'args' self.screen.style.name_len = min(self.screen.style.name_len, self.term.width - 15) assert self.term.width >= self.screen.hint_width, ( 'Scree...
Signal handler callback for SIGWINCH.
def _set_lastpage(self): self.last_page = (len(self._page_data) - 1) // self.screen.page_size
Calculate value of class attribute ``last_page``.
def display_initialize(self): echo(self.term.home + self.term.clear) echo(self.term.move_y(self.term.height // 2)) echo(self.term.center('Initializing page data ...').rstrip()) flushout() if LIMIT_UCS == 0x10000: echo('\n\n') echo(self.term.blink_...
Display 'please wait' message, and narrow build warning.
def initialize_page_data(self): if self.term.is_a_tty: self.display_initialize() self.character_generator = self.character_factory(self.screen.wide) page_data = list() while True: try: page_data.append(next(self.character_generator)) ...
Initialize the page data for the given screen.
def page_data(self, idx, offset): size = self.screen.page_size while offset < 0 and idx: offset += size idx -= 1 offset = max(0, offset) while offset >= size: offset -= size idx += 1 if idx == self.last_page: of...
Return character data for page of given index and offset. :param idx: page index. :type idx: int :param offset: scrolling region offset of current page. :type offset: int :returns: list of tuples in form of ``(ucs, name)`` :rtype: list[(unicode, unicode)]
def _run_notty(self, writer): page_idx = page_offset = 0 while True: npage_idx, _ = self.draw(writer, page_idx + 1, page_offset) if npage_idx == self.last_page: # page displayed was last page, quit. break page_idx = npage_idx ...
Pager run method for terminals that are not a tty.
def _run_tty(self, writer, reader): # allow window-change signal to reflow screen signal.signal(signal.SIGWINCH, self.on_resize) page_idx = page_offset = 0 while True: if self.dirty: page_idx, page_offset = self.draw(writer, ...
Pager run method for terminals that are a tty.
def run(self, writer, reader):
    """Pager entry point.

    Interactive mode (a tty) runs until the quit keystroke is detected;
    non-interactive mode exits after displaying all pages.

    :param writer: callable writes to output stream, receiving unicode.
    :param reader: callable reads keystrokes (interactive mode only).

    """
    self._page_data = self.initialize_page_data()
    self._set_lastpage()
    if self.term.is_a_tty:
        self._run_tty(writer, reader)
    else:
        self._run_notty(writer)
Pager entry point. In interactive mode (terminal is a tty), run until ``process_keystroke()`` detects quit keystroke ('q'). In non-interactive mode, exit after displaying all unicode points. :param writer: callable writes to output stream, receiving unicode. :type writer: call...
def process_keystroke(self, inp, idx, offset):
    """Process keystroke ``inp``, adjusting screen parameters.

    :param inp: return value of Terminal.inkey().
    :param idx: page index.
    :type idx: int
    :param offset: scrolling region offset of current page.
    :type offset: int
    :returns: tuple of (idx, offset); ``(-1, -1)`` signals quit.

    """
    if inp.lower() in (u'q', u'Q'):
        # Quit requested.
        return (-1, -1)
    self._process_keystroke_commands(inp)
    return self._process_keystroke_movement(inp, idx, offset)
Process keystroke ``inp``, adjusting screen parameters. :param inp: return value of Terminal.inkey(). :type inp: blessed.keyboard.Keystroke :param idx: page index. :type idx: int :param offset: scrolling region offset of current page. :type offset: int :returns: ...
def _process_keystroke_commands(self, inp): if inp in (u'1', u'2'): # chose 1 or 2-character wide if int(inp) != self.screen.wide: self.screen.wide = int(inp) self.on_resize(None, None) elif inp in (u'_', u'-'): # adjust name l...
Process keystrokes that issue commands (side effects).
def _process_keystroke_movement(self, inp, idx, offset): term = self.term if inp in (u'y', u'k') or inp.code in (term.KEY_UP,): # scroll backward 1 line idx, offset = (idx, offset - self.screen.num_columns) elif inp in (u'e', u'j') or inp.code in (term.KEY_ENTER,...
Process keystrokes that adjust index and offset.
def draw(self, writer, idx, offset): # as our screen can be resized while we're mid-calculation, # our self.dirty flag can become re-toggled; because we are # not re-flowing our pagination, we must begin over again. while self.dirty: self.draw_heading(writer) ...
Draw the current page view to ``writer``. :param writer: callable writes to output stream, receiving unicode. :type writer: callable :param idx: current page index. :type idx: int :param offset: scrolling region offset of current page. :type offset: int :returns:...
def draw_heading(self, writer):
    """Conditionally redraw the heading when ``dirty`` is STATE_REFRESH.

    Moves the cursor home, clears the screen, and writes the intro
    message and column header.

    :param writer: callable writes to output stream, receiving unicode.
    :returns: True when the heading was redrawn, otherwise None.

    """
    if self.dirty != self.STATE_REFRESH:
        return None
    writer(u''.join((self.term.home, self.term.clear,
                     self.screen.msg_intro, '\n',
                     self.screen.header, '\n')))
    return True
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH. When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved to (0,0), screen is cleared, and heading is displayed. :param writer: callable writes to output stream, receiving unicode. :returns: True if clas...
def draw_status(self, writer, idx): if self.term.is_a_tty: writer(self.term.hide_cursor()) style = self.screen.style writer(self.term.move(self.term.height - 1)) if idx == self.last_page: last_end = u'(END)' else: ...
Conditionally draw status bar when output terminal is a tty. :param writer: callable writes to output stream, receiving unicode. :param idx: current page position index. :type idx: int
def page_view(self, data): if self.term.is_a_tty: yield self.term.move(self.screen.row_begins, 0) # sequence clears to end-of-line clear_eol = self.term.clear_eol # sequence clears to end-of-screen clear_eos = self.term.clear_eos # track our current c...
Generator yields text to be displayed for the current unicode pageview. :param data: The current page's data as tuple of ``(ucs, name)``. :rtype: generator
def text_entry(self, ucs, name): style = self.screen.style if len(name) > style.name_len: idx = max(0, style.name_len - len(style.continuation)) name = u''.join((name[:idx], style.continuation if idx else u'')) if style.alignment == 'right': fmt = u' ...
Display a single column segment row describing ``(ucs, name)``. :param ucs: target unicode point character string. :param name: name of unicode point. :rtype: unicode
def back_tick(cmd, ret_err=False, as_str=True, raise_err=None): if raise_err is None: raise_err = False if ret_err else True cmd_is_seq = isinstance(cmd, (list, tuple)) proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=not cmd_is_seq) out, err = proc.communicate() retcode = proc.returnc...
Run command `cmd`, return stdout, or stdout, stderr if `ret_err` Roughly equivalent to ``check_output`` in Python 2.7 Parameters ---------- cmd : sequence command to execute ret_err : bool, optional If True, return stderr in addition to stdout. If False, just return stdout...
def unique_by_index(sequence):
    """unique elements in `sequence` in the order in which they occur

    Parameters
    ----------
    sequence : iterable

    Returns
    -------
    uniques : list
        unique elements of `sequence`, ordered by the order in which the
        element occurs in `sequence`

    """
    # Linear membership scan keeps support for unhashable elements.
    ordered = []
    for element in sequence:
        if element in ordered:
            continue
        ordered.append(element)
    return ordered
unique elements in `sequence` in the order in which they occur Parameters ---------- sequence : iterable Returns ------- uniques : list unique elements of sequence, ordered by the order in which the element occurs in `sequence`
def ensure_permissions(mode_flags=stat.S_IWUSR): def decorator(f): def modify(filename, *args, **kwargs): m = chmod_perms(filename) if exists(filename) else mode_flags if not m & mode_flags: os.chmod(filename, m | mode_flags) try: retu...
decorator to ensure a filename has given permissions. If changed, original permissions are restored after the decorated modification.
def get_install_names(filename): lines = _cmd_out_err(['otool', '-L', filename]) if not _line0_says_object(lines[0], filename): return () names = tuple(parse_install_name(line)[0] for line in lines[1:]) install_id = get_install_id(filename) if not install_id is None: assert name...
Return install names from library named in `filename` Returns tuple of install names tuple will be empty if no install names, or if this is not an object file. Parameters ---------- filename : str filename of library Returns ------- install_names : tuple tuple of inst...
def get_install_id(filename): lines = _cmd_out_err(['otool', '-D', filename]) if not _line0_says_object(lines[0], filename): return None if len(lines) == 1: return None if len(lines) != 2: raise InstallNameError('Unexpected otool output ' + '\n'.join(lines)) return lines...
Return install id from library named in `filename` Returns None if no install id, or if this is not an object file. Parameters ---------- filename : str filename of library Returns ------- install_id : str install id of library `filename`, or None if no install id
def set_install_name(filename, oldname, newname):
    """Set install name `oldname` to `newname` in library filename

    Parameters
    ----------
    filename : str
        filename of library
    oldname : str
        current install name in library
    newname : str
        replacement name for `oldname`

    Raises
    ------
    InstallNameError
        if `oldname` is not among the install names of `filename`

    """
    if oldname not in get_install_names(filename):
        raise InstallNameError('{0} not in install names for {1}'.format(
            oldname, filename))
    back_tick(['install_name_tool', '-change', oldname, newname, filename])
Set install name `oldname` to `newname` in library filename Parameters ---------- filename : str filename of library oldname : str current install name in library newname : str replacement name for `oldname`
def set_install_id(filename, install_id):
    """Set install id for library named in `filename`

    Parameters
    ----------
    filename : str
        filename of library
    install_id : str
        install id for library `filename`

    Raises
    ------
    InstallNameError
        if `filename` has no install id

    """
    if get_install_id(filename) is None:
        raise InstallNameError('{0} has no install id'.format(filename))
    back_tick(['install_name_tool', '-id', install_id, filename])
Set install id for library named in `filename` Parameters ---------- filename : str filename of library install_id : str install id for library `filename` Raises ------ InstallNameError if `filename` has no install id
def get_rpaths(filename): try: lines = _cmd_out_err(['otool', '-l', filename]) except RuntimeError: return () if not _line0_says_object(lines[0], filename): return () lines = [line.strip() for line in lines] paths = [] line_no = 1 while line_no < len(lines): ...
Return a tuple of rpaths from the library `filename` If `filename` is not a library then the returned tuple will be empty. Parameters ---------- filename : str filename of library Returns ------- rpath : tuple rpath paths in `filename`
def dir2zip(in_dir, zip_fname): z = zipfile.ZipFile(zip_fname, 'w', compression=zipfile.ZIP_DEFLATED) for root, dirs, files in os.walk(in_dir): for file in files: in_fname = pjoin(root, file) in_stat = os.stat(in_fname) # Preserve file per...
Make a zip file `zip_fname` with contents of directory `in_dir` The recorded filenames are relative to `in_dir`, so doing a standard zip unpack of the resulting `zip_fname` in an empty directory will result in the original directory contents. Parameters ---------- in_dir : str Director...
def find_package_dirs(root_path):
    """Find python package directories in directory `root_path`

    Parameters
    ----------
    root_path : str
        Directory to search for package subdirectories

    Returns
    -------
    package_sdirs : set
        Set of strings where each is a subdirectory of `root_path`,
        containing an ``__init__.py`` file

    """
    # '.' entries are kept relative, matching the caller's expectations.
    candidates = (entry if root_path == '.' else pjoin(root_path, entry)
                  for entry in os.listdir(root_path))
    return set(path for path in candidates
               if isdir(path) and exists(pjoin(path, '__init__.py')))
Find python package directories in directory `root_path` Parameters ---------- root_path : str Directory to search for package subdirectories Returns ------- package_sdirs : set Set of strings where each is a subdirectory of `root_path`, containing an ``__init__.py`` fi...
def cmp_contents(filename1, filename2):
    """Returns True if contents of the files are the same

    Parameters
    ----------
    filename1 : str
        filename of first file to compare
    filename2 : str
        filename of second file to compare

    Returns
    -------
    tf : bool
        True if binary contents of `filename1` is same as binary contents
        of `filename2`

    """
    contents = []
    for fname in (filename1, filename2):
        with open_readable(fname, 'rb') as fobj:
            contents.append(fobj.read())
    return contents[0] == contents[1]
Returns True if contents of the files are the same Parameters ---------- filename1 : str filename of first file to compare filename2 : str filename of second file to compare Returns ------- tf : bool True if binary contents of `filename1` is same as binary contents ...
def get_archs(libname): if not exists(libname): raise RuntimeError(libname + " is not a file") try: stdout = back_tick(['lipo', '-info', libname]) except RuntimeError: return frozenset() lines = [line.strip() for line in stdout.split('\n') if line.strip()] # For some rea...
Return architecture types from library `libname` Parameters ---------- libname : str filename of binary for which to return arch codes Returns ------- arch_names : frozenset Empty (frozen)set if no arch codes. If not empty, contains one or more of 'ppc', 'ppc64', 'i386...
def validate_signature(filename): out, err = back_tick(['codesign', '--verify', filename], ret_err=True, as_str=True, raise_err=False) if not err: return # The existing signature is valid if 'code object is not signed at all' in err: return # File has no signatu...
Remove invalid signatures from a binary file If the file signature is missing or valid then it will be ignored Invalid signatures are replaced with an ad-hoc signature. This is the closest you can get to removing a signature on MacOS Parameters ---------- filename : str Filepath to a...
def os_path_relpath(path, start=os.path.curdir): if not path: raise ValueError("no path specified") start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x] path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x] # Work out how much of the filepath is shared b...
Return a relative version of a path
def fuse_trees(to_tree, from_tree, lib_exts=('.so', '.dylib', '.a')): for from_dirpath, dirnames, filenames in os.walk(from_tree): to_dirpath = pjoin(to_tree, relpath(from_dirpath, from_tree)) # Copy any missing directories in to_path for dirname in tuple(dirnames): to_path ...
Fuse path `from_tree` into path `to_tree` For each file in `from_tree` - check for library file extension (in `lib_exts` - if present, check if there is a file with matching relative path in `to_tree`, if so, use :func:`delocate.tools.lipo_fuse` to fuse the two libraries together and write into `to_tre...
def fuse_wheels(to_wheel, from_wheel, out_wheel): to_wheel, from_wheel, out_wheel = [ abspath(w) for w in (to_wheel, from_wheel, out_wheel)] with InTemporaryDirectory(): zip2dir(to_wheel, 'to_wheel') zip2dir(from_wheel, 'from_wheel') fuse_trees('to_wheel', 'from_wheel') ...
Fuse `from_wheel` into `to_wheel`, write to `out_wheel` Parameters --------- to_wheel : str filename of wheel to fuse into from_wheel : str filename of wheel to fuse from out_wheel : str filename of new wheel from fusion of `to_wheel` and `from_wheel`
def delocate_tree_libs(lib_dict, lib_path, root_path): copied_libs = {} delocated_libs = set() copied_basenames = set() rp_root_path = realpath(root_path) rp_lib_path = realpath(lib_path) # Test for errors first to avoid getting half-way through changing the tree for required, requiring...
Move needed libraries in `lib_dict` into `lib_path` `lib_dict` has keys naming libraries required by the files in the corresponding value. Call the keys, "required libs". Call the values "requiring objects". Copy all the required libs to `lib_path`. Fix up the rpaths and install names in the re...
def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None): if copied_libs is None: copied_libs = {} else: copied_libs = dict(copied_libs) done = False while not done: in_len = len(copied_libs) _copy_required(lib_path, copy_filt_func, copied_libs) ...
Analyze `lib_path` for library dependencies and copy libraries `lib_path` is a directory containing libraries. The libraries might themselves have dependencies. This function analyzes the dependencies and copies library dependencies that match the filter `copy_filt_func`. It also adjusts the dependin...
def _copy_required(lib_path, copy_filt_func, copied_libs): # Paths will be prepended with `lib_path` lib_dict = tree_libs(lib_path) # Map library paths after copy ('copied') to path before copy ('orig') rp_lp = realpath(lib_path) copied2orig = dict((pjoin(rp_lp, basename(c)), c) for c in copied...
Copy libraries required for files in `lib_path` to `lib_path` Augment `copied_libs` dictionary with any newly copied libraries, modifying `copied_libs` in-place - see Notes. This is one pass of ``copy_recurse`` Parameters ---------- lib_path : str Directory containing libraries co...
def delocate_path(tree_path, lib_path, lib_filt_func = None, copy_filt_func = filter_system_libs): if lib_filt_func == "dylibs-only": lib_filt_func = _dylibs_only if not exists(lib_path): os.makedirs(lib_path) lib_dict = tree_libs(tree_path, lib_filt_...
Copy required libraries for files in `tree_path` into `lib_path` Parameters ---------- tree_path : str Root path of tree to search for required libraries lib_path : str Directory into which we copy required libraries lib_filt_func : None or str or callable, optional If None,...
def _merge_lib_dict(d1, d2): for required, requirings in d2.items(): if required in d1: d1[required].update(requirings) else: d1[required] = requirings return None
Merges lib_dict `d2` into lib_dict `d1`
def delocate_wheel(in_wheel, out_wheel = None, lib_sdir = '.dylibs', lib_filt_func = None, copy_filt_func = filter_system_libs, require_archs = None, check_verbose = False, ): if ...
Update wheel by copying required libraries to `lib_sdir` in wheel Create `lib_sdir` in wheel tree only if we are copying one or more libraries. If `out_wheel` is None (the default), overwrite the wheel `in_wheel` in-place. Parameters ---------- in_wheel : str Filename of wheel to ...
def patch_wheel(in_wheel, patch_fname, out_wheel=None): in_wheel = abspath(in_wheel) patch_fname = abspath(patch_fname) if out_wheel is None: out_wheel = in_wheel else: out_wheel = abspath(out_wheel) if not exists(patch_fname): raise ValueError("patch file {0} does not e...
Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel` If `out_wheel` is None (the default), overwrite the wheel `in_wheel` in-place. Parameters ---------- in_wheel : str Filename of wheel to process patch_fname : str Filename of patch file. Will be applied with ...
def check_archs(copied_libs, require_archs=(), stop_fast=False): if isinstance(require_archs, string_types): require_archs = (['i386', 'x86_64'] if require_archs == 'intel' else [require_archs]) require_archs = frozenset(require_archs) bads = [] for depended_lib, de...
Check compatibility of archs in `copied_libs` dict Parameters ---------- copied_libs : dict dict containing the (key, value) pairs of (``copied_lib_path``, ``dependings_dict``), where ``copied_lib_path`` is a library real path that has been copied during delocation, and ``dependings...
def bads_report(bads, path_prefix=None): path_processor = ((lambda x : x) if path_prefix is None else get_rp_stripper(path_prefix)) reports = [] for result in bads: if len(result) == 3: depended_lib, depending_lib, missing_archs = result reports.app...
Return a nice report of bad architectures in `bads` Parameters ---------- bads : set set of length 2 or 3 tuples. A length 2 tuple is of form ``(depending_lib, missing_archs)`` meaning that an arch in `require_archs` was missing from ``depending_lib``. A length 3 tuple is o...
def tree_libs(start_path, filt_func=None): lib_dict = {} for dirpath, dirnames, basenames in os.walk(start_path): for base in basenames: depending_libpath = realpath(pjoin(dirpath, base)) if not filt_func is None and not filt_func(depending_libpath): continue...
Return analysis of library dependencies within `start_path` Parameters ---------- start_path : str root path of tree to search for libraries depending on other libraries. filt_func : None or callable, optional If None, inspect all files for library dependencies. If callable, acc...
def resolve_rpath(lib_path, rpaths): if not lib_path.startswith('@rpath/'): return lib_path lib_rpath = lib_path.split('/', 1)[1] for rpath in rpaths: rpath_lib = realpath(pjoin(rpath, lib_rpath)) if os.path.exists(rpath_lib): return rpath_lib warnings.warn( ...
Return `lib_path` with its `@rpath` resolved If the `lib_path` doesn't have `@rpath` then it's returned as is. If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path` combination found. If the library can't be found in `rpaths` then a detailed warning is printed and `lib_path` is return...
def get_prefix_stripper(strip_prefix):
    """Return a function that removes `strip_prefix` from the front of a string.

    Parameters
    ----------
    strip_prefix : str
        Prefix to strip from the beginning of a string if present.

    Returns
    -------
    stripper : callable
        ``stripper(a_string)`` returns ``a_string`` without the leading
        ``strip_prefix``; strings that do not start with the prefix are
        returned unchanged.
    """
    prefix_len = len(strip_prefix)

    def stripper(path):
        # Only drop the leading characters when the prefix actually matches.
        if path.startswith(strip_prefix):
            return path[prefix_len:]
        return path

    return stripper
Return function to strip `strip_prefix` prefix from string if present Parameters ---------- prefix : str Prefix to strip from the beginning of string if present Returns ------- stripper : func function such that ``stripper(a_string)`` will strip `prefix` from ``a_string...
def stripped_lib_dict(lib_dict, strip_prefix): relative_dict = {} stripper = get_prefix_stripper(strip_prefix) for lib_path, dependings_dict in lib_dict.items(): ding_dict = {} for depending_libpath, install_name in dependings_dict.items(): ding_dict[stripper(depending_libpa...
Return `lib_dict` with `strip_prefix` removed from start of paths Use to give form of `lib_dict` that appears relative to some base path given by `strip_prefix`. Particularly useful for analyzing wheels where we unpack to a temporary path before analyzing. Parameters ---------- lib_dict : dic...
def wheel_libs(wheel_fname, filt_func=None):
    """Return analysis of library dependencies within a Python wheel.

    The wheel is unpacked into a temporary directory, scanned for library
    dependencies, and the temporary path prefix is stripped from the result
    so paths appear relative to the wheel root.

    Parameters
    ----------
    wheel_fname : str
        Filename of wheel.
    filt_func : None or callable, optional
        If None, inspect all files for library dependencies; if callable,
        it selects which unpacked files are inspected.

    Returns
    -------
    dict
        Dependency analysis keyed by paths relative to the wheel root.
    """
    with TemporaryDirectory() as tmpdir:
        zip2dir(wheel_fname, tmpdir)
        dependency_map = tree_libs(tmpdir, filt_func)
        # Strip the (resolved) temporary path so keys are wheel-relative.
        prefix = realpath(tmpdir) + os.path.sep
        return stripped_lib_dict(dependency_map, prefix)
Return analysis of library dependencies with a Python wheel Use this routine for a dump of the dependency tree. Parameters ---------- wheel_fname : str Filename of wheel filt_func : None or callable, optional If None, inspect all files for library dependencies. If callable, ...
def _open_for_csv(name, mode):
    """Open `name` for csv use, papering over Python 2/3 open API differences.

    Python 2's csv module wants binary mode; Python 3's wants text mode
    with ``newline=''`` and an explicit encoding.
    """
    if sys.version_info[0] >= 3:
        return open_rw(name, mode, newline='', encoding='utf-8')
    return open_rw(name, mode + 'b')
Deal with Python 2/3 open API differences
def rewrite_record(bdist_dir): info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info')) if len(info_dirs) != 1: raise WheelToolsError("Should be exactly one `*.dist_info` directory") record_path = pjoin(info_dirs[0], 'RECORD') record_relpath = relpath(record_path, bdist_dir) # Unsign whee...
Rewrite RECORD file with hashes for all files in `bdist_dir` Copied from :meth:`wheel.bdist_wheel.bdist_wheel.write_record` Will also unsign wheel Parameters ---------- bdist_dir : str Path of unpacked wheel file
def add_platforms(in_wheel, platforms, out_path=None, clobber=False): in_wheel = abspath(in_wheel) out_path = dirname(in_wheel) if out_path is None else abspath(out_path) wf = WheelFile(in_wheel) info_fname = _get_wheelinfo_name(wf) # Check what tags we have in_fname_tags = wf.parsed_filena...
Add platform tags `platforms` to `in_wheel` filename and WHEEL tags Add any platform tags in `platforms` that are missing from `in_wheel` filename. Add any platform tags in `platforms` that are missing from `in_wheel` ``WHEEL`` file. Parameters ---------- in_wheel : str Filename o...
def temporal_betweenness_centrality(tnet=None, paths=None, calc='time'): if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate ...
Returns temporal betweenness centrality per node. Parameters ----------- Input should be *either* tnet or paths. data : array or dict Temporal network input (graphlet or contact). nettype: 'bu', 'bd'. calc : str either 'global' or 'time' paths : pandas dataframe O...
def volatility(tnet, distance_func_name='default', calc='global', communities=None, event_displacement=None): r # Get input (C or G) tnet, netinfo = process_input(tnet, ['C', 'G', 'TN']) distance_func_name = check_distance_funciton_input( distance_func_name, netinfo) if not isinstance(distan...
r""" Volatility of temporal networks. Volatility is the average distance between consecutive time points of graphlets (difference is caclualted either globally or per edge). Parameters ---------- tnet : array or dict temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','w...
def allegiance(community): N = community.shape[0] C = community.shape[1] T = P = np.zeros([N, N]) for t in range(len(community[0, :])): for i in range(len(community[:, 0])): for j in range(len(community[:, 0])): if i == j: continue ...
Computes the allegiance matrix with values representing the probability that nodes i and j were assigned to the same community by time-varying clustering methods. parameters ---------- community : array array of community assignment of size node,time returns ------- P : array ...
def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet'): if isinstance(ncontacts, list): if len(ncontacts) != nnodes: raise ValueError( 'Number of contacts, if a list, should be one per node') if isinstance(lam, list): if len(lam...
Generate a random network where intervals between contacts are distributed by a poisson distribution Parameters ---------- nnodes : int Number of nodes in networks ncontacts : int or list Number of expected contacts (i.e. edges). If list, number of contacts for each node. Any ...
def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False): r if communities is None: if isinstance(tnet, dict): if 'communities' in tnet.keys(): communities = tnet['communities'] else: raise ValueError('Community index n...
r''' Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes. Parameters ---------- tnet : array, dict graphlet or contact sequence input. Only positive matrices considered. communities : array community vector. Either 1D (...
def graphlet_stack_plot(netin, ax, q=10, cmap='Reds', gridcolor='k', borderwidth=2, bordercolor=None, Fs=1, timeunit='', t0=1, sharpen='yes', vminmax='minmax'): r # Get input type (C, G, TO) inputType = checkInput(netin) # Convert TO to C representation if inputType == 'TO': netin = netin.co...
r''' Returns matplotlib axis handle for graphlet_stack_plot. This is a row of transformed connectivity matrices to look like a 3D stack. Parameters ---------- netin : array, dict network input (graphlet or contact) ax : matplotlib ax handles. q : int Quality. Increaseing this w...
def partition_inference(tctc_mat, comp, tau, sigma, kappa): r communityinfo = {} communityinfo['community'] = [] communityinfo['start'] = np.empty(0) communityinfo['end'] = np.empty(0) communityinfo['size'] = np.empty(0) for i, tcomp in enumerate(comp): # This can go in parallel loop...
r""" Takes a TCTC trajectory matrix and returns a dataframe where all multi-label communities are listed. Can take a little time with large datasets; optimization could remove some for loops.
def tctc(data, tau, epsilon, sigma, kappa=0, largedataset=False, rule='flock', noise=None, raw_signal='amplitude', output='array', tempdir=None, njobs=1, largestonly=False): r # Get distance matrix if largedataset: raise NotImplementedError( 'HDF5 implementation for large datasets is not...
r""" Runs TCTC community detection Parameters ---------- data : array Multiariate series with dimensions: "time, node" that belong to a network. tau : int tau specifies the minimum number of time-points of each temporal community must last. epsilon : float epsilon specif...
def temporal_efficiency(tnet=None, paths=None, calc='global'): r if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate them ...
r""" Returns temporal efficiency estimate. BU networks only. Parameters ---------- Input should be *either* tnet or paths. data : array or dict Temporal network input (graphlet or contact). nettype: 'bu', 'bd'. paths : pandas dataframe Output of TenetoBIDS.networkmeasure.sho...
def network_from_array(self, array): if len(array.shape) == 2: array = np.array(array, ndmin=3).transpose([1, 2, 0]) teneto.utils.check_TemporalNetwork_input(array, 'array') uvals = np.unique(array) if len(uvals) == 2 and 1 in uvals and 0 in uvals: i, j, ...
Defines a network from an array. Parameters ---------- array : array 3D numpy array.
def network_from_df(self, df):
    """Define the temporal network from a pandas edge-list dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        Edge list with columns 'i', 'j', 't' (node indices and temporal
        index); should also include 'weight' if the network is weighted.
        Each row is one edge.
    """
    # Validate the frame's structure before replacing the stored network.
    teneto.utils.check_TemporalNetwork_input(df, 'df')
    self.network = df
    # NOTE(review): presumably refreshes derived state from self.network —
    # confirm against _update_network's implementation.
    self._update_network()
Defines a network from a pandas dataframe. Parameters ---------- df : dataframe Pandas dataframe. Should have columns: \'i\', \'j\', \'t\' where i and j are node indices and t is the temporal index. If weighted, should also include \'weight\'. Each row is an edge.