INSTRUCTION
stringlengths 1
46.3k
| RESPONSE
stringlengths 75
80.2k
|
|---|---|
Normalize a path.
This function simplifies a path by collapsing back-references
and removing duplicated separators.
Arguments:
path (str): Path to normalize.
Returns:
str: A valid FS path.
Example:
>>> normpath("/foo//bar/frob/../baz")
'/foo/bar/baz'
>>> normpath("foo/../../bar")
Traceback (most recent call last):
    ...
IllegalBackReference: path 'foo/../../bar' contains back-references outside of filesystem
|
def normpath(path):
    # type: (Text) -> Text
    """Normalize a path.

    This function simplifies a path by collapsing back-references
    and removing duplicated separators.

    Arguments:
        path (str): Path to normalize.

    Returns:
        str: A valid FS path.

    Example:
        >>> normpath("/foo//bar/frob/../baz")
        '/foo/bar/baz'
        >>> normpath("foo/../../bar")
        Traceback (most recent call last):
            ...
        IllegalBackReference: path 'foo/../../bar' contains back-references outside of filesystem

    """
    # '' and '/' are already normal (both are substrings of "/")
    if path in "/":
        return path
    # An early out if there is no need to normalize this path
    if not _requires_normalization(path):
        return path.rstrip("/")
    prefix = "/" if path.startswith("/") else ""
    stack = []  # type: List[Text]
    for segment in path.split("/"):
        if segment == "..":
            # Popping past the root means the path escapes the filesystem
            if not stack:
                raise IllegalBackReference(path)
            stack.pop()
        elif segment not in ("", "."):
            # Skip empty segments (duplicate slashes) and '.' references
            stack.append(segment)
    return prefix + "/".join(stack)
|
Get intermediate paths from the root to the given path.
Arguments:
path (str): A PyFilesystem path
reverse (bool): Reverses the order of the paths
(default `False`).
Returns:
list: A list of paths.
Example:
>>> recursepath('a/b/c')
['/', '/a', '/a/b', '/a/b/c']
|
def recursepath(path, reverse=False):
    # type: (Text, bool) -> List[Text]
    """Get intermediate paths from the root to the given path.

    Arguments:
        path (str): A PyFilesystem path.
        reverse (bool): Reverses the order of the paths
            (default `False`).

    Returns:
        list: A list of paths.

    Example:
        >>> recursepath('a/b/c')
        ['/', '/a', '/a/b', '/a/b/c']

    """
    if path in "/":
        # '' and '/' both resolve to just the root
        return ["/"]
    # Normalize and append a trailing slash so every component ends in '/'
    _path = abspath(normpath(path)) + "/"
    results = ["/"]
    cursor = 1
    end = len(_path)
    while cursor < end:
        cursor = _path.find("/", cursor)
        results.append(_path[:cursor])
        cursor += 1
    return results[::-1] if reverse else results
|
Join any number of paths together.
Arguments:
*paths (str): Paths to join, given as positional arguments.
Returns:
str: The joined path.
Example:
>>> join('foo', 'bar', 'baz')
'foo/bar/baz'
>>> join('foo/bar', '../baz')
'foo/baz'
>>> join('foo/bar', '/baz')
'/baz'
|
def join(*paths):
    # type: (*Text) -> Text
    """Join any number of paths together.

    Arguments:
        *paths (str): Paths to join, given as positional arguments.

    Returns:
        str: The joined path.

    Example:
        >>> join('foo', 'bar', 'baz')
        'foo/bar/baz'
        >>> join('foo/bar', '../baz')
        'foo/baz'
        >>> join('foo/bar', '/baz')
        '/baz'

    """
    absolute = False
    collected = []  # type: List[Text]
    for component in paths:
        if not component:
            continue
        if component[0] == "/":
            # An absolute component discards everything before it
            del collected[:]
            absolute = True
        collected.append(component)
    joined = normpath("/".join(collected))
    return abspath(joined) if absolute else joined
|
Join two paths together.
This is faster than :func:`~fs.path.join`, but only works when the
second path is relative, and there are no back references in either
path.
Arguments:
path1 (str): A PyFilesytem path.
path2 (str): A PyFilesytem path.
Returns:
str: The joint path.
Example:
>>> combine("foo/bar", "baz")
'foo/bar/baz'
|
def combine(path1, path2):
    # type: (Text, Text) -> Text
    """Join two paths together.

    This is faster than :func:`~fs.path.join`, but only works when the
    second path is relative, and there are no back references in either
    path.

    Arguments:
        path1 (str): A PyFilesytem path.
        path2 (str): A PyFilesytem path.

    Returns:
        str: The joint path.

    Example:
        >>> combine("foo/bar", "baz")
        'foo/bar/baz'

    """
    if not path1:
        # BUG FIX: previously this was ``path2.lstrip()`` (no argument),
        # which strips *whitespace* rather than path separators --
        # inconsistent with the separator-stripping branch below.
        return path2.lstrip("/")
    return "{}/{}".format(path1.rstrip("/"), path2.lstrip("/"))
|
Split a path in to its component parts.
Arguments:
path (str): Path to split in to parts.
Returns:
list: List of components
Example:
>>> parts('/foo/bar/baz')
['/', 'foo', 'bar', 'baz']
|
def parts(path):
    # type: (Text) -> List[Text]
    """Split a path in to its component parts.

    Arguments:
        path (str): Path to split in to parts.

    Returns:
        list: List of components.

    Example:
        >>> parts('/foo/bar/baz')
        ['/', 'foo', 'bar', 'baz']

    """
    normalized = normpath(path)
    trimmed = normalized.strip("/")
    # The first element marks whether the path is absolute or relative
    result = ["/" if normalized.startswith("/") else "./"]
    if trimmed:
        result.extend(trimmed.split("/"))
    return result
|
Split the extension from the path.
Arguments:
path (str): A path to split.
Returns:
(str, str): A tuple containing the path and the extension.
Example:
>>> splitext('baz.txt')
('baz', '.txt')
>>> splitext('foo/bar/baz.txt')
('foo/bar/baz', '.txt')
>>> splitext('foo/bar/.foo')
('foo/bar/.foo', '')
|
def splitext(path):
    # type: (Text) -> Tuple[Text, Text]
    """Split the extension from the path.

    Arguments:
        path (str): A path to split.

    Returns:
        (str, str): A tuple containing the path and the extension.

    Example:
        >>> splitext('baz.txt')
        ('baz', '.txt')
        >>> splitext('foo/bar/baz.txt')
        ('foo/bar/baz', '.txt')
        >>> splitext('foo/bar/.foo')
        ('foo/bar/.foo', '')

    """
    parent, filename = split(path)
    # No dot at all, or a lone dotfile such as '.gitignore' -> no extension
    if "." not in filename or (
        filename.startswith(".") and filename.count(".") == 1
    ):
        return path, ""
    stem, dot_ext = filename.rsplit(".", 1)
    return join(parent, stem), "." + dot_ext
|
Check if ``path1`` is a base of ``path2``.
Arguments:
path1 (str): A PyFilesytem path.
path2 (str): A PyFilesytem path.
Returns:
bool: `True` if ``path2`` starts with ``path1``
Example:
>>> isbase('foo/bar', 'foo/bar/baz/egg.txt')
True
|
def isbase(path1, path2):
    # type: (Text, Text) -> bool
    """Check if ``path1`` is a base of ``path2``.

    Arguments:
        path1 (str): A PyFilesytem path.
        path2 (str): A PyFilesytem path.

    Returns:
        bool: `True` if ``path2`` starts with ``path1``.

    Example:
        >>> isbase('foo/bar', 'foo/bar/baz/egg.txt')
        True

    """
    # Force trailing slashes so 'foo/barry' is not a base of 'foo/bar'
    base = forcedir(abspath(path1))
    candidate = forcedir(abspath(path2))
    return candidate.startswith(base)
|
Check if ``path1`` is a parent directory of ``path2``.
Arguments:
path1 (str): A PyFilesytem path.
path2 (str): A PyFilesytem path.
Returns:
bool: `True` if ``path1`` is a parent directory of ``path2``
Example:
>>> isparent("foo/bar", "foo/bar/spam.txt")
True
>>> isparent("foo/bar/", "foo/bar")
True
>>> isparent("foo/barry", "foo/baz/bar")
False
>>> isparent("foo/bar/baz/", "foo/baz/bar")
False
|
def isparent(path1, path2):
    # type: (Text, Text) -> bool
    """Check if ``path1`` is a parent directory of ``path2``.

    Arguments:
        path1 (str): A PyFilesytem path.
        path2 (str): A PyFilesytem path.

    Returns:
        bool: `True` if ``path1`` is a parent directory of ``path2``.

    Example:
        >>> isparent("foo/bar", "foo/bar/spam.txt")
        True
        >>> isparent("foo/bar/", "foo/bar")
        True
        >>> isparent("foo/barry", "foo/baz/bar")
        False
        >>> isparent("foo/bar/baz/", "foo/baz/bar")
        False

    """
    parent_bits = path1.split("/")
    child_bits = path2.split("/")
    # Ignore empty trailing components produced by a trailing slash
    while parent_bits and not parent_bits[-1]:
        parent_bits.pop()
    if len(parent_bits) > len(child_bits):
        return False
    return all(a == b for a, b in zip(parent_bits, child_bits))
|
Get the final path of ``path2`` that isn't in ``path1``.
Arguments:
path1 (str): A PyFilesytem path.
path2 (str): A PyFilesytem path.
Returns:
str: the final part of ``path2``.
Example:
>>> frombase('foo/bar/', 'foo/bar/baz/egg')
'baz/egg'
|
def frombase(path1, path2):
    # type: (Text, Text) -> Text
    """Get the final path of ``path2`` that isn't in ``path1``.

    Arguments:
        path1 (str): A PyFilesytem path.
        path2 (str): A PyFilesytem path.

    Returns:
        str: the final part of ``path2``.

    Example:
        >>> frombase('foo/bar/', 'foo/bar/baz/egg')
        'baz/egg'

    """
    if isparent(path1, path2):
        return path2[len(path1):]
    raise ValueError("path1 must be a prefix of path2")
|
Return a path relative from a given base path.
Insert backrefs as appropriate to reach the path from the base.
Arguments:
base (str): Path to a directory.
path (str): Path to make relative.
Returns:
str: the path to ``base`` from ``path``.
>>> relativefrom("foo/bar", "baz/index.html")
'../../baz/index.html'
|
def relativefrom(base, path):
    # type: (Text, Text) -> Text
    """Return a path relative from a given base path.

    Insert backrefs as appropriate to reach the path from the base.

    Arguments:
        base (str): Path to a directory.
        path (str): Path to make relative.

    Returns:
        str: the path to ``base`` from ``path``.

    Example:
        >>> relativefrom("foo/bar", "baz/index.html")
        '../../baz/index.html'

    """
    base_components = list(iteratepath(base))
    target_components = list(iteratepath(path))
    # Count the length of the common leading component sequence
    shared = 0
    for base_part, target_part in zip(base_components, target_components):
        if base_part != target_part:
            break
        shared += 1
    # One '..' for each base component that is not shared
    ups = [".."] * (len(base_components) - shared)
    return "/".join(ups + target_components[shared:])
|
Get a context to map OS errors to their `fs.errors` counterpart.
The context will re-write the paths in resource exceptions to be
in the same context as the wrapped filesystem.
The only parameter may be the path from the parent, if only one path
is to be unwrapped. Or it may be a dictionary that maps wrapped
paths on to unwrapped paths.
|
def unwrap_errors(path_replace):
    # type: (Union[Text, Mapping[Text, Text]]) -> Iterator[None]
    """Get a context to map OS errors to their `fs.errors` counterpart.

    The context will re-write the paths in resource exceptions to be
    in the same context as the wrapped filesystem.

    The only parameter may be the path from the parent, if only one path
    is to be unwrapped. Or it may be a dictionary that maps wrapped
    paths on to unwrapped paths.
    """
    # NOTE(review): this is a generator used as a context manager -- it is
    # presumably decorated with @contextmanager at the definition site
    # (the decorator is outside this view); confirm before calling directly.
    try:
        yield
    except errors.ResourceError as e:
        # Only rewrite exceptions that actually carry a ``path`` attribute.
        if hasattr(e, "path"):
            if isinstance(path_replace, Mapping):
                # Mapping form: translate the wrapped path, falling back
                # to the original path when there is no mapping entry.
                e.path = path_replace.get(e.path, e.path)
            else:
                # Single-path form: replace unconditionally.
                e.path = path_replace
        # Re-raise the (mutated) exception, preserving its type.
        reraise(type(e), e)
|
Decodes a Windows NT FTP LIST line like these two:
`11-02-18 02:12PM <DIR> images`
`11-02-18 03:33PM 9276 logo.gif`
|
def decode_windowsnt(line, match):
    """Decode a Windows NT FTP LIST line.

    Handles lines like these two:

    `11-02-18 02:12PM <DIR> images`
    `11-02-18 03:33PM 9276 logo.gif`
    """
    # Directories are flagged by a literal '<DIR>' in the size column
    directory = match.group("size") == "<DIR>"
    resource_type = ResourceType.directory if directory else ResourceType.file
    raw_info = {
        "basic": {
            "name": match.group("name"),
            "is_dir": directory,
        },
        "details": {
            "type": int(resource_type),
        },
        "ftp": {"ls": line},
    }
    if not directory:
        raw_info["details"]["size"] = int(match.group("size"))
    # NOTE(review): "%d-%m-%y" parses day-month-year, but Windows `dir`
    # output is commonly month-day-year -- confirm the expected locale.
    mtime = _parse_time(match.group("modified"), formats=["%d-%m-%y %I:%M%p"])
    if mtime is not None:
        raw_info["details"]["modified"] = mtime
    return raw_info
|
Test whether a name matches a wildcard pattern.
Arguments:
pattern (str): A wildcard pattern, e.g. ``"*.py"``.
name (str): A filename.
Returns:
bool: `True` if the filename matches the pattern.
|
def match(pattern, name):
    # type: (Text, Text) -> bool
    """Test whether a name matches a wildcard pattern.

    Arguments:
        pattern (str): A wildcard pattern, e.g. ``"*.py"``.
        name (str): A filename.

    Returns:
        bool: `True` if the filename matches the pattern.

    """
    key = (pattern, True)
    try:
        re_pat = _PATTERN_CACHE[key]
    except KeyError:
        # Compile once and memoize; (?ms) enables MULTILINE and DOTALL
        expression = "(?ms)" + _translate(pattern) + r"\Z"
        re_pat = re.compile(expression)
        _PATTERN_CACHE[key] = re_pat
    return re_pat.match(name) is not None
|
Test whether a name matches a wildcard pattern (case insensitive).
Arguments:
pattern (str): A wildcard pattern, e.g. ``"*.py"``.
name (str): A filename.
Returns:
bool: `True` if the filename matches the pattern.
|
def imatch(pattern, name):
    # type: (Text, Text) -> bool
    """Test whether a name matches a wildcard pattern (case insensitive).

    Arguments:
        pattern (str): A wildcard pattern, e.g. ``"*.py"``.
        name (str): A filename.

    Returns:
        bool: `True` if the filename matches the pattern.

    """
    key = (pattern, False)
    try:
        re_pat = _PATTERN_CACHE[key]
    except KeyError:
        # Lowercase the translated pattern *and* use IGNORECASE matching
        expression = "(?ms)" + _translate(pattern, case_sensitive=False) + r"\Z"
        re_pat = re.compile(expression, re.IGNORECASE)
        _PATTERN_CACHE[key] = re_pat
    return re_pat.match(name) is not None
|
Test if a name matches any of a list of patterns.
Will return `True` if ``patterns`` is an empty list.
Arguments:
patterns (list): A list of wildcard pattern, e.g ``["*.py",
"*.pyc"]``
name (str): A filename.
Returns:
bool: `True` if the name matches at least one of the patterns.
|
def match_any(patterns, name):
    # type: (Iterable[Text], Text) -> bool
    """Test if a name matches any of a list of patterns.

    Will return `True` if ``patterns`` is an empty list.

    Arguments:
        patterns (list): A list of wildcard pattern, e.g ``["*.py",
            "*.pyc"]``
        name (str): A filename.

    Returns:
        bool: `True` if the name matches at least one of the patterns.

    """
    # An empty pattern list matches everything
    return not patterns or any(match(one, name) for one in patterns)
|
Test if a name matches any of a list of patterns (case insensitive).
Will return `True` if ``patterns`` is an empty list.
Arguments:
patterns (list): A list of wildcard pattern, e.g ``["*.py",
"*.pyc"]``
name (str): A filename.
Returns:
bool: `True` if the name matches at least one of the patterns.
|
def imatch_any(patterns, name):
    # type: (Iterable[Text], Text) -> bool
    """Test if a name matches any of a list of patterns (case insensitive).

    Will return `True` if ``patterns`` is an empty list.

    Arguments:
        patterns (list): A list of wildcard pattern, e.g ``["*.py",
            "*.pyc"]``
        name (str): A filename.

    Returns:
        bool: `True` if the name matches at least one of the patterns.

    """
    # An empty pattern list matches everything
    return not patterns or any(imatch(one, name) for one in patterns)
|
Get a callable that matches names against the given patterns.
Arguments:
patterns (list): A list of wildcard pattern. e.g. ``["*.py",
"*.pyc"]``
case_sensitive (bool): If ``True``, then the callable will be case
sensitive, otherwise it will be case insensitive.
Returns:
callable: a matcher that will return `True` if the name given as
an argument matches any of the given patterns.
Example:
>>> from fs import wildcard
>>> is_python = wildcard.get_matcher(['*.py'], True)
>>> is_python('__init__.py')
True
>>> is_python('foo.txt')
False
|
def get_matcher(patterns, case_sensitive):
    # type: (Iterable[Text], bool) -> Callable[[Text], bool]
    """Get a callable that matches names against the given patterns.

    Arguments:
        patterns (list): A list of wildcard pattern. e.g. ``["*.py",
            "*.pyc"]``
        case_sensitive (bool): If ``True``, then the callable will be case
            sensitive, otherwise it will be case insensitive.

    Returns:
        callable: a matcher that will return `True` if the name given as
            an argument matches any of the given patterns.

    Example:
        >>> from fs import wildcard
        >>> is_python = wildcard.get_matcher(['*.py'], True)
        >>> is_python('__init__.py')
        True
        >>> is_python('foo.txt')
        False

    """
    if not patterns:
        # No patterns: everything matches
        return lambda name: True
    matcher = match_any if case_sensitive else imatch_any
    return partial(matcher, patterns)
|
Translate a wildcard pattern to a regular expression.
There is no way to quote meta-characters.
Arguments:
pattern (str): A wildcard pattern.
case_sensitive (bool): Set to `False` to use a case
insensitive regex (default `True`).
Returns:
str: A regex equivalent to the given pattern.
|
def _translate(pattern, case_sensitive=True):
# type: (Text, bool) -> Text
"""Translate a wildcard pattern to a regular expression.
There is no way to quote meta-characters.
Arguments:
pattern (str): A wildcard pattern.
case_sensitive (bool): Set to `False` to use a case
insensitive regex (default `True`).
Returns:
str: A regex equivalent to the given pattern.
"""
if not case_sensitive:
pattern = pattern.lower()
i, n = 0, len(pattern)
res = ""
while i < n:
c = pattern[i]
i = i + 1
if c == "*":
res = res + "[^/]*"
elif c == "?":
res = res + "."
elif c == "[":
j = i
if j < n and pattern[j] == "!":
j = j + 1
if j < n and pattern[j] == "]":
j = j + 1
while j < n and pattern[j] != "]":
j = j + 1
if j >= n:
res = res + "\\["
else:
stuff = pattern[i:j].replace("\\", "\\\\")
i = j + 1
if stuff[0] == "!":
stuff = "^" + stuff[1:]
elif stuff[0] == "^":
stuff = "\\" + stuff
res = "%s[%s]" % (res, stuff)
else:
res = res + re.escape(c)
return res
|
Get the delegate FS for a given path.
Arguments:
path (str): A path.
Returns:
(FS, str): a tuple of ``(<fs>, <path>)`` for a mounted filesystem,
or ``(None, None)`` if no filesystem is mounted on the
given ``path``.
|
def _delegate(self, path):
    # type: (Text) -> Tuple[FS, Text]
    """Get the delegate FS for a given path.

    Arguments:
        path (str): A path.

    Returns:
        (FS, str): a tuple of ``(<fs>, <path>)`` for a mounted
            filesystem, or ``(<default fs>, <path>)`` when no mount
            covers the given ``path``.

    """
    normalized = forcedir(abspath(normpath(path)))
    for mount_path, fs in self.mounts:
        if normalized.startswith(mount_path):
            # Translate the path to be relative to the mount point
            return fs, normalized[len(mount_path):].rstrip("/")
    return self.default_fs, path
|
Mounts a host FS object on a given path.
Arguments:
path (str): A path within the MountFS.
fs (FS or str): A filesystem (instance or URL) to mount.
|
def mount(self, path, fs):
    # type: (Text, Union[FS, Text]) -> None
    """Mounts a host FS object on a given path.

    Arguments:
        path (str): A path within the MountFS.
        fs (FS or str): A filesystem (instance or URL) to mount.

    """
    if isinstance(fs, text_type):
        # A FS URL -- open it to obtain a filesystem instance
        from .opener import open_fs

        fs = open_fs(fs)
    if not isinstance(fs, FS):
        raise TypeError("fs argument must be an FS object or a FS URL")
    if fs is self:
        raise ValueError("Unable to mount self")
    mount_path = forcedir(abspath(normpath(path)))
    # Refuse nested/duplicate mount points
    for existing_path, _existing_fs in self.mounts:
        if mount_path.startswith(existing_path):
            raise MountError("mount point overlaps existing mount")
    self.mounts.append((mount_path, fs))
    # Ensure the mount point exists on the default filesystem
    self.default_fs.makedirs(mount_path, recreate=True)
|
Start the workers.
|
def start(self):
    """Start the worker threads."""
    if self.num_workers:
        # Bound the queue so producers block instead of buffering unboundedly
        self.queue = Queue(maxsize=self.num_workers)
        self.workers = [_Worker(self) for _ in range(self.num_workers)]
        for worker in self.workers:
            worker.start()
    self.running = True
|
Stop the workers (will block until they are finished).
|
def stop(self):
    """Stop the workers (will block until they are finished)."""
    if self.running and self.num_workers:
        # One ``None`` sentinel per worker tells each thread to exit
        for _ in self.workers:
            self.queue.put(None)
        for worker in self.workers:
            worker.join()
        # Free up references held by workers
        del self.workers[:]
        self.queue.join()
    self.running = False
|
Copy a file from one fs to another.
|
def copy(self, src_fs, src_path, dst_fs, dst_path):
    # type: (FS, Text, FS, Text) -> None
    """Copy a file from one fs to another."""
    if self.queue is None:
        # No worker threads: copying inline is the fastest option
        copy_file_internal(src_fs, src_path, dst_fs, dst_path)
        return
    src_file = src_fs.openbin(src_path, "r")
    try:
        dst_file = dst_fs.openbin(dst_path, "w")
    except Exception:
        # Don't leak the source handle if the destination fails to open
        src_file.close()
        raise
    # Hand both open handles to a worker via the queue
    self.queue.put(_CopyTask(src_file, dst_file))
|
Add a filesystem to the MultiFS.
Arguments:
name (str): A unique name to refer to the filesystem being
added.
fs (FS or str): The filesystem (instance or URL) to add.
write (bool): If this value is True, then the ``fs`` will
be used as the writeable FS (defaults to False).
priority (int): An integer that denotes the priority of the
filesystem being added. Filesystems will be searched in
descending priority order and then by the reverse order
they were added. So by default, the most recently added
filesystem will be looked at first.
|
def add_fs(self, name, fs, write=False, priority=0):
    # type: (Text, FS, bool, int) -> None
    """Add a filesystem to the MultiFS.

    Arguments:
        name (str): A unique name to refer to the filesystem being
            added.
        fs (FS or str): The filesystem (instance or URL) to add.
        write (bool): If this value is True, then the ``fs`` will
            be used as the writeable FS (defaults to False).
        priority (int): An integer that denotes the priority of the
            filesystem being added. Filesystems will be searched in
            descending priority order and then by the reverse order
            they were added. So by default, the most recently added
            filesystem will be looked at first.

    """
    if isinstance(fs, text_type):
        fs = open_fs(fs)
    if not isinstance(fs, FS):
        raise TypeError("fs argument should be an FS object or FS URL")
    # The secondary sort index preserves insertion order within a priority
    entry = _PrioritizedFS(priority=(priority, self._sort_index), fs=fs)
    self._sort_index += 1
    self._filesystems[name] = entry
    self._resort()
    if write:
        self.write_fs = fs
        self._write_fs_name = name
|
Get iterator that returns (name, fs) in priority order.
|
def iterate_fs(self):
    # type: () -> Iterator[Tuple[Text, FS]]
    """Get an iterator that yields ``(name, fs)`` pairs in priority order."""
    if self._fs_sequence is None:
        # Sort by the (priority, insertion index) tuple, highest first,
        # and cache the result until the next resort
        ordered = sorted(
            self._filesystems.items(), key=itemgetter(1), reverse=True
        )
        self._fs_sequence = []
        for name, prioritized in ordered:
            _priority, fs = prioritized
            self._fs_sequence.append((name, fs))
    return iter(self._fs_sequence)
|
Get a filesystem which has a given path.
|
def _delegate(self, path):
# type: (Text) -> Optional[FS]
"""Get a filesystem which has a given path.
"""
for _name, fs in self.iterate_fs():
if fs.exists(path):
return fs
return None
|
Check that there is a filesystem with the given ``path``.
|
def _delegate_required(self, path):
# type: (Text) -> FS
"""Check that there is a filesystem with the given ``path``.
"""
fs = self._delegate(path)
if fs is None:
raise errors.ResourceNotFound(path)
return fs
|
Check that ``path`` is writeable.
|
def _writable_required(self, path):
# type: (Text) -> FS
"""Check that ``path`` is writeable.
"""
if self.write_fs is None:
raise errors.ResourceReadOnly(path)
return self.write_fs
|
Get a tuple of (name, fs) that the given path would map to.
Arguments:
path (str): A path on the filesystem.
mode (str): An `io.open` mode.
|
def which(self, path, mode="r"):
    # type: (Text, Text) -> Tuple[Optional[Text], Optional[FS]]
    """Get a tuple of (name, fs) that the given path would map to.

    Arguments:
        path (str): A path on the filesystem.
        mode (str): An `io.open` mode.

    """
    if check_writable(mode):
        # Writes always go to the designated write filesystem
        return self._write_fs_name, self.write_fs
    # Reads go to the first filesystem that has the resource
    return next(
        ((name, fs) for name, fs in self.iterate_fs() if fs.exists(path)),
        (None, None),
    )
|
Take a Python 2.x binary file and return an IO Stream.
|
def make_stream(
    name,  # type: Text
    bin_file,  # type: RawIOBase
    mode="r",  # type: Text
    buffering=-1,  # type: int
    encoding=None,  # type: Optional[Text]
    errors=None,  # type: Optional[Text]
    newline="",  # type: Optional[Text]
    line_buffering=False,  # type: bool
    **kwargs  # type: Any
):
    # type: (...) -> IO
    """Take a Python 2.x binary file and return an IO Stream."""
    # '+' implies read *and* write regardless of the leading letter
    reading = "r" in mode or "+" in mode
    writing = "w" in mode or "+" in mode
    appending = "a" in mode
    binary = "b" in mode
    if binary:
        # Binary streams never decode text
        encoding = None
    else:
        encoding = encoding or "utf-8"
    io_object = RawWrapper(bin_file, mode=mode, name=name)  # type: io.IOBase
    if buffering >= 0:
        # A buffering of 0 still gets the default size -- the io buffered
        # classes do not accept an unbuffered (0) buffer size.
        buffer_size = buffering or io.DEFAULT_BUFFER_SIZE
        if reading and writing:
            wrapper_class = io.BufferedRandom
        elif reading:
            wrapper_class = io.BufferedReader
        elif writing or appending:
            wrapper_class = io.BufferedWriter
        else:
            wrapper_class = None
        if wrapper_class is not None:
            io_object = wrapper_class(
                typing.cast(io.RawIOBase, io_object), buffer_size
            )
    if not binary:
        # Text mode: layer decoding/newline translation on top
        io_object = io.TextIOWrapper(
            io_object,
            encoding=encoding,
            errors=errors,
            newline=newline,
            line_buffering=line_buffering,
        )
    return io_object
|
Iterate over the lines of a file.
Implementation reads each char individually, which is not very
efficient.
Yields:
str: a single line in the file.
|
def line_iterator(readable_file, size=None):
    # type: (IO[bytes], Optional[int]) -> Iterator[bytes]
    """Iterate over the lines of a file.

    Implementation reads each char individually, which is not very
    efficient.

    Yields:
        bytes: a single line in the file.

    """
    read = readable_file.read
    pending = []
    chunk = b"1"  # non-empty sentinel so the loop body runs at least once
    if size is None or size < 0:
        # Unbounded: read until EOF
        while chunk:
            chunk = read(1)
            pending.append(chunk)
            # b"" is a substring of b"\n", so EOF also flushes the tail
            if chunk in b"\n":
                yield b"".join(pending)
                del pending[:]
    else:
        remaining = size
        while chunk and remaining:
            chunk = read(1)
            remaining -= len(chunk)
            pending.append(chunk)
            # Flush on newline, EOF, or when the byte budget runs out
            if chunk in b"\n" or not remaining:
                yield b"".join(pending)
                del pending[:]
|
Check ``mode`` parameter of `~fs.base.FS.openbin` is valid.
Arguments:
mode (str): Mode parameter.
Raises:
`ValueError` if mode is not valid.
|
def validate_openbin_mode(mode, _valid_chars=frozenset("rwxab+")):
    # type: (Text, Union[Set[Text], FrozenSet[Text]]) -> None
    """Check ``mode`` parameter of `~fs.base.FS.openbin` is valid.

    Arguments:
        mode (str): Mode parameter.

    Raises:
        `ValueError` if mode is not valid.

    """
    # Check order matters: 't' is reported before emptiness or other errors
    if "t" in mode:
        raise ValueError("text mode not valid in openbin")
    if not mode:
        raise ValueError("mode must not be empty")
    if mode[0] not in "rwxa":
        raise ValueError("mode must start with 'r', 'w', 'a' or 'x'")
    if any(char not in _valid_chars for char in mode):
        raise ValueError("mode '{}' contains invalid characters".format(mode))
|
Render a directory structure in to a pretty tree.
Arguments:
fs (~fs.base.FS): A filesystem instance.
path (str): The path of the directory to start rendering
from (defaults to root folder, i.e. ``'/'``).
file (io.IOBase): An open file-like object to render the
tree, or `None` for stdout.
encoding (str, optional): Unicode encoding, or `None` to
auto-detect.
max_levels (int, optional): Maximum number of levels to
display, or `None` for no maximum.
with_color (bool, optional): Enable terminal color output,
or `None` to auto-detect terminal.
dirs_first (bool): Show directories first.
exclude (list, optional): Option list of directory patterns
to exclude from the tree render.
filter (list, optional): Optional list of files patterns to
match in the tree render.
Returns:
(int, int): A tuple of ``(<directory count>, <file count>)``.
|
def render(
    fs,  # type: FS
    path="/",  # type: Text
    file=None,  # type: Optional[TextIO]
    encoding=None,  # type: Optional[Text]
    max_levels=5,  # type: int
    with_color=None,  # type: Optional[bool]
    dirs_first=True,  # type: bool
    exclude=None,  # type: Optional[List[Text]]
    filter=None,  # type: Optional[List[Text]]
):
    # type: (...) -> Tuple[int, int]
    """Render a directory structure in to a pretty tree.

    Arguments:
        fs (~fs.base.FS): A filesystem instance.
        path (str): The path of the directory to start rendering
            from (defaults to root folder, i.e. ``'/'``).
        file (io.IOBase): An open file-like object to render the
            tree, or `None` for stdout.
        encoding (str, optional): Unicode encoding, or `None` to
            auto-detect.
        max_levels (int, optional): Maximum number of levels to
            display, or `None` for no maximum.
        with_color (bool, optional): Enable terminal color output,
            or `None` to auto-detect terminal.
        dirs_first (bool): Show directories first.
        exclude (list, optional): Option list of directory patterns
            to exclude from the tree render.
        filter (list, optional): Optional list of files patterns to
            match in the tree render.

    Returns:
        (int, int): A tuple of ``(<directory count>, <file count>)``.

    """
    file = file or sys.stdout
    if encoding is None:
        # The stream's own encoding may be missing or None; default to utf-8
        encoding = getattr(file, "encoding", "utf-8") or "utf-8"
    is_tty = hasattr(file, "isatty") and file.isatty()
    if with_color is None:
        # Auto-detect: color is disabled on Windows, else enabled iff a tty
        is_windows = sys.platform.startswith("win")
        with_color = False if is_windows else is_tty
    # Unicode box-drawing glyphs are only used for utf-8 *with* color;
    # all other cases fall back to pure-ASCII tree characters.
    if encoding.lower() == "utf-8" and with_color:
        char_vertline = "│"
        char_newnode = "├"
        char_line = "──"
        char_corner = "└"
    else:
        char_vertline = "|"
        char_newnode = "|"
        char_line = "--"
        char_corner = "`"

    indent = " " * 4
    line_indent = char_vertline + " " * 3

    def write(line):
        # type: (Text) -> None
        """Write a line to the output."""
        print(line, file=file)

    # FIXME(@althonos): define functions using `with_color` and
    # avoid checking `with_color` at every function call !
    def format_prefix(prefix):
        # type: (Text) -> Text
        """Format the prefix lines (green when color is enabled)."""
        if not with_color:
            return prefix
        return "\x1b[32m%s\x1b[0m" % prefix

    def format_dirname(dirname):
        # type: (Text) -> Text
        """Format a directory name (bold blue when color is enabled)."""
        if not with_color:
            return dirname
        return "\x1b[1;34m%s\x1b[0m" % dirname

    def format_error(msg):
        # type: (Text) -> Text
        """Format an error (red when color is enabled)."""
        if not with_color:
            return msg
        return "\x1b[31m%s\x1b[0m" % msg

    def format_filename(fname):
        # type: (Text) -> Text
        """Format a filename (hidden files in yellow when color is enabled)."""
        if not with_color:
            return fname
        if fname.startswith("."):
            fname = "\x1b[33m%s\x1b[0m" % fname
        return fname

    def sort_key_dirs_first(info):
        # type: (Info) -> Tuple[bool, Text]
        """Get the info sort function with directories first."""
        return (not info.is_dir, info.name.lower())

    def sort_key(info):
        # type: (Info) -> Text
        """Get the default info sort function using resource name."""
        return info.name.lower()

    # Mutable counters shared with the recursive closure below
    counts = {"dirs": 0, "files": 0}

    def format_directory(path, levels):
        # type: (Text, List[bool]) -> None
        """Recursive directory function.

        ``levels`` holds one bool per ancestor directory: True when that
        ancestor was the last entry of its parent (draws blank indent
        instead of a vertical line).
        """
        try:
            directory = sorted(
                fs.filterdir(path, exclude_dirs=exclude, files=filter),
                key=sort_key_dirs_first if dirs_first else sort_key,
            )
        except Exception as error:
            # Render the error in place in the tree rather than aborting
            prefix = (
                "".join(indent if last else line_indent for last in levels)
                + char_corner
                + char_line
            )
            write(
                "{} {}".format(
                    format_prefix(prefix), format_error("error ({})".format(error))
                )
            )
            return
        _last = len(directory) - 1
        for i, info in enumerate(directory):
            is_last_entry = i == _last
            counts["dirs" if info.is_dir else "files"] += 1
            prefix = "".join(indent if last else line_indent for last in levels)
            prefix += char_corner if is_last_entry else char_newnode
            if info.is_dir:
                write(
                    "{} {}".format(
                        format_prefix(prefix + char_line), format_dirname(info.name)
                    )
                )
                # Recurse only while under the level limit (None = unlimited)
                if max_levels is None or len(levels) < max_levels:
                    format_directory(join(path, info.name), levels + [is_last_entry])
            else:
                write(
                    "{} {}".format(
                        format_prefix(prefix + char_line), format_filename(info.name)
                    )
                )

    format_directory(abspath(normpath(path)), [])
    return counts["dirs"], counts["files"]
|
Compare two `Info` objects to see if they should be copied.
Returns:
bool: `True` if the `Info` are different in size or mtime.
|
def _compare(info1, info2):
# type: (Info, Info) -> bool
"""Compare two `Info` objects to see if they should be copied.
Returns:
bool: `True` if the `Info` are different in size or mtime.
"""
# Check filesize has changed
if info1.size != info2.size:
return True
# Check modified dates
date1 = info1.modified
date2 = info2.modified
return date1 is None or date2 is None or date1 > date2
|
Mirror files / directories from one filesystem to another.
Mirroring a filesystem will create an exact copy of ``src_fs`` on
``dst_fs``, by removing any files / directories on the destination
that aren't on the source, and copying files that aren't.
Arguments:
src_fs (FS or str): Source filesystem (URL or instance).
dst_fs (FS or str): Destination filesystem (URL or instance).
walker (~fs.walk.Walker, optional): An optional walker instance.
copy_if_newer (bool): Only copy newer files (the default).
workers (int): Number of worker threads used
(0 for single threaded). Set to a relatively low number
for network filesystems, 4 would be a good start.
|
def mirror(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
    copy_if_newer=True,  # type: bool
    workers=0,  # type: int
):
    # type: (...) -> None
    """Mirror files / directories from one filesystem to another.

    Mirroring a filesystem will create an exact copy of ``src_fs`` on
    ``dst_fs``, by removing any files / directories on the destination
    that aren't on the source, and copying files that aren't.

    Arguments:
        src_fs (FS or str): Source filesystem (URL or instance).
        dst_fs (FS or str): Destination filesystem (URL or instance).
        walker (~fs.walk.Walker, optional): An optional walker instance.
        copy_if_newer (bool): Only copy newer files (the default).
        workers (int): Number of worker threads used
            (0 for single threaded). Set to a relatively low number
            for network filesystems, 4 would be a good start.

    """
    with manage_fs(src_fs, writeable=False) as _src_fs:
        with manage_fs(dst_fs, create=True) as _dst_fs:
            with _src_fs.lock(), _dst_fs.lock():
                # Only use worker threads when both ends are thread-safe
                num_workers = workers if is_thread_safe(_src_fs, _dst_fs) else 0
                with Copier(num_workers=num_workers) as copier:
                    _mirror(
                        _src_fs,
                        _dst_fs,
                        walker=walker,
                        copy_if_newer=copy_if_newer,
                        copy_file=copier.copy,
                    )
|
Parse a Filesystem URL and return a `ParseResult`.
Arguments:
fs_url (str): A filesystem URL.
Returns:
~fs.opener.parse.ParseResult: a parse result instance.
Raises:
~fs.errors.ParseError: if the FS URL is not valid.
|
def parse_fs_url(fs_url):
    # type: (Text) -> ParseResult
    """Parse a Filesystem URL and return a `ParseResult`.

    Arguments:
        fs_url (str): A filesystem URL.

    Returns:
        ~fs.opener.parse.ParseResult: a parse result instance.

    Raises:
        ~fs.errors.ParseError: if the FS URL is not valid.

    """
    match = _RE_FS_URL.match(fs_url)
    if match is None:
        raise ParseError("{!r} is not a fs2 url".format(fs_url))

    fs_name, credentials, url1, url2, path = match.groups()
    if not credentials:
        username = None  # type: Optional[Text]
        password = None  # type: Optional[Text]
        url = url2
    else:
        # Credentials appear as ``user:password@`` before the resource.
        username, _, password = credentials.partition(":")
        username = unquote(username)
        password = unquote(password)
        url = url1
    url, has_qs, qs = url.partition("?")
    resource = unquote(url)
    if has_qs:
        params = {
            name: unquote(values[0])
            for name, values in six.iteritems(parse_qs(qs, keep_blank_values=True))
        }
    else:
        params = {}
    return ParseResult(fs_name, username, password, resource, params, path)
|
Return a method with a deprecation warning.
|
def _new_name(method, old_name):
"""Return a method with a deprecation warning."""
# Looks suspiciously like a decorator, but isn't!
@wraps(method)
def _method(*args, **kwargs):
warnings.warn(
"method '{}' has been deprecated, please rename to '{}'".format(
old_name, method.__name__
),
DeprecationWarning,
)
return method(*args, **kwargs)
deprecated_msg = """
Note:
.. deprecated:: 2.2.0
Please use `~{}`
""".format(
method.__name__
)
if getattr(_method, "__doc__"):
_method.__doc__ += deprecated_msg
return _method
|
Change stream position.
Change the stream position to the given byte offset. The
offset is interpreted relative to the position indicated by
``whence``.
Arguments:
offset (int): the offset to the new position, in bytes.
whence (int): the position reference. Possible values are:
* `Seek.set`: start of stream (the default).
* `Seek.current`: current position; offset may be negative.
* `Seek.end`: end of stream; offset must be negative.
Returns:
int: the new absolute position.
Raises:
ValueError: when ``whence`` is not known, or ``offset``
is invalid.
Note:
Zip compression does not support seeking, so the seeking
is emulated. Seeking somewhere else than the current position
will need to either:
* reopen the file and restart decompression
* read and discard data to advance in the file
|
    def seek(self, offset, whence=Seek.set):
        # type: (int, SupportsInt) -> int
        """Change stream position.

        Change the stream position to the given byte offset. The
        offset is interpreted relative to the position indicated by
        ``whence``.

        Arguments:
            offset (int): the offset to the new position, in bytes.
            whence (int): the position reference. Possible values are:
                * `Seek.set`: start of stream (the default).
                * `Seek.current`: current position; offset may be negative.
                * `Seek.end`: end of stream; offset must be negative.

        Returns:
            int: the new absolute position.

        Raises:
            ValueError: when ``whence`` is not known, or ``offset``
                is invalid.

        Note:
            Zip compression does not support seeking, so the seeking
            is emulated. Seeking somewhere else than the current position
            will need to either:
                * reopen the file and restart decompression
                * read and discard data to advance in the file

        """
        _whence = int(whence)
        if _whence == Seek.current:
            # Convert to an absolute offset so validation below applies.
            offset += self._pos
        if _whence == Seek.current or _whence == Seek.set:
            if offset < 0:
                raise ValueError("Negative seek position {}".format(offset))
        elif _whence == Seek.end:
            if offset > 0:
                raise ValueError("Positive seek position {}".format(offset))
            offset += self._end
        else:
            raise ValueError(
                "Invalid whence ({}, should be {}, {} or {})".format(
                    _whence, Seek.set, Seek.current, Seek.end
                )
            )
        if offset < self._pos:
            # Seeking backwards: the compressed stream cannot rewind, so
            # reopen the archive member and decompress from the start.
            self._f = self._zip.open(self.name)  # type: ignore
            self._pos = 0
        # Advance to the target by reading and discarding data
        # (self.read updates self._pos as a side effect).
        self.read(offset - self._pos)
        return self._pos
|
Get the walk generator.
|
def _iter_walk(
self,
fs, # type: FS
path, # type: Text
namespaces=None, # type: Optional[Collection[Text]]
):
# type: (...) -> Iterator[Tuple[Text, Optional[Info]]]
"""Get the walk generator."""
if self.search == "breadth":
return self._walk_breadth(fs, path, namespaces=namespaces)
else:
return self._walk_depth(fs, path, namespaces=namespaces)
|
Check if a directory should be considered in the walk.
|
def _check_open_dir(self, fs, path, info):
# type: (FS, Text, Info) -> bool
"""Check if a directory should be considered in the walk.
"""
if self.exclude_dirs is not None and fs.match(self.exclude_dirs, info.name):
return False
if self.filter_dirs is not None and not fs.match(self.filter_dirs, info.name):
return False
return self.check_open_dir(fs, path, info)
|
Check if a directory contents should be scanned.
|
def _check_scan_dir(self, fs, path, info, depth):
# type: (FS, Text, Info, int) -> bool
"""Check if a directory contents should be scanned."""
if self.max_depth is not None and depth >= self.max_depth:
return False
return self.check_scan_dir(fs, path, info)
|
Check if a filename should be included.
Override to exclude files from the walk.
Arguments:
fs (FS): A filesystem instance.
info (Info): A resource info object.
Returns:
bool: `True` if the file should be included.
|
def check_file(self, fs, info):
# type: (FS, Info) -> bool
"""Check if a filename should be included.
Override to exclude files from the walk.
Arguments:
fs (FS): A filesystem instance.
info (Info): A resource info object.
Returns:
bool: `True` if the file should be included.
"""
if self.exclude is not None and fs.match(self.exclude, info.name):
return False
return fs.match(self.filter, info.name)
|
Get an iterator of `Info` objects for a directory path.
Arguments:
fs (FS): A filesystem instance.
dir_path (str): A path to a directory on the filesystem.
namespaces (list): A list of additional namespaces to
include in the `Info` objects.
Returns:
~collections.Iterator: iterator of `Info` objects for
resources within the given path.
|
    def _scan(
        self,
        fs,  # type: FS
        dir_path,  # type: Text
        namespaces=None,  # type: Optional[Collection[Text]]
    ):
        # type: (...) -> Iterator[Info]
        """Get an iterator of `Info` objects for a directory path.

        Arguments:
            fs (FS): A filesystem instance.
            dir_path (str): A path to a directory on the filesystem.
            namespaces (list): A list of additional namespaces to
                include in the `Info` objects.

        Returns:
            ~collections.Iterator: iterator of `Info` objects for
            resources within the given path.

        """
        try:
            for info in fs.scandir(dir_path, namespaces=namespaces):
                yield info
        except FSError as error:
            # Let the error callback decide whether to suppress the
            # failure; re-raise with the original traceback otherwise.
            if not self.on_error(dir_path, error):
                six.reraise(type(error), error)
|
Walk the directory structure of a filesystem.
Arguments:
fs (FS): A filesystem instance.
path (str): A path to a directory on the filesystem.
namespaces (list, optional): A list of additional namespaces
to add to the `Info` objects.
Returns:
collections.Iterator: an iterator of `~fs.walk.Step` instances.
The return value is an iterator of ``(<path>, <dirs>, <files>)``
named tuples, where ``<path>`` is an absolute path to a
directory, and ``<dirs>`` and ``<files>`` are a list of
`~fs.info.Info` objects for directories and files in ``<path>``.
Example:
>>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
>>> namespaces = ['details']
>>> for path, dirs, files in walker.walk(home_fs, namespaces=namespaces):
...     print("[{}]".format(path))
...     print("{} directories".format(len(dirs)))
...     total = sum(info.size for info in files)
...     print("{} bytes".format(total))
|
    def walk(
        self,
        fs,  # type: FS
        path="/",  # type: Text
        namespaces=None,  # type: Optional[Collection[Text]]
    ):
        # type: (...) -> Iterator[Step]
        """Walk the directory structure of a filesystem.

        Arguments:
            fs (FS): A filesystem instance.
            path (str): A path to a directory on the filesystem.
            namespaces (list, optional): A list of additional namespaces
                to add to the `Info` objects.

        Returns:
            collections.Iterator: an iterator of `~fs.walk.Step` instances.

        The return value is an iterator of ``(<path>, <dirs>, <files>)``
        named tuples,  where ``<path>`` is an absolute path to a
        directory, and ``<dirs>`` and ``<files>`` are a list of
        `~fs.info.Info` objects for directories and files in ``<path>``.

        Example:
            >>> home_fs = open_fs('~/')
            >>> walker = Walker(filter=['*.py'])
            >>> namespaces = ['details']
            >>> for path, dirs, files in walker.walk(home_fs, namespaces=namespaces):
            ...     print("[{}]".format(path))
            ...     print("{} directories".format(len(dirs)))
            ...     total = sum(info.size for info in files)
            ...     print("{} bytes".format(total))

        """
        _path = abspath(normpath(path))
        # Buffer Info objects per directory; a directory's entries are
        # emitted as one Step when its end-of-scan marker (None) arrives.
        dir_info = defaultdict(list)  # type: MutableMapping[Text, List[Info]]
        _walk = self._iter_walk(fs, _path, namespaces=namespaces)
        for dir_path, info in _walk:
            if info is None:
                # End-of-directory sentinel: split the buffered entries
                # into directories and files and yield the Step.
                dirs = []  # type: List[Info]
                files = []  # type: List[Info]
                for _info in dir_info[dir_path]:
                    (dirs if _info.is_dir else files).append(_info)
                yield Step(dir_path, dirs, files)
                # Free the buffer for this directory.
                del dir_info[dir_path]
            else:
                dir_info[dir_path].append(info)
|
Walk a filesystem, yielding absolute paths to files.
Arguments:
fs (FS): A filesystem instance.
path (str): A path to a directory on the filesystem.
Yields:
str: absolute path to files on the filesystem found
recursively within the given directory.
|
def files(self, fs, path="/"):
# type: (FS, Text) -> Iterator[Text]
"""Walk a filesystem, yielding absolute paths to files.
Arguments:
fs (FS): A filesystem instance.
path (str): A path to a directory on the filesystem.
Yields:
str: absolute path to files on the filesystem found
recursively within the given directory.
"""
_combine = combine
for _path, info in self._iter_walk(fs, path=path):
if info is not None and not info.is_dir:
yield _combine(_path, info.name)
|
Walk a filesystem, yielding tuples of ``(<path>, <info>)``.
Arguments:
fs (FS): A filesystem instance.
path (str): A path to a directory on the filesystem.
namespaces (list, optional): A list of additional namespaces
to add to the `Info` objects.
Yields:
(str, Info): a tuple of ``(<absolute path>, <resource info>)``.
|
def info(
self,
fs, # type: FS
path="/", # type: Text
namespaces=None, # type: Optional[Collection[Text]]
):
# type: (...) -> Iterator[Tuple[Text, Info]]
"""Walk a filesystem, yielding tuples of ``(<path>, <info>)``.
Arguments:
fs (FS): A filesystem instance.
path (str): A path to a directory on the filesystem.
namespaces (list, optional): A list of additional namespaces
to add to the `Info` objects.
Yields:
(str, Info): a tuple of ``(<absolute path>, <resource info>)``.
"""
_combine = combine
_walk = self._iter_walk(fs, path=path, namespaces=namespaces)
for _path, info in _walk:
if info is not None:
yield _combine(_path, info.name), info
|
Walk files using a *breadth first* search.
|
    def _walk_breadth(
        self,
        fs,  # type: FS
        path,  # type: Text
        namespaces=None,  # type: Optional[Collection[Text]]
    ):
        # type: (...) -> Iterator[Tuple[Text, Optional[Info]]]
        """Walk files using a *breadth first* search."""
        # Queue of directories still to scan.
        queue = deque([path])
        push = queue.appendleft
        pop = queue.pop
        # Bind frequently-used callables to locals for speed in the loop.
        _combine = combine
        _scan = self._scan
        _calculate_depth = self._calculate_depth
        _check_open_dir = self._check_open_dir
        _check_scan_dir = self._check_scan_dir
        _check_file = self.check_file
        # Depth of the starting path, used to compute relative depths.
        depth = _calculate_depth(path)
        while queue:
            dir_path = pop()
            for info in _scan(fs, dir_path, namespaces=namespaces):
                if info.is_dir:
                    # Relative depth of this sub-directory's contents.
                    _depth = _calculate_depth(dir_path) - depth + 1
                    if _check_open_dir(fs, dir_path, info):
                        yield dir_path, info  # Opened a directory
                        if _check_scan_dir(fs, dir_path, info, _depth):
                            push(_combine(dir_path, info.name))
                else:
                    if _check_file(fs, info):
                        yield dir_path, info  # Found a file
            # None signals that dir_path has been fully scanned.
            yield dir_path, None
|
Walk files using a *depth first* search.
|
    def _walk_depth(
        self,
        fs,  # type: FS
        path,  # type: Text
        namespaces=None,  # type: Optional[Collection[Text]]
    ):
        # type: (...) -> Iterator[Tuple[Text, Optional[Info]]]
        """Walk files using a *depth first* search."""
        # No recursion!
        # Bind frequently-used callables to locals for speed in the loop.
        _combine = combine
        _scan = self._scan
        _calculate_depth = self._calculate_depth
        _check_open_dir = self._check_open_dir
        _check_scan_dir = self._check_scan_dir
        _check_file = self.check_file
        # Depth of the starting path, used to compute relative depths.
        depth = _calculate_depth(path)
        # Each stack entry is (dir path, pending scan iterator, and the
        # (parent path, info) pair to emit once the scan is exhausted).
        stack = [
            (path, _scan(fs, path, namespaces=namespaces), None)
        ]  # type: List[Tuple[Text, Iterator[Info], Optional[Tuple[Text, Info]]]]
        push = stack.append
        while stack:
            dir_path, iter_files, parent = stack[-1]
            info = next(iter_files, None)
            if info is None:
                # Directory exhausted: emit it *after* its children
                # (bottom-up order) and pop it off the stack.
                if parent is not None:
                    yield parent
                yield dir_path, None
                del stack[-1]
            elif info.is_dir:
                # Relative depth of this sub-directory's contents.
                _depth = _calculate_depth(dir_path) - depth + 1
                if _check_open_dir(fs, dir_path, info):
                    if _check_scan_dir(fs, dir_path, info, _depth):
                        _path = _combine(dir_path, info.name)
                        push(
                            (
                                _path,
                                _scan(fs, _path, namespaces=namespaces),
                                (dir_path, info),
                            )
                        )
                    else:
                        # Opened but not scanned: emit it immediately.
                        yield dir_path, info
            else:
                if _check_file(fs, info):
                    yield dir_path, info
|
Create a walker instance.
|
def _make_walker(self, *args, **kwargs):
# type: (*Any, **Any) -> Walker
"""Create a walker instance.
"""
walker = self.walker_class(*args, **kwargs)
return walker
|
Walk the directory structure of a filesystem.
Arguments:
path (str):
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``
(defaults to ``['basic']``).
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator of ``(<path>, <dirs>, <files>)``
named tuples, where ``<path>`` is an absolute path to a
directory, and ``<dirs>`` and ``<files>`` are a list of
`~fs.info.Info` objects for directories and files in ``<path>``.
Example:
>>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
>>> for path, dirs, files in walker.walk(home_fs, namespaces=['details']):
... print("[{}]".format(path))
... print("{} directories".format(len(dirs)))
... total = sum(info.size for info in files)
...     print("{} bytes".format(total))
This method invokes `Walker.walk` with bound `FS` object.
|
def walk(
self,
path="/", # type: Text
namespaces=None, # type: Optional[Collection[Text]]
**kwargs # type: Any
):
# type: (...) -> Iterator[Step]
"""Walk the directory structure of a filesystem.
Arguments:
path (str):
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``
(defaults to ``['basic']``).
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator of ``(<path>, <dirs>, <files>)``
named tuples, where ``<path>`` is an absolute path to a
directory, and ``<dirs>`` and ``<files>`` are a list of
`~fs.info.Info` objects for directories and files in ``<path>``.
Example:
>>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
>>> for path, dirs, files in walker.walk(home_fs, namespaces=['details']):
... print("[{}]".format(path))
... print("{} directories".format(len(dirs)))
... total = sum(info.size for info in files)
... print("{} bytes {}".format(total))
This method invokes `Walker.walk` with bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.walk(self.fs, path=path, namespaces=namespaces)
|
Walk a filesystem, yielding absolute paths to files.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: An iterator over file paths (absolute
from the filesystem root).
This method invokes `Walker.files` with the bound `FS` object.
|
def files(self, path="/", **kwargs):
# type: (Text, **Any) -> Iterator[Text]
"""Walk a filesystem, yielding absolute paths to files.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: An iterator over file paths (absolute
from the filesystem root).
This method invokes `Walker.files` with the bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.files(self.fs, path=path)
|
Walk a filesystem, yielding absolute paths to directories.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator over directory paths
(absolute from the filesystem root).
This method invokes `Walker.dirs` with the bound `FS` object.
|
def dirs(self, path="/", **kwargs):
# type: (Text, **Any) -> Iterator[Text]
"""Walk a filesystem, yielding absolute paths to directories.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: an iterator over directory paths
(absolute from the filesystem root).
This method invokes `Walker.dirs` with the bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.dirs(self.fs, path=path)
|
Walk a filesystem, yielding path and `Info` of resources.
Arguments:
path (str): A path to a directory.
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``
(defaults to ``['basic']``).
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterable: an iterable yielding tuples of
``(<absolute path>, <resource info>)``.
This method invokes `Walker.info` with the bound `FS` object.
|
def info(
self,
path="/", # type: Text
namespaces=None, # type: Optional[Collection[Text]]
**kwargs # type: Any
):
# type: (...) -> Iterator[Tuple[Text, Info]]
"""Walk a filesystem, yielding path and `Info` of resources.
Arguments:
path (str): A path to a directory.
namespaces (list, optional): A list of namespaces to include
in the resource information, e.g. ``['basic', 'access']``
(defaults to ``['basic']``).
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterable: an iterable yielding tuples of
``(<absolute path>, <resource info>)``.
This method invokes `Walker.info` with the bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.info(self.fs, path=path, namespaces=namespaces)
|
Remove all empty parents.
Arguments:
fs (FS): A filesystem instance.
path (str): Path to a directory on the filesystem.
|
def remove_empty(fs, path):
    # type: (FS, Text) -> None
    """Remove all empty parents.

    Arguments:
        fs (FS): A filesystem instance.
        path (str): Path to a directory on the filesystem.

    """
    current = abspath(normpath(path))
    try:
        # Walk upwards towards the root, removing each directory until
        # one turns out to be non-empty (or the root is reached).
        while current not in ("", "/"):
            fs.removedir(current)
            current = dirname(current)
    except DirectoryNotEmpty:
        # First non-empty ancestor: stop silently.
        pass
|
Copy data from one file object to another.
Arguments:
src_file (io.IOBase): File open for reading.
dst_file (io.IOBase): File open for writing.
chunk_size (int): Number of bytes to copy at
a time (or `None` to use sensible default).
|
def copy_file_data(src_file, dst_file, chunk_size=None):
    # type: (IO, IO, Optional[int]) -> None
    """Copy data from one file object to another.

    Arguments:
        src_file (io.IOBase): File open for reading.
        dst_file (io.IOBase): File open for writing.
        chunk_size (int): Number of bytes to copy at
            a time (or `None` to use sensible default).

    """
    _chunk_size = chunk_size if chunk_size is not None else 1024 * 1024
    while True:
        chunk = src_file.read(_chunk_size)
        # An empty read (b'' or '') signals end-of-file for both binary
        # and text streams.
        if not chunk:
            break
        dst_file.write(chunk)
|
Get a list of non-existing intermediate directories.
Arguments:
fs (FS): A filesystem instance.
dir_path (str): A path to a new directory on the filesystem.
Returns:
list: A list of non-existing paths.
Raises:
~fs.errors.DirectoryExpected: If a path component
references a file and not a directory.
|
def get_intermediate_dirs(fs, dir_path):
    # type: (FS, Text) -> List[Text]
    """Get a list of non-existing intermediate directories.

    Arguments:
        fs (FS): A filesystem instance.
        dir_path (str): A path to a new directory on the filesystem.

    Returns:
        list: A list of non-existing paths.

    Raises:
        ~fs.errors.DirectoryExpected: If a path component
            references a file and not a directory.

    """
    intermediates = []
    with fs.lock():
        # Walk from the target up towards the root, collecting the
        # components that don't exist yet.
        for path in recursepath(abspath(dir_path), reverse=True):
            try:
                resource = fs.getinfo(path)
            except ResourceNotFound:
                intermediates.append(abspath(path))
            else:
                if resource.is_dir:
                    # Found an existing directory ancestor; stop climbing.
                    break
                # An existing *file* on the path makes the target invalid.
                raise errors.DirectoryExpected(dir_path)
    # Reverse into root-to-leaf order and drop the target itself.
    return intermediates[::-1][:-1]
|
Given a JSON string, it returns it as a
safe formatted HTML
|
def prettify_json(json_string):
    """Given a JSON string, return it as safe, pretty-printed HTML.

    Arguments:
        json_string (str): A JSON document (or arbitrary text).

    Returns:
        str: A ``mark_safe``'d string: ``<pre>``-wrapped, indented JSON
        when ``json_string`` parses, otherwise the escaped input.
    """
    # Local stdlib import so the module-level imports stay untouched.
    from html import escape

    try:
        data = json.loads(json_string)
    except (TypeError, ValueError):
        # Invalid JSON (or not a string at all): show the input escaped.
        # Previously a bare ``except:``, which also swallowed
        # SystemExit / KeyboardInterrupt.
        return mark_safe(escape(json_string))
    # Escape *before* mark_safe: JSON string values may embed markup such
    # as "</pre><script>…", which must not be injected into the page.
    pretty = json.dumps(data, sort_keys=True, indent=4)
    return mark_safe('<pre>' + escape(pretty) + '</pre>')
|
Removes all objects in this table.
This action first displays a confirmation page;
next, it deletes all objects and redirects back to the change list.
|
    def purge_objects(self, request):
        """
        Removes all objects in this table.

        This action first displays a confirmation page;
        next, it deletes all objects and redirects back to the change list.
        """
        def truncate_table(model):
            # Use the configured raw TRUNCATE-style statement when set
            # (much faster than row-by-row deletion); otherwise fall back
            # to the ORM delete.
            if settings.TRUNCATE_TABLE_SQL_STATEMENT:
                from django.db import connection
                sql = settings.TRUNCATE_TABLE_SQL_STATEMENT.format(db_table=model._meta.db_table)
                cursor = connection.cursor()
                cursor.execute(sql)
            else:
                model.objects.all().delete()
        modeladmin = self
        opts = modeladmin.model._meta
        # Check that the user has delete permission for the actual model
        if not request.user.is_superuser:
            raise PermissionDenied
        if not modeladmin.has_delete_permission(request):
            raise PermissionDenied
        # If the user has already confirmed or cancelled the deletion,
        # (eventually) do the deletion and return to the change list view again.
        if request.method == 'POST':
            if 'btn-confirm' in request.POST:
                try:
                    # Count first so the success message can report how
                    # many rows were removed.
                    n = modeladmin.model.objects.count()
                    truncate_table(modeladmin.model)
                    modeladmin.message_user(request, _("Successfully removed %d rows" % n), messages.SUCCESS);
                except Exception as e:
                    modeladmin.message_user(request, _(u'ERROR') + ': %r' % e, messages.ERROR)
            else:
                modeladmin.message_user(request, _("Action cancelled by user"), messages.SUCCESS);
            return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name)))
        context = {
            "title": _("Purge all %s ... are you sure?") % opts.verbose_name_plural,
            "opts": opts,
            "app_label": opts.app_label,
        }
        # Display the confirmation page
        return render(
            request,
            'admin/easyaudit/purge_confirmation.html',
            context
        )
|
Receives a list of strings with app_name.model_name format
and turns them into classes. If an item is already a class
it ignores it.
|
def get_model_list(class_list):
    """
    Receives a list of strings with app_name.model_name format
    and turns them into classes. If an item is already a class
    it ignores it.
    """
    for index in range(len(class_list)):
        entry = class_list[index]
        if isinstance(entry, six.string_types):
            # Replace the dotted path in place with the resolved model class.
            class_list[index] = apps.get_model(entry)
|
Gets the value of a given model instance field.
:param obj: The model instance.
:type obj: Model
:param field: The field you want to find the value of.
:type field: Any
:return: The value of the field as a string.
:rtype: str
|
def get_field_value(obj, field):
    """
    Gets the value of a given model instance field.

    :param obj: The model instance.
    :type obj: Model
    :param field: The field you want to find the value of.
    :type field: Any
    :return: The value of the field as a string.
    :rtype: str
    """
    if isinstance(field, DateTimeField):
        # DateTimeFields are timezone-aware, so we need to convert the field
        # to its naive form before we can accurately compare them for changes.
        try:
            value = field.to_python(getattr(obj, field.name, None))
            if value is not None and settings.USE_TZ and not timezone.is_naive(value):
                value = timezone.make_naive(value, timezone=timezone.utc)
        except ObjectDoesNotExist:
            # Unresolvable relation: fall back to the field default (or None).
            value = field.default if field.default is not NOT_PROVIDED else None
    else:
        try:
            # smart_text normalizes lazy / bytes values to text.
            value = smart_text(getattr(obj, field.name, None))
        except ObjectDoesNotExist:
            value = field.default if field.default is not NOT_PROVIDED else None
    return value
|
Provides delta/difference between two models
:param old: The old state of the model instance.
:type old: Model
:param new: The new state of the model instance.
:type new: Model
:return: A dictionary with the names of the changed fields as keys and a
two tuple of the old and new field values
as value.
:rtype: dict
|
def model_delta(old_model, new_model):
    """Compute the field-level difference between two model snapshots.

    :param old_model: The old state of the model instance.
    :type old_model: Model
    :param new_model: The new state of the model instance.
    :type new_model: Model
    :return: A dict mapping each changed field name to an
        ``[old, new]`` pair of text values, or ``None`` if nothing
        changed.
    :rtype: dict
    """
    changes = {}
    for field in new_model._meta.fields:
        before = get_field_value(old_model, field)
        after = get_field_value(new_model, field)
        if before != after:
            changes[field.name] = [smart_text(before), smart_text(after)]
    return changes if changes else None
|
Returns True or False to indicate whether the instance
should be audited or not, depending on the project settings.
|
def should_audit(instance):
    """Return whether ``instance`` should be audited, based on the
    project's registered/unregistered model settings."""
    # Anything explicitly unregistered is never audited.
    if any(isinstance(instance, cls) for cls in UNREGISTERED_CLASSES):
        return False
    # When a whitelist is configured, only its members are audited.
    if REGISTERED_CLASSES and not any(
            isinstance(instance, cls) for cls in REGISTERED_CLASSES):
        return False
    # all good
    return True
|
https://docs.djangoproject.com/es/1.10/ref/signals/#post-save
|
def pre_save(sender, instance, raw, using, update_fields, **kwargs):
    """Signal handler that records an UPDATE CRUDEvent before a save.

    See https://docs.djangoproject.com/es/1.10/ref/signals/#pre-save

    Creations are skipped here (the instance has no pk yet); only
    updates produce a CRUDEvent, including a JSON diff of the changed
    fields. Returns early when loading fixtures (``raw``).
    """
    if raw:
        # Return if loading Fixtures
        return
    try:
        with transaction.atomic():
            if not should_audit(instance):
                return False
            try:
                object_json_repr = serializers.serialize("json", [instance])
            except Exception:
                # We need a better way for this to work. ManyToMany will fail on pre_save on create
                return None
            # created or updated? (no pk yet means the row is new)
            created = instance.pk is None
            if not created:
                old_model = sender.objects.get(pk=instance.pk)
                delta = model_delta(old_model, instance)
                changed_fields = json.dumps(delta)
                event_type = CRUDEvent.UPDATE
            # user
            try:
                user = get_current_user()
                # validate that the user still exists
                user = get_user_model().objects.get(pk=user.pk)
            except Exception:
                # Fix: was a bare `except:`; narrowed so system exits
                # and keyboard interrupts are no longer swallowed.
                user = None
            if isinstance(user, AnonymousUser):
                user = None
            # callbacks
            kwargs['request'] = get_current_request()  # make request available for callbacks
            create_crud_event = all(
                callback(instance, object_json_repr, created, raw, using, update_fields, **kwargs)
                for callback in CRUD_DIFFERENCE_CALLBACKS if callable(callback))
            # create crud event only if all callbacks returned True
            if create_crud_event and not created:
                c_t = ContentType.objects.get_for_model(instance)
                sid = transaction.savepoint()
                try:
                    with transaction.atomic():
                        crud_event = CRUDEvent.objects.create(
                            event_type=event_type,
                            object_repr=str(instance),
                            object_json_repr=object_json_repr,
                            changed_fields=changed_fields,
                            content_type_id=c_t.id,
                            object_id=instance.pk,
                            user_id=getattr(user, 'id', None),
                            datetime=timezone.now(),
                            user_pk_as_string=str(user.pk) if user else user
                        )
                except Exception:
                    logger.exception(
                        "easy audit had a pre-save exception on CRUDEvent creation. instance: {}, instance pk: {}".format(
                            instance, instance.pk))
                    transaction.savepoint_rollback(sid)
    except Exception:
        logger.exception('easy audit had a pre-save exception.')
|
Gets the name of the reverse m2m accessor from `model1` to `model2`
For example, if User has a ManyToManyField connected to Group,
`_m2m_rev_field_name(Group, User)` retrieves the name of the field on
Group that lists a group's Users. (By default, this field is called
`user_set`, but the name can be overridden).
|
def _m2m_rev_field_name(model1, model2):
"""Gets the name of the reverse m2m accessor from `model1` to `model2`
For example, if User has a ManyToManyField connected to Group,
`_m2m_rev_field_name(Group, User)` retrieves the name of the field on
Group that lists a group's Users. (By default, this field is called
`user_set`, but the name can be overridden).
"""
m2m_field_names = [
rel.get_accessor_name() for rel in model1._meta.get_fields()
if rel.many_to_many
and rel.auto_created
and rel.related_model == model2
]
return m2m_field_names[0]
|
https://docs.djangoproject.com/es/1.10/ref/signals/#m2m-changed
|
def m2m_changed(sender, instance, action, reverse, model, pk_set, using, **kwargs):
    """Signal handler that records a CRUDEvent for m2m relation changes.

    See https://docs.djangoproject.com/es/1.10/ref/signals/#m2m-changed

    Only the ``post_add``/``post_remove``/``post_clear`` actions are
    recorded; reverse-relation changes get their own event type and
    extra fields in the JSON representation.
    """
    try:
        with transaction.atomic():
            if not should_audit(instance):
                return False
            if action not in ("post_add", "post_remove", "post_clear"):
                return False
            object_json_repr = serializers.serialize("json", [instance])
            if reverse:
                event_type = CRUDEvent.M2M_CHANGE_REV
                # add reverse M2M changes to event. must use json lib because
                # django serializers ignore extra fields.
                tmp_repr = json.loads(object_json_repr)
                m2m_rev_field = _m2m_rev_field_name(instance._meta.concrete_model, model)
                related_instances = getattr(instance, m2m_rev_field).all()
                related_ids = [r.pk for r in related_instances]
                tmp_repr[0]['m2m_rev_model'] = force_text(model._meta)
                tmp_repr[0]['m2m_rev_pks'] = related_ids
                tmp_repr[0]['m2m_rev_action'] = action
                object_json_repr = json.dumps(tmp_repr)
            else:
                event_type = CRUDEvent.M2M_CHANGE
            # user
            try:
                user = get_current_user()
                # validate that the user still exists
                user = get_user_model().objects.get(pk=user.pk)
            except Exception:
                # Fix: was a bare `except:`.
                user = None
            if isinstance(user, AnonymousUser):
                user = None
            c_t = ContentType.objects.get_for_model(instance)
            sid = transaction.savepoint()
            try:
                with transaction.atomic():
                    crud_event = CRUDEvent.objects.create(
                        event_type=event_type,
                        object_repr=str(instance),
                        object_json_repr=object_json_repr,
                        content_type_id=c_t.id,
                        object_id=instance.pk,
                        user_id=getattr(user, 'id', None),
                        datetime=timezone.now(),
                        user_pk_as_string=str(user.pk) if user else user
                    )
            except Exception:
                # Fix: the message was copy-pasted from pre_save and
                # wrongly said "pre-save".
                logger.exception(
                    "easy audit had an m2m-changed exception on CRUDEvent creation. instance: {}, instance pk: {}".format(
                        instance, instance.pk))
                transaction.savepoint_rollback(sid)
    except Exception:
        logger.exception('easy audit had an m2m-changed exception.')
|
https://docs.djangoproject.com/es/1.10/ref/signals/#post-delete
|
def post_delete(sender, instance, using, **kwargs):
    """Signal handler that records a DELETE CRUDEvent after a delete.

    See https://docs.djangoproject.com/es/1.10/ref/signals/#post-delete
    """
    try:
        with transaction.atomic():
            if not should_audit(instance):
                return False
            object_json_repr = serializers.serialize("json", [instance])
            # user
            try:
                user = get_current_user()
                # validate that the user still exists
                user = get_user_model().objects.get(pk=user.pk)
            except Exception:
                # Fix: was a bare `except:`.
                user = None
            if isinstance(user, AnonymousUser):
                user = None
            c_t = ContentType.objects.get_for_model(instance)
            sid = transaction.savepoint()
            try:
                with transaction.atomic():
                    # crud event
                    crud_event = CRUDEvent.objects.create(
                        event_type=CRUDEvent.DELETE,
                        object_repr=str(instance),
                        object_json_repr=object_json_repr,
                        content_type_id=c_t.id,
                        object_id=instance.pk,
                        user_id=getattr(user, 'id', None),
                        datetime=timezone.now(),
                        user_pk_as_string=str(user.pk) if user else user
                    )
            except Exception:
                # Fix: the message was copy-pasted from pre_save and
                # wrongly said "pre-save".
                logger.exception(
                    "easy audit had a post-delete exception on CRUDEvent creation. instance: {}, instance pk: {}".format(
                        instance, instance.pk))
                transaction.savepoint_rollback(sid)
    except Exception:
        logger.exception('easy audit had a post-delete exception.')
|
Query the information of all the GPUs on local machine
|
def new_query():
    """Query the information of all the GPUs on local machine

    Initialises NVML, builds one info dict (wrapped in a GPUStat) per
    device, reads the driver version, shuts NVML down, and returns a
    GPUStatCollection.
    """
    N.nvmlInit()
    def _decode(b):
        # NVML returns bytes on Python 3; normalise to str.
        if isinstance(b, bytes):
            return b.decode()  # for python3, to unicode
        return b
    def get_gpu_info(handle):
        """Get one GPU information specified by nvml handle"""
        def get_process_info(nv_process):
            """Get the process information of specific pid"""
            process = {}
            # May raise psutil.NoSuchProcess if the process is gone;
            # the caller catches that and skips the entry.
            ps_process = psutil.Process(pid=nv_process.pid)
            process['username'] = ps_process.username()
            # cmdline returns full path;
            # as in `ps -o comm`, get short cmdnames.
            _cmdline = ps_process.cmdline()
            if not _cmdline:
                # sometimes, zombie or unknown (e.g. [kworker/8:2H])
                process['command'] = '?'
            else:
                process['command'] = os.path.basename(_cmdline[0])
            # Bytes to MBytes
            process['gpu_memory_usage'] = nv_process.usedGpuMemory // MB
            process['pid'] = nv_process.pid
            return process
        name = _decode(N.nvmlDeviceGetName(handle))
        uuid = _decode(N.nvmlDeviceGetUUID(handle))
        # Each metric below is optional: NVML raises NVMLError when a
        # query is unsupported on this device, so fall back to None.
        try:
            temperature = N.nvmlDeviceGetTemperature(
                handle, N.NVML_TEMPERATURE_GPU
            )
        except N.NVMLError:
            temperature = None  # Not supported
        try:
            memory = N.nvmlDeviceGetMemoryInfo(handle)  # in Bytes
        except N.NVMLError:
            memory = None  # Not supported
        try:
            utilization = N.nvmlDeviceGetUtilizationRates(handle)
        except N.NVMLError:
            utilization = None  # Not supported
        try:
            power = N.nvmlDeviceGetPowerUsage(handle)
        except N.NVMLError:
            power = None
        try:
            power_limit = N.nvmlDeviceGetEnforcedPowerLimit(handle)
        except N.NVMLError:
            power_limit = None
        try:
            nv_comp_processes = \
                N.nvmlDeviceGetComputeRunningProcesses(handle)
        except N.NVMLError:
            nv_comp_processes = None  # Not supported
        try:
            nv_graphics_processes = \
                N.nvmlDeviceGetGraphicsRunningProcesses(handle)
        except N.NVMLError:
            nv_graphics_processes = None  # Not supported
        if nv_comp_processes is None and nv_graphics_processes is None:
            processes = None
        else:
            processes = []
            nv_comp_processes = nv_comp_processes or []
            nv_graphics_processes = nv_graphics_processes or []
            for nv_process in nv_comp_processes + nv_graphics_processes:
                # TODO: could be more information such as system memory
                # usage, CPU percentage, create time etc.
                try:
                    process = get_process_info(nv_process)
                    processes.append(process)
                except psutil.NoSuchProcess:
                    # TODO: add some reminder for NVML broken context
                    # e.g. nvidia-smi reset or reboot the system
                    pass
        index = N.nvmlDeviceGetIndex(handle)
        # power values come back in milliwatts; report watts below.
        gpu_info = {
            'index': index,
            'uuid': uuid,
            'name': name,
            'temperature.gpu': temperature,
            'utilization.gpu': utilization.gpu if utilization else None,
            'power.draw': power // 1000 if power is not None else None,
            'enforced.power.limit': power_limit // 1000
            if power_limit is not None else None,
            # Convert bytes into MBytes
            'memory.used': memory.used // MB if memory else None,
            'memory.total': memory.total // MB if memory else None,
            'processes': processes,
        }
        return gpu_info
    # 1. get the list of gpu and status
    gpu_list = []
    device_count = N.nvmlDeviceGetCount()
    for index in range(device_count):
        handle = N.nvmlDeviceGetHandleByIndex(index)
        gpu_info = get_gpu_info(handle)
        gpu_stat = GPUStat(gpu_info)
        gpu_list.append(gpu_stat)
    # 2. additional info (driver version, etc).
    try:
        driver_version = _decode(N.nvmlSystemGetDriverVersion())
    except N.NVMLError:
        driver_version = None  # N/A
    N.nvmlShutdown()
    return GPUStatCollection(gpu_list, driver_version=driver_version)
|
Display the GPU query results into standard output.
|
def print_gpustat(json=False, debug=False, **kwargs):
    '''
    Query the GPUs and print the result to standard output.

    :param json: emit JSON instead of the formatted table.
    :param debug: print the traceback when the NVML query fails.
    Remaining keyword arguments are forwarded to ``print_formatted``.
    '''
    try:
        gpu_stats = GPUStatCollection.new_query()
    except Exception as e:
        sys.stderr.write('Error on querying NVIDIA devices.'
                         ' Use --debug flag for details\n')
        if debug:
            try:
                import traceback
                traceback.print_exc(file=sys.stderr)
            except Exception:
                # NVMLError can't be processed by traceback:
                # https://bugs.python.org/issue28603
                # as a workaround, simply re-throw the exception
                raise e
        sys.exit(1)
    if json:
        gpu_stats.print_json(sys.stdout)
        return
    gpu_stats.print_formatted(sys.stdout, **kwargs)
|
fetch instruments by ids
|
def fetch_list(cls, client, ids):
    """Fetch option instruments by id, batching 50 ids per request and
    following pagination within each batch."""
    results = []
    base_url = "https://api.robinhood.com/options/instruments/"
    for batch in chunked_list(ids, 50):
        data = client.get(base_url, params={"ids": ",".join(batch)})
        page_results = data["results"]
        while data["next"]:
            data = client.get(data["next"])
            page_results.extend(data["results"])
        results.extend(page_results)
    return results
|
fetch all option instruments in an options chain
- expiration_dates = optionally scope
|
def in_chain(cls, client, chain_id, expiration_dates=None):
    """
    fetch all option instruments in an options chain
    - expiration_dates = optionally scope

    :param client: HTTP client used to page through the API.
    :param chain_id: id of the options chain to fetch.
    :param expiration_dates: optional list of expiration-date strings
        used to scope the result; defaults to no scoping.
    :return: list of option instrument dicts across all pages.
    """
    # Fix: default used to be a mutable `[]`; use the None sentinel to
    # avoid the shared-mutable-default pitfall (behavior unchanged).
    if expiration_dates is None:
        expiration_dates = []
    request_url = "https://api.robinhood.com/options/instruments/"
    params = {
        "chain_id": chain_id,
        "expiration_dates": ",".join(expiration_dates)
    }
    data = client.get(request_url, params=params)
    results = data['results']
    while data['next']:
        data = client.get(data['next'])
        results.extend(data['results'])
    return results
|
unroll option orders like this,
https://github.com/joshfraser/robinhood-to-csv/blob/master/csv-options-export.py
|
def unroll_option_legs(cls, client, option_orders):
    '''
    unroll option orders like this,
    https://github.com/joshfraser/robinhood-to-csv/blob/master/csv-options-export.py

    Produces one flat dict per (order, leg, execution) triple.
    '''
    #
    # @TODO write this with python threads to make concurrent HTTP requests
    #
    skipped_order_keys = ('legs', 'price', 'type', 'premium',
                          'processed_premium', 'response_category',
                          'cancel_url')
    flattened = []
    for option_order in option_orders:
        for leg_number, leg in enumerate(option_order['legs'], start=1):
            for execution in leg['executions']:
                # Start from the order fields, minus the ones we
                # re-derive or flatten below.
                row = {k: v for k, v in option_order.items()
                       if k not in skipped_order_keys}
                row['order_type'] = option_order['type']
                contract = client.get(leg['option'])
                row['leg'] = leg_number
                row['symbol'] = contract['chain_symbol']
                row['strike_price'] = contract['strike_price']
                row['expiration_date'] = contract['expiration_date']
                row['contract_type'] = contract['type']
                for k, v in leg.items():
                    if k not in ('id', 'executions'):
                        row[k] = v
                # Buys are cash out (negative); contracts are x100.
                sign = -1.0 if leg['side'] == 'buy' else 1.0
                row['price'] = float(execution['price']) * 100.0 * sign
                row['execution_id'] = execution['id']
                flattened.append(row)
    return flattened
|
params:
- client
- direction
- legs
- price
- quantity
- time_in_force
- trigger
- order_type
- run_validations. default = True
|
def submit(cls, client, direction, legs, price, quantity, time_in_force,
           trigger, order_type, run_validations=True):
    '''
    Place an option order.

    params:
    - client
    - direction ("debit" or "credit")
    - legs
    - price (str)
    - quantity (int)
    - time_in_force ("gfd" or "gtc")
    - trigger ("immediate")
    - order_type ("limit" or "market")
    - run_validations. default = True
    '''
    if run_validations:
        # Fail fast on malformed input before hitting the API.
        assert direction in ["debit", "credit"]
        assert type(price) is str
        assert type(quantity) is int
        assert time_in_force in ["gfd", "gtc"]
        assert trigger in ["immediate"]
        assert order_type in ["limit", "market"]
        assert cls._validate_legs(legs) is True
    order = {
        "account": client.account_url,
        "direction": direction,
        "legs": legs,
        "price": price,
        "quantity": quantity,
        "time_in_force": time_in_force,
        "trigger": trigger,
        "type": order_type,
        "override_day_trade_checks": False,
        "override_dtbp_checks": False,
        "ref_id": str(uuid.uuid4()),
    }
    request_url = "https://api.robinhood.com/options/orders/"
    return client.post(request_url, payload=json.dumps(order))
|
totally just playing around ideas for the API.
this IC sells
- credit put spread
- credit call spread
the approach
- set width for the wing spread (eg, 1, ie, 1 unit width spread)
- set delta for inner leg of the put credit spread (eg, -0.2)
- set delta for inner leg of the call credit spread (eg, 0.1)
|
def generate_by_deltas(cls, options,
        width, put_inner_lte_delta, call_inner_lte_delta):
    """
    totally just playing around ideas for the API.
    this IC sells
    - credit put spread
    - credit call spread
    the approach
    - set width for the wing spread (eg, 1, ie, 1 unit width spread)
    - set delta for inner leg of the put credit spread (eg, -0.2)
    - set delta for inner leg of the call credit spread (eg, 0.1)

    NOTE(review): disabled as of 0.3.0 — the raise below makes every
    line after it unreachable; the remainder is kept as legacy
    reference code only.
    """
    raise Exception("Not Implemented starting at the 0.3.0 release")
    #
    # put credit spread
    #
    put_options_unsorted = list(
        filter(lambda x: x['type'] == 'put', options))
    put_options = cls.sort_by_strike_price(put_options_unsorted)
    deltas_as_strings = [x['delta'] for x in put_options]
    deltas = cls.strings_to_np_array(deltas_as_strings)
    # inner leg: last strike whose delta is still >= the threshold
    put_inner_index = np.argmin(deltas >= put_inner_lte_delta) - 1
    put_outer_index = put_inner_index - width
    put_inner_leg = cls.gen_leg(
        put_options[put_inner_index]["instrument"], "sell")
    put_outer_leg = cls.gen_leg(
        put_options[put_outer_index]["instrument"], "buy")
    #
    # call credit spread
    #
    call_options_unsorted = list(
        filter(lambda x: x['type'] == 'call', options))
    call_options = cls.sort_by_strike_price(call_options_unsorted)
    deltas_as_strings = [x['delta'] for x in call_options]
    x = np.array(deltas_as_strings)
    # NOTE(review): np.float is a deprecated alias for the builtin
    # float in modern NumPy — irrelevant while unreachable.
    deltas = x.astype(np.float)
    # because deep ITM call options have a delta that comes up as NaN,
    # but are approximately 0.99 or 1.0, I'm replacing Nan with 1.0
    # so np.argmax is able to walk up the index until it finds
    # "call_inner_lte_delta"
    # @TODO change this so (put credit / call credit) spreads work the same
    where_are_NaNs = np.isnan(deltas)
    deltas[where_are_NaNs] = 1.0
    call_inner_index = np.argmax(deltas <= call_inner_lte_delta)
    call_outer_index = call_inner_index + width
    call_inner_leg = cls.gen_leg(
        call_options[call_inner_index]["instrument"], "sell")
    call_outer_leg = cls.gen_leg(
        call_options[call_outer_index]["instrument"], "buy")
    legs = [put_outer_leg, put_inner_leg, call_inner_leg, call_outer_leg]
    #
    # price calcs
    #
    # net credit: sold legs add, bought legs subtract
    price = (
        - Decimal(put_options[put_outer_index]['adjusted_mark_price'])
        + Decimal(put_options[put_inner_index]['adjusted_mark_price'])
        + Decimal(call_options[call_inner_index]['adjusted_mark_price'])
        - Decimal(call_options[call_outer_index]['adjusted_mark_price'])
    )
    #
    # provide max bid ask spread diff
    #
    ic_options = [
        put_options[put_outer_index],
        put_options[put_inner_index],
        call_options[call_inner_index],
        call_options[call_outer_index]
    ]
    max_bid_ask_spread = cls.max_bid_ask_spread(ic_options)
    return {"legs": legs, "price": price,
        "max_bid_ask_spread": max_bid_ask_spread}
|
fetch option chain for instrument
|
def fetch(cls, client, _id, symbol):
    """Fetch the active, tradable option chain for an equity
    instrument, returning the chain whose symbol equals ``symbol``."""
    url = "https://api.robinhood.com/options/chains/"
    params = {
        "equity_instrument_ids": _id,
        "state": "active",
        "tradability": "tradable"
    }
    data = client.get(url, params=params)
    matches = [chain for chain in data["results"]
               if chain["symbol"] == symbol]
    return matches[0]
|
Authenticate using data in `options`
|
def authenticate(self):
    '''
    Authenticate using data in `options`

    Prefers username/password (with optional MFA code); otherwise
    reuses an access/refresh token pair. With no usable credentials the
    client is marked unauthenticated.
    '''
    options = self.options
    if "username" in options and "password" in options:
        self.login_oauth2(options["username"], options["password"],
                          options.get('mfa_code'))
    elif "access_token" in options:
        # NOTE: silently a no-op when refresh_token is missing,
        # matching the original control flow.
        if "refresh_token" in options:
            self.access_token = options["access_token"]
            self.refresh_token = options["refresh_token"]
            self.__set_account_info()
    else:
        self.authenticated = False
    return self.authenticated
|
Execute HTTP GET
|
def get(self, url=None, params=None, retry=True):
    '''
    Execute HTTP GET

    Retries up to HTTP_ATTEMPTS_MAX times. A 400 response re-raises
    immediately; a 403 triggers an OAuth2 re-login (when ``retry`` is
    True) before the next attempt. Returns the decoded JSON body, or
    None when every attempt failed.
    '''
    headers = self._gen_headers(self.access_token, url)
    attempts = 1
    while attempts <= HTTP_ATTEMPTS_MAX:
        res = None
        try:
            res = requests.get(url,
                               headers=headers,
                               params=params,
                               timeout=15,
                               verify=self.certs)
            res.raise_for_status()
            return res.json()
        except requests.exceptions.RequestException as e:
            attempts += 1
            # Fix: on connection-level errors (timeout, DNS, refused)
            # there is no response object at all; the original code
            # crashed here with an unbound `res`.
            if res is None:
                continue
            if res.status_code in [400]:
                raise e
            elif retry and res.status_code in [403]:
                self.relogin_oauth2()
    return None
|
Generate headers, adding in the OAuth2 bearer token if present
|
def _gen_headers(self, bearer, url):
'''
Generate headders, adding in Oauth2 bearer token if present
'''
headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": ("en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, " +
"nl;q=0.6, it;q=0.5"),
"User-Agent": ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) " +
"AppleWebKit/537.36 (KHTML, like Gecko) " +
"Chrome/68.0.3440.106 Safari/537.36"),
}
if bearer:
headers["Authorization"] = "Bearer {0}".format(bearer)
if url == "https://api.robinhood.com/options/orders/":
headers["Content-Type"] = "application/json; charset=utf-8"
return headers
|
Login using username and password
|
def login_oauth2(self, username, password, mfa_code=None):
    '''
    Login using username and password

    :param username: Robinhood account username.
    :param password: account password.
    :param mfa_code: optional multi-factor auth code.
    :return: True when authentication succeeded.
    :raises AuthenticationError: when the credentials (or MFA code) are
        rejected, or when MFA is required but no code was supplied.
    '''
    data = {
        "grant_type": "password",
        "scope": "internal",
        "client_id": CLIENT_ID,
        "expires_in": 86400,
        "password": password,
        "username": username
    }
    if mfa_code is not None:
        data['mfa_code'] = mfa_code
    url = "https://api.robinhood.com/oauth2/token/"
    res = self.post(url, payload=data, retry=False)
    if res is None:
        if mfa_code is None:
            msg = ("Client.login_oauth2(). Could not authenticate. Check "
                   + "username and password.")
        else:
            # Fix: the original concatenation was missing the space
            # between "Check" and "username".
            msg = ("Client.login_oauth2(). Could not authenticate. Check " +
                   "username and password, and enter a valid MFA code.")
        raise AuthenticationError(msg)
    elif res.get('mfa_required') is True:
        msg = "Client.login_oauth2(). Couldn't authenticate. MFA required."
        raise AuthenticationError(msg)
    self.access_token = res["access_token"]
    self.refresh_token = res["refresh_token"]
    self.mfa_code = res["mfa_code"]
    self.scope = res["scope"]
    self.__set_account_info()
    return self.authenticated
|
(Re)login using the Oauth2 refresh token
|
def relogin_oauth2(self):
    '''
    (Re)login using the Oauth2 refresh token

    Replaces the access/refresh token pair (and mfa_code/scope) with
    the values returned by the token endpoint.
    '''
    payload = {
        "grant_type": "refresh_token",
        "refresh_token": self.refresh_token,
        "scope": "internal",
        "client_id": CLIENT_ID,
        "expires_in": 86400,
    }
    res = self.post("https://api.robinhood.com/oauth2/token/",
                    payload=payload, retry=False)
    self.access_token = res["access_token"]
    self.refresh_token = res["refresh_token"]
    self.mfa_code = res["mfa_code"]
    self.scope = res["scope"]
|
Logout for given Oauth2 bearer token
|
def logout_oauth2(self):
    '''
    Revoke the OAuth2 refresh token and clear all session state.

    :return: True on success.
    :raises AuthenticationError: when the API rejects the revocation.
    '''
    url = "https://api.robinhood.com/oauth2/revoke_token/"
    payload = {
        "client_id": CLIENT_ID,
        "token": self.refresh_token,
    }
    if self.post(url, payload=payload) is not None:
        raise AuthenticationError("fast_arrow could not log out.")
    self.account_id = None
    self.account_url = None
    self.access_token = None
    self.refresh_token = None
    self.mfa_code = None
    self.scope = None
    self.authenticated = False
    return True
|
fetch data for stock
|
def fetch(cls, client, symbol):
    """Fetch instrument data for a single stock ``symbol``."""
    assert type(symbol) is str
    url = "https://api.robinhood.com/instruments/?symbol={0}".format(symbol)
    return client.get(url)["results"][0]
|
fetch data for multiple stocks
|
def all(cls, client, symbols):
    """Fetch instrument data for multiple stock ``symbols``, following
    pagination until every page has been collected."""
    request_url = "https://api.robinhood.com/instruments/"
    data = client.get(request_url, params={"symbol": ",".join(symbols)})
    results = data["results"]
    while data["next"]:
        data = client.get(data["next"])
        results.extend(data["results"])
    return results
|
Generate Pandas Dataframe of Vertical
:param options: python dict of options.
:param width: offset for spread. Must be integer.
:param spread_type: call or put. defaults to "call".
:param spread_kind: buy or sell. defaults to "buy".
|
def gen_df(cls, options, width, spread_type="call", spread_kind="buy"):
    """
    Generate Pandas Dataframe of Vertical
    :param options: python dict of options.
    :param width: offset for spread. Must be integer.
    :param spread_type: call or put. defaults to "call".
    :param spread_kind: buy or sell. defaults to "buy".

    Pairs each option row with the row ``width`` strikes away (within
    the same expiration date) via a group-wise shift, adding
    ``*_shifted`` columns plus margin and premium columns.
    """
    assert type(width) is int
    assert spread_type in ["call", "put"]
    assert spread_kind in ["buy", "sell"]
    # get CALLs or PUTs
    options = list(filter(lambda x: x["type"] == spread_type, options))
    # shift direction: puts pair with lower strikes (positive shift),
    # calls with higher strikes (negative shift), given ascending sort.
    coef = (1 if spread_type == "put" else -1)
    shift = width * coef
    df = pd.DataFrame.from_dict(options)
    df['expiration_date'] = pd.to_datetime(
        df['expiration_date'], format="%Y-%m-%d")
    df['adjusted_mark_price'] = pd.to_numeric(df['adjusted_mark_price'])
    df['strike_price'] = pd.to_numeric(df['strike_price'])
    df.sort_values(["expiration_date", "strike_price"], inplace=True)
    # Shift within each expiration so spreads never cross expiries.
    for k, v in df.groupby("expiration_date"):
        sdf = v.shift(shift)
        df.loc[v.index, "strike_price_shifted"] = sdf["strike_price"]
        df.loc[v.index, "delta_shifted"] = sdf["delta"]
        df.loc[v.index, "volume_shifted"] = sdf["volume"]
        df.loc[v.index, "open_interest_shifted"] = sdf["open_interest"]
        df.loc[v.index, "instrument_shifted"] = sdf["instrument"]
        df.loc[v.index, "adjusted_mark_price_shift"] = \
            sdf["adjusted_mark_price"]
        # Selling a vertical requires margin equal to the strike gap;
        # buying requires none.
        if spread_kind == "sell":
            df.loc[v.index, "margin"] = \
                abs(sdf["strike_price"] - v["strike_price"])
        else:
            df.loc[v.index, "margin"] = 0.0
        # Net premium: pay when buying, collect when selling.
        if spread_kind == "buy":
            df.loc[v.index, "premium_adjusted_mark_price"] = (
                v["adjusted_mark_price"] - sdf["adjusted_mark_price"])
        elif spread_kind == "sell":
            df.loc[v.index, "premium_adjusted_mark_price"] = (
                sdf["adjusted_mark_price"] - v["adjusted_mark_price"])
    return df
|
fetch data for multiple stocks
|
def all(cls, client):
    """Fetch every order, following pagination to the last page."""
    data = client.get("https://api.robinhood.com/orders/")
    results = data["results"]
    while data["next"]:
        data = client.get(data["next"])
        results.extend(data["results"])
    return results
|
Break lists into small lists for processing.
|
def chunked_list(_list, _chunk_size=50):
    """Yield successive ``_chunk_size``-sized slices of ``_list``."""
    start = 0
    while start < len(_list):
        yield _list[start:start + _chunk_size]
        start += _chunk_size
|
create instrument urls, fetch, return results
|
def quote_by_instruments(cls, client, ids):
    """Build instrument URLs from ``ids`` and fetch their quotes."""
    base_url = "https://api.robinhood.com/instruments"
    instrument_urls = ["{}/{}/".format(base_url, instrument_id)
                       for instrument_id in ids]
    return cls.quotes_by_instrument_urls(client, instrument_urls)
|
fetch and return results
|
def quotes_by_instrument_urls(cls, client, urls):
    """Fetch marketdata quotes for the given instrument URLs,
    following pagination when a next page is present."""
    endpoint = "https://api.robinhood.com/marketdata/quotes/"
    data = client.get(endpoint, params={"instruments": ",".join(urls)})
    results = data["results"]
    # Some responses omit "next" entirely; .get() covers both cases.
    while data.get("next"):
        data = client.get(data["next"])
        results.extend(data["results"])
    return results
|
fetch all option positions
|
def all(cls, client, **kwargs):
    """
    fetch all option positions

    Keyword args:
    - max_date: stop paging once the newest-seen position is older.
    - max_fetches: hard cap on the number of page requests.
    """
    max_date = kwargs.get('max_date')
    max_fetches = kwargs.get('max_fetches')
    url = 'https://api.robinhood.com/options/positions/'
    data = client.get(url, params={})
    results = data["results"]
    if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]):
        return results
    if max_fetches == 1:
        return results
    fetches = 1
    while data["next"]:
        fetches += 1
        data = client.get(data["next"])
        results.extend(data["results"])
        if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]):
            return results
        if max_fetches and (fetches >= max_fetches):
            return results
    return results
|
Fetch and merge in Marketdata for each option position
|
def mergein_marketdata_list(cls, client, option_positions):
    """
    Fetch and merge in Marketdata for each option position

    :param client: HTTP client used for the marketdata request.
    :param option_positions: list of position dicts, each with an
        "option" instrument URL.
    :return: new list of dicts, each position merged with its quote.
    """
    ids = cls._extract_ids(option_positions)
    mds = OptionMarketdata.quotes_by_instrument_ids(client, ids)
    # Index quotes by instrument URL once instead of scanning the whole
    # quote list per position (was O(n^2)). setdefault keeps the FIRST
    # match, like the original linear scan did.
    md_by_instrument = {}
    for md in mds:
        md_by_instrument.setdefault(md['instrument'], md)
    results = []
    for op in option_positions:
        md = md_by_instrument[op['option']]
        # there is no overlap in keys so this is fine
        merged_dict = dict(list(op.items()) + list(md.items()))
        results.append(merged_dict)
    return results
|
Evaluates raw Python string like `ast.literal_eval` does
|
def evaluateRawString(self, escaped):
    """Evaluate a raw Python string like `ast.literal_eval` does.

    Returns the list of character codes after resolving backslash
    escapes (via ``self.ESCAPE_CHARS``, keyed by ord value) and
    ``\\xHH`` hex escapes.

    :raises ValueError: on an unknown escape character.
    """
    unescaped = []
    # hexdigit state: None = not in \xHH; '' = expect first digit;
    # one char = got first digit, expect second.
    hexdigit = None
    escape = False
    for char in escaped:
        number = ord(char)
        if hexdigit is not None:
            if hexdigit:
                # Second hex digit: emit the byte directly. Fix: the
                # value used to fall through to the backslash check
                # below, so a hex escape resolving to 0x5c ('\\')
                # wrongly started a new escape sequence.
                unescaped.append((int(hexdigit, 16) << 4) + int(char, 16))
                hexdigit = None
            else:
                hexdigit = char
            continue
        if escape:
            escape = False
            try:
                number = self.ESCAPE_CHARS[number]
            except KeyError:
                if number == 120:  # 'x' starts a \xHH escape
                    hexdigit = ''
                    continue
                raise ValueError('Unknown escape character %c' % char)
        elif number == 92:  # '\'
            escape = True
            continue
        unescaped.append(number)
    return unescaped
|
Return a list corresponding to the lines of text in the `txt` list
indented by `indent`. Prepend instead the string given in `prepend` to the
beginning of the first line. Note that if len(prepend) > len(indent), then
`prepend` will be truncated (doing better is tricky!). This preserves a
special '' entry at the end of `txt` (see `do_para` for the meaning).
|
def shift(txt, indent = '    ', prepend = ''):
    """Return a list corresponding to the lines of text in the `txt` list
    indented by `indent`. Prepend instead the string given in `prepend` to the
    beginning of the first line. Note that if len(prepend) > len(indent), then
    `prepend` will be truncated (doing better is tricky!). This preserves a
    special '' entry at the end of `txt` (see `do_para` for the meaning).
    """
    if type(indent) is int:
        indent = indent * ' '
    has_end_marker = txt[-1:] == ['']
    lines = ''.join(txt).splitlines(True)
    # Indent every line but the first; blank lines stay blank unless
    # the indent itself contains non-whitespace.
    for idx, line in enumerate(lines):
        if idx and (line.strip() or indent.strip()):
            lines[idx] = indent + line
    if not lines:
        # NOTE: returns a plain string here, matching the original
        # behavior for empty input.
        return prepend
    prepend = prepend[:len(indent)]
    indent = indent[len(prepend):]
    lines[0] = prepend + indent + lines[0]
    result = [''.join(lines)]
    if has_end_marker:
        result.append('')
    return result
|
Parse a given node. This function in turn calls the
`parse_<nodeType>` functions which handle the respective
nodes.
|
def parse(self, node):
    """Dispatch ``node`` to the ``parse_<ClassName>`` handler matching
    its class name."""
    handler_name = "parse_%s" % node.__class__.__name__
    getattr(self, handler_name)(node)
|
Parse the subnodes of a given node. Subnodes with tags in the
`ignore` list are ignored. If pieces is given, use this as target for
the parse results instead of self.pieces. Indent all lines by the amount
given in `indent`. Note that the initial content in `pieces` is not
indented. The final result is in any case added to self.pieces.
|
def subnode_parse(self, node, pieces=None, indent=0, ignore=[], restrict=None):
    """Parse the subnodes of a given node. Subnodes with tags in the
    `ignore` list are ignored. If pieces is given, use this as target for
    the parse results instead of self.pieces. Indent all lines by the amount
    given in `indent`. Note that the initial content in `pieces` is not
    indented. The final result is in any case added to self.pieces.
    `restrict`, when given, parses ONLY element subnodes whose tag is in
    it (and `ignore` is then not consulted)."""
    # Swap in the caller-provided target list, remembering the old one.
    if pieces is not None:
        old_pieces, self.pieces = self.pieces, pieces
    else:
        old_pieces = []
    if type(indent) is int:
        indent = indent * ' '
    if len(indent) > 0:
        # Peel off the first len(indent) chars of the accumulated text;
        # shift() re-attaches them (unindented) to the first line later.
        pieces = ''.join(self.pieces)
        i_piece = pieces[:len(indent)]
        if self.pieces[-1:] == ['']:
            # preserve the special trailing '' marker
            self.pieces = [pieces[len(indent):]] + ['']
        elif self.pieces != []:
            self.pieces = [pieces[len(indent):]]
        self.indent += len(indent)
    for n in node.childNodes:
        if restrict is not None:
            # whitelist mode: only elements with tags in `restrict`
            if n.nodeType == n.ELEMENT_NODE and n.tagName in restrict:
                self.parse(n)
        elif n.nodeType != n.ELEMENT_NODE or n.tagName not in ignore:
            self.parse(n)
    if len(indent) > 0:
        self.pieces = shift(self.pieces, indent, i_piece)
        self.indent -= len(indent)
    # Merge the (possibly swapped) result back into self.pieces.
    old_pieces.extend(self.pieces)
    self.pieces = old_pieces
|
Parse the subnodes of a given node. Subnodes with tags in the
`ignore` list are ignored. Prepend `pre_char` and append `post_char` to
the output in self.pieces.
|
def surround_parse(self, node, pre_char, post_char):
    """Emit ``pre_char``, then the parsed subnodes of ``node``, then
    ``post_char`` into the output pieces."""
    self.add_text(pre_char)
    self.subnode_parse(node)
    self.add_text(post_char)
|
Given a node and a name, return a list of child `ELEMENT_NODEs`, that
have a `tagName` matching the `name`. Search recursively for `recursive`
levels.
|
def get_specific_subnodes(self, node, name, recursive=0):
    """Collect child ELEMENT_NODEs of ``node`` whose ``tagName``
    equals ``name``, descending up to ``recursive`` extra levels."""
    element_children = [child for child in node.childNodes
                        if child.nodeType == child.ELEMENT_NODE]
    matches = [child for child in element_children
               if child.tagName == name]
    if recursive > 0:
        for child in element_children:
            matches.extend(
                self.get_specific_subnodes(child, name, recursive - 1))
    return matches
|
Given a node and a sequence of strings in `names`, return a
dictionary containing the names as keys and child
`ELEMENT_NODEs`, that have a `tagName` equal to the name.
|
def get_specific_nodes(self, node, names):
    """Map each name in ``names`` to the child ELEMENT_NODE of
    ``node`` carrying that ``tagName`` (last one wins on duplicates)."""
    return {child.tagName: child
            for child in node.childNodes
            if child.nodeType == child.ELEMENT_NODE
            and child.tagName in names}
|
Adds text corresponding to `value` into `self.pieces`.
|
def add_text(self, value):
"""Adds text corresponding to `value` into `self.pieces`."""
if isinstance(value, (list, tuple)):
self.pieces.extend(value)
else:
self.pieces.append(value)
|
Make sure to create an empty line. This is overridden, if the previous
text ends with the special marker ''. In that case, nothing is done.
|
def start_new_paragraph(self):
"""Make sure to create an empty line. This is overridden, if the previous
text ends with the special marker ''. In that case, nothing is done.
"""
if self.pieces[-1:] == ['']: # respect special marker
return
elif self.pieces == []: # first paragraph, add '\n', override with ''
self.pieces = ['\n']
elif self.pieces[-1][-1:] != '\n': # previous line not ended
self.pieces.extend([' \n' ,'\n'])
else: #default
self.pieces.append('\n')
|
Add line of text and wrap such that subsequent lines are indented
by `indent` spaces.
|
def add_line_with_subsequent_indent(self, line, indent=4):
"""Add line of text and wrap such that subsequent lines are indented
by `indent` spaces.
"""
if isinstance(line, (list, tuple)):
line = ''.join(line)
line = line.strip()
width = self.textwidth-self.indent-indent
wrapped_lines = textwrap.wrap(line[indent:], width=width)
for i in range(len(wrapped_lines)):
if wrapped_lines[i] != '':
wrapped_lines[i] = indent * ' ' + wrapped_lines[i]
self.pieces.append(line[:indent] + '\n'.join(wrapped_lines)[indent:] + ' \n')
|
Return the string representation of the node or list of nodes by parsing the
subnodes, but returning the result as a string instead of adding it to `self.pieces`.
Note that this allows extracting text even if the node is in the ignore list.
|
def extract_text(self, node):
"""Return the string representation of the node or list of nodes by parsing the
subnodes, but returning the result as a string instead of adding it to `self.pieces`.
Note that this allows extracting text even if the node is in the ignore list.
"""
if not isinstance(node, (list, tuple)):
node = [node]
pieces, self.pieces = self.pieces, ['']
for n in node:
for sn in n.childNodes:
self.parse(sn)
ret = ''.join(self.pieces)
self.pieces = pieces
return ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.