| docstring (stringlengths 52–499) | function (stringlengths 67–35.2k) | __index_level_0__ (int64 52.6k–1.16M) |
|---|---|---|
List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): If True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
|
def list_objects(self, path='', relative=False, first_level=False,
max_request_entries=None):
entries = 0
next_values = []
max_request_entries_arg = None
if not relative:
path = self.relpath(path)
# From root
if not path:
objects = self._list_locators()
# Sub directory
else:
objects = self._list_objects(
self.get_client_kwargs(path), max_request_entries)
# Yield file hierarchy
for obj in objects:
# Generate first level objects entries
try:
name, header, is_directory = obj
except ValueError:
# Locators
name, header = obj
is_directory = True
# Start to generate subdirectories content
if is_directory and not first_level:
name = next_path = name.rstrip('/') + '/'
if path:
next_path = '/'.join((path.rstrip('/'), name))
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
next_values.append((
name, self._generate_async(self.list_objects(
next_path, relative=True,
max_request_entries=max_request_entries_arg))))
entries += 1
yield name, header
if entries == max_request_entries:
return
for next_name, generator in next_values:
# Generate other levels objects entries
for name, header in generator:
entries += 1
yield '/'.join((next_name.rstrip('/'), name)), header
if entries == max_request_entries:
return
| 707,830
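The generator above yields first-level entries immediately and queues sub-directory listings, draining them afterwards so deeper levels stream lazily. A minimal standalone sketch of that two-phase pattern, using a nested dict in place of real storage (all names here are illustrative, not the class's API):

def walk(tree, prefix=''):
    pending = []
    # Phase 1: yield first-level entries, queueing sub-directory generators
    for name, value in tree.items():
        if isinstance(value, dict):  # "directory"
            name = name.rstrip('/') + '/'
            pending.append(walk(value, prefix + name))
        yield prefix + name, {}
    # Phase 2: drain the queued generators (deeper levels)
    for generator in pending:
        for path, header in generator:
            yield path, header

tree = {'locator': {'dir': {'file.txt': None}, 'top.txt': None}}
for path, header in walk(tree):
    print(path)
# locator/  locator/dir/  locator/top.txt  locator/dir/file.txt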
|
Return system keyword arguments, removing None values.
Args:
kwargs: system keyword arguments.
Returns:
dict: system keyword arguments.
|
def _system_parameters(**kwargs):
return {key: value for key, value in kwargs.items()
if (value is not None or value == {})}
| 707,850
|
Return the creation time of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
(see the time module).
|
def getctime(self, path=None, client_kwargs=None, header=None):
return self._getctime_from_header(
self.head(path, client_kwargs, header))
| 707,864
|
Return the time of last access of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
float: The number of seconds since the epoch
(see the time module).
|
def getmtime(self, path=None, client_kwargs=None, header=None):
return self._getmtime_from_header(
self.head(path, client_kwargs, header))
| 707,865
|
Get time from header
Args:
header (dict): Object header.
keys (tuple of str): Header keys.
name (str): Method name.
Returns:
float: The number of seconds since the epoch
|
def _get_time(header, keys, name):
for key in keys:
try:
date_value = header.pop(key)
except KeyError:
continue
try:
# String to convert
return to_timestamp(parse(date_value))
except TypeError:
# Already number
return float(date_value)
raise UnsupportedOperation(name)
| 707,866
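A standalone sketch of the same pop-then-parse fallback using only the standard library (the real code relies on dateutil's parse and an internal to_timestamp helper); the header keys below are illustrative:

from email.utils import parsedate_to_datetime

def time_from_header(header, keys):
    for key in keys:
        try:
            value = header.pop(key)
        except KeyError:
            continue
        if isinstance(value, str):
            # HTTP date string, e.g. "Wed, 21 Oct 2015 07:28:00 GMT"
            return parsedate_to_datetime(value).timestamp()
        # Already a number
        return float(value)
    raise KeyError('no time key found')

header = {'Last-Modified': 'Wed, 21 Oct 2015 07:28:00 GMT'}
print(time_from_header(header, ('x-timestamp', 'Last-Modified')))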
|
Return the size, in bytes, of path.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
int: Size in bytes.
|
def getsize(self, path=None, client_kwargs=None, header=None):
return self._getsize_from_header(self.head(path, client_kwargs, header))
| 707,867
|
Return the size from header
Args:
header (dict): Object header.
Returns:
int: Size in bytes.
|
def _getsize_from_header(self, header):
# By default, assumes that the information is in a standard HTTP header
for key in self._SIZE_KEYS:
try:
return int(header.pop(key))
except KeyError:
continue
else:
raise UnsupportedOperation('getsize')
| 707,868
|
Returns object HTTP header.
Args:
path (str): Path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
dict: HTTP header.
|
def head(self, path=None, client_kwargs=None, header=None):
if header is not None:
return header
elif client_kwargs is None:
client_kwargs = self.get_client_kwargs(path)
return self._head(client_kwargs)
| 707,871
|
Get path relative to storage.
Args:
path (str): Absolute path or URL.
Returns:
str: relative path.
|
def relpath(self, path):
for root in self.roots:
# Root is regex, convert to matching root string
if isinstance(root, Pattern):
match = root.match(path)
if not match:
continue
root = match.group(0)
# Split root and relative path
try:
relative = path.split(root, 1)[1]
# Strip "/" only at path start. "/" is used to known if
# path is a directory on some cloud storage.
return relative.lstrip('/')
except IndexError:
continue
return path
| 707,872
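Roots may be plain prefixes or compiled regular expressions; a regex root is first resolved to the concrete prefix it matched, the prefix is then split off, and a single leading '/' is stripped. A standalone sketch of that logic with hypothetical roots:

import re

roots = (re.compile(r'https?://bucket\.example\.com'), 'bucket://')

def relpath(path, roots=roots):
    for root in roots:
        if isinstance(root, re.Pattern):
            match = root.match(path)
            if not match:
                continue
            root = match.group(0)  # concrete matched prefix
        try:
            relative = path.split(root, 1)[1]
            return relative.lstrip('/')
        except IndexError:
            continue
    return path

print(relpath('https://bucket.example.com/dir/key'))  # dir/key
print(relpath('bucket://dir/key'))                    # dir/key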
|
Returns True if path refers to a locator.
Depending on the storage, a locator may be a bucket or container
name, a hostname, ...
Args:
path (str): path or URL.
relative (bool): Path is relative to current root.
Returns:
bool: True if locator.
|
def is_locator(self, path, relative=False):
if not relative:
path = self.relpath(path)
# Bucket is the main directory
return path and '/' not in path.rstrip('/')
| 707,873
|
Split the path into a pair (locator, path).
Args:
path (str): Absolute path or URL.
Returns:
tuple of str: locator, path.
|
def split_locator(self, path):
relative = self.relpath(path)
try:
locator, tail = relative.split('/', 1)
except ValueError:
locator = relative
tail = ''
return locator, tail
| 707,874
|
Make a directory.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
|
def make_dir(self, path, relative=False):
if not relative:
path = self.relpath(path)
self._make_dir(self.get_client_kwargs(self.ensure_dir_path(
path, relative=True)))
| 707,875
|
Remove an object.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
|
def remove(self, path, relative=False):
if not relative:
path = self.relpath(path)
self._remove(self.get_client_kwargs(path))
| 707,876
|
Ensure the path is a dir path.
Should end with '/' except for schemes and locators.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
Returns:
str: dir path.
|
def ensure_dir_path(self, path, relative=False):
if not relative:
rel_path = self.relpath(path)
else:
rel_path = path
# Locator
if self.is_locator(rel_path, relative=True):
path = path.rstrip('/')
# Directory
elif rel_path:
path = path.rstrip('/') + '/'
# else: root
return path
| 707,877
|
List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): If True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
|
def list_objects(self, path='', relative=False, first_level=False,
max_request_entries=None):
entries = 0
max_request_entries_arg = None
if not relative:
path = self.relpath(path)
# From root
if not path:
locators = self._list_locators()
# Yields locators
if first_level:
for locator in locators:
entries += 1
yield locator
if entries == max_request_entries:
return
return
# Yields each locator's objects
for loc_path, loc_header in locators:
# Yields locator itself
loc_path = loc_path.strip('/')
entries += 1
yield loc_path, loc_header
if entries == max_request_entries:
return
# Yields locator content if we have read access to it
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
try:
for obj_path, obj_header in self._list_objects(
self.get_client_kwargs(loc_path), '',
max_request_entries_arg):
entries += 1
yield ('/'.join((loc_path, obj_path.lstrip('/'))),
obj_header)
if entries == max_request_entries:
return
except ObjectPermissionError:
# No read access to locator
continue
return
# From locator or sub directory
locator, path = self.split_locator(path)
if first_level:
seen = set()
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
for obj_path, header in self._list_objects(
self.get_client_kwargs(locator), path, max_request_entries_arg):
if path:
try:
obj_path = obj_path.split(path, 1)[1]
except IndexError:
# Not sub path of path
continue
obj_path = obj_path.lstrip('/')
# Skips parent directory
if not obj_path:
continue
# Yields first level locator objects only
if first_level:
# Directory
try:
obj_path, _ = obj_path.strip('/').split('/', 1)
obj_path += '/'
# Avoids using the header of the object instead of the
# non-existing header of the directory, which only exists
# virtually in the object path.
header = dict()
# File
except ValueError:
pass
if obj_path not in seen:
entries += 1
yield obj_path, header
if entries == max_request_entries:
return
seen.add(obj_path)
# Yields locator objects
else:
entries += 1
yield obj_path, header
if entries == max_request_entries:
return
| 707,878
|
Get the status of an object.
Args:
path (str): File path or URL.
client_kwargs (dict): Client arguments.
header (dict): Object header.
Returns:
os.stat_result: Stat result object
|
def stat(self, path=None, client_kwargs=None, header=None):
# Should contain at least the strict minimum of os.stat_result
stat = OrderedDict((
("st_mode", 0), ("st_ino", 0), ("st_dev", 0), ("st_nlink", 0),
("st_uid", 0), ("st_gid", 0), ("st_size", 0), ("st_atime", 0),
("st_mtime", 0), ("st_ctime", 0)))
# Populate standard os.stat_result values with object header content
header = self.head(path, client_kwargs, header)
for key, method in (
('st_size', self._getsize_from_header),
('st_ctime', self._getctime_from_header),
('st_mtime', self._getmtime_from_header),):
try:
stat[key] = int(method(header))
except UnsupportedOperation:
continue
# File mode
if self.islink(path=path, header=header):
# Symlink
stat['st_mode'] = S_IFLNK
elif ((not path or path[-1] == '/' or self.is_locator(path)) and not
stat['st_size']):
# Directory
stat['st_mode'] = S_IFDIR
else:
# File
stat['st_mode'] = S_IFREG
# Add storage specific keys
sub = self._CHAR_FILTER.sub
for key, value in tuple(header.items()):
stat['st_' + sub('', key.lower())] = value
# Convert to "os.stat_result" like object
stat_result = namedtuple('stat_result', tuple(stat))
stat_result.__name__ = 'os.stat_result'
stat_result.__module__ = 'pycosio'
return stat_result(**stat)
| 707,879
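The conversion at the end relies on building a namedtuple whose fields mirror the collected dict, which lets storage-specific keys ride along with the standard os.stat_result fields. A minimal sketch of that trick:

from collections import OrderedDict, namedtuple
from stat import S_IFREG

fields = OrderedDict((
    ('st_mode', S_IFREG), ('st_size', 1024), ('st_mtime', 1500000000),
    ('st_metadata', {'color': 'blue'})))  # storage-specific extra key

stat_result = namedtuple('stat_result', tuple(fields))
result = stat_result(**fields)
print(result.st_size, result.st_metadata)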
|
Return True if path is an existing directory.
Equivalent to "os.path.isdir".
Args:
path (path-like object): Path or URL.
Returns:
bool: True if directory exists.
|
def isdir(path):
system = get_instance(path)
# User may use directory path without trailing '/'
# like on standard file systems
return system.isdir(system.ensure_dir_path(path))
| 707,889
|
Return True if both pathname arguments refer to the same file or directory.
Equivalent to "os.path.samefile".
Args:
path1 (path-like object): Path or URL.
path2 (path-like object): Path or URL.
Returns:
bool: True if same file or directory.
|
def samefile(path1, path2):
# Handles path-like objects and checks whether each path is on storage
path1, path1_is_storage = format_and_is_storage(path1)
path2, path2_is_storage = format_and_is_storage(path2)
# Local files: Redirects to "os.path.samefile"
if not path1_is_storage and not path2_is_storage:
return os_path_samefile(path1, path2)
# One path is local, the other storage
if not path1_is_storage or not path2_is_storage:
return False
with handle_os_exceptions():
# Paths don't use same storage
system = get_instance(path1)
if system is not get_instance(path2):
return False
# Relative paths are different
elif system.relpath(path1) != system.relpath(path2):
return False
# Same files
return True
| 707,891
|
Split the path into a pair (drive, tail) where drive is either a
mount point or the empty string. On systems which do not use drive
specifications, drive will always be the empty string.
In all cases, drive + tail will be the same as path.
Equivalent to "os.path.splitdrive".
Args:
path (path-like object): Path or URL.
Returns:
tuple of str: drive, tail.
|
def splitdrive(path):
relative = get_instance(path).relpath(path)
drive = path.rsplit(relative, 1)[0]
if drive and not drive[-2:] == '//':
# Keep "/" tail side
relative = '/' + relative
drive = drive.rstrip('/')
return drive, relative
| 707,892
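A worked example of the string manipulation with a hypothetical URL root; the invariant drive + tail == path is preserved:

path = 'https://host.example.com/container/dir/key'
relative = 'container/dir/key'          # what relpath() would return here

drive = path.rsplit(relative, 1)[0]      # 'https://host.example.com/'
if drive and not drive[-2:] == '//':
    relative = '/' + relative            # keep "/" on the tail side
    drive = drive.rstrip('/')

print((drive, relative))  # ('https://host.example.com', '/container/dir/key')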
|
Return a list containing the names of the entries in the directory given by
path.
Equivalent to "os.listdir".
Args:
path (path-like object): Path or URL.
Returns:
list of str: Entries names.
|
def listdir(path='.'):
return [name.rstrip('/') for name, _ in
get_instance(path).list_objects(path, first_level=True)]
| 707,896
|
Remove a file.
Equivalent to "os.remove" and "os.unlink".
Args:
path (path-like object): Path or URL.
dir_fd: directory descriptors;
see the os.remove() description for how it is interpreted.
Not supported on cloud storage objects.
|
def remove(path, dir_fd=None):
system = get_instance(path)
# Only support files
if system.is_locator(path) or path[-1] == '/':
raise is_a_directory_error("Is a directory: '%s'" % path)
# Remove
system.remove(path)
| 707,899
|
Remove a directory.
Equivalent to "os.rmdir".
Args:
path (path-like object): Path or URL.
dir_fd: directory descriptors;
see the os.rmdir() description for how it is interpreted.
Not supported on cloud storage objects.
|
def rmdir(path, dir_fd=None):
system = get_instance(path)
system.remove(system.ensure_dir_path(path))
| 707,900
|
scandir generator
Args:
is_bytes (bool): True if DirEntry must handle path as bytes.
scandir_path (str): Path.
system (pycosio._core.io_system.SystemBase subclass):
Storage system.
Yields:
DirEntry: Directory entries
|
def _scandir_generator(is_bytes, scandir_path, system):
with handle_os_exceptions():
for name, header in system.list_objects(scandir_path, first_level=True):
yield DirEntry(
scandir_path=scandir_path, system=system, name=name,
header=header, bytes_path=is_bytes)
| 707,902
|
Should only be instantiated by "scandir".
Args:
scandir_path (str): scandir path argument.
system (pycosio._core.io_system.SystemBase subclass):
Storage system.
name (str): Name of the object relative to "scandir_path".
header (dict): Object header
bytes_path (bool): True if path must be returned as bytes.
|
def __init__(self, scandir_path, system, name, header, bytes_path):
self._cache = dict()
self._system = system
self._name = name
self._header = header
self._path = ''.join((
scandir_path if scandir_path[-1] == '/' else (scandir_path + '/'),
name))
self._bytes_path = bytes_path
| 707,903
|
Return True if this entry is a directory or a symbolic link pointing to
a directory; return False if the entry is or points to any other kind
of file, or if it doesn’t exist anymore.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Not supported on cloud storage objects.
Returns:
bool: True if directory exists.
|
def is_dir(self, follow_symlinks=True):
try:
return (self._system.isdir(
path=self._path, client_kwargs=self._client_kwargs,
virtual_dir=False) or
# Some directories only exist virtually in the object path and
# don't have headers.
bool(S_ISDIR(self.stat().st_mode)))
except ObjectPermissionError:
# The directory was listed, but we cannot head it or access its
# content
return True
| 707,906
|
Return True if this entry is a file or a symbolic link pointing to a
file; return False if the entry is or points to a directory or other
non-file entry, or if it doesn’t exist anymore.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Not supported on cloud storage objects.
Returns:
bool: True if the entry is a file.
|
def is_file(self, follow_symlinks=True):
return self._system.isfile(
path=self._path, client_kwargs=self._client_kwargs)
| 707,907
|
Return a stat_result object for this entry.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Not supported on cloud storage objects.
Returns:
os.stat_result: Stat result object
|
def stat(self, follow_symlinks=True):
return self._system.stat(
path=self._path, client_kwargs=self._client_kwargs,
header=self._header)
| 707,908
|
Return bytes from the stream without advancing the position.
Args:
size (int): Number of bytes to read. -1 to read the full
stream.
Returns:
bytes: bytes read
|
def peek(self, size=-1):
if not self._readable:
raise UnsupportedOperation('read')
with self._seek_lock:
self._raw.seek(self._seek)
return self._raw._peek(size)
| 707,913
|
Read and return up to size bytes, with at most one call
to the underlying raw stream's read method.
Args:
size (int): Number of bytes to read. -1 to read the
stream until end.
Returns:
bytes: Object content
|
def read(self, size=-1):
if not self._readable:
raise UnsupportedOperation('read')
# Checks if EOF
if self._seek == self._size:
return b''
# Returns existing buffer with no copy
if size == self._buffer_size:
queue_index = self._seek
# Starts initial preloading on first call
if queue_index == 0:
self._preload_range()
# Get buffer from future
with handle_os_exceptions():
buffer = self._read_queue.pop(queue_index).result()
# Append another buffer preload at end of queue
buffer_size = self._buffer_size
index = queue_index + buffer_size * self._max_buffers
if index < self._size:
self._read_queue[index] = self._workers.submit(
self._read_range, index, index + buffer_size)
# Update seek
self._seek += buffer_size
else:
self._seek = self._size
return buffer
# Uses a preallocated buffer
if size != -1:
buffer = bytearray(size)
# Uses a mutable buffer
else:
buffer = bytearray()
read_size = self.readinto(buffer)
return memoryview(buffer)[:read_size].tobytes()
| 707,915
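The fast path above serves whole-buffer reads straight from a queue of futures keyed by stream offset, and schedules a new range read so that up to max_buffers requests stay in flight. A simplified standalone sketch of that read-ahead queue over an in-memory bytes object (names are illustrative, not the class's real attributes):

from concurrent.futures import ThreadPoolExecutor

DATA = bytes(range(256)) * 64          # stands in for a remote object
BUFFER_SIZE = 1024
MAX_BUFFERS = 4

def read_range(start, end):
    return DATA[start:end]             # stands in for one ranged request

workers = ThreadPoolExecutor(4)
queue = {i: workers.submit(read_range, i, i + BUFFER_SIZE)
         for i in range(0, BUFFER_SIZE * MAX_BUFFERS, BUFFER_SIZE)}

seek = 0
chunks = []
while seek < len(DATA):
    chunk = queue.pop(seek).result()   # block only on the chunk we need
    nxt = seek + BUFFER_SIZE * MAX_BUFFERS
    if nxt < len(DATA):
        # top the queue back up so MAX_BUFFERS reads stay in flight
        queue[nxt] = workers.submit(read_range, nxt, nxt + BUFFER_SIZE)
    chunks.append(chunk)
    seek += BUFFER_SIZE

workers.shutdown()
assert b''.join(chunks) == DATA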
|
Read bytes into a pre-allocated, writable bytes-like object b,
and return the number of bytes read.
Args:
b (bytes-like object): buffer.
Returns:
int: number of bytes read
|
def readinto(self, b):
if not self._readable:
raise UnsupportedOperation('read')
with self._seek_lock:
# Gets seek
seek = self._seek
# Initializes queue
queue = self._read_queue
if seek == 0:
# Starts initial preloading on first call
self._preload_range()
# Initializes read data buffer
size = len(b)
if size:
# Preallocated buffer:
# Use memory view to avoid copies
b_view = memoryview(b)
size_left = size
else:
# Dynamic buffer:
# Can't avoid copy, read until EOF
b_view = b
size_left = -1
b_end = 0
# Starts reading
buffer_size = self._buffer_size
while size_left > 0 or size_left == -1:
# Finds buffer position in queue and buffer seek
start = seek % buffer_size
queue_index = seek - start
# Gets preloaded buffer
try:
buffer = queue[queue_index]
except KeyError:
# EOF
break
# Get buffer from future
with handle_os_exceptions():
try:
queue[queue_index] = buffer = buffer.result()
# Already evaluated
except AttributeError:
pass
buffer_view = memoryview(buffer)
data_size = len(buffer)
# Checks if end of file reached
if not data_size:
break
# Gets theoretical range to copy
if size_left != -1:
end = start + size_left
else:
end = data_size - start
# Checks for end of buffer
if end >= data_size:
# Adjusts range to copy
end = data_size
# Removes consumed buffer from queue
del queue[queue_index]
# Append another buffer preload at end of queue
index = queue_index + buffer_size * self._max_buffers
if index < self._size:
queue[index] = self._workers.submit(
self._read_range, index, index + buffer_size)
# Gets read size, updates seek and updates size left
read_size = end - start
if size_left != -1:
size_left -= read_size
seek += read_size
# Defines read buffer range
b_start = b_end
b_end = b_start + read_size
# Copy data from preload buffer to read buffer
b_view[b_start:b_end] = buffer_view[start:end]
# Updates seek and sync raw
self._seek = seek
self._raw.seek(seek)
# Returns read size
return b_end
| 707,916
|
Write the given bytes-like object, b, to the underlying raw stream,
and return the number of bytes written.
Args:
b (bytes-like object): Bytes to write.
Returns:
int: The number of bytes written.
|
def write(self, b):
if not self._writable:
raise UnsupportedOperation('write')
size = len(b)
b_view = memoryview(b)
size_left = size
buffer_size = self._buffer_size
max_buffers = self._max_buffers
with self._seek_lock:
end = self._buffer_seek
buffer_view = memoryview(self._write_buffer)
while size_left > 0:
# Get range to copy
start = end
end = start + size_left
if end > buffer_size:
# End of buffer, need flush after copy
end = buffer_size
flush = True
else:
flush = False
buffer_range = end - start
# Update remaining data size
b_start = size - size_left
size_left -= buffer_range
# Copy data
buffer_view[start:end] = b_view[b_start: b_start + buffer_range]
# Flush buffer if needed
if flush:
# Update buffer seek
# Needed to write the correct amount of data
self._buffer_seek = end
# Update global seek, this is the number
# of buffers flushed
self._seek += 1
# Block flush based on maximum number of
# buffers in flush progress
if max_buffers:
futures = self._write_futures
flush_wait = self._FLUSH_WAIT
while sum(1 for future in futures
if not future.done()) >= max_buffers:
sleep(flush_wait)
# Flush
with handle_os_exceptions():
self._flush()
# Clear buffer
self._write_buffer = bytearray(buffer_size)
buffer_view = memoryview(self._write_buffer)
end = 0
# Update buffer seek
self._buffer_seek = end
return size
| 707,918
|
Construct a Queryable from any iterable.
Args:
iterable: Any object supporting the iterator protocol.
Raises:
TypeError: if iterable does not support the iterator protocol.
|
def __init__(self, iterable):
if not is_iterable(iterable):
raise TypeError("Cannot construct Queryable from non-iterable {0}"
.format(str(type(iterable))[7: -2]))
self._iterable = iterable
| 708,018
|
Omit elements from the start for which a predicate is True.
Note: This method uses deferred execution.
Args:
predicate: A single argument predicate function.
Returns:
A Queryable over the sequence of elements beginning with the first
element for which the predicate returns False.
Raises:
ValueError: If the Queryable is closed().
TypeError: If predicate is not callable.
|
def skip_while(self, predicate):
if self.closed():
raise ValueError("Attempt to call take_while() on a "
"closed Queryable.")
if not is_callable(predicate):
raise TypeError("skip_while() parameter predicate={0} is "
"not callable".format(repr(predicate)))
return self._create(itertools.dropwhile(predicate, self))
| 708,037
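skip_while defers to itertools.dropwhile, so elements are skipped only until the predicate first returns False; later elements are kept even if the predicate would hold again. The underlying behaviour with the standard library alone:

from itertools import dropwhile

print(list(dropwhile(lambda x: x < 3, [1, 2, 3, 1, 2])))  # [3, 1, 2]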
|
Concatenates two sequences.
Note: This method uses deferred execution.
Args:
second_iterable: The sequence to concatenate onto this sequence.
Returns:
A Queryable over the concatenated sequences.
Raises:
ValueError: If the Queryable is closed().
TypeError: If second_iterable is not in fact iterable.
|
def concat(self, second_iterable):
if self.closed():
raise ValueError("Attempt to call concat() on a closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute concat() with second_iterable of "
"non-iterable {0}".format(str(type(second_iterable))[7: -1]))
return self._create(itertools.chain(self, second_iterable))
| 708,038
|
Return the element at ordinal index.
Note: This method uses immediate execution.
Args:
index: The index of the element to be returned.
Returns:
The element at ordinal index in the source sequence.
Raises:
ValueError: If the Queryable is closed().
ValueError: If index is out of range.
|
def element_at(self, index):
if self.closed():
raise ValueError("Attempt to call element_at() on a "
"closed Queryable.")
if index < 0:
raise OutOfRangeError("Attempt to use negative index.")
# Attempt to use __getitem__
try:
return self._iterable[index]
except IndexError:
raise OutOfRangeError("Index out of range.")
except TypeError:
pass
# Fall back to iterating
for i, item in enumerate(self):
if i == index:
return item
raise OutOfRangeError("element_at(index) out of range.")
| 708,041
|
Determines whether the sequence contains a particular value.
Execution is immediate. Depending on the type of the sequence, all or
none of the sequence may be consumed by this operation.
Note: This method uses immediate execution.
Args:
value: The value to test for membership of the sequence
Returns:
True if value is in the sequence, otherwise False.
Raises:
ValueError: If the Queryable has been closed.
|
def contains(self, value, equality_comparer=operator.eq):
if self.closed():
raise ValueError("Attempt to call contains() on a "
"closed Queryable.")
if not is_callable(equality_comparer):
raise TypeError("contains() parameter equality_comparer={0} is "
"not callable".format(repr(equality_comparer)))
if equality_comparer is operator.eq:
return value in self._iterable
for item in self:
if equality_comparer(value, item):
return True
return False
| 708,049
|
Create an OrderedQueryable.
Args:
iterable: The iterable sequence to be ordered.
order: +1 for ascending, -1 for descending.
func: The function to select the sorting key.
|
def __init__(self, iterable, order, func):
assert abs(order) == 1, 'order argument must be +1 or -1'
super(OrderedQueryable, self).__init__(iterable)
self._funcs = [(order, func)]
| 708,095
|
Construct a Lookup with a sequence of (key, value) tuples.
Args:
key_value_pairs:
An iterable over 2-tuples each containing a key, value pair.
|
def __init__(self, key_value_pairs):
# Maintain an ordered dictionary of groups represented as lists
self._dict = OrderedDict()
for key, value in key_value_pairs:
if key not in self._dict:
self._dict[key] = []
self._dict[key].append(value)
# Replace each list with a Grouping
for key, value in iteritems(self._dict):
grouping = Grouping(key, value)
self._dict[key] = grouping
super(Lookup, self).__init__(self._dict)
| 708,098
|
The sequence corresponding to a given key, or an empty sequence if
there are no values corresponding to that key.
Args:
key: The key of the group to be returned.
Returns:
The Grouping corresponding to the supplied key.
|
def __getitem__(self, key):
if key in self._dict:
return self._dict[key]
return Grouping(key, [])
| 708,099
|
Create a Grouping with a given key and a collection of members.
Args:
key: The key corresponding to this Grouping
items: An iterable collection of the members of the group.
|
def __init__(self, key, items):
self._key = key
sequence = list(items)
super(Grouping, self).__init__(sequence)
| 708,103
|
Determine value equality with another grouping.
Args:
rhs: The object on the right-hand-side of the comparison must
support a property called 'key' and be iterable.
Returns:
True if the keys and sequences are equal, otherwise False.
|
def __eq__(self, rhs):
return self.key == rhs.key and self.sequence_equal(rhs)
| 708,104
|
Determine value inequality with another grouping.
Args:
rhs: The object on the right-hand-side of the comparison must
support a property called 'key' and be iterable.
Returns:
True if the keys or sequences are not equal, otherwise False.
|
def __ne__(self, rhs):
return self.key != rhs.key or not self.sequence_equal(rhs)
| 708,105
|
Generates in sequence the integral numbers within a range.
Note: This method uses deferred execution.
Args:
start: The first integer in the sequence.
count: The number of sequential integers to generate.
Returns:
A Queryable over the specified range of integers.
Raises:
ValueError: If count is negative.
|
def integers(start, count):
if count < 0:
raise ValueError("integers() count cannot be negative")
return query(irange(start, start + count))
| 708,108
|
Generate a sequence with one repeated value.
Note: This method uses deferred execution.
Args:
element: The value to be repeated.
count: The number of times to repeat the value.
Raises:
ValueError: If the count is negative.
|
def repeat(element, count):
if count < 0:
raise ValueError("repeat() count cannot be negative")
return query(itertools.repeat(element, count))
| 708,109
|
Create a selector callable from the supplied value.
Args:
value: If it is callable, it is returned unchanged. If a string is
used, an attribute selector is created. If an integer is used,
a key selector is created.
Returns:
A callable selector based on the supplied value.
Raises:
ValueError: If a selector cannot be created from the value.
|
def make_selector(value):
if is_callable(value):
return value
if is_string(value):
return a_(value)
raise ValueError("Unable to create callable selector from '{0}'".format(value))
| 708,133
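The a_ helper builds an attribute selector from the supplied string, much like operator.attrgetter. A rough standalone equivalent of the dispatch, shown only for illustration (a_ is defined elsewhere in the source, and the integer case mentioned in the docstring is not shown here):

from operator import attrgetter

def make_selector(value):
    if callable(value):
        return value
    if isinstance(value, str):
        return attrgetter(value)   # attribute selector, like a_(value)
    raise ValueError("Unable to create callable selector from %r" % (value,))

class Point:
    def __init__(self, x):
        self.x = x

print(make_selector('x')(Point(5)))                 # 5
print(make_selector(lambda p: p.x * 2)(Point(5)))   # 10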
|
Create unique groups for groupby loop
Args:
groupby_cols: list of column names to group over.
bool_arr: optional boolean filter array (None to aggregate all rows).
Returns:
carray: (carray_factor)
int: (nr_groups) the number of resulting groups
int: (skip_key)
|
def make_group_index(self, groupby_cols, bool_arr):
factor_list, values_list = self.factorize_groupby_cols(groupby_cols)
# create unique groups for groupby loop
if len(factor_list) == 0:
# no columns to groupby over, so directly aggregate the measure
# columns to 1 total
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.zeros(len(self), dtype='int64', rootdir=tmp_rootdir, mode='w')
carray_values = ['Total']
elif len(factor_list) == 1:
# single column groupby, the groupby output column
# here is 1:1 to the values
carray_factor = factor_list[0]
carray_values = values_list[0]
else:
# multi column groupby
# first combine the factorized columns to single values
if self.group_cache_valid(col_list=groupby_cols):
# there is a group cache that we can use
col_rootdir = os.path.join(self.rootdir, self.create_group_base_name(groupby_cols))
col_factor_rootdir = col_rootdir + '.factor'
carray_factor = bcolz.carray(rootdir=col_factor_rootdir)
col_values_rootdir = col_rootdir + '.values'
carray_values = bcolz.carray(rootdir=col_values_rootdir)
else:
# create a brand new groupby col combination
carray_factor, carray_values = \
self.create_group_column_factor(factor_list, groupby_cols, cache=self.auto_cache)
nr_groups = len(carray_values)
skip_key = None
if bool_arr is not None:
# make all non relevant combinations -1
tmp_rootdir = self.create_tmp_rootdir()
carray_factor = bcolz.eval(
'(factor + 1) * bool - 1',
user_dict={'factor': carray_factor, 'bool': bool_arr}, rootdir=tmp_rootdir, mode='w')
# now check how many unique values there are left
tmp_rootdir = self.create_tmp_rootdir()
labels = bcolz.carray([], dtype='int64', expectedlen=len(carray_factor), rootdir=tmp_rootdir, mode='w')
carray_factor, values = ctable_ext.factorize(carray_factor, labels)
# values might contain one value too much (-1) (no direct lookup
# possible because values is a reversed dict)
filter_check = \
[key for key, value in values.items() if value == -1]
if filter_check:
skip_key = filter_check[0]
# the new nr of groups depends on the outcome after filtering
nr_groups = len(values)
# using nr_groups as a total length might be one off due to the skip_key
# (skipping a row in aggregation)
# but that is okay normally
if skip_key is None:
# if we shouldn't skip a row, set it at the first row after the total number of groups
skip_key = nr_groups
return carray_factor, nr_groups, skip_key
| 708,496
|
Encrypt data with AES-CBC and sign it with HMAC-SHA256
Arguments:
base64_encryption_key (str): a base64-encoded string containing an AES encryption key
and HMAC signing key as generated by generate_encryption_key()
data (str): a byte string containing the data to be encrypted
Returns:
str: the encrypted data as a byte string with the HMAC signature appended to the end
|
def aes_encrypt(base64_encryption_key, data):
if isinstance(data, text_type):
data = data.encode("UTF-8")
aes_key_bytes, hmac_key_bytes = _extract_keys(base64_encryption_key)
data = _pad(data)
iv_bytes = os.urandom(AES_BLOCK_SIZE)
cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)
data = iv_bytes + cipher.encrypt(data) # prepend init vector
hmac_signature = hmac.new(hmac_key_bytes, data, hashlib.sha256).digest()
return as_base64(data + hmac_signature)
| 708,531
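The output layout is base64(IV || ciphertext || HMAC-SHA256). A hedged sketch of the matching verify-then-decrypt step; _extract_keys and AES_BLOCK_SIZE come from the same module as aes_encrypt, while _unpad is a hypothetical counterpart of _pad:

import hashlib
import hmac
from base64 import b64decode

from Crypto.Cipher import AES  # same import family as aes_encrypt

def aes_decrypt(base64_encryption_key, encrypted):
    aes_key_bytes, hmac_key_bytes = _extract_keys(base64_encryption_key)
    raw = b64decode(encrypted)
    data, signature = raw[:-32], raw[-32:]          # HMAC-SHA256 is 32 bytes
    expected = hmac.new(hmac_key_bytes, data, hashlib.sha256).digest()
    if not hmac.compare_digest(signature, expected):
        raise ValueError("HMAC verification failed")
    iv_bytes, ciphertext = data[:AES_BLOCK_SIZE], data[AES_BLOCK_SIZE:]
    cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)
    return _unpad(cipher.decrypt(ciphertext))       # _unpad: hypothetical inverse of _pad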
|
For those with older versions of Python, a pure-Python
implementation of Python 2.7's :meth:`~datetime.timedelta.total_seconds`.
Args:
td (datetime.timedelta): The timedelta to convert to seconds.
Returns:
float: total number of seconds
>>> td = timedelta(days=4, seconds=33)
>>> total_seconds(td)
345633.0
|
def total_seconds(td):
a_milli = 1000000.0
td_ds = td.seconds + (td.days * 86400) # 24 * 60 * 60
td_micro = td.microseconds + (td_ds * a_milli)
return td_micro / a_milli
| 708,636
|
Calls each element of a sequence to invoke its side effects.
Args:
seq: An iterable of zero-argument callables.
Returns: None
|
def call_each(seq):
try:
reduce(lambda _, y: y(), seq)
except TypeError as e:
if text_type(e) != "reduce() of empty sequence with no initial value":
raise
| 708,707
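The reduce call folds the sequence pairwise, so each callable is invoked in order and only the last return value is kept; the TypeError branch exists because reduce is given no initial value. A tiny demonstration of the underlying trick:

from functools import reduce

calls = []
seq = [lambda i=i: calls.append(i) for i in range(3)]
reduce(lambda _, f: f(), seq, None)   # an initial value also avoids the empty-sequence error
print(calls)                          # [0, 1, 2]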
|
Pull out the metadata about each packet from the input_stream
Args:
None
Returns:
generator (dictionary): a generator that contains packet meta data in the form of a dictionary
|
def packet_meta_data(self):
# For each packet in the pcap process the contents
for item in self.input_stream:
# Output object
output = {}
# Grab the fields I need
timestamp = item['timestamp']
buf = item['raw_buf']
# Convert the timestamp to a UTC datetime
output['timestamp'] = datetime.datetime.utcfromtimestamp(timestamp)
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
output['eth'] = {'src': eth.src, 'dst': eth.dst, 'type':eth.type, 'len': len(eth)}
# Grab packet data
packet = eth.data
# Packet Type ('EtherType') (IP, ARP, PPPoE, IP6... see http://en.wikipedia.org/wiki/EtherType)
if hasattr(packet, 'data'):
output['packet'] = {'type': packet.__class__.__name__, 'data': packet.data}
else:
output['packet'] = {'type': None, 'data': None}
# Is this an IP packet?
if output['packet']['type'] == 'IP':
# Pull out fragment information (flags and offset all packed into off field, so use bitmasks)
df = bool(packet.off & dpkt.ip.IP_DF)
mf = bool(packet.off & dpkt.ip.IP_MF)
offset = packet.off & dpkt.ip.IP_OFFMASK
# Pulling out src, dst, length, fragment info, TTL, checksum and Protocol
output['packet'].update({'src':packet.src, 'dst':packet.dst, 'p': packet.p, 'len':packet.len, 'ttl':packet.ttl,
'df':df, 'mf': mf, 'offset': offset, 'checksum': packet.sum})
# Is this an IPv6 packet?
elif output['packet']['type'] == 'IP6':
# Pulling out the IP6 fields
output['packet'].update({'src':packet.src, 'dst':packet.dst, 'p': packet.p, 'len':packet.plen, 'ttl':packet.hlim})
# If the packet isn't IP or IPV6 just pack it as a dictionary
else:
output['packet'].update(data_utils.make_dict(packet))
# For the transport layer we're going to set the transport to None,
# and hopefully a 'link' upstream will manage the transport functionality
output['transport'] = None
# For the application layer we're going to set the application to None,
# and hopefully a 'link' upstream will manage the application functionality
output['application'] = None
# All done
yield output
| 708,786
|
Method that turns bit flags into a human readable list
Args:
transport (dict): transport info, specifically needs a 'flags' key with bit_flags
Returns:
list: a list of human readable flags (e.g. ['syn_ack', 'fin', 'rst', ...])
|
def _readable_flags(transport):
if 'flags' not in transport:
return None
_flag_list = []
flags = transport['flags']
if flags & dpkt.tcp.TH_SYN:
if flags & dpkt.tcp.TH_ACK:
_flag_list.append('syn_ack')
else:
_flag_list.append('syn')
elif flags & dpkt.tcp.TH_FIN:
if flags & dpkt.tcp.TH_ACK:
_flag_list.append('fin_ack')
else:
_flag_list.append('fin')
elif flags & dpkt.tcp.TH_RST:
_flag_list.append('rst')
elif flags & dpkt.tcp.TH_PUSH:
_flag_list.append('psh')
return _flag_list
| 708,909
|
Follow the dot notation to get the proper field, then perform the action
Args:
data: the data as a dictionary (required to be a dictionary)
key: the key (as dot notation) into the data that gives the field (IP.src)
Returns:
the value of the field(subfield) if it exist, otherwise None
|
def get_value(data, key):
ref = data
try:
for subkey in key.split('.'):
if isinstance(ref, dict):
ref = ref[subkey]
else:
print('CRITICAL: Cannot use subkey %s on non-dictionary element' % subkey)
return None
return ref
# In general KeyErrors are expected
except KeyError:
return None
| 709,018
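A simplified standalone version of the dotted-key walk, showing the present-key, missing-key and non-dictionary cases (the helper name here is illustrative):

def dotted_get(data, key):
    ref = data
    for subkey in key.split('.'):
        if not isinstance(ref, dict):
            return None
        if subkey not in ref:
            return None
        ref = ref[subkey]
    return ref

packet = {'IP': {'src': '10.0.0.1', 'ttl': 64}}
print(dotted_get(packet, 'IP.src'))    # 10.0.0.1
print(dotted_get(packet, 'IP.flags'))  # None (missing key)
print(dotted_get(packet, 'IP.src.x'))  # None (src is not a dict)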
|
The callable makes it possible to include rpcinterface
in a Pyramid application.
Calling ``config.include(twitcher.rpcinterface)`` will result in this
callable being called.
Arguments:
* ``config``: the ``pyramid.config.Configurator`` object.
|
def includeme(config):
settings = config.registry.settings
if asbool(settings.get('twitcher.rpcinterface', True)):
LOGGER.debug('Twitcher XML-RPC Interface enabled.')
# include twitcher config
config.include('twitcher.config')
# using basic auth
config.include('twitcher.basicauth')
# pyramid xml-rpc
# http://docs.pylonsproject.org/projects/pyramid-rpc/en/latest/xmlrpc.html
config.include('pyramid_rpc.xmlrpc')
config.include('twitcher.db')
config.add_xmlrpc_endpoint('api', '/RPC2')
# register xmlrpc methods
config.add_xmlrpc_method(RPCInterface, attr='generate_token', endpoint='api', method='generate_token')
config.add_xmlrpc_method(RPCInterface, attr='revoke_token', endpoint='api', method='revoke_token')
config.add_xmlrpc_method(RPCInterface, attr='revoke_all_tokens', endpoint='api', method='revoke_all_tokens')
config.add_xmlrpc_method(RPCInterface, attr='register_service', endpoint='api', method='register_service')
config.add_xmlrpc_method(RPCInterface, attr='unregister_service', endpoint='api', method='unregister_service')
config.add_xmlrpc_method(RPCInterface, attr='get_service_by_name', endpoint='api', method='get_service_by_name')
config.add_xmlrpc_method(RPCInterface, attr='get_service_by_url', endpoint='api', method='get_service_by_url')
config.add_xmlrpc_method(RPCInterface, attr='clear_services', endpoint='api', method='clear_services')
config.add_xmlrpc_method(RPCInterface, attr='list_services', endpoint='api', method='list_services')
| 709,070
|
Initialize ReverseDNS Class
Args:
domain_postfix: the string to be appended to the ip fields (e.g. IP.src -> IP.src_domain)
|
def __init__(self, domain_postfix='_domain'):
# Call super class init
super(ReverseDNS, self).__init__()
self.domain_postfix = domain_postfix
self.ip_lookup_cache = cache.Cache(timeout=600)
# Set my output
self.output_stream = self.process_for_rdns()
| 709,117
|
Initialize PacketTags Class
Args:
add_tag_methods: a list of additional tag methods (optional, defaults to None)
Note: all methods must take the data dictionary as an argument (e.g. tag_method(data))
|
def __init__(self, add_tag_methods=None):
# Call super class init
super(PacketTags, self).__init__()
# Set up the complete tag dictionary
self.tag_methods = [PacketTags._tag_net_direction, PacketTags._tag_nxdomain]
if add_tag_methods:
self.tag_methods += add_tag_methods
# Set my output
self.output_stream = self.tag_stuff()
| 709,154
|
Execute UNIX command and wait for its completion
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true)
|
def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
# FIXME: move to module level
log = logging.getLogger(__name__)
log.debug("Starting: %s", cmd)
stdout = ""
stderr = ""
if not shell and isinstance(cmd, string_types):
cmd = shlex.split(cmd)
if catch_out:
process = subprocess.Popen(
cmd,
shell=shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
else:
process = subprocess.Popen(cmd, shell=shell, close_fds=True)
stdout, stderr = process.communicate()
if stderr:
log.error("There were errors:\n%s", stderr)
if stdout:
log.debug("Process output:\n%s", stdout)
returncode = process.returncode
log.debug("Process exit code: %s", returncode)
return returncode, stdout, stderr
| 709,344
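A standalone sketch of the same pattern using only the standard library: split the command when no shell is used, capture output when requested, and report the exit code:

import shlex
import subprocess

cmd = shlex.split("echo hello")      # split only when shell=False
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = process.communicate()
print(process.returncode, stdout.decode().strip())  # 0 hello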
|
Create and register metric subscriber,
find metrics for this subscriber (using filter_) and subscribe
Args:
callback (object method): subscriber's callback
filter_ (dict): filter dict
filter sample:
{'type': 'metrics', 'source': 'gun'}
|
def subscribe(self, callback, filter_):
sub_id = "subscriber_{uuid}".format(uuid=uuid.uuid4())
# register subscriber in manager
sub = pd.DataFrame({sub_id: filter_}).T
sub['callback'] = callback
self.subscribers = self.subscribers.append(sub)
# find metrics for subscriber using `filter`
this_subscriber_metrics = self.__filter(self.metrics_meta, filter_)
if this_subscriber_metrics.empty:
logger.debug('Metrics for subscriber %s not found', sub_id)
else:
logger.debug('Found metrics for this subscriber, subscribing...: %s', this_subscriber_metrics)
# attach this sub callback to discovered metrics and select id <-> callbacks
this_subscriber_metrics['callback'] = callback
prepared_callbacks = this_subscriber_metrics[['callback']]
# add this subscriber callbacks to DataManager's callbacks
self.callbacks = self.callbacks.append(prepared_callbacks)
| 709,502
|
Set up logging with default parameters:
* default console logging level is INFO
* ERROR, WARNING and CRITICAL are redirected to stderr
Args:
log_filename (str): if set, will write DEBUG log there
verbose (bool): DEBUG level in console, overrides 'quiet'
quiet (bool): WARNING level in console
|
def init_logging(log_filename, verbose, quiet):
# TODO: consider making one verbosity parameter instead of two mutually exclusive
# TODO: default values for parameters
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
# add file handler if needed
if log_filename:
file_handler = logging.FileHandler(log_filename)
file_handler.setLevel(logging.DEBUG)
# TODO: initialize all formatters in the beginning of this function
file_handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
))
logger.addHandler(file_handler)
# console stdout and stderr handlers
console_handler = logging.StreamHandler(sys.stdout)
stderr_hdl = logging.StreamHandler(sys.stderr)
# formatters
fmt_verbose = logging.Formatter(
fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s",
datefmt='%Y-%m-%d,%H:%M:%S.%f'
)
fmt_regular = logging.Formatter(
"%(asctime)s [%(levelname).4s] [%(filename).8s] %(message)s", "%H:%M:%S")
# set formatters and log levels
if verbose:
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(fmt_verbose)
stderr_hdl.setFormatter(fmt_verbose)
elif quiet:
console_handler.setLevel(logging.WARNING)
console_handler.setFormatter(fmt_regular)
stderr_hdl.setFormatter(fmt_regular)
else:
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(fmt_regular)
stderr_hdl.setFormatter(fmt_regular)
# TODO: do we really need these to be redirected?
# redirect ERROR, WARNING and CRITICAL to stderr
f_err = SingleLevelFilter(logging.ERROR, True)
f_warn = SingleLevelFilter(logging.WARNING, True)
f_crit = SingleLevelFilter(logging.CRITICAL, True)
console_handler.addFilter(f_err)
console_handler.addFilter(f_warn)
console_handler.addFilter(f_crit)
logger.addHandler(console_handler)
f_info = SingleLevelFilter(logging.INFO, True)
f_debug = SingleLevelFilter(logging.DEBUG, True)
stderr_hdl.addFilter(f_info)
stderr_hdl.addFilter(f_debug)
logger.addHandler(stderr_hdl)
| 709,767
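The function depends on SingleLevelFilter to keep stdout to INFO/DEBUG and stderr to WARNING and above. The class is not shown in this snippet; a plausible minimal sketch of such a filter (an assumption, not necessarily the project's actual implementation):

import logging

class SingleLevelFilter(logging.Filter):
    """Reject (or pass only) records of one specific level."""

    def __init__(self, passlevel, reject):
        super(SingleLevelFilter, self).__init__()
        self.passlevel = passlevel
        self.reject = reject

    def filter(self, record):
        if self.reject:
            return record.levelno != self.passlevel   # drop this level
        return record.levelno == self.passlevel       # keep only this level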
|
Transform data to a new type.
Args:
transformer_chain: A sequence of (transformer, type) pairs to convert the data.
data: The data to be transformed.
context: The context of the transformations (mutable).
Returns:
The transformed data.
|
def _transform(transformer_chain: Sequence[Tuple[DataTransformer, Type]], data: S, context: PipelineContext = None) -> T:
for transformer, target_type in transformer_chain:
# noinspection PyTypeChecker
data = transformer.transform(target_type, data, context)
return data
| 709,848
|
Initializes a handler for a data sink.
Args:
sink: The data sink.
store_type: The type the data sink stores; items are converted to this type before insertion.
transform: A callable that converts an item into the sink's store type.
|
def __init__(self, sink: DataSink, store_type: Type[S], transform: Callable[[T], S]) -> None:
self._sink = sink
self._store_type = store_type
self._transform = transform
| 709,849
|
Puts an object into the data sink. The object may be transformed into a new type for insertion if necessary.
Args:
item: The object to be inserted into the data sink.
context: The context of the insertion (mutable).
|
def put(self, item: T, context: PipelineContext = None) -> None:
LOGGER.info("Converting item \"{item}\" for sink \"{sink}\"".format(item=item, sink=self._sink))
item = self._transform(data=item, context=context)
LOGGER.info("Puting item \"{item}\" into sink \"{sink}\"".format(item=item, sink=self._sink))
self._sink.put(self._store_type, item, context)
| 709,850
|
Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
items: An iterable (e.g. list) of objects to be inserted into the data sink.
context: The context of the insertions (mutable).
|
def put_many(self, items: Iterable[T], context: PipelineContext = None) -> None:
LOGGER.info("Creating transform generator for items \"{items}\" for sink \"{sink}\"".format(items=items, sink=self._sink))
transform_generator = (self._transform(data=item, context=context) for item in items)
LOGGER.info("Putting transform generator for items \"{items}\" into sink \"{sink}\"".format(items=items, sink=self._sink))
self._sink.put_many(self._store_type, transform_generator, context)
| 709,851
|
Gets a query from the data source.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
|
def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T:
result = self._source.get(self._source_type, deepcopy(query), context)
LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source))
LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result))
for sink in self._before_transform:
sink.put(result, context)
LOGGER.info("Converting result \"{result}\" to request type".format(result=result))
result = self._transform(data=result, context=context)
LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result))
for sink in self._after_transform:
sink.put(result, context)
return result
| 709,853
|
Initializes a data pipeline.
Args:
elements: The data stores and data sinks for this pipeline.
transformers: The data transformers for this pipeline.
|
def __init__(self, elements: Sequence[Union[DataSource, DataSink]], transformers: Iterable[DataTransformer] = None) -> None:
if not elements:
raise ValueError("Elements must be a non-empty sequence of DataSources and DataSinks")
if transformers is None:
transformers = set()
sources = set() # type: Set[DataSource]
sinks = set() # type: Set[DataSink]
targets = [] # type: List[Tuple[DataSource, Set[DataSink]]]
for element in elements:
if isinstance(element, DataSource):
sources.add(element)
targets.append((element, set(sinks)))
if isinstance(element, DataSink):
sinks.add(element)
LOGGER.info("Beginning construction of type graph")
# noinspection PyTypeChecker
self._type_graph = _build_type_graph(sources, sinks, transformers)
LOGGER.info("Completed construction of type graph")
self._sources = targets
self._sinks = sinks
self._get_types = {}
self._put_types = {}
| 709,856
|
Gets a query from the data pipeline.
1) Extracts the query from the sequence of data sources.
2) Inserts the result into the data sinks (if appropriate).
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
|
def get(self, type: Type[T], query: Mapping[str, Any]) -> T:
LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._get_types[type]
except KeyError:
try:
LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._get_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
if handlers is None:
raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
for handler in handlers:
try:
return handler.get(query, context)
except NotFoundError:
pass
raise NotFoundError("No source returned a query result!")
| 709,866
|
Puts an object into the data pipeline. The object may be transformed into a new type for insertion if necessary.
Args:
item: The object to be inserted into the data pipeline.
|
def put(self, type: Type[T], item: T) -> None:
LOGGER.info("Getting SinkHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._put_types[type]
except KeyError:
try:
LOGGER.info("Building new SinkHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._put_handlers(type)
except NoConversionError:
handlers = None
self._put_types[type] = handlers
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Sending item \"{item}\" to SourceHandlers".format(item=item))
if handlers is not None:
for handler in handlers:
handler.put(item, context)
| 709,868
|
Puts multiple objects of the same type into the data pipeline. The objects may be transformed into a new type for insertion if necessary.
Args:
items: An iterable (e.g. list) of objects to be inserted into the data pipeline.
|
def put_many(self, type: Type[T], items: Iterable[T]) -> None:
LOGGER.info("Getting SinkHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._put_types[type]
except KeyError:
try:
LOGGER.info("Building new SinkHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._put_handlers(type)
except NoConversionError:
handlers = None
self._put_types[type] = handlers
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Sending items \"{items}\" to SourceHandlers".format(items=items))
if handlers is not None:
items = list(items)
for handler in handlers:
handler.put_many(items, context)
| 709,869
|
Transforms an object to a new type.
Args:
target_type: The type to be converted to.
value: The object to be transformed.
context: The context of the transformation (mutable).
|
def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
pass
| 709,905
|
Gets a query from the data source.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
|
def get(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> T:
pass
| 709,971
|
Gets a query from the data source, which contains a request for multiple objects.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
Returns:
The requested objects.
|
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
| 709,972
|
Decompiles an AST into Python code.
Arguments:
- ast: code to decompile, using AST objects as generated by the standard library ast module
- indentation: indentation level of lines
- line_length: if lines become longer than this length, ast_decompiler will try to break them up
(but it will not necessarily succeed in all cases)
- starting_indentation: indentation level at which to start producing code
|
def decompile(ast, indentation=4, line_length=100, starting_indentation=0):
decompiler = Decompiler(
indentation=indentation,
line_length=line_length,
starting_indentation=starting_indentation,
)
return decompiler.run(ast)
| 710,024
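A typical round trip: parse source into an AST with the standard library, then hand the tree to decompile (assuming the function above is importable, e.g. as ast_decompiler.decompile):

import ast

from ast_decompiler import decompile  # the function defined above

tree = ast.parse("def add(a, b):\n    return a + b\n")
print(decompile(tree, indentation=2))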
|
Puts multiple objects of the same type into the data sink.
Args:
type: The type of the objects being inserted.
items: The objects to be inserted.
context: The context of the insertion (mutable).
|
def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:
pass
| 710,156
|
Constructor.
Args:
channel: A grpc.Channel.
|
def __init__(self, channel):
self.getAdaptorDescriptions = channel.unary_unary(
'/xenon.FileSystemService/getAdaptorDescriptions',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystemAdaptorDescriptions.FromString,
)
self.getAdaptorNames = channel.unary_unary(
'/xenon.FileSystemService/getAdaptorNames',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorNames.FromString,
)
self.getAdaptorDescription = channel.unary_unary(
'/xenon.FileSystemService/getAdaptorDescription',
request_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystemAdaptorDescription.FromString,
)
self.create = channel.unary_unary(
'/xenon.FileSystemService/create',
request_serializer=xenon_dot_proto_dot_xenon__pb2.CreateFileSystemRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
)
self.getAdaptorName = channel.unary_unary(
'/xenon.FileSystemService/getAdaptorName',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.FromString,
)
self.getLocation = channel.unary_unary(
'/xenon.FileSystemService/getLocation',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Location.FromString,
)
self.getCredential = channel.unary_unary(
'/xenon.FileSystemService/getCredential',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetCredentialResponse.FromString,
)
self.getProperties = channel.unary_unary(
'/xenon.FileSystemService/getProperties',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Properties.FromString,
)
self.createDirectories = channel.unary_unary(
'/xenon.FileSystemService/createDirectories',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.createDirectory = channel.unary_unary(
'/xenon.FileSystemService/createDirectory',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.createFile = channel.unary_unary(
'/xenon.FileSystemService/createFile',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.createSymbolicLink = channel.unary_unary(
'/xenon.FileSystemService/createSymbolicLink',
request_serializer=xenon_dot_proto_dot_xenon__pb2.CreateSymbolicLinkRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.copy = channel.unary_unary(
'/xenon.FileSystemService/copy',
request_serializer=xenon_dot_proto_dot_xenon__pb2.CopyRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyOperation.FromString,
)
self.cancel = channel.unary_unary(
'/xenon.FileSystemService/cancel',
request_serializer=xenon_dot_proto_dot_xenon__pb2.CopyOperationRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.FromString,
)
self.getStatus = channel.unary_unary(
'/xenon.FileSystemService/getStatus',
request_serializer=xenon_dot_proto_dot_xenon__pb2.CopyOperationRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.FromString,
)
self.rename = channel.unary_unary(
'/xenon.FileSystemService/rename',
request_serializer=xenon_dot_proto_dot_xenon__pb2.RenameRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.delete = channel.unary_unary(
'/xenon.FileSystemService/delete',
request_serializer=xenon_dot_proto_dot_xenon__pb2.DeleteRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.exists = channel.unary_unary(
'/xenon.FileSystemService/exists',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Is.FromString,
)
self.readFromFile = channel.unary_stream(
'/xenon.FileSystemService/readFromFile',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.ReadFromFileResponse.FromString,
)
self.writeToFile = channel.stream_unary(
'/xenon.FileSystemService/writeToFile',
request_serializer=xenon_dot_proto_dot_xenon__pb2.WriteToFileRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.appendToFile = channel.stream_unary(
'/xenon.FileSystemService/appendToFile',
request_serializer=xenon_dot_proto_dot_xenon__pb2.AppendToFileRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.list = channel.unary_stream(
'/xenon.FileSystemService/list',
request_serializer=xenon_dot_proto_dot_xenon__pb2.ListRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.PathAttributes.FromString,
)
self.getAttributes = channel.unary_unary(
'/xenon.FileSystemService/getAttributes',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.PathAttributes.FromString,
)
self.getWorkingDirectory = channel.unary_unary(
'/xenon.FileSystemService/getWorkingDirectory',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Path.FromString,
)
self.setWorkingDirectory = channel.unary_unary(
'/xenon.FileSystemService/setWorkingDirectory',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.setPosixFilePermissions = channel.unary_unary(
'/xenon.FileSystemService/setPosixFilePermissions',
request_serializer=xenon_dot_proto_dot_xenon__pb2.SetPosixFilePermissionsRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.readSymbolicLink = channel.unary_unary(
'/xenon.FileSystemService/readSymbolicLink',
request_serializer=xenon_dot_proto_dot_xenon__pb2.PathRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Path.FromString,
)
self.getPathSeparator = channel.unary_unary(
'/xenon.FileSystemService/getPathSeparator',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetPathSeparatorResponse.FromString,
)
self.isOpen = channel.unary_unary(
'/xenon.FileSystemService/isOpen',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Is.FromString,
)
self.close = channel.unary_unary(
'/xenon.FileSystemService/close',
request_serializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.waitUntilDone = channel.unary_unary(
'/xenon.FileSystemService/waitUntilDone',
request_serializer=xenon_dot_proto_dot_xenon__pb2.WaitUntilDoneRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.CopyStatus.FromString,
)
self.localFileSystems = channel.unary_unary(
'/xenon.FileSystemService/localFileSystems',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystems.FromString,
)
self.listFileSystems = channel.unary_unary(
'/xenon.FileSystemService/listFileSystems',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystems.FromString,
)
| 710,339
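A minimal usage sketch for the file system stub above; the stub class name, the xenon_pb2 import path, and the server address are assumptions inferred from the generated code, not confirmed by it.

import grpc
from xenon.proto import xenon_pb2  # module layout implied by the serializer paths above

channel = grpc.insecure_channel('localhost:50051')   # assumed address of a running xenon-grpc server
fs_stub = FileSystemServiceStub(channel)             # assumed name of the stub class defined above

# localFileSystems takes an Empty request and returns a FileSystems message
file_systems = fs_stub.localFileSystems(xenon_pb2.Empty())
print(file_systems)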
|
Constructor.
Args:
channel: A grpc.Channel.
|
def __init__(self, channel):
self.getAdaptorDescriptions = channel.unary_unary(
'/xenon.SchedulerService/getAdaptorDescriptions',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAdaptorDescriptions.FromString,
)
self.getAdaptorNames = channel.unary_unary(
'/xenon.SchedulerService/getAdaptorNames',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorNames.FromString,
)
self.getAdaptorDescription = channel.unary_unary(
'/xenon.SchedulerService/getAdaptorDescription',
request_serializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAdaptorDescription.FromString,
)
self.create = channel.unary_unary(
'/xenon.SchedulerService/create',
request_serializer=xenon_dot_proto_dot_xenon__pb2.CreateSchedulerRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
)
self.getAdaptorName = channel.unary_unary(
'/xenon.SchedulerService/getAdaptorName',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.AdaptorName.FromString,
)
self.getLocation = channel.unary_unary(
'/xenon.SchedulerService/getLocation',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Location.FromString,
)
self.getCredential = channel.unary_unary(
'/xenon.SchedulerService/getCredential',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetCredentialResponse.FromString,
)
self.getProperties = channel.unary_unary(
'/xenon.SchedulerService/getProperties',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Properties.FromString,
)
self.submitBatchJob = channel.unary_unary(
'/xenon.SchedulerService/submitBatchJob',
request_serializer=xenon_dot_proto_dot_xenon__pb2.SubmitBatchJobRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Job.FromString,
)
self.submitInteractiveJob = channel.stream_stream(
'/xenon.SchedulerService/submitInteractiveJob',
request_serializer=xenon_dot_proto_dot_xenon__pb2.SubmitInteractiveJobRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.SubmitInteractiveJobResponse.FromString,
)
self.getQueueNames = channel.unary_unary(
'/xenon.SchedulerService/getQueueNames',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Queues.FromString,
)
self.getDefaultQueueName = channel.unary_unary(
'/xenon.SchedulerService/getDefaultQueueName',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Queue.FromString,
)
self.getJobs = channel.unary_unary(
'/xenon.SchedulerService/getJobs',
request_serializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAndQueues.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Jobs.FromString,
)
self.getJobStatus = channel.unary_unary(
'/xenon.SchedulerService/getJobStatus',
request_serializer=xenon_dot_proto_dot_xenon__pb2.JobRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
)
self.getJobStatuses = channel.unary_unary(
'/xenon.SchedulerService/getJobStatuses',
request_serializer=xenon_dot_proto_dot_xenon__pb2.GetJobStatusesRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.GetJobStatusesResponse.FromString,
)
self.getQueueStatus = channel.unary_unary(
'/xenon.SchedulerService/getQueueStatus',
request_serializer=xenon_dot_proto_dot_xenon__pb2.GetQueueStatusRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.QueueStatus.FromString,
)
self.getQueueStatuses = channel.unary_unary(
'/xenon.SchedulerService/getQueueStatuses',
request_serializer=xenon_dot_proto_dot_xenon__pb2.SchedulerAndQueues.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.QueueStatuses.FromString,
)
self.waitUntilDone = channel.unary_unary(
'/xenon.SchedulerService/waitUntilDone',
request_serializer=xenon_dot_proto_dot_xenon__pb2.WaitRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
)
self.waitUntilRunning = channel.unary_unary(
'/xenon.SchedulerService/waitUntilRunning',
request_serializer=xenon_dot_proto_dot_xenon__pb2.WaitRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
)
self.isOpen = channel.unary_unary(
'/xenon.SchedulerService/isOpen',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Is.FromString,
)
self.cancelJob = channel.unary_unary(
'/xenon.SchedulerService/cancelJob',
request_serializer=xenon_dot_proto_dot_xenon__pb2.JobRequest.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.JobStatus.FromString,
)
self.getFileSystem = channel.unary_unary(
'/xenon.SchedulerService/getFileSystem',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.FileSystem.FromString,
)
self.close = channel.unary_unary(
'/xenon.SchedulerService/close',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Empty.FromString,
)
self.localScheduler = channel.unary_unary(
'/xenon.SchedulerService/localScheduler',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Scheduler.FromString,
)
self.listSchedulers = channel.unary_unary(
'/xenon.SchedulerService/listSchedulers',
request_serializer=xenon_dot_proto_dot_xenon__pb2.Empty.SerializeToString,
response_deserializer=xenon_dot_proto_dot_xenon__pb2.Schedulers.FromString,
)
| 710,340
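The scheduler stub follows the same pattern; a hedged sketch, again assuming the class is exposed as SchedulerServiceStub and reusing the channel and xenon_pb2 import from the sketch above:

scheduler_stub = SchedulerServiceStub(channel)       # assumed class name

# localScheduler returns a Scheduler message, which most other calls accept as their request
scheduler = scheduler_stub.localScheduler(xenon_pb2.Empty())
queues = scheduler_stub.getQueueNames(scheduler)
scheduler_stub.close(scheduler)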
|
Write the metric to elasticsearch
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
|
def write(self, name, **data):
data["name"] = name
if not ("timestamp" in data):
data["timestamp"] = datetime.utcnow()
try:
self.client.index(
index=self.get_index(),
doc_type=self.doc_type,
id=None,
body=data
)
except TransportError as exc:
logger.warning('writing metric %r failure %r', data, exc)
| 710,879
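A hypothetical call to the write method above, assuming the enclosing class has already wired up self.client (an elasticsearch.Elasticsearch instance), self.get_index() and self.doc_type; the metric name and fields are made up:

writer.write('cpu_usage', value=0.87, host='web-1')
# indexes {'name': 'cpu_usage', 'value': 0.87, 'host': 'web-1', 'timestamp': datetime.utcnow()}
# into the current index; failures are logged rather than raised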
|
Write multiple metrics to elasticsearch in one request
Args:
metrics (list): data with mappings to send to elasticsearch
|
def bulk_write(self, metrics):
actions = []
index = self.get_index()
for metric in metrics:
actions.append({'index': {'_index': index, '_type': self.doc_type}})
actions.append(metric)
try:
self.client.bulk(actions)
except TransportError as exc:
logger.warning('bulk_write metrics %r failure %r', metrics, exc)
| 710,880
|
Write the metric to kafka
Args:
name (str): The name of the metric to write
data (dict): Additional data to store with the metric
|
def write(self, name, **data):
data["name"] = name
if not ("timestamp" in data):
data["timestamp"] = datetime.utcnow()
try:
self.producer.send(topic=self.topic, value=data)
self.producer.flush()
except (KafkaTimeoutError, NoBrokersAvailable) as exc:
logger.warning('writing metric %r failure %r', data, exc)
| 710,894
|
Write multiple metrics to kafka in one request
Args:
metrics (list): metric dicts to send to kafka
|
def bulk_write(self, metrics):
try:
for metric in metrics:
self.producer.send(self.topic, metric)
self.producer.flush()
except (KafkaTimeoutError, NoBrokersAvailable) as exc:
logger.warning('bulk_write metrics %r failure %r', metrics, exc)
| 710,895
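Hypothetical usage of the two Kafka methods above, assuming self.producer is a kafka.KafkaProducer configured with a value_serializer that can encode dicts and self.topic is set; note that bulk_write sends the dicts as-is, so they must carry their own 'name' (and, if wanted, 'timestamp') fields:

writer.write('request_latency', value=0.042, endpoint='/api/v1/jobs')
writer.bulk_write([
    {'name': 'request_count', 'value': 120},
    {'name': 'error_count', 'value': 3},
])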
|
Select a given attribute (or chain of attributes) from the objects within the
list.
Args:
attr (str): attributes to be selected (with initial `.` omitted)
default (any): value to return if given element in list doesn't contain
desired attribute
Returns:
nhl.List: list of selected attribute values
|
def select(self, attr, default=None):
return List([_select(item, attr, default) for item in self])
| 711,099
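For illustration, assuming games is an nhl.List of objects exposing a nested home.team.name attribute (the attribute chain is hypothetical):

names = games.select('home.team.name', default='unknown')
# -> nhl.List of the selected values, one per element, with 'unknown' where the chain is missing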
|
initialise the class with the local cache folder
Args:
cache_folder: path to the cache
genome_build: string indicating the genome build ("grch37" or "grch38")
|
def __init__(self, cache_folder, genome_build):
self.api_version = '1'
self.genome_build = genome_build
self.today = datetime.today()
if not os.path.exists(cache_folder):
os.mkdir(cache_folder)
# generate a database with tables if it doesn't already exist
path = os.path.join(cache_folder, "ensembl_cache.db")
if not os.path.exists(path):
try:
with sqlite3.connect(path) as conn:
with conn as cursor:
cursor.execute("CREATE TABLE ensembl " \
"(key text PRIMARY KEY, genome_build text, " \
"cache_date text, api_version text, data blob)")
except sqlite3.OperationalError:
time.sleep(random.uniform(1, 5))
self.conn = sqlite3.connect(path)
self.conn.row_factory = sqlite3.Row
| 711,528
|
get cached data for a url if stored in the cache and not outdated
Args:
url: URL for the Ensembl REST service
Returns:
data if data in cache, else None
|
def get_cached_data(self, url):
key = self.get_key_from_url(url)
with self.conn as conn:
cursor = conn.cursor()
cursor.execute("SELECT * FROM ensembl WHERE key=? AND genome_build=?",
(key, self.genome_build))
row = cursor.fetchone()
# if the data has been cached, check that it is not out of date, and
# the data was generated from the same Ensembl API version
if row is not None:
api_version = row["api_version"]
data = zlib.decompress(row["data"])
if IS_PYTHON3:
data = data.decode("utf-8")
date = datetime.strptime(row["cache_date"], "%Y-%m-%d")
diff = self.today - date
if diff.days < 180 and self.api_version == api_version:
return data
return None
| 711,529
|
cache the data retrieved from ensembl
Args:
url: URL for the Ensembl REST service
data: response data from Ensembl
attempt: count of previous attempts at writing this entry (a ValueError is
raised once this exceeds five)
|
def cache_url_data(self, url, data, attempt=0):
if attempt > 5:
raise ValueError('too many attempts at writing to the cache')
key = self.get_key_from_url(url)
# don't cache the ensembl version check
if key == "info.rest":
return
current_date = datetime.strftime(self.today, "%Y-%m-%d")
# python3 zlib requires encoded strings
if IS_PYTHON3:
data = data.encode("utf-8")
compressed = zlib.compress(data)
# python2 sqlite3 can't write "8-bit bytestrings", but it can handle
# buffer versions of the bytestrings
if IS_PYTHON2:
compressed = buffer(compressed)
t = (key, self.genome_build, current_date, self.api_version, compressed)
cmd = "INSERT OR REPLACE INTO ensembl " \
"(key, genome_build, cache_date, api_version, data) VALUES (?,?,?,?,?)"
try:
with self.conn as cursor:
cursor.execute(cmd, t)
except sqlite3.OperationalError:
# if we hit a sqlite locking error, wait a random time so conflicting
# instances are less likely to reconflict, then retry
time.sleep(random.uniform(1, 10))
self.cache_url_data(url, data.decode('utf-8'), attempt + 1)
| 711,530
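A sketch of how the two cache methods above fit together around a REST call; EnsemblCache is instantiated as in its constructor above, and fetch() is a placeholder for the actual HTTP request:

cache = EnsemblCache('/tmp/ensembl_cache', 'grch37')   # hypothetical cache folder
url = 'http://rest.ensembl.org/xrefs/symbol/homo_sapiens/KMT2A'   # illustrative URL
data = cache.get_cached_data(url)
if data is None:
    data = fetch(url)                  # placeholder, not part of this class
    cache.cache_url_data(url, data)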
|
parses the url into a unique database key
We take a URL like:
http://rest.ensembl.org/sequence/id/ENST00000538324?type=genomic;expand_3prime=10;expand_5prime=10
and turn it into 'sequence.id.ENST00000538324.genomic'
Args:
url: URL for the Ensembl REST service
Returns:
a parsed unique database key for the URL's data
|
def get_key_from_url(self, url):
key = url.split("/")[3:]
# fix the final bit of the url, none of which uniquely define the data
suffix = key.pop()
suffix = suffix.split(";")[0]
# convert "LONG_ID?feature=transcript" to ['LONG_ID', "transcript"] etc
id = suffix.split("?", 1)
suffix = id.pop()
if "=" in suffix:
_, suffix = suffix.split("=")
key += id + [suffix]
# replace characters not tolerated in keys and remove blank entries
key = ( x.replace(':', '_') for x in key )
key = ( x for x in key if x != '' )
return ".".join(key)
| 711,531
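Worked example of the key construction above, using the URL from the docstring; cache is an EnsemblCache instance as in the earlier sketch:

url = ('http://rest.ensembl.org/sequence/id/ENST00000538324'
    '?type=genomic;expand_3prime=10;expand_5prime=10')
cache.get_key_from_url(url)
# url.split('/')[3:] -> ['sequence', 'id', 'ENST00000538324?type=genomic;...']
# the query string is trimmed to the value of its first parameter, giving
# 'sequence.id.ENST00000538324.genomic'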
|
obtain the sequence for a transcript from ensembl
Args:
cache_folder: path to folder for caching data requested from Ensembl
genome_build: string indicating the genome build ("grch37" or "grch38")
|
def __init__(self, cache_folder, genome_build):
self.cache = EnsemblCache(cache_folder, genome_build)
self.prior_time = time.time() - 1
self.rate_limit = 0.067
server_dict = {"grch37": "grch37.", "grch38": ""}
self.server = "http://{}rest.ensembl.org".format(server_dict[genome_build])
self.check_ensembl_api_version()
| 711,595
|
sometimes we get HGNC symbols that do not match the ensembl rest version
that we are currently using. We can look for earlier HGNC symbols for
the gene using the service at rest.genenames.org
Args:
hgnc_symbol: HGNC symbol for the gene (eg "MLL2")
Returns:
list of deprecated gene symbols (eg ["KMT2A"])
|
def get_previous_symbol(self, hgnc_symbol):
ensembl_server = self.server
gene_names_server = "http://rest.genenames.org"
self.server = gene_names_server
headers = {"accept": "application/json", "content-type": "application/json"}
ext = "/fetch/symbol/{}".format(hgnc_symbol)
try:
r = self.ensembl_request(ext, headers)
finally:
self.server = ensembl_server
gene_json = json.loads(r)
prev_gene = []
docs = gene_json["response"]["docs"]
# strip out any gene entries that have been invalidated
docs = [ x for x in docs if x["status"] != "Entry Withdrawn"]
if len(docs) == 0:
pass
elif len(docs) > 1:
raise ValueError("{0} has more than one alternate symbol, which I haven't accounted for.".format(hgnc_symbol))
elif "prev_symbol" in docs[0]:
prev_gene = docs[0]["prev_symbol"]
return prev_gene
| 711,600
|
fetch the ensembl transcript IDs for a given ensembl gene ID.
Args:
gene_ids: list of Ensembl gene IDs for the gene
hgnc_symbols: list of possible HGNC symbols for gene
|
def get_transcript_ids_for_ensembl_gene_ids(self, gene_ids, hgnc_symbols):
chroms = {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", \
"12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", \
"X", "Y"}
headers = {"content-type": "application/json"}
transcript_ids = []
for gene_id in gene_ids:
self.attempt = 0
ext = "/overlap/id/{}?feature=transcript".format(gene_id)
r = self.ensembl_request(ext, headers)
for item in json.loads(r):
# ignore non-coding transcripts
if item["biotype"] not in ["protein_coding", "polymorphic_pseudogene"]:
continue
# ignore transcripts not on the standard chromosomes
# (non-default chroms fail to map the known de novo variants
# to the gene location
if item["Parent"] != gene_id or item["seq_region_name"] not in \
chroms or \
all([symbol not in item["external_name"] for symbol in hgnc_symbols]):
continue
transcript_ids.append(item["id"])
return transcript_ids
| 711,601
|
load a file listing gene and transcript IDs
Args:
path: path to file containing gene IDs and transcript IDs e.g.
gene_1 transcript_1.1 length_1 denovo_count
gene_2 transcript_2.1 length_2 denovo_count
Returns:
dict of transcripts eg {'CTC1': ["ENST00000315684", "ENST00000485511"]}
|
def load_genes(path):
with open(path, 'rt') as f:
lines = [ x.split('\t')[:2] for x in f if not x.startswith('hgnc') ]
transcripts = {}
for symbol, tx in lines:
if symbol not in transcripts:
transcripts[symbol] = []
transcripts[symbol].append(tx)
return transcripts
| 711,734
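For illustration, a tab-separated input like the one sketched in the docstring (lengths and counts are made up and ignored, since only the first two columns are read):

# genes.txt:
# hgnc    transcript          length  denovo_count
# CTC1    ENST00000315684     3650    1
# CTC1    ENST00000485511     2340    1
transcripts = load_genes('genes.txt')
# -> {'CTC1': ['ENST00000315684', 'ENST00000485511']}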
|
determines mutation rates per functional category for transcripts
Args:
transcripts: list of transcript IDs for a gene
mut_dict: dictionary of local sequence context mutation rates
ensembl: EnsemblRequest object, to retrieve information from Ensembl.
Returns:
tuple of (rates, merged transcript, and transcript CDS length)
|
def get_mutation_rates(transcripts, mut_dict, ensembl):
rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,
'splice_region': 0, 'synonymous': 0}
combined = None
for tx_id in transcripts:
try:
tx = construct_gene_object(ensembl, tx_id)
except ValueError:
continue
if len(tx.get_cds_sequence()) % 3 != 0:
raise ValueError("anomalous_coding_sequence")
# ignore mitochondrial genes
if tx.get_chrom() == "MT":
continue
sites = SiteRates(tx, mut_dict, masked_sites=combined)
combined = tx + combined
for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region', 'synonymous']:
rates[cq] += sites[cq].get_summed_rate()
if combined is None:
raise ValueError('no tx found')
length = combined.get_coding_distance(combined.get_cds_end())['pos']
return rates, combined, length
| 711,735
|
construct a bsub job submission command
Args:
command: list of strings that form a unix command
job_id: string for job ID for submission
dependent_id: job ID, or list of job IDs which the current command needs
to have finished before the current command will start. Note that
the list can be empty, in which case there are no dependencies.
memory: minimum memory requirements (in megabytes)
Returns:
nothing
|
def submit_bsub_job(command, job_id=None, dependent_id=None, memory=None, requeue_code=None, logfile=None):
if job_id is None:
job_id = get_random_string()
job = "-J \"{0}\"".format(job_id)
mem = ""
if memory is not None:
mem = "-R 'select[mem>{0}] rusage[mem={0}]' -M {0}".format(memory)
requeue = ""
if requeue_code is not None:
requeue = "-Q 'EXCLUDE({0})'".format(requeue_code)
dependent = ""
if dependent_id is not None:
if isinstance(dependent_id, list):
dependent_id = " && ".join(dependent_id)
dependent = "-w '{0}'".format(dependent_id)
log = "bjob_output.txt"
if logfile is not None:
log = logfile
preamble = ["bsub", job, dependent, requeue, "-q", "normal", "-o", log, mem]
command = ["bash", "-c", "\""] + command + ["\""]
command = " ".join(preamble + command)
subprocess.call(command, shell=True)
| 711,778
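To show what the function above actually runs, here is the shell command assembled for a simple call (script name, job ID and log file are made up; whitespace from the empty dependency and requeue slots is collapsed):

submit_bsub_job(['python', 'run.py'], job_id='job_1', memory=1000, logfile='run.log')
# bsub -J "job_1" -q normal -o run.log \
#      -R 'select[mem>1000] rusage[mem=1000]' -M 1000 \
#      bash -c " python run.py "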
|
load sequence context-based mutation rates
Args:
path: path to table of sequence context-based mutation rates. If None,
this defaults to per-trinucleotide rates provided by Kaitlin Samocha
(Broad Institute).
Returns:
list of [initial, changed, rate] lists e.g. [['AGA', 'ATA', '5e-8']]
|
def load_mutation_rates(path=None):
if path is None:
path = resource_filename(__name__, "data/rates.txt")
rates = []
with open(path) as handle:
for line in handle:
if line.startswith("from"): # ignore the header line
continue
line = [ x.encode('utf8') for x in line.strip().split() ]
rates.append(line)
return rates
| 711,781
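A usage sketch; the rate shown is the placeholder value from the docstring, and the triples are byte strings because each field is encoded before being stored:

rates = load_mutation_rates()        # falls back to the bundled data/rates.txt
initial, changed, rate = rates[0]    # e.g. b'AGA', b'ATA', b'5e-8'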
|
finds the protein length for ensembl transcript IDs for a gene
Args:
ensembl: EnsemblRequest object to request sequences and data
from the ensembl REST API
transcript_ids: list of transcript IDs for a single gene
Returns:
dictionary of lengths (in amino acids), indexed by transcript IDs
|
def get_transcript_lengths(ensembl, transcript_ids):
transcripts = {}
for transcript_id in transcript_ids:
# get the transcript's protein sequence via the ensembl REST API
try:
seq = ensembl.get_protein_seq_for_transcript(transcript_id)
except ValueError:
continue
transcripts[transcript_id] = len(seq)
return transcripts
| 711,858
|
creates a Transcript object for a gene from ensembl databases
Args:
ensembl: EnsemblRequest object to request data from ensembl
transcript_id: string for an Ensembl transcript ID
Returns:
a Transcript object, containing transcript coordinates and gene and
transcript sequence.
Raises:
ValueError if CDS from genomic sequence given gene coordinates and CDS
retrieved from Ensembl do not match.
|
def construct_gene_object(ensembl, transcript_id):
# get the sequence for the identified transcript
(chrom, start, end, strand, genomic_sequence) = ensembl.get_genomic_seq_for_transcript(transcript_id, expand=10)
cds_sequence = ensembl.get_cds_seq_for_transcript(transcript_id)
# get the locations of the exons and cds from ensembl
cds_ranges = ensembl.get_cds_ranges_for_transcript(transcript_id)
exon_ranges = ensembl.get_exon_ranges_for_transcript(transcript_id)
# start a Transcript object with the locations and sequence
transcript = Transcript(transcript_id, chrom, start, end, strand)
transcript.set_exons(exon_ranges, cds_ranges)
transcript.set_cds(cds_ranges)
transcript.add_cds_sequence(cds_sequence)
transcript.add_genomic_sequence(genomic_sequence, offset=10)
return transcript
| 711,859
|
get the de novos within the coding sequence of a transcript
Args:
transcript: Transcript object, which defines the transcript coordinates
de_novos: list of chromosome sequence positions for de novo events
Returns:
list of de novo positions found within the transcript
|
def get_de_novos_in_transcript(transcript, de_novos):
in_transcript = []
for de_novo in de_novos:
# we check if the de novo is within the transcript by converting the
# chromosomal position to a CDS-based position. Variants outside the CDS
# will raise an error, which we catch and pass on. It's better to do
# this, rather than use the function in_coding_region(), since that
# function does not allow for splice site variants.
site = transcript.get_coding_distance(de_novo)
cds_length = transcript.get_coding_distance(transcript.get_cds_end())
within_cds = site['pos'] >= 0 and site['pos'] < cds_length['pos']
if within_cds and (transcript.in_coding_region(de_novo) or abs(site['offset']) < 9):
in_transcript.append(de_novo)
return in_transcript
| 711,860
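A sketch of how this fits with construct_gene_object above; the transcript ID is reused from the load_genes docstring and the positions are made up:

tx = construct_gene_object(ensembl, 'ENST00000315684')
de_novos = [1234567, 1234890, 9999999]     # hypothetical chromosomal positions
in_tx = get_de_novos_in_transcript(tx, de_novos)
# keeps only positions that map inside the CDS (or close to a splice site)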
|
gets transcript IDs for a gene.
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
Returns:
dictionary of transcript ID: transcript lengths for all transcripts
for a given HGNC symbol.
|
def get_transcript_ids(ensembl, gene_id):
ensembl_genes = ensembl.get_genes_for_hgnc_id(gene_id)
transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, [gene_id])
# sometimes we get HGNC symbols that do not match the ensembl rest version
# that we are currently using. We can look for earlier HGNC symbols for
# the gene using the service at rest.genenames.org
alt_symbols = []
if len(transcript_ids) == 0:
alt_symbols = ensembl.get_previous_symbol(gene_id)
genes = [ensembl.get_genes_for_hgnc_id(symbol) for symbol in alt_symbols]
genes = [item for sublist in genes for item in sublist]
ensembl_genes += genes
symbols = [gene_id] + alt_symbols
transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, symbols)
return get_transcript_lengths(ensembl, transcript_ids)
| 711,861
|
sort out all the necessary sequences and positions for a gene
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
list of Transcript objects for gene, including genomic ranges and sequences
|
def load_gene(ensembl, gene_id, de_novos=[]):
transcripts = minimise_transcripts(ensembl, gene_id, de_novos)
genes = []
for transcript_id in transcripts:
gene = construct_gene_object(ensembl, transcript_id)
genes.append(gene)
if len(genes) == 0:
raise IndexError("{0}: no suitable transcripts".format(gene_id))
return genes
| 711,862
|
count de novos in transcripts for a gene.
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
de_novos: list of de novo positions, so we can check they all fit in
the gene transcript
Returns:
dictionary of lengths and de novo counts, indexed by transcript IDs.
|
def count_de_novos_per_transcript(ensembl, gene_id, de_novos=[]):
transcripts = get_transcript_ids(ensembl, gene_id)
# TODO: allow for genes without any coding sequence.
if len(transcripts) == 0:
raise IndexError("{0} lacks coding transcripts".format(gene_id))
# count the de novos observed in all transcripts
counts = {}
for key in transcripts:
try:
gene = construct_gene_object(ensembl, key)
total = len(get_de_novos_in_transcript(gene, de_novos))
if total > 0:
counts[key] = {}
counts[key]["n"] = total
counts[key]["len"] = transcripts[key]
except ValueError:
pass
return counts
| 711,863
|
Save word embedding file.
Args:
f (File): File to write the vectors. File should be open for writing
ascii.
arr (numpy.array): Numpy array with ``float`` dtype.
vocab (iterable): Each element is a pair of a word (``bytes``) and its ``arr``
index (``int``). Words should already be encoded to ``bytes``.
|
def save(f, arr, vocab):
itr = iter(vocab)
# Avoid empty line at the end
word, idx = next(itr)
_write_line(f, arr[idx], word)
for word, idx in itr:
f.write(b'\n')
_write_line(f, arr[idx], word)
| 711,908
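Hypothetical usage of save, pairing a small embedding matrix with a bytes-keyed vocabulary; the file is opened in binary mode here since the newline separators are written as bytes:

import numpy as np

arr = np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32)
vocab = [(b'the', 0), (b'of', 1)]          # (word, row index) pairs, words already bytes
with open('vectors.txt', 'wb') as f:
    save(f, arr, vocab)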
|
Check if a file is valid Glove format.
Args:
line0 (bytes): First line of the file
line1 (bytes): Second line of the file
Returns:
bool: ``True`` if it is valid. ``False`` if it is invalid.
|
def check_valid(line0, line1):
data = line0.strip().split(b' ')
if len(data) <= 2:
return False
# check if data[2:] is float values
try:
# list() forces evaluation; in Python 3 a bare map() is lazy and would never raise here
list(map(float, data[2:]))
except ValueError:
return False
return True
| 711,999
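For example, with the usual GloVe layout of a word followed by its vector components (values illustrative); a word2vec-style header such as b'400000 50' has only two fields and would return False:

check_valid(b'the 0.418 0.24968 -0.41242', b'of 0.70853 0.57088 -0.4716')
# -> True: at least three space-separated fields, numeric from the third onwards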
|
Load word embedding file with predefined vocabulary
Args:
fin (File): File object to read. File should be open for reading ascii.
vocab (dict): Mapping from words (``bytes``) to vector indices
(``int``).
dtype (numpy.dtype): Element data type to use for the array.
Returns:
numpy.ndarray: Word embedding representation vectors
|
def load_with_vocab(fin, vocab, dtype=np.float32):
arr = None
for line in fin:
try:
token, v = _parse_line(line, dtype)
except (ValueError, IndexError):
raise ParseError(b'Parsing error in line: ' + line)
if token in vocab:
if arr is None:
arr = np.empty((len(vocab), len(v)), dtype=dtype)
arr.fill(np.NaN)
elif arr.shape[1] != len(v):
raise ParseError(b'Vector size did not match in line: ' + line)
arr[vocab[token], :] = np.array(v, dtype=dtype).reshape(1, -1)
return arr
| 712,001
|
Load word embedding file.
Args:
fin (File): File object to read. File should be open for reading ascii.
dtype (numpy.dtype): Element data type to use for the array.
max_vocab (int): Maximum number of vocabulary entries to read.
Returns:
numpy.ndarray: Word embedding representation vectors
dict: Mapping from words to vector indices.
|
def load(fin, dtype=np.float32, max_vocab=None):
vocab = {}
arr = None
i = 0
for line in fin:
if max_vocab is not None and i >= max_vocab:
break
try:
token, v = _parse_line(line, dtype)
except (ValueError, IndexError):
raise ParseError(b'Parsing error in line: ' + line)
if token in vocab:
parse_warn(b'Duplicated vocabulary ' + token)
continue
if arr is None:
arr = np.array(v, dtype=dtype).reshape(1, -1)
else:
if arr.shape[1] != len(v):
raise ParseError(b'Vector size did not match in line: ' + line)
arr = np.append(arr, [v], axis=0)
vocab[token] = i
i += 1
return arr, vocab
| 712,002
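A usage sketch combining the two loaders above; the file name is hypothetical, and the file is opened in binary mode since tokens are handled as bytes throughout:

with open('glove.6B.50d.txt', 'rb') as fin:
    arr, vocab = load(fin, max_vocab=10000)
print(arr.shape)                 # (number of words read, vector dimension)

# a second pass restricted to a fixed vocabulary
with open('glove.6B.50d.txt', 'rb') as fin:
    arr2 = load_with_vocab(fin, {b'the': 0, b'of': 1})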