def respond_file(self, file_path, attachment=False, query=None):
"""
Respond to the client by serving a file, either directly or as
an attachment.
:param str file_path: The path to the file to serve, this does not need to be in the web root.
:param bool attachment: Whether to serve the file as a download by setting the Content-Disposition header.
"""
del query
file_path = os.path.abspath(file_path)
try:
file_obj = open(file_path, 'rb')
except IOError:
self.respond_not_found()
return
self.send_response(200)
self.send_header('Content-Type', self.guess_mime_type(file_path))
fs = os.fstat(file_obj.fileno())
self.send_header('Content-Length', str(fs[6]))
if attachment:
file_name = os.path.basename(file_path)
self.send_header('Content-Disposition', 'attachment; filename=' + file_name)
self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
self.end_headers()
shutil.copyfileobj(file_obj, self.wfile)
file_obj.close()
return
def respond_list_directory(self, dir_path, query=None):
"""
Respond to the client with an HTML page listing the contents of
the specified directory.
:param str dir_path: The path of the directory to list the contents of.
"""
del query
try:
dir_contents = os.listdir(dir_path)
except os.error:
self.respond_not_found()
return
if os.path.normpath(dir_path) != self.__config['serve_files_root']:
dir_contents.append('..')
dir_contents.sort(key=lambda a: a.lower())
displaypath = html.escape(urllib.parse.unquote(self.path), quote=True)
f = io.BytesIO()
encoding = sys.getfilesystemencoding()
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n')
f.write(b'<html>\n<title>Directory listing for ' + displaypath.encode(encoding) + b'</title>\n')
f.write(b'<body>\n<h2>Directory listing for ' + displaypath.encode(encoding) + b'</h2>\n')
f.write(b'<hr>\n<ul>\n')
for name in dir_contents:
fullname = os.path.join(dir_path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write(('<li><a href="' + urllib.parse.quote(linkname) + '">' + html.escape(displayname, quote=True) + '</a>\n').encode(encoding))
f.write(b'</ul>\n<hr>\n</body>\n</html>\n')
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=' + encoding)
self.send_header('Content-Length', length)
self.end_headers()
shutil.copyfileobj(f, self.wfile)
f.close()
return
def respond_redirect(self, location='/'):
"""
Respond to the client with a 301 message and redirect them with
a Location header.
:param str location: The new location to redirect the client to.
"""
self.send_response(301)
self.send_header('Content-Length', 0)
self.send_header('Location', location)
self.end_headers()
return
def respond_server_error(self, status=None, status_line=None, message=None):
"""
Handle an internal server error, logging a traceback if executed
within an exception handler.
:param int status: The status code to respond to the client with.
:param str status_line: The status message to respond to the client with.
:param str message: The body of the response that is sent to the client.
"""
(ex_type, ex_value, ex_traceback) = sys.exc_info()
if ex_type:
(ex_file_name, ex_line, _, _) = traceback.extract_tb(ex_traceback)[-1]
line_info = "{0}:{1}".format(ex_file_name, ex_line)
log_msg = "encountered {0} in {1}".format(repr(ex_value), line_info)
self.server.logger.error(log_msg, exc_info=True)
status = (status or 500)
status_line = (status_line or http.client.responses.get(status, 'Internal Server Error')).strip()
self.send_response(status, status_line)
message = (message or status_line)
if isinstance(message, (str, bytes)):
self.send_header('Content-Length', len(message))
self.end_headers()
if isinstance(message, str):
self.wfile.write(message.encode(sys.getdefaultencoding()))
else:
self.wfile.write(message)
elif hasattr(message, 'fileno'):
fs = os.fstat(message.fileno())
self.send_header('Content-Length', fs[6])
self.end_headers()
shutil.copyfileobj(message, self.wfile)
else:
self.end_headers()
return
def respond_unauthorized(self, request_authentication=False):
"""
Respond to the client that the request is unauthorized.
:param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header.
"""
headers = {}
if request_authentication:
headers['WWW-Authenticate'] = 'Basic realm="' + self.__config['server_version'] + '"'
self.send_response_full(b'Unauthorized', status=401, headers=headers)
return
def dispatch_handler(self, query=None):
"""
Dispatch functions based on the established handler_map. It is
generally not necessary to override this function and doing so
will prevent any handlers from being executed. This function is
executed automatically when requests of either GET, HEAD, or POST
are received.
:param dict query: Parsed query parameters from the corresponding request.
"""
query = (query or {})
# normalize the path
# abandon query parameters
self.path = self.path.split('?', 1)[0]
self.path = self.path.split('#', 1)[0]
original_path = urllib.parse.unquote(self.path)
self.path = posixpath.normpath(original_path)
words = self.path.split('/')
words = filter(None, words)
tmp_path = ''
for word in words:
_, word = os.path.splitdrive(word)
_, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
tmp_path = os.path.join(tmp_path, word)
self.path = tmp_path
if self.path == 'robots.txt' and self.__config['serve_robots_txt']:
self.send_response_full(self.__config['robots_txt'])
return
self.cookies = http.cookies.SimpleCookie(self.headers.get('cookie', ''))
handler, is_method = self.__get_handler(is_rpc=False)
if handler is not None:
try:
handler(*((query,) if is_method else (self, query)))
except Exception:
self.respond_server_error()
return
if not self.__config['serve_files']:
self.respond_not_found()
return
file_path = self.__config['serve_files_root']
file_path = os.path.join(file_path, tmp_path)
if os.path.isfile(file_path) and os.access(file_path, os.R_OK):
self.respond_file(file_path, query=query)
return
elif os.path.isdir(file_path) and os.access(file_path, os.R_OK):
if not original_path.endswith('/'):
# redirect browser, doing what apache does
destination = self.path + '/'
if self.command == 'GET' and self.query_data:
destination += '?' + urllib.parse.urlencode(self.query_data, True)
self.respond_redirect(destination)
return
for index in ['index.html', 'index.htm']:
index = os.path.join(file_path, index)
if os.path.isfile(index) and os.access(index, os.R_OK):
self.respond_file(index, query=query)
return
if self.__config['serve_files_list_directories']:
self.respond_list_directory(file_path, query=query)
return
self.respond_not_found()
return
def guess_mime_type(self, path):
"""
Guess an appropriate MIME type based on the extension of the
provided path.
:param str path: The path of the file to analyze.
:return: The guessed MIME type, or the default if none is found.
:rtype: str
"""
_, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
return self.extensions_map[ext if ext in self.extensions_map else '']
def check_authorization(self):
"""
Check for the presence of a basic auth Authorization header and
if the credentials contained within it are valid.
:return: Whether or not the credentials are valid.
:rtype: bool
"""
try:
store = self.__config.get('basic_auth')
if store is None:
return True
auth_info = self.headers.get('Authorization')
if not auth_info:
return False
auth_info = auth_info.split()
if len(auth_info) != 2 or auth_info[0] != 'Basic':
return False
auth_info = base64.b64decode(auth_info[1]).decode(sys.getdefaultencoding())
username = auth_info.split(':')[0]
password = ':'.join(auth_info.split(':')[1:])
password_bytes = password.encode(sys.getdefaultencoding())
if hasattr(self, 'custom_authentication'):
if self.custom_authentication(username, password):
self.basic_auth_user = username
return True
return False
if username not in store:
self.server.logger.warning('received invalid username: ' + username)
return False
password_data = store[username]
if password_data['type'] == 'plain':
if password == password_data['value']:
self.basic_auth_user = username
return True
elif hashlib.new(password_data['type'], password_bytes).digest() == password_data['value']:
self.basic_auth_user = username
return True
self.server.logger.warning('received invalid password from user: ' + username)
except Exception:
pass
return False
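For reference, the Authorization header parsed above is standard HTTP Basic authentication: the scheme name followed by base64 of 'username:password'. A minimal round trip:
import base64

# Client side: credentials are joined with ':' and base64-encoded.
header_value = 'Basic ' + base64.b64encode(b'alice:s3cret').decode('ascii')

# Server side: the same split performed above, tolerating ':' in passwords.
scheme, b64 = header_value.split()
assert scheme == 'Basic'
decoded = base64.b64decode(b64).decode('utf-8')
username, _, password = decoded.partition(':')
print(username, password)  # alice s3cret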
def cookie_get(self, name):
"""
Check for a cookie value by name.
:param str name: Name of the cookie value to retrieve.
:return: Returns the cookie value if it's set or None if it's not found.
"""
if not hasattr(self, 'cookies'):
return None
if self.cookies.get(name):
return self.cookies.get(name).value
return None
def cookie_set(self, name, value):
"""
Set the value of a client cookie. This can only be called while
headers can be sent.
:param str name: The name of the cookie value to set.
:param str value: The value of the cookie to set.
"""
if not self.headers_active:
raise RuntimeError('headers have already been ended')
cookie = "{0}={1}; Path=/; HttpOnly".format(name, value)
self.send_header('Set-Cookie', cookie)
def get_content_type_charset(self, default='UTF-8'):
"""
Inspect the Content-Type header to retrieve the charset that the client
has specified.
:param str default: The default charset to return if none exists.
:return: The charset of the request.
:rtype: str
"""
encoding = default
header = self.headers.get('Content-Type', '')
idx = header.find('charset=')
if idx > 0:
encoding = (header[idx + 8:].split(' ', 1)[0] or encoding)
return encoding
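A standalone check of the charset-extraction logic above, re-implemented outside the handler purely for illustration:
def charset_of(content_type, default='UTF-8'):
    idx = content_type.find('charset=')
    if idx > 0:
        return content_type[idx + 8:].split(' ', 1)[0] or default
    return default

print(charset_of('text/html; charset=ISO-8859-1'))  # ISO-8859-1
print(charset_of('application/json'))               # UTF-8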
def close(self):
"""
Close the web socket connection and stop processing results. If the
connection is still open, a WebSocket close message will be sent to the
peer.
"""
if not self.connected:
return
self.connected = False
if self.handler.wfile.closed:
return
if select.select([], [self.handler.wfile], [], 0)[1]:
with self.lock:
self.handler.wfile.write(b'\x88\x00')
self.handler.wfile.flush()
self.on_closed()
def send_message(self, opcode, message):
"""
Send a message to the peer over the socket.
:param int opcode: The opcode for the message to send.
:param bytes message: The message data to send.
"""
if not isinstance(message, bytes):
message = message.encode('utf-8')
length = len(message)
if not select.select([], [self.handler.wfile], [], 0)[1]:
self.logger.error('the socket is not ready for writing')
self.close()
return
buffer = b''
buffer += struct.pack('B', 0x80 + opcode)
if length <= 125:
buffer += struct.pack('B', length)
elif 126 <= length <= 65535:
buffer += struct.pack('>BH', 126, length)
else:
buffer += struct.pack('>BQ', 127, length)
buffer += message
self._last_sent_opcode = opcode
self.lock.acquire()
try:
self.handler.wfile.write(buffer)
self.handler.wfile.flush()
except Exception:
self.logger.error('an error occurred while sending a message', exc_info=True)
self.close()
finally:
self.lock.release()
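The length encoding above follows RFC 6455: payloads up to 125 bytes use the 7-bit length field, 126 flags a 16-bit extended length, and 127 flags a 64-bit length. A sketch of just the header packing for an unmasked server frame:
import struct

def frame_header(opcode, length):
    # FIN bit set (0x80) plus opcode, then the RFC 6455 length field.
    header = struct.pack('B', 0x80 + opcode)
    if length <= 125:
        header += struct.pack('B', length)
    elif length <= 65535:
        header += struct.pack('>BH', 126, length)
    else:
        header += struct.pack('>BQ', 127, length)
    return header

print(frame_header(0x01, 5).hex())    # 8105
print(frame_header(0x01, 300).hex())  # 817e012c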
def on_message(self, opcode, message):
"""
The primary dispatch function to handle incoming WebSocket messages.
:param int opcode: The opcode of the message that was received.
:param bytes message: The data contained within the message.
"""
self.logger.debug("processing {0} (opcode: 0x{1:02x}) message".format(self._opcode_names.get(opcode, 'UNKNOWN'), opcode))
if opcode == self._opcode_close:
self.close()
elif opcode == self._opcode_ping:
if len(message) > 125:
self.close()
return
self.send_message(self._opcode_pong, message)
elif opcode == self._opcode_pong:
pass
elif opcode == self._opcode_binary:
self.on_message_binary(message)
elif opcode == self._opcode_text:
try:
message = self._decode_string(message)
except UnicodeDecodeError:
self.logger.warning('closing connection due to invalid unicode within a text message')
self.close()
else:
self.on_message_text(message)
elif opcode == self._opcode_continue:
self.close()
else:
self.logger.warning("received unknown opcode: {0} (0x{0:02x})".format(opcode))
self.close()
def from_content_type(cls, content_type):
"""
Build a serializer object from a MIME Content-Type string.
:param str content_type: The Content-Type string to parse.
:return: A new serializer instance.
:rtype: :py:class:`.Serializer`
"""
name = content_type
options = {}
if ';' in content_type:
name, options_str = content_type.split(';', 1)
for part in options_str.split(';'):
part = part.strip()
if '=' in part:
key, value = part.split('=')
else:
key, value = (part, None)
options[key] = value
# old style compatibility
if name.endswith('+zlib'):
options['compression'] = 'zlib'
name = name[:-5]
return cls(name, charset=options.get('charset', 'UTF-8'), compression=options.get('compression'))
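A standalone sketch of the same name/options split; parse_content_type is a hypothetical helper shown only to illustrate the parsing:
def parse_content_type(content_type):
    name, _, options_str = content_type.partition(';')
    options = {}
    for part in options_str.split(';'):
        part = part.strip()
        if not part:
            continue
        key, _, value = part.partition('=')
        options[key] = value or None
    return name.strip(), options

print(parse_content_type('application/json; charset=UTF-8; compression=zlib'))
# ('application/json', {'charset': 'UTF-8', 'compression': 'zlib'})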
def dumps(self, data):
"""
Serialize a python data type for transmission or storage.
:param data: The python object to serialize.
:return: The serialized representation of the object.
:rtype: bytes
"""
data = g_serializer_drivers[self.name]['dumps'](data)
if sys.version_info[0] == 3 and isinstance(data, str):
data = data.encode(self._charset)
if self._compression == 'zlib':
data = zlib.compress(data)
assert isinstance(data, bytes)
return data
def loads(self, data):
"""
Deserialize the data into its original python object.
:param bytes data: The serialized object to load.
:return: The original python object.
"""
if not isinstance(data, bytes):
raise TypeError("loads() argument 1 must be bytes, not {0}".format(type(data).__name__))
if self._compression == 'zlib':
data = zlib.decompress(data)
if sys.version_info[0] == 3 and self.name.startswith('application/'):
data = data.decode(self._charset)
data = g_serializer_drivers[self.name]['loads'](data, (self._charset if sys.version_info[0] == 3 else None))
if isinstance(data, list):
data = tuple(data)
return data
def add_sni_cert(self, hostname, ssl_certfile=None, ssl_keyfile=None, ssl_version=None):
"""
Add an SSL certificate for a specific hostname as supported by SSL's
Server Name Indicator (SNI) extension. See :rfc:`3546` for more details
on SSL extensions. In order to use this method, the server instance must
have been initialized with at least one address configured for SSL.
.. warning::
This method will raise a :py:exc:`RuntimeError` if either the SNI
extension is not available in the :py:mod:`ssl` module or if SSL was
not enabled at initialization time through the use of arguments to
:py:meth:`~.__init__`.
.. versionadded:: 2.0.0
:param str hostname: The hostname for this configuration.
:param str ssl_certfile: An SSL certificate file to use, setting this enables SSL.
:param str ssl_keyfile: An SSL certificate file to use.
:param ssl_version: The SSL protocol version to use.
"""
if not g_ssl_has_server_sni:
raise RuntimeError('the ssl server name indicator extension is unavailable')
if self._ssl_sni_entries is None:
raise RuntimeError('ssl was not enabled on initialization')
if ssl_certfile:
ssl_certfile = os.path.abspath(ssl_certfile)
if ssl_keyfile:
ssl_keyfile = os.path.abspath(ssl_keyfile)
cert_info = SSLSNICertificate(hostname, ssl_certfile, ssl_keyfile)
if ssl_version is None or isinstance(ssl_version, str):
ssl_version = resolve_ssl_protocol_version(ssl_version)
ssl_ctx = ssl.SSLContext(ssl_version)
ssl_ctx.load_cert_chain(ssl_certfile, keyfile=ssl_keyfile)
self._ssl_sni_entries[hostname] = SSLSNIEntry(context=ssl_ctx, certificate=cert_info)
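On the stdlib side, per-hostname contexts like those stored in _ssl_sni_entries are typically selected with ssl's server-side SNI callback (Python 3.7+). A minimal sketch under that assumption; names and file paths here are illustrative:
import ssl

contexts = {}  # hostname -> ssl.SSLContext, analogous to _ssl_sni_entries

def sni_callback(ssl_socket, server_name, initial_context):
    # Swap in the per-hostname context when the client sends SNI.
    ctx = contexts.get(server_name)
    if ctx is not None:
        ssl_socket.context = ctx

default_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default_ctx.load_cert_chain('default-cert.pem', keyfile='default-key.pem')
default_ctx.sni_callback = sni_callback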
def remove_sni_cert(self, hostname):
"""
Remove the SSL Server Name Indicator (SNI) certificate configuration for
the specified *hostname*.
.. warning::
This method will raise a :py:exc:`RuntimeError` if either the SNI
extension is not available in the :py:mod:`ssl` module or if SSL was
not enabled at initialization time through the use of arguments to
:py:meth:`~.__init__`.
.. versionadded:: 2.2.0
:param str hostname: The hostname to delete the SNI configuration for.
"""
if not g_ssl_has_server_sni:
raise RuntimeError('the ssl server name indicator extension is unavailable')
if self._ssl_sni_entries is None:
raise RuntimeError('ssl was not enabled on initialization')
sni_entry = self._ssl_sni_entries.pop(hostname, None)
if sni_entry is None:
raise ValueError('the specified hostname does not have an sni certificate configuration')
def sni_certs(self):
"""
.. versionadded:: 2.2.0
:return: Return a tuple of :py:class:`~.SSLSNICertificate` instances for each of the certificates that are configured.
:rtype: tuple
"""
if not g_ssl_has_server_sni or self._ssl_sni_entries is None:
return tuple()
return tuple(entry.certificate for entry in self._ssl_sni_entries.values())
def serve_forever(self, fork=False):
"""
Start handling requests. This method must be called and does not
return unless the :py:meth:`.shutdown` method is called from
another thread.
:param bool fork: Whether to fork or not before serving content.
:return: The child process's PID if *fork* is set to True.
:rtype: int
"""
if fork:
if not hasattr(os, 'fork'):
raise OSError('os.fork is not available')
child_pid = os.fork()
if child_pid != 0:
self.logger.info('forked child process: ' + str(child_pid))
return child_pid
self.__server_thread = threading.current_thread()
self.__wakeup_fd = WakeupFd()
self.__is_shutdown.clear()
self.__should_stop.clear()
self.__is_running.set()
while not self.__should_stop.is_set():
try:
self._serve_ready()
except socket.error:
self.logger.warning('encountered socket error, stopping server')
self.__should_stop.set()
self.__is_shutdown.set()
self.__is_running.clear()
return 0
def shutdown(self):
"""Shutdown the server and stop responding to requests."""
self.__should_stop.set()
if self.__server_thread == threading.current_thread():
self.__is_shutdown.set()
self.__is_running.clear()
else:
if self.__wakeup_fd is not None:
os.write(self.__wakeup_fd.write_fd, b'\x00')
self.__is_shutdown.wait()
if self.__wakeup_fd is not None:
self.__wakeup_fd.close()
self.__wakeup_fd = None
for server in self.sub_servers:
server.shutdown()
def auth_set(self, status):
"""
Enable or disable requiring authentication on all incoming requests.
:param bool status: Whether to enable or disable requiring authentication.
"""
if not bool(status):
self.__config['basic_auth'] = None
self.logger.info('basic authentication has been disabled')
else:
self.__config['basic_auth'] = {}
self.logger.info('basic authentication has been enabled')
def auth_delete_creds(self, username=None):
"""
Delete the credentials for a specific username if specified or all
stored credentials.
:param str username: The username of the credentials to delete.
"""
if not username:
self.__config['basic_auth'] = {}
self.logger.info('basic authentication database has been cleared of all entries')
return
del self.__config['basic_auth'][username]
def auth_add_creds(self, username, password, pwtype='plain'):
"""
Add a valid set of credentials to be accepted for authentication.
Calling this function will automatically enable requiring
authentication. Passwords can be provided in either plaintext or
as a hash by specifying the hash type in the *pwtype* argument.
:param str username: The username of the credentials to be added.
:param password: The password data of the credentials to be added.
:type password: bytes, str
:param str pwtype: The type of the *password* data, (plain, md5, sha1, etc.).
"""
if not isinstance(password, (bytes, str)):
raise TypeError("auth_add_creds() argument 2 must be bytes or str, not {0}".format(type(password).__name__))
pwtype = pwtype.lower()
if pwtype not in ('plain', 'md5', 'sha1', 'sha256', 'sha384', 'sha512'):
raise ValueError('invalid password type, must be \'plain\', or supported by hashlib')
if self.__config.get('basic_auth') is None:
self.__config['basic_auth'] = {}
self.logger.info('basic authentication has been enabled')
if pwtype != 'plain':
algorithms_available = getattr(hashlib, 'algorithms_available', ()) or getattr(hashlib, 'algorithms', ())
if pwtype not in algorithms_available:
raise ValueError('hashlib does not support the desired algorithm')
# only md5 and sha1 hex for backwards compatibility
if pwtype == 'md5' and len(password) == 32:
password = binascii.unhexlify(password)
elif pwtype == 'sha1' and len(password) == 40:
password = binascii.unhexlify(password)
if not isinstance(password, bytes):
password = password.encode('UTF-8')
if len(hashlib.new(pwtype, b'foobar').digest()) != len(password):
raise ValueError('the length of the password hash does not match the type specified')
self.__config['basic_auth'][username] = {'value': password, 'type': pwtype}
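For example, to store a SHA-256 credential, pass the raw digest bytes: only md5 and sha1 hex strings are auto-converted above, so a sha256 hex string would fail the length check. The server name below is a hypothetical instance of this class:
import hashlib

digest = hashlib.new('sha256', 'p@ssw0rd'.encode('UTF-8')).digest()
print(len(digest))  # 32 bytes, matching the digest-length check above
# server.auth_add_creds('alice', digest, pwtype='sha256')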
@contextlib.contextmanager
def setattr_context(obj, **kwargs):
"""
Context manager to temporarily change the values of object attributes
while executing a function.
Example
-------
>>> class Foo: pass
>>> f = Foo(); f.attr = 'hello'
>>> with setattr_context(f, attr='goodbye'):
... print(f.attr)
goodbye
>>> print(f.attr)
hello
"""
old_kwargs = {key: getattr(obj, key) for key in kwargs}
for key, val in kwargs.items():
setattr(obj, key, val)
try:
yield
finally:
for key, val in old_kwargs.items():
setattr(obj, key, val)
def validate_inputs(*arrays, **kwargs):
"""Validate input arrays
This checks that
- Arrays are mutually broadcastable
- Broadcasted arrays are one-dimensional
Optionally, arrays are sorted according to the ``sort_by`` argument.
Parameters
----------
*args : ndarrays
All non-keyword arguments are arrays which will be validated
sort_by : array
If specified, sort all inputs by the order given in this array.
"""
arrays = np.broadcast_arrays(*arrays)
sort_by = kwargs.pop('sort_by', None)
if kwargs:
raise ValueError("unrecognized arguments: {0}".format(kwargs.keys()))
if arrays[0].ndim != 1:
raise ValueError("Input arrays should be one-dimensional.")
if sort_by is not None:
isort = np.argsort(sort_by)
if isort.shape != arrays[0].shape:
raise ValueError("sort shape must equal array shape.")
arrays = tuple([a[isort] for a in arrays])
return arrays
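A quick usage sketch of validate_inputs() with a scalar error term and out-of-order times:
import numpy as np

t = np.array([3.0, 1.0, 2.0])
y = np.array([30.0, 10.0, 20.0])
t2, y2, dy2 = validate_inputs(t, y, 1.0, sort_by=t)
print(t2, y2, dy2)  # [1. 2. 3.] [10. 20. 30.] [1. 1. 1.]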
def _prep_smooth(t, y, dy, span, t_out, span_out, period):
"""Private function to prepare & check variables for smooth utilities"""
# If period is provided, sort by phases. Otherwise sort by t
if period:
t = t % period
if t_out is not None:
t_out = t_out % period
t, y, dy = validate_inputs(t, y, dy, sort_by=t)
if span_out is not None:
if t_out is None:
raise ValueError("Must specify t_out when span_out is given")
if span is not None:
raise ValueError("Must specify only one of span, span_out")
span, t_out = np.broadcast_arrays(span_out, t_out)
indices = np.searchsorted(t, t_out)
elif span is None:
raise ValueError("Must specify either span_out or span")
else:
indices = None
return t, y, dy, span, t_out, span_out, indices
def moving_average_smooth(t, y, dy, span=None, cv=True,
t_out=None, span_out=None, period=None):
"""Perform a moving-average smooth of the data
Parameters
----------
t, y, dy : array_like
time, value, and error in value of the input data
span : array_like
the integer spans of the data
cv : boolean (default=True)
if True, treat the problem as a cross-validation, i.e. don't use
each point in the evaluation of its own smoothing.
t_out : array_like (optional)
the output times for the moving averages
span_out : array_like (optional)
the spans associated with the output times t_out
period : float
if provided, then consider the inputs periodic with the given period
Returns
-------
y_smooth : array_like
smoothed y values at each time t (or t_out)
"""
prep = _prep_smooth(t, y, dy, span, t_out, span_out, period)
t, y, dy, span, t_out, span_out, indices = prep
w = 1. / (dy ** 2)
w, yw = windowed_sum([w, y * w], t=t, span=span, subtract_mid=cv,
indices=indices, period=period)
if t_out is None or span_out is not None:
return yw / w
else:
i = np.minimum(len(t) - 1, np.searchsorted(t, t_out))
return yw[i] / w[i]
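A short usage sketch; this relies on windowed_sum(), which is defined later in this collection:
import numpy as np

rng = np.random.RandomState(0)
t = np.sort(rng.uniform(0, 10, 100))
y = np.sin(t) + 0.1 * rng.randn(100)
dy = 0.1 * np.ones_like(y)
y_smooth = moving_average_smooth(t, y, dy, span=11)
print(y_smooth.shape)  # (100,)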
def linear_smooth(t, y, dy, span=None, cv=True,
t_out=None, span_out=None, period=None):
"""Perform a linear smooth of the data
Parameters
----------
t, y, dy : array_like
time, value, and error in value of the input data
span : array_like
the integer spans of the data
cv : boolean (default=True)
if True, treat the problem as a cross-validation, i.e. don't use
each point in the evaluation of its own smoothing.
t_out : array_like (optional)
the output times for the moving averages
span_out : array_like (optional)
the spans associated with the output times t_out
period : float
if provided, then consider the inputs periodic with the given period
Returns
-------
y_smooth : array_like
smoothed y values at each time t or t_out
"""
t_input = t
prep = _prep_smooth(t, y, dy, span, t_out, span_out, period)
t, y, dy, span, t_out, span_out, indices = prep
if period:
t_input = np.asarray(t_input) % period
w = 1. / (dy ** 2)
w, yw, tw, tyw, ttw = windowed_sum([w, y * w, w, y * w, w], t=t,
tpowers=[0, 0, 1, 1, 2],
span=span, indices=indices,
subtract_mid=cv, period=period)
denominator = (w * ttw - tw * tw)
slope = (tyw * w - tw * yw)
intercept = (ttw * yw - tyw * tw)
if np.any(denominator == 0):
raise ValueError("Zero denominator in linear smooth. This usually "
"indicates that the input contains duplicate points.")
if t_out is None:
return (slope * t_input + intercept) / denominator
elif span_out is not None:
return (slope * t_out + intercept) / denominator
else:
i = np.minimum(len(t) - 1, np.searchsorted(t, t_out))
return (slope[i] * t_out + intercept[i]) / denominator[i]
def multinterp(x, y, xquery, slow=False):
"""Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query.
"""
x, y, xquery = map(np.asarray, (x, y, xquery))
assert x.ndim == 1
assert xquery.ndim == 1
assert y.shape == x.shape + xquery.shape
# make sure xmin < xquery < xmax in all cases
xquery = np.clip(xquery, x.min(), x.max())
if slow:
from scipy.interpolate import interp1d
return np.array([interp1d(x, y)(xq) for xq, y in zip(xquery, y.T)])
elif len(x) == 3:
# Most common case: use a faster approach
yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1])
return np.where(xquery < x[1], yq_lower, yq_upper)
else:
i = np.clip(np.searchsorted(x, xquery, side='right') - 1,
0, len(x) - 2)
j = np.arange(len(xquery))
return y[i, j] + ((xquery - x[i]) *
(y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i]))
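For example, interpolating a different column of y at each query point:
import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([[0.0, 10.0],
              [1.0, 20.0],
              [4.0, 40.0]])
xquery = np.array([0.5, 1.5])
print(multinterp(x, y, xquery))  # [ 0.5 30. ]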
def _create_session(self, test_connection=False):
"""
Create a consulate.session object, and query for its leader to ensure
that the connection is made.
:param test_connection: call .leader() to ensure that the connection
is valid
:type test_connection: bool
:return consulate.Session instance
"""
session = consulate.Session(host=self.host, port=self.port)
if test_connection:
session.status.leader()
return session
def apply_remote_config(self, namespace=None):
"""
Applies all config values defined in consul's kv store to self.app.
There is no guarantee that these values will not be overwritten later
elsewhere.
:param namespace: kv namespace/directory. Defaults to
DEFAULT_KV_NAMESPACE
:return: None
"""
if namespace is None:
namespace = "config/{service}/{environment}/".format(
service=os.environ.get('SERVICE', 'generic_service'),
environment=os.environ.get('ENVIRONMENT', 'generic_environment')
)
for k, v in iteritems(self.session.kv.find(namespace)):
k = k.replace(namespace, '')
try:
self.app.config[k] = json.loads(v)
except (TypeError, ValueError):
self.app.logger.warning("Couldn't de-serialize {} to json, using raw value".format(v))
self.app.config[k] = v
msg = "Set {k}={v} from consul kv '{ns}'".format(
k=k,
v=v,
ns=namespace,
)
self.app.logger.debug(msg)
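The key stripping and JSON fallback above can be exercised without a consul server; a self-contained sketch with fake kv data:
import json

namespace = 'config/my_service/production/'
fake_kv = {
    namespace + 'DEBUG': 'false',           # JSON -> bool False
    namespace + 'WORKERS': '4',             # JSON -> int 4
    namespace + 'GREETING': 'hello world',  # not JSON -> raw string
}

config = {}
for k, v in fake_kv.items():
    k = k.replace(namespace, '')
    try:
        config[k] = json.loads(v)
    except (TypeError, ValueError):
        config[k] = v
print(config)  # {'DEBUG': False, 'WORKERS': 4, 'GREETING': 'hello world'}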
def register_service(self, **kwargs):
"""
register this service with consul
kwargs passed to Consul.agent.service.register
"""
kwargs.setdefault('name', self.app.name)
self.session.agent.service.register(**kwargs)
def _resolve(self):
"""
Query the consul DNS server for the service IP and port
"""
endpoints = {}
r = self.resolver.query(self.service, 'SRV')
for rec in r.response.additional:
name = rec.name.to_text()
addr = rec.items[0].address
endpoints[name] = {'addr': addr}
for rec in r.response.answer[0].items:
name = '.'.join(rec.target.labels)
endpoints[name]['port'] = rec.port
return [
'http://{ip}:{port}'.format(
ip=v['addr'], port=v['port']
) for v in endpoints.values()
]
def request(self, method, endpoint, **kwargs):
"""
Proxy to requests.request
:param method: str formatted http method
:param endpoint: service endpoint
:param kwargs: kwargs passed directly to requests.request
:return:
"""
kwargs.setdefault('timeout', (1, 30))
return self.session.request(
method,
urljoin(self.base_url, endpoint),
**kwargs
)
def with_retry_connections(max_tries=3, sleep=0.05):
"""
Decorator that wraps an entire function in a try/except clause. On
requests.exceptions.ConnectionError, will re-run the function code
until success or max_tries is reached.
:param max_tries: maximum number of attempts before giving up
:param sleep: time to sleep between tries, or None
"""
def decorator(f):
@functools.wraps(f)
def f_retry(*args, **kwargs):
tries = 0
while True:
try:
return f(*args, **kwargs)
except (ConnectionError, ConnectTimeout) as e:
tries += 1
if tries >= max_tries:
raise ConsulConnectionError(e)
if sleep:
time.sleep(sleep)
return f_retry
return decorator
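Typical usage decorates any function whose body performs an HTTP call to consul, assuming the module's imports (requests' ConnectionError and ConnectTimeout) are in scope; a sketch:
@with_retry_connections(max_tries=3, sleep=0.05)
def fetch_leader(session):
    # Retried up to three times on connection errors.
    return session.status.leader()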
def crop(gens, seconds=5, cropper=None):
'''
Crop the generator to a finite number of frames
Return a generator which outputs the provided generator limited
to enough samples to produce seconds seconds of audio (default 5s)
at the provided frame rate.
'''
if hasattr(gens, "next"):
# single generator
gens = (gens,)
if cropper is None:
cropper = lambda gen: itertools.islice(gen, 0, seconds * sampler.FRAME_RATE)
cropped = [cropper(gen) for gen in gens]
return cropped[0] if len(cropped) == 1 else cropped
def crop_at_zero_crossing(gen, seconds=5, error=0.1):
'''
Crop the generator, ending at a zero-crossing
Crop the generator to produce approximately seconds seconds
(default 5s) of audio at the provided FRAME_RATE, attempting
to end the clip at a zero crossing point to avoid clicking.
'''
source = iter(gen)
buffer_length = int(2 * error * sampler.FRAME_RATE)
# split the source into two iterators:
# - start, which contains the bulk of the sound clip
# - and end, which contains the final 100ms, plus 100ms past
# the desired clip length. We may cut the clip anywhere
# within this +/-100ms end buffer.
start = itertools.islice(source, 0, int((seconds - error) * sampler.FRAME_RATE))
end = itertools.islice(source, 0, buffer_length)
for sample in start:
yield sample
# pull end buffer generator into memory so we can work with it
end = list(end)
# find min by sorting buffer samples, first by abs of sample, then by distance from optimal
best = sorted(enumerate(end), key=lambda x: (math.fabs(x[1]),abs((buffer_length/2)-x[0])))
# todo: better logic when we don't have a perfect zero crossing
#if best[0][1] != 0:
# # we don't have a perfect zero crossing, so let's look for best fit?
# pass
# crop samples at index of best zero crossing
for sample in end[:best[0][0] + 1]:
yield sample
def volume(gen, dB=0):
'''Change the volume of gen by dB decibels'''
if not hasattr(dB, 'next'):
# not a generator
scale = 10 ** (dB / 20.)
else:
def scale_gen():
while True:
yield 10 ** (next(dB) / 20.)
scale = scale_gen()
return envelope(gen, scale)
def mixer(inputs, mix=None):
'''
Mix `inputs` together based on `mix` tuple
`inputs` should be a tuple of *n* generators.
`mix` should be a tuple of *m* tuples, one per desired
output channel. Each of the *m* tuples should contain
*n* generators, corresponding to the time-sequence of
the desired mix levels for each of the *n* input channels.
That is, to make an output channel contain a 50/50 mix of the
two input channels, the tuple would be:
(constant(0.5), constant(0.5))
The mix generators need not be constant, allowing for time-varying
mix levels:
# 50% from input 1, pulse input 2 over a two second cycle
(constant(0.5), tone(0.5))
The mixer will return a list of *m* generators, each containing
the data from the inputs mixed as specified.
If no `mix` tuple is specified, all of the *n* input channels
will be mixed together into one generator, with the volume of
each reduced *n*-fold.
Example:
# three in, two out;
# 10Hz binaural beat with white noise across both channels
mixer(
(white_noise(), tone(440), tone(450)),
(
(constant(.5), constant(1), constant(0)),
(constant(.5), constant(0), constant(1)),
)
)
'''
if mix is None:
# by default, mix all inputs down to one channel
mix = ([constant(1.0 / len(inputs))] * len(inputs),)
duped_inputs = zip(*[itertools.tee(i, len(mix)) for i in inputs])
# second zip is backwards
return [sum(*[multiply(m, i) for m, i in zip(channel_mix, channel_inputs)])
for channel_mix, channel_inputs in zip(mix, duped_inputs)]
def channelize(gen, channels):
'''
Break multi-channel generator into one sub-generator per channel
Takes a generator producing n-tuples of samples and returns n generators,
each producing samples for a single channel.
Since multi-channel generators are the only reasonable way to synchronize samples
across channels, and the sampler functions only take tuples of generators,
you must use this function to process synchronized streams for output.
'''
def pick(g, channel):
for samples in g:
yield samples[channel]
return [pick(gen_copy, channel) for channel, gen_copy in enumerate(itertools.tee(gen, channels))]
def file_is_seekable(f):
'''
Returns True if file `f` is seekable, and False if not
Useful to determine, for example, if `f` is STDOUT to
a pipe.
'''
try:
f.tell()
logger.info("File is seekable!")
except IOError as e:
if e.errno == errno.ESPIPE:
return False
else:
raise
return True
def sample(generator, min=-1, max=1, width=SAMPLE_WIDTH):
'''Convert audio waveform generator into packed sample generator.'''
# select unsigned char, signed short, or signed int format based on sample width
fmt = {1: '<B', 2: '<h', 4: '<i'}[width]
return (struct.pack(fmt, int(sample)) for sample in
normalize(hard_clip(generator, min, max),
min, max, -2 ** (width * 8 - 1), 2 ** (width * 8 - 1) - 1))
def sample_all(generators, *args, **kwargs):
'''Convert list of audio waveform generators into list of packed sample generators.'''
return [sample(gen, *args, **kwargs) for gen in generators]
def buffer(stream, buffer_size=BUFFER_SIZE):
'''
Buffer the generator into byte strings of buffer_size samples
Return a generator that outputs reasonably sized byte strings
containing buffer_size samples from the generator stream.
This allows us to output big chunks of the audio stream to
disk at once for faster writes.
'''
i = iter(stream)
return iter(lambda: "".join(itertools.islice(i, buffer_size)), "")
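The two-argument iter(callable, sentinel) form above keeps calling the lambda until it returns the empty-string sentinel, i.e. until the sample stream is exhausted. For example:
import itertools

stream = iter("abcdefgh")
chunks = iter(lambda: "".join(itertools.islice(stream, 3)), "")
print(list(chunks))  # ['abc', 'def', 'gh']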
def wave_module_patched():
'''True if wave module can write data size of 0xFFFFFFFF, False otherwise.'''
f = StringIO()
w = wave.open(f, "wb")
w.setparams((1, 2, 44100, 0, "NONE", "no compression"))
patched = True
try:
w.setnframes((0xFFFFFFFF - 36) / w.getnchannels() / w.getsampwidth())
w._ensure_header_written(0)
except struct.error:
patched = False
logger.info("Error setting wave data size to 0xFFFFFFFF; wave module unpatched, setting sata size to 0x7FFFFFFF")
w.setnframes((0x7FFFFFFF - 36) / w.getnchannels() / w.getsampwidth())
w._ensure_header_written(0)
return patched
def cache_finite_samples(f):
'''Decorator to cache audio samples produced by the wrapped generator.'''
cache = {}
def wrap(*args):
key = FRAME_RATE, args
if key not in cache:
cache[key] = [sample for sample in f(*args)]
return (sample for sample in cache[key])
return wrap
def play(channels, blocking=True, raw_samples=False):
'''
Play the contents of the generator using PyAudio
Play to the system soundcard using PyAudio. PyAudio, an otherwise optional
dependency, must be installed for this feature to work.
'''
if not pyaudio_loaded:
raise Exception("Soundcard playback requires PyAudio. Install with `pip install pyaudio`.")
channel_count = 1 if hasattr(channels, "next") else len(channels)
wavgen = wav_samples(channels, raw_samples=raw_samples)
p = pyaudio.PyAudio()
stream = p.open(
format=p.get_format_from_width(SAMPLE_WIDTH),
channels=channel_count,
rate=FRAME_RATE,
output=True,
stream_callback=_pyaudio_callback(wavgen) if not blocking else None
)
if blocking:
try:
for chunk in buffer(wavgen, 1024):
stream.write(chunk)
except Exception:
raise
finally:
if not stream.is_stopped():
stream.stop_stream()
try:
stream.close()
except Exception:
pass
else:
return stream
def windowed_sum_slow(arrays, span, t=None, indices=None, tpowers=0,
period=None, subtract_mid=False):
"""Compute the windowed sum of the given arrays.
This is a slow function, used primarily for testing and validation
of the faster version of ``windowed_sum()``
Parameters
----------
arrays : tuple of arrays
arrays to window
span : int or array of ints
The span to use for the sum at each point. If array is provided,
it must be broadcastable with ``indices``
indices : array
the indices of the center of the desired windows. If ``None``,
the indices are assumed to be ``range(len(arrays[0]))`` though
these are not actually instantiated.
t : array (optional)
Times associated with the arrays
tpowers : list (optional)
Powers of t for each array sum
period : float (optional)
Period to use, if times are periodic. If supplied, input times
must be arranged such that (t % period) is sorted!
subtract_mid : boolean
If true, then subtract the middle value from each sum
Returns
-------
arrays : tuple of ndarrays
arrays containing the windowed sum of each input array
"""
span = np.asarray(span, dtype=int)
if not np.all(span > 0):
raise ValueError("span values must be positive")
arrays = tuple(map(np.asarray, arrays))
N = arrays[0].size
if not all(a.shape == (N,) for a in arrays):
raise ValueError("sizes of provided arrays must match")
t_input = t
if t is not None:
t = np.asarray(t)
if t.shape != (N,):
raise ValueError("shape of t must match shape of arrays")
else:
t = np.ones(N)
tpowers = tpowers + np.zeros(len(arrays))
if len(tpowers) != len(arrays):
raise ValueError("tpowers must be broadcastable with number of arrays")
if period:
if t_input is None:
raise ValueError("periodic requires t to be provided")
t = t % period
if indices is None:
indices = np.arange(N)
spans, indices = np.broadcast_arrays(span, indices)
results = []
for tpower, array in zip(tpowers, arrays):
if period:
result = [sum(array[j % N]
* (t[j % N] + (j // N) * period) ** tpower
for j in range(i - s // 2,
i - s // 2 + s)
if not (subtract_mid and j == i))
for i, s in np.broadcast(indices, spans)]
else:
result = [sum(array[j] * t[j] ** tpower
for j in range(max(0, i - s // 2),
min(N, i - s // 2 + s))
if not (subtract_mid and j == i))
for i, s in np.broadcast(indices, spans)]
results.append(np.asarray(result))
return tuple(results)
def windowed_sum(arrays, span, t=None, indices=None, tpowers=0,
period=None, subtract_mid=False):
"""Compute the windowed sum of the given arrays.
Parameters
----------
arrays : tuple of arrays
arrays to window
span : int or array of ints
The span to use for the sum at each point. If array is provided,
it must be broadcastable with ``indices``
indices : array
the indices of the center of the desired windows. If ``None``,
the indices are assumed to be ``range(len(arrays[0]))`` though
these are not actually instantiated.
t : array (optional)
Times associated with the arrays
tpowers : list (optional)
Powers of t for each array sum
period : float (optional)
Period to use, if times are periodic. If supplied, input times
must be arranged such that (t % period) is sorted!
subtract_mid : boolean
If true, then subtract the middle value from each sum
Returns
-------
arrays : tuple of ndarrays
arrays containing the windowed sum of each input array
"""
span = np.asarray(span, dtype=int)
if not np.all(span > 0):
raise ValueError("span values must be positive")
arrays = tuple(map(np.asarray, arrays))
N = arrays[0].size
if not all(a.shape == (N,) for a in arrays):
raise ValueError("sizes of provided arrays must match")
t_input = t
if t is not None:
t = np.asarray(t)
if t.shape != (N,):
raise ValueError("shape of t must match shape of arrays "
"t -> {0} arr -> {1}".format(t.shape,
arrays[0].shape))
else:
# XXX: special-case no t?
t = np.ones(N)
tpowers = np.asarray(tpowers) + np.zeros(len(arrays))
if indices is not None:
span, indices = np.broadcast_arrays(span, indices)
# For the periodic case, re-call the function with padded arrays
if period:
if t_input is None:
raise ValueError("periodic requires t to be provided")
t = t % period
t, arrays, sl = _pad_arrays(t, arrays, indices, span, period)
if len(t) > N:
# arrays are padded. Recursively call windowed_sum() and return.
if span.ndim == 0 and indices is None:
# fixed-span/no index case is done faster this way
arrs = windowed_sum(arrays, span, t=t, indices=indices,
tpowers=tpowers, period=None,
subtract_mid=subtract_mid)
return tuple([a[sl] for a in arrs])
else:
# this works for variable span and general indices
if indices is None:
indices = np.arange(N)
indices = indices + sl.start
return windowed_sum(arrays, span, t=t, indices=indices,
tpowers=tpowers, period=None,
subtract_mid=subtract_mid)
else:
# No padding needed! We can carry-on as if it's a non-periodic case
period = None
# The rest of the algorithm now proceeds without reference to the period
# just as a sanity check...
assert not period
if span.ndim == 0:
# fixed-span case. Because of the checks & manipulations above
# we know here that indices=None
assert indices is None
window = np.ones(span)
def convolve_same(a, window):
if len(window) <= len(a):
res = np.convolve(a, window, mode='same')
else:
res = np.convolve(a, window, mode='full')
start = (len(window) - 1) // 2
res = res[start:start + len(a)]
return res
results = [convolve_same(a * t ** tp, window)
for a, tp in zip(arrays, tpowers)]
indices = slice(None)
else:
# variable-span case. Use reduceat() in a clever way for speed.
if indices is None:
indices = np.arange(len(span))
# we checked this above, but just as a sanity check assert it here...
assert span.shape == indices.shape
mins = np.asarray(indices) - span // 2
results = []
for a, tp in zip(arrays, tpowers):
ranges = np.vstack([np.maximum(0, mins),
np.minimum(len(a), mins+span)]).ravel('F')
results.append(np.add.reduceat(np.append(a * t ** tp, 0),
ranges)[::2])
# Subtract the midpoint if required: this is used in cross-validation
if subtract_mid:
results = [r - a[indices] * t[indices] ** tp
for r, a, tp in zip(results, arrays, tpowers)]
return tuple(results) |
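A minimal usage sketch of the fixed-span branch (assuming defaults of t=None, indices=None, tpowers=0 and subtract_mid=False, which the body above is consistent with): a scalar span reduces to a centered moving sum, truncated at the edges.
import numpy as np
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
(sums,) = windowed_sum((y,), span=3)
# Centered window of 3 points, truncated at the edges:
# [1+2, 1+2+3, 2+3+4, 3+4+5, 4+5]
print(sums)  # -> [ 3.  6.  9. 12.  9.]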
Internal routine to pad arrays for periodic models.
def _pad_arrays(t, arrays, indices, span, period):
"""Internal routine to pad arrays for periodic models."""
N = len(t)
if indices is None:
indices = np.arange(N)
pad_left = max(0, 0 - np.min(indices - span // 2))
pad_right = max(0, np.max(indices + span - span // 2) - (N - 1))
if pad_left + pad_right > 0:
Nright, pad_right = divmod(pad_right, N)
Nleft, pad_left = divmod(pad_left, N)
t = np.concatenate([t[N - pad_left:] - (Nleft + 1) * period]
+ [t + i * period
for i in range(-Nleft, Nright + 1)]
+ [t[:pad_right] + (Nright + 1) * period])
arrays = [np.concatenate([a[N - pad_left:]]
+ (Nleft + Nright + 1) * [a]
+ [a[:pad_right]])
for a in arrays]
# After the divmod calls above these values are already reduced modulo N;
# use integer division so Nright stays an int under Python 3.
pad_left = pad_left % N
Nright = pad_right // N
pad_right = pad_right % N
return (t, arrays, slice(pad_left + Nleft * N,
pad_left + (Nleft + 1) * N))
else:
return (t, arrays, slice(None)) |
Search all the available I2C devices in the system
def get_i2c_bus_numbers(glober = glob.glob):
"""Search all the available I2C devices in the system"""
res = []
for device in glober("/dev/i2c-*"):
r = re.match(r"/dev/i2c-([\d]{1,2})", device)
res.append(int(r.group(1)))
return res |
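A small sanity check (hypothetical device names, no hardware needed): the glober parameter exists so the filesystem scan can be stubbed out, and with the capture group wrapping the quantifier, two-digit bus numbers survive intact.
def fake_glob(pattern):
    return ["/dev/i2c-0", "/dev/i2c-11"]  # hypothetical device nodes
print(get_i2c_bus_numbers(glober=fake_glob))  # -> [0, 11]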
Parse the name for the LED number
:param name: attribute name, like: led_1
def get_led_register_from_name(self, name):
"""Parse the name for led number
:param name: attribute name, like: led_1
"""
res = re.match('^led_([0-9]{1,2})$', name)
if res is None:
raise AttributeError("Unknown attribute: '%s'" % name)
led_num = int(res.group(1))
if led_num < 0 or led_num > 15:
raise AttributeError("Unknown attribute: '%s'" % name)
return self.calc_led_register(led_num) |
Set PWM value for the specified LED
:param led_num: LED number (0-15)
:param value: the 12 bit value (0-4095)
def set_pwm(self, led_num, value):
"""Set PWM value for the specified LED
:param led_num: LED number (0-15)
:param value: the 12 bit value (0-4095)
"""
self.__check_range('led_number', led_num)
self.__check_range('led_value', value)
register_low = self.calc_led_register(led_num)
self.write(register_low, value_low(value))
self.write(register_low + 1, value_high(value)) |
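The helpers value_low and value_high are not shown in this excerpt; a plausible sketch for a 12-bit low/high register pair (an assumption, not the confirmed implementation) splits the value into an 8-bit low byte and a 4-bit high byte.
def value_low(value):
    return value & 0xFF  # assumed: lower 8 bits of the 12-bit value
def value_high(value):
    return (value >> 8) & 0x0F  # assumed: upper 4 bits of the 12-bit value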
Generic getter for all LED PWM values
def get_pwm(self, led_num):
"""Generic getter for all LED PWM value"""
self.__check_range('led_number', led_num)
register_low = self.calc_led_register(led_num)
return self.__get_led_value(register_low) |
Send the controller to sleep
def sleep(self):
"""Send the controller to sleep"""
logger.debug("Sleep the controller")
self.write(Registers.MODE_1, self.mode_1 | (1 << Mode1.SLEEP)) |
Write raw byte value to the specified register
:param reg: the register number (0-69, 250-255)
:param value: byte value
def write(self, reg, value):
"""Write raw byte value to the specified register
:param reg: the register number (0-69, 250-255)
:param value: byte value
"""
# TODO: check reg: 0-69, 250-255
self.__check_range('register_value', value)
logger.debug("Write '%s' to register '%s'" % (value, reg))
self.__bus.write_byte_data(self.__address, reg, value) |
Set the frequency for all PWM output
:param value: the frequency in Hz
def set_pwm_frequency(self, value):
"""Set the frequency for all PWM output
:param value: the frequency in Hz
"""
self.__check_range('pwm_frequency', value)
reg_val = self.calc_pre_scale(value)
logger.debug("Calculated prescale value is %s" % reg_val)
self.sleep()
self.write(Registers.PRE_SCALE, reg_val)
self.wake() |
Calculates the normalized Levenshtein distance between two string
arguments. The result will be a float in the range [0.0, 1.0], with 1.0
signifying the biggest possible distance between strings with these lengths
def levenshtein_norm(source, target):
"""Calculates the normalized Levenshtein distance between two string
arguments. The result will be a float in the range [0.0, 1.0], with 1.0
signifying the biggest possible distance between strings with these lengths
"""
# Compute Levenshtein distance using helper function. The max is always
# just the length of the longer string, so this is used to normalize result
# before returning it
distance = _levenshtein_compute(source, target, False)
return float(distance) / max(len(source), len(target)) |
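A worked example: transforming "kitten" into "sitting" takes three edits, and the longer string has length 7, so the normalized distance is 3/7.
print(levenshtein_norm("kitten", "sitting"))  # -> 0.42857...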
Check if the color provided by the user is valid.
If color is invalid the default is returned.
def check_valid_color(color):
"""Check if the color provided by the user is valid.
If color is invalid the default is returned.
"""
if color in list(mcolors.CSS4_COLORS.keys()) + ["#4CB391"]:
logging.info("Nanoplotter: Valid color {}.".format(color))
return color
else:
logging.info("Nanoplotter: Invalid color {}, using default.".format(color))
sys.stderr.write("Invalid color {}, using default.\n".format(color))
return "#4CB391" |
Check if the specified figure format is valid.
If format is invalid the default is returned.
Probably installation-dependent
def check_valid_format(figformat):
"""Check if the specified figure format is valid.
If format is invalid the default is returned.
Probably installation-dependent
"""
fig = plt.figure()
supported = list(fig.canvas.get_supported_filetypes().keys())
plt.close(fig)  # avoid leaking the probe figure
if figformat in supported:
logging.info("Nanoplotter: valid output format {}".format(figformat))
return figformat
else:
logging.info("Nanoplotter: invalid output format {}".format(figformat))
sys.stderr.write("Invalid format {}, using default.\n".format(figformat))
return "png" |
Create bivariate plots.
Create four types of bivariate plots of x vs y, containing marginal summaries
-A scatter plot with histograms on axes
-A hexagonal binned plot with histograms on axes
-A kernel density plot with density curves on axes
-A pauvre-style plot using code from https://github.com/conchoecia/pauvre
def scatter(x, y, names, path, plots, color="#4CB391", figformat="png",
stat=None, log=False, minvalx=0, minvaly=0, title=None, plot_settings=None):
"""Create bivariate plots.
Create four types of bivariate plots of x vs y, containing marginal summaries
-A scatter plot with histograms on axes
-A hexagonal binned plot with histograms on axes
-A kernel density plot with density curves on axes
-A pauvre-style plot using code from https://github.com/conchoecia/pauvre
"""
logging.info("Nanoplotter: Creating {} vs {} plots using statistics from {} reads.".format(
names[0], names[1], x.size))
if not contains_variance([x, y], names):
return []
sns.set(style="ticks", **plot_settings)
maxvalx = np.amax(x)
maxvaly = np.amax(y)
plots_made = []
if plots["hex"]:
hex_plot = Plot(
path=path + "_hex." + figformat,
title="{} vs {} plot using hexagonal bins".format(names[0], names[1]))
plot = sns.jointplot(
x=x,
y=y,
kind="hex",
color=color,
stat_func=stat,
space=0,
xlim=(minvalx, maxvalx),
ylim=(minvaly, maxvaly),
height=10)
plot.set_axis_labels(names[0], names[1])
if log:
hex_plot.title = hex_plot.title + " after log transformation of read lengths"
ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
plot.ax_joint.set_xticks(np.log10(ticks))
plot.ax_marg_x.set_xticks(np.log10(ticks))
plot.ax_joint.set_xticklabels(ticks)
plt.subplots_adjust(top=0.90)
plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
hex_plot.fig = plot
hex_plot.save(format=figformat)
plots_made.append(hex_plot)
sns.set(style="darkgrid", **plot_settings)
if plots["dot"]:
dot_plot = Plot(
path=path + "_dot." + figformat,
title="{} vs {} plot using dots".format(names[0], names[1]))
plot = sns.jointplot(
x=x,
y=y,
kind="scatter",
color=color,
stat_func=stat,
xlim=(minvalx, maxvalx),
ylim=(minvaly, maxvaly),
space=0,
height=10,
joint_kws={"s": 1})
plot.set_axis_labels(names[0], names[1])
if log:
dot_plot.title = dot_plot.title + " after log transformation of read lengths"
ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
plot.ax_joint.set_xticks(np.log10(ticks))
plot.ax_marg_x.set_xticks(np.log10(ticks))
plot.ax_joint.set_xticklabels(ticks)
plt.subplots_adjust(top=0.90)
plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
dot_plot.fig = plot
dot_plot.save(format=figformat)
plots_made.append(dot_plot)
if plots["kde"]:
idx = np.random.choice(x.index, min(2000, len(x)), replace=False)
kde_plot = Plot(
path=path + "_kde." + figformat,
title="{} vs {} plot using a kernel density estimation".format(names[0], names[1]))
plot = sns.jointplot(
x=x[idx],
y=y[idx],
kind="kde",
clip=((0, np.inf), (0, np.inf)),
xlim=(minvalx, maxvalx),
ylim=(minvaly, maxvaly),
space=0,
color=color,
stat_func=stat,
shade_lowest=False,
height=10)
plot.set_axis_labels(names[0], names[1])
if log:
kde_plot.title = kde_plot.title + " after log transformation of read lengths"
ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
plot.ax_joint.set_xticks(np.log10(ticks))
plot.ax_marg_x.set_xticks(np.log10(ticks))
plot.ax_joint.set_xticklabels(ticks)
plt.subplots_adjust(top=0.90)
plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
kde_plot.fig = plot
kde_plot.save(format=figformat)
plots_made.append(kde_plot)
if plots["pauvre"] and names == ['Read lengths', 'Average read quality'] and log is False:
pauvre_plot = Plot(
path=path + "_pauvre." + figformat,
title="{} vs {} plot using pauvre-style @conchoecia".format(names[0], names[1]))
sns.set(style="white", **plot_settings)
margin_plot(df=pd.DataFrame({"length": x, "meanQual": y}),
Y_AXES=False,
title=title or "Length vs Quality in Pauvre-style",
plot_maxlen=None,
plot_minlen=0,
plot_maxqual=None,
plot_minqual=0,
lengthbin=None,
qualbin=None,
BASENAME="whatever",
path=pauvre_plot.path,
fileform=[figformat],
dpi=600,
TRANSPARENT=True,
QUIET=True)
plots_made.append(pauvre_plot)
plt.close("all")
return plots_made |
Make sure both arrays for bivariate ("scatter") plot have a stddev > 0
def contains_variance(arrays, names):
"""
Make sure both arrays for bivariate ("scatter") plot have a stddev > 0
"""
for ar, name in zip(arrays, names):
if np.std(ar) == 0:
sys.stderr.write(
"No variation in '{}', skipping bivariate plots.\n".format(name.lower()))
logging.info("Nanoplotter: No variation in {}, skipping bivariate plot".format(name))
return False
else:
return True |
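A quick check (illustrative values): a constant array has zero standard deviation, so the bivariate plots are skipped.
import numpy as np
print(contains_variance([np.ones(3), np.array([1, 2, 3])],
                        ["Read lengths", "Average read quality"]))  # -> False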
Create histogram of normal and log transformed read lengths.
def length_plots(array, name, path, title=None, n50=None, color="#4CB391", figformat="png"):
"""Create histogram of normal and log transformed read lengths."""
logging.info("Nanoplotter: Creating length plots for {}.".format(name))
maxvalx = np.amax(array)
if n50:
logging.info("Nanoplotter: Using {} reads with read length N50 of {}bp and maximum of {}bp."
.format(array.size, n50, maxvalx))
else:
logging.info("Nanoplotter: Using {} reads maximum of {}bp.".format(array.size, maxvalx))
plots = []
HistType = namedtuple('HistType', 'weight name ylabel')
for h_type in [HistType(None, "", "Number of reads"),
HistType(array, "Weighted ", "Number of bases")]:
histogram = Plot(
path=path + h_type.name.replace(" ", "_") + "Histogram"
+ name.replace(' ', '') + "." + figformat,
title=h_type.name + "Histogram of read lengths")
ax = sns.distplot(
a=array,
kde=False,
hist=True,
bins=max(round(int(maxvalx) / 500), 10),
color=color,
hist_kws=dict(weights=h_type.weight,
edgecolor=color,
linewidth=0.2,
alpha=0.8))
if n50:
plt.axvline(n50)
plt.annotate('N50', xy=(n50, np.amax([h.get_height() for h in ax.patches])), size=8)
ax.set(
xlabel='Read length',
ylabel=h_type.ylabel,
title=title or histogram.title)
plt.ticklabel_format(style='plain', axis='y')
histogram.fig = ax.get_figure()
histogram.save(format=figformat)
plt.close("all")
log_histogram = Plot(
path=path + h_type.name.replace(" ", "_") + "LogTransformed_Histogram"
+ name.replace(' ', '') + "." + figformat,
title=h_type.name + "Histogram of read lengths after log transformation")
ax = sns.distplot(
a=np.log10(array),
kde=False,
hist=True,
color=color,
hist_kws=dict(weights=h_type.weight,
edgecolor=color,
linewidth=0.2,
alpha=0.8))
ticks = [10**i for i in range(10) if not 10**i > 10 * maxvalx]
ax.set(
xticks=np.log10(ticks),
xticklabels=ticks,
xlabel='Read length',
ylabel=h_type.ylabel,
title=title or log_histogram.title)
if n50:
plt.axvline(np.log10(n50))
plt.annotate('N50', xy=(np.log10(n50), np.amax(
[h.get_height() for h in ax.patches])), size=8)
plt.ticklabel_format(style='plain', axis='y')
log_histogram.fig = ax.get_figure()
log_histogram.save(format=figformat)
plt.close("all")
plots.extend([histogram, log_histogram])
plots.append(yield_by_minimal_length_plot(array=array,
name=name,
path=path,
title=title,
color=color,
figformat=figformat))
return plots |
Make the physical layout of the MinION flowcell.
based on https://bioinformatics.stackexchange.com/a/749/681;
returned as a numpy array.
def make_layout(maxval):
"""Make the physical layout of the MinION flowcell.
based on https://bioinformatics.stackexchange.com/a/749/681;
returned as a numpy array.
"""
if maxval > 512:
return Layout(
structure=np.concatenate([np.array([list(range(10 * i + 1, i * 10 + 11))
for i in range(25)]) + j
for j in range(0, 3000, 250)],
axis=1),
template=np.zeros((25, 120)),
xticks=range(1, 121),
yticks=range(1, 26))
else:
layoutlist = []
for i, j in zip(
[33, 481, 417, 353, 289, 225, 161, 97],
[8, 456, 392, 328, 264, 200, 136, 72]):
for n in range(4):
layoutlist.append(list(range(i + n * 8, (i + n * 8) + 8, 1)) +
list(range(j + n * 8, (j + n * 8) - 8, -1)))
return Layout(
structure=np.array(layoutlist).transpose(),
template=np.zeros((16, 32)),
xticks=range(1, 33),
yticks=range(1, 17)) |
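A shape check (Layout is a namedtuple defined elsewhere in the module): channel numbers up to 512 select the 16x32 MinION grid, while larger values select the 25x120 grid.
print(make_layout(maxval=512).structure.shape)   # -> (16, 32)
print(make_layout(maxval=3000).structure.shape)  # -> (25, 120)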
Taking channel information and creating post-run channel activity plots.
def spatial_heatmap(array, path, title=None, color="Greens", figformat="png"):
"""Taking channel information and creating post run channel activity plots."""
logging.info("Nanoplotter: Creating heatmap of reads per channel using {} reads."
.format(array.size))
activity_map = Plot(
path=path + "." + figformat,
title="Number of reads generated per channel")
layout = make_layout(maxval=np.amax(array))
valueCounts = pd.Series(array).value_counts()
for entry in valueCounts.keys():
layout.template[np.where(layout.structure == entry)] = valueCounts[entry]
plt.figure()
ax = sns.heatmap(
data=pd.DataFrame(layout.template, index=layout.yticks, columns=layout.xticks),
xticklabels="auto",
yticklabels="auto",
square=True,
cbar_kws={"orientation": "horizontal"},
cmap=color,
linewidths=0.20)
ax.set_title(title or activity_map.title)
activity_map.fig = ax.get_figure()
activity_map.save(format=figformat)
plt.close("all")
return [activity_map] |
Generate CSV files from a CronosPro/CronosPlus database.
def main(database_dir, target_dir):
"""Generate CSV files from a CronosPro/CronosPlus database."""
if not os.path.isdir(database_dir):
raise click.ClickException("Database directory does not exist!")
try:
os.makedirs(target_dir)
except OSError:
# The directory may already exist.
pass
try:
parse(database_dir, target_dir)
except CronosException as ex:
raise click.ClickException(ex.message) |
Check if the data contains reads created within the same `days` timeframe.
If not, print a warning and return only the part of the data that is within `days` days.
The index is reset twice to also get an "index" column for plotting the cum_yield_reads plot.
def check_valid_time_and_sort(df, timescol, days=5, warning=True):
"""Check if the data contains reads created within the same `days` timeframe.
If not, print a warning and return only the part of the data that is within `days` days.
The index is reset twice to also get an "index" column for plotting the cum_yield_reads plot.
"""
timediff = (df[timescol].max() - df[timescol].min()).days
if timediff < days:
return df.sort_values(timescol).reset_index(drop=True).reset_index()
else:
if warning:
sys.stderr.write(
"\nWarning: data generated is from more than {} days.\n".format(str(days)))
sys.stderr.write("Likely this indicates you are combining multiple runs.\n")
sys.stderr.write(
"Plots based on time are invalid and therefore truncated to first {} days.\n\n"
.format(str(days)))
logging.warning("Time plots truncated to first {} days: invalid timespan: {} days"
.format(str(days), str(timediff)))
return df[df[timescol] < timedelta(days=days)] \
.sort_values(timescol) \
.reset_index(drop=True) \
.reset_index() |
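A minimal sketch of the happy path (illustrative data; the function compares the column against a timedelta, so start_time is assumed to hold timedeltas since the run start): the double reset_index yields the extra "index" column.
import pandas as pd
from datetime import timedelta
df = pd.DataFrame({"start_time": [timedelta(hours=2), timedelta(hours=1)]})
out = check_valid_time_and_sort(df, "start_time")
print(out.columns.tolist())  # -> ['index', 'start_time']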
Making plots of time vs read length, time vs quality and cumulative yield.
def time_plots(df, path, title=None, color="#4CB391", figformat="png",
log_length=False, plot_settings=None):
"""Making plots of time vs read length, time vs quality and cumulative yield."""
dfs = check_valid_time_and_sort(df, "start_time")
logging.info("Nanoplotter: Creating timeplots using {} reads.".format(len(dfs)))
cumyields = cumulative_yield(dfs=dfs.set_index("start_time"),
path=path,
figformat=figformat,
title=title,
color=color)
reads_pores_over_time = plot_over_time(dfs=dfs.set_index("start_time"),
path=path,
figformat=figformat,
title=title,
color=color)
violins = violin_plots_over_time(dfs=dfs,
path=path,
figformat=figformat,
title=title,
log_length=log_length,
plot_settings=plot_settings)
return cumyields + reads_pores_over_time + violins |
Create a violin or boxplot from the received DataFrame.
The x-axis should be divided based on the 'dataset' column,
the y-axis is specified in the arguments
def violin_or_box_plot(df, y, figformat, path, y_name,
title=None, plot="violin", log=False, palette=None):
"""Create a violin or boxplot from the received DataFrame.
The x-axis should be divided based on the 'dataset' column,
the y-axis is specified in the arguments
"""
comp = Plot(path=path + "NanoComp_" + y.replace(' ', '_') + '.' + figformat,
title="Comparing {}".format(y))
if y == "quals":
comp.title = "Comparing base call quality scores"
if plot == 'violin':
logging.info("Nanoplotter: Creating violin plot for {}.".format(y))
process_violin_and_box(ax=sns.violinplot(x="dataset",
y=y,
data=df,
inner=None,
cut=0,
palette=palette,
linewidth=0),
log=log,
plot_obj=comp,
title=title,
y_name=y_name,
figformat=figformat,
ymax=np.amax(df[y]))
elif plot == 'box':
logging.info("Nanoplotter: Creating box plot for {}.".format(y))
process_violin_and_box(ax=sns.boxplot(x="dataset",
y=y,
data=df,
palette=palette),
log=log,
plot_obj=comp,
title=title,
y_name=y_name,
figformat=figformat,
ymax=np.amax(df[y]))
elif plot == 'ridge':
logging.info("Nanoplotter: Creating ridges plot for {}.".format(y))
comp.fig, axes = joypy.joyplot(df,
by="dataset",
column=y,
title=title or comp.title,
x_range=[-0.05, np.amax(df[y])])
if log:
xticks = [float(i.get_text()) for i in axes[-1].get_xticklabels()]
axes[-1].set_xticklabels([10**i for i in xticks])
axes[-1].set_xticklabels(axes[-1].get_xticklabels(), rotation=30, ha='center')
comp.save(format=figformat)
else:
logging.error("Unknown comp plot type {}".format(plot))
sys.exit("Unknown comp plot type {}".format(plot))
plt.close("all")
return [comp] |
Create barplots based on number of reads and total sum of nucleotides sequenced.
def output_barplot(df, figformat, path, title=None, palette=None):
"""Create barplots based on number of reads and total sum of nucleotides sequenced."""
logging.info("Nanoplotter: Creating barplots for number of reads and total throughput.")
read_count = Plot(path=path + "NanoComp_number_of_reads." + figformat,
title="Comparing number of reads")
ax = sns.countplot(x="dataset",
data=df,
palette=palette)
ax.set(ylabel='Number of reads',
title=title or read_count.title)
plt.xticks(rotation=30, ha='center')
read_count.fig = ax.get_figure()
read_count.save(format=figformat)
plt.close("all")
throughput_bases = Plot(path=path + "NanoComp_total_throughput." + figformat,
title="Comparing throughput in gigabases")
if "aligned_lengths" in df:
throughput = df.groupby('dataset')['aligned_lengths'].sum()
ylabel = 'Total gigabase aligned'
else:
throughput = df.groupby('dataset')['lengths'].sum()
ylabel = 'Total gigabase sequenced'
ax = sns.barplot(x=list(throughput.index),
y=throughput / 1e9,
palette=palette,
order=df["dataset"].unique())
ax.set(ylabel=ylabel,
title=title or throughput_bases.title)
plt.xticks(rotation=30, ha='center')
throughput_bases.fig = ax.get_figure()
throughput_bases.save(format=figformat)
plt.close("all")
return read_count, throughput_bases |
Use plotly to create an overlay of length histograms
Return html code, but also save as png
Only has 10 colors, which get recycled up to 5 times.
def overlay_histogram(df, path, palette=None):
"""
Use plotly to create an overlay of length histograms
Return html code, but also save as png
Only has 10 colors, which get recycled up to 5 times.
"""
if palette is None:
palette = plotly.colors.DEFAULT_PLOTLY_COLORS * 5
hist = Plot(path=path + "NanoComp_OverlayHistogram.html",
title="Histogram of read lengths")
hist.html, hist.fig = plot_overlay_histogram(df, palette, title=hist.title)
hist.save()
hist_norm = Plot(path=path + "NanoComp_OverlayHistogram_Normalized.html",
title="Normalized histogram of read lengths")
hist_norm.html, hist_norm.fig = plot_overlay_histogram(
df, palette, title=hist_norm.title, histnorm="probability")
hist_norm.save()
log_hist = Plot(path=path + "NanoComp_OverlayLogHistogram.html",
title="Histogram of log transformed read lengths")
log_hist.html, log_hist.fig = plot_log_histogram(df, palette, title=log_hist.title)
log_hist.save()
log_hist_norm = Plot(path=path + "NanoComp_OverlayLogHistogram_Normalized.html",
title="Normalized histogram of log transformed read lengths")
log_hist_norm.html, log_hist_norm.fig = plot_log_histogram(
df, palette, title=log_hist_norm.title, histnorm="probability")
log_hist_norm.save()
return [hist, hist_norm, log_hist, log_hist_norm] |
Plot overlaying histograms with log transformation of length
Return both html and fig for png
def plot_log_histogram(df, palette, title, histnorm=""):
"""
Plot overlaying histograms with log transformation of length
Return both html and fig for png
"""
data = [go.Histogram(x=np.log10(df.loc[df["dataset"] == d, "lengths"]),
opacity=0.4,
name=d,
histnorm=histnorm,
marker=dict(color=c))
for d, c in zip(df["dataset"].unique(), palette)]
xtickvals = [10**i for i in range(10) if not 10**i > 10 * np.amax(df["lengths"])]
html = plotly.offline.plot(
{"data": data,
"layout": go.Layout(barmode='overlay',
title=title,
xaxis=dict(tickvals=np.log10(xtickvals),
ticktext=xtickvals))},
output_type="div",
show_link=False)
fig = go.Figure(
{"data": data,
"layout": go.Layout(barmode='overlay',
title=title,
xaxis=dict(tickvals=np.log10(xtickvals),
ticktext=xtickvals))})
return html, fig |
Glob for the poor.
def get_file(db_folder, file_name):
"""Glob for the poor."""
if not os.path.isdir(db_folder):
return
file_name = file_name.lower().strip()
for cand_name in os.listdir(db_folder):
if cand_name.lower().strip() == file_name:
return os.path.join(db_folder, cand_name) |
Parse a cronos database.
Convert the database located in ``db_folder`` into CSV files in the
directory ``out_folder``.
def parse(db_folder, out_folder):
"""
Parse a cronos database.
Convert the database located in ``db_folder`` into CSV files in the
directory ``out_folder``.
"""
# The database structure, containing table and column definitions as
# well as other data.
stru_dat = get_file(db_folder, 'CroStru.dat')
# Index file for the database, which contains offsets for each record.
data_tad = get_file(db_folder, 'CroBank.tad')
# Actual data records, can only be decoded using CroBank.tad.
data_dat = get_file(db_folder, 'CroBank.dat')
if None in [stru_dat, data_tad, data_dat]:
raise CronosException("Not all database files are present.")
meta, tables = parse_structure(stru_dat)
for table in tables:
# TODO: do we want to export the "FL" table?
if table['abbr'] == 'FL' and table['name'] == 'Files':
continue
fh = open(make_csv_file_name(meta, table, out_folder), 'w')
columns = table.get('columns')
writer = csv.writer(fh)
writer.writerow([encode_cell(c['name']) for c in columns])
for row in parse_data(data_tad, data_dat, table.get('id'), columns):
writer.writerow([encode_cell(c) for c in row])
fh.close() |
Return the base64 encoding of the figure file and insert in html image tag.
def encode1(self):
"""Return the base64 encoding of the figure file and insert in html image tag."""
with open(self.path, 'rb') as fig_file:
data_uri = b64encode(fig_file.read()).decode('utf-8').replace('\n', '')
return '<img src="data:image/png;base64,{0}">'.format(data_uri) |
Return the base64 encoding of the fig attribute and insert in html image tag.
def encode2(self):
"""Return the base64 encoding of the fig attribute and insert in html image tag."""
buf = BytesIO()
self.fig.savefig(buf, format='png', bbox_inches='tight', dpi=100)
buf.seek(0)
string = b64encode(buf.read())
return '<img src="data:image/png;base64,{0}">'.format(urlquote(string)) |
Calculates the normalized restricted Damerau-Levenshtein distance
(a.k.a. the normalized optimal string alignment distance) between two
string arguments. The result will be a float in the range [0.0, 1.0], with
1.0 signifying the maximum distance between strings with these lengths
def rdlevenshtein_norm(source, target):
"""Calculates the normalized restricted Damerau-Levenshtein distance
(a.k.a. the normalized optimal string alignment distance) between two
string arguments. The result will be a float in the range [0.0, 1.0], with
1.0 signifying the maximum distance between strings with these lengths
"""
# Compute restricted Damerau-Levenshtein distance using helper function.
# The max is always just the length of the longer string, so this is used
# to normalize result before returning it
distance = _levenshtein_compute(source, target, True)
return float(distance) / max(len(source), len(target)) |
Computes the Levenshtein
(https://en.wikipedia.org/wiki/Levenshtein_distance)
and restricted Damerau-Levenshtein
(https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)
distances between two Unicode strings with given lengths using the
Wagner-Fischer algorithm
(https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm).
These distances are defined recursively, since the distance between two
strings is just the cost of adjusting the last one or two characters plus
the distance between the prefixes that exclude these characters (e.g. the
distance between "tester" and "tested" is 1 + the distance between "teste"
and "teste"). The Wagner-Fischer algorithm retains this idea but eliminates
redundant computations by storing the distances between various prefixes in
a matrix that is filled in iteratively.
def _levenshtein_compute(source, target, rd_flag):
"""Computes the Levenshtein
(https://en.wikipedia.org/wiki/Levenshtein_distance)
and restricted Damerau-Levenshtein
(https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)
distances between two Unicode strings with given lengths using the
Wagner-Fischer algorithm
(https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm).
These distances are defined recursively, since the distance between two
strings is just the cost of adjusting the last one or two characters plus
the distance between the prefixes that exclude these characters (e.g. the
distance between "tester" and "tested" is 1 + the distance between "teste"
and "teste"). The Wagner-Fischer algorithm retains this idea but eliminates
redundant computations by storing the distances between various prefixes in
a matrix that is filled in iteratively.
"""
# Create matrix of correct size (this is s_len + 1 * t_len + 1 so that the
# empty prefixes "" can also be included). The leftmost column represents
# transforming various source prefixes into an empty string, which can
# always be done by deleting all characters in the respective prefix, and
# the top row represents transforming the empty string into various target
# prefixes, which can always be done by inserting every character in the
# respective prefix. The ternary used to build the list should ensure that
# this row and column are now filled correctly
s_range = range(len(source) + 1)
t_range = range(len(target) + 1)
matrix = [[(i if j == 0 else j) for j in t_range] for i in s_range]
# Iterate through rest of matrix, filling it in with Levenshtein
# distances for the remaining prefix combinations
for i in s_range[1:]:
for j in t_range[1:]:
# Applies the recursive logic outlined above using the values
# stored in the matrix so far. The options for the last pair of
# characters are deletion, insertion, and substitution, which
# amount to dropping the source character, the target character,
# or both and then calculating the distance for the resulting
# prefix combo. If the characters at this point are the same, the
# situation can be thought of as a free substitution
del_dist = matrix[i - 1][j] + 1
ins_dist = matrix[i][j - 1] + 1
sub_trans_cost = 0 if source[i - 1] == target[j - 1] else 1
sub_dist = matrix[i - 1][j - 1] + sub_trans_cost
# Choose option that produces smallest distance
matrix[i][j] = min(del_dist, ins_dist, sub_dist)
# If restricted Damerau-Levenshtein was requested via the flag,
# then there may be a fourth option: transposing the current and
# previous characters in the source string. This can be thought of
# as a double substitution and has a similar free case, where the
# current and preceding character in both strings is the same
if rd_flag and i > 1 and j > 1 and source[i - 1] == target[j - 2] \
and source[i - 2] == target[j - 1]:
trans_dist = matrix[i - 2][j - 2] + sub_trans_cost
matrix[i][j] = min(matrix[i][j], trans_dist)
# At this point, the matrix is full, and the biggest prefixes are just the
# strings themselves, so this is the desired distance
return matrix[len(source)][len(target)] |
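A two-line illustration of the rd_flag: swapping adjacent characters costs two plain Levenshtein edits but only one restricted Damerau-Levenshtein transposition.
print(_levenshtein_compute("abcd", "acbd", False))  # -> 2 (two substitutions)
print(_levenshtein_compute("abcd", "acbd", True))   # -> 1 (one transposition)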
The main entry point.
def main():
"""The main entry point."""
if sys.version_info < (2, 7):
sys.exit('crispy requires at least Python 2.7')
elif sys.version_info[0] == 3 and sys.version_info < (3, 4):
sys.exit('crispy requires at least Python 3.4')
kwargs = dict(
name='crispy',
version=get_version(),
description='Core-Level Spectroscopy Simulations in Python',
long_description=get_readme(),
license='MIT',
author='Marius Retegan',
author_email='marius.retegan@esrf.eu',
url='https://github.com/mretegan/crispy',
download_url='https://github.com/mretegan/crispy/releases',
keywords='gui, spectroscopy, simulation, synchrotron, science',
install_requires=get_requirements(),
platforms=[
'MacOS :: MacOS X',
'Microsoft :: Windows',
'POSIX :: Linux',
],
packages=[
'crispy',
'crispy.gui',
'crispy.gui.uis',
'crispy.gui.icons',
'crispy.modules',
'crispy.modules.quanty',
'crispy.modules.orca',
'crispy.utils',
],
package_data={
'crispy.gui.uis': [
'*.ui',
'quanty/*.ui',
],
'crispy.gui.icons': [
'*.svg',
],
'crispy.modules.quanty': [
'parameters/*.json.gz',
'templates/*.lua',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Visualization',
]
)
# At the moment pip/setuptools doesn't play nice with shebang paths
# containing white spaces.
# See: https://github.com/pypa/pip/issues/2783
# https://github.com/xonsh/xonsh/issues/879
# The most straightforward workaround is to have a .bat script to run
# crispy on Windows.
if 'win32' in sys.platform:
kwargs['scripts'] = ['scripts/crispy.bat']
else:
kwargs['scripts'] = ['scripts/crispy']
setup(**kwargs) |
Read the spectra from the files generated by Quanty and store them
as a list of spectrum objects.
def loadFromDisk(self, calculation):
"""
Read the spectra from the files generated by Quanty and store them
as a list of spectrum objects.
"""
suffixes = {
'Isotropic': 'iso',
'Circular Dichroism (R-L)': 'cd',
'Right Polarized (R)': 'r',
'Left Polarized (L)': 'l',
'Linear Dichroism (V-H)': 'ld',
'Vertical Polarized (V)': 'v',
'Horizontal Polarized (H)': 'h',
}
self.raw = list()
for spectrumName in self.toPlot:
suffix = suffixes[spectrumName]
path = '{}_{}.spec'.format(calculation.baseName, suffix)
data = np.loadtxt(path, skiprows=5)
rows, columns = data.shape
if calculation.experiment in ['XAS', 'XPS', 'XES']:
xMin = calculation.xMin
xMax = calculation.xMax
xNPoints = calculation.xNPoints
if calculation.experiment == 'XES':
x = np.linspace(xMin, xMax, xNPoints + 1)
x = x[::-1]
y = data[:, 2]
y = y / np.abs(y.max())
else:
x = np.linspace(xMin, xMax, xNPoints + 1)
y = data[:, 2::2].flatten()
spectrum = Spectrum1D(x, y)
spectrum.name = spectrumName
if len(suffix) > 2:
spectrum.shortName = suffix.title()
else:
spectrum.shortName = suffix.upper()
if calculation.experiment in ['XAS', ]:
spectrum.xLabel = 'Absorption Energy (eV)'
elif calculation.experiment in ['XPS', ]:
spectrum.xLabel = 'Binding Energy (eV)'
elif calculation.experiment in ['XES', ]:
spectrum.xLabel = 'Emission Energy (eV)'
spectrum.yLabel = 'Intensity (a.u.)'
self.broadenings = {'gaussian': (calculation.xGaussian, ), }
else:
xMin = calculation.xMin
xMax = calculation.xMax
xNPoints = calculation.xNPoints
yMin = calculation.yMin
yMax = calculation.yMax
yNPoints = calculation.yNPoints
x = np.linspace(xMin, xMax, xNPoints + 1)
y = np.linspace(yMin, yMax, yNPoints + 1)
z = data[:, 2::2]
spectrum = Spectrum2D(x, y, z)
spectrum.name = spectrumName
if len(suffix) > 2:
spectrum.shortName = suffix.title()
else:
spectrum.shortName = suffix.upper()
spectrum.xLabel = 'Incident Energy (eV)'
spectrum.yLabel = 'Energy Transfer (eV)'
self.broadenings = {'gaussian': (calculation.xGaussian,
calculation.yGaussian), }
self.raw.append(spectrum)
# Process the spectra once they were read from disk.
self.process() |
Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangement.
The models are recreated every time the function is called.
This might seem to be overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; it is not
worth the effort, and there is nothing to gain from it.
def populateWidget(self):
"""
Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangement.
The models are recreated every time the function is called.
This might seem to be overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; it is not
worth the effort, and there is nothing to gain from it.
"""
self.elementComboBox.setItems(self.state._elements, self.state.element)
self.chargeComboBox.setItems(self.state._charges, self.state.charge)
self.symmetryComboBox.setItems(
self.state._symmetries, self.state.symmetry)
self.experimentComboBox.setItems(
self.state._experiments, self.state.experiment)
self.edgeComboBox.setItems(self.state._edges, self.state.edge)
self.temperatureLineEdit.setValue(self.state.temperature)
self.magneticFieldLineEdit.setValue(self.state.magneticField)
self.axesTabWidget.setTabText(0, str(self.state.xLabel))
self.xMinLineEdit.setValue(self.state.xMin)
self.xMaxLineEdit.setValue(self.state.xMax)
self.xNPointsLineEdit.setValue(self.state.xNPoints)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
self.xGaussianLineEdit.setValue(self.state.xGaussian)
self.k1LineEdit.setVector(self.state.k1)
self.eps11LineEdit.setVector(self.state.eps11)
self.eps12LineEdit.setVector(self.state.eps12)
if self.state.experiment in ['RIXS', ]:
if self.axesTabWidget.count() == 1:
tab = self.axesTabWidget.findChild(QWidget, 'yTab')
self.axesTabWidget.addTab(tab, tab.objectName())
self.axesTabWidget.setTabText(1, self.state.yLabel)
self.yMinLineEdit.setValue(self.state.yMin)
self.yMaxLineEdit.setValue(self.state.yMax)
self.yNPointsLineEdit.setValue(self.state.yNPoints)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
self.yGaussianLineEdit.setValue(self.state.yGaussian)
self.k2LineEdit.setVector(self.state.k2)
self.eps21LineEdit.setVector(self.state.eps21)
self.eps22LineEdit.setVector(self.state.eps22)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>σ', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>π', text)
self.eps12Label.setText(text)
else:
self.axesTabWidget.removeTab(1)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>v', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>h', text)
self.eps12Label.setText(text)
# Create the spectra selection model.
self.spectraModel = SpectraModel(parent=self)
self.spectraModel.setModelData(
self.state.spectra.toCalculate,
self.state.spectra.toCalculateChecked)
self.spectraModel.checkStateChanged.connect(
self.updateSpectraCheckState)
self.spectraListView.setModel(self.spectraModel)
self.spectraListView.selectionModel().setCurrentIndex(
self.spectraModel.index(0, 0), QItemSelectionModel.Select)
self.fkLineEdit.setValue(self.state.fk)
self.gkLineEdit.setValue(self.state.gk)
self.zetaLineEdit.setValue(self.state.zeta)
# Create the Hamiltonian model.
self.hamiltonianModel = HamiltonianModel(parent=self)
self.hamiltonianModel.setModelData(self.state.hamiltonianData)
self.hamiltonianModel.setNodesCheckState(self.state.hamiltonianState)
if self.syncParametersCheckBox.isChecked():
self.hamiltonianModel.setSyncState(True)
else:
self.hamiltonianModel.setSyncState(False)
self.hamiltonianModel.dataChanged.connect(self.updateHamiltonianData)
self.hamiltonianModel.itemCheckStateChanged.connect(
self.updateHamiltonianNodeCheckState)
# Assign the Hamiltonian model to the Hamiltonian terms view.
self.hamiltonianTermsView.setModel(self.hamiltonianModel)
self.hamiltonianTermsView.selectionModel().setCurrentIndex(
self.hamiltonianModel.index(0, 0), QItemSelectionModel.Select)
self.hamiltonianTermsView.selectionModel().selectionChanged.connect(
self.selectedHamiltonianTermChanged)
# Assign the Hamiltonian model to the Hamiltonian parameters view.
self.hamiltonianParametersView.setModel(self.hamiltonianModel)
self.hamiltonianParametersView.expandAll()
self.hamiltonianParametersView.resizeAllColumnsToContents()
self.hamiltonianParametersView.setColumnWidth(0, 130)
self.hamiltonianParametersView.setRootIndex(
self.hamiltonianTermsView.currentIndex())
self.nPsisLineEdit.setValue(self.state.nPsis)
self.nPsisAutoCheckBox.setChecked(self.state.nPsisAuto)
self.nConfigurationsLineEdit.setValue(self.state.nConfigurations)
self.nConfigurationsLineEdit.setEnabled(False)
name = '{}-Ligands Hybridization'.format(self.state.block)
for termName in self.state.hamiltonianData:
if name in termName:
termState = self.state.hamiltonianState[termName]
if termState == 0:
continue
else:
self.nConfigurationsLineEdit.setEnabled(True)
if not hasattr(self, 'resultsModel'):
# Create the results model.
self.resultsModel = ResultsModel(parent=self)
self.resultsModel.itemNameChanged.connect(
self.updateCalculationName)
self.resultsModel.itemCheckStateChanged.connect(
self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updateResultsView)
# Assign the results model to the results view.
self.resultsView.setModel(self.resultsModel)
self.resultsView.selectionModel().selectionChanged.connect(
self.selectedResultsChanged)
self.resultsView.resizeColumnsToContents()
self.resultsView.horizontalHeader().setSectionsMovable(False)
self.resultsView.horizontalHeader().setSectionsClickable(False)
if sys.platform == 'darwin':
self.resultsView.horizontalHeader().setMaximumHeight(17)
# Add a context menu to the view.
self.resultsView.setContextMenuPolicy(Qt.CustomContextMenu)
self.resultsView.customContextMenuRequested[QPoint].connect(
self.showResultsContextMenu)
if not hasattr(self, 'resultDetailsDialog'):
self.resultDetailsDialog = QuantyResultDetailsDialog(parent=self)
self.updateMainWindowTitle(self.state.baseName) |
Update the selection to contain only the result specified by
the index. This should be the last index of the model. Finally, update
the context menu.
The selectionChanged signal is used to trigger the update of
the Quanty dock widget and result details dialog.
:param index: Index of the last item of the model.
:type index: QModelIndex
def updateResultsView(self, index):
"""
Update the selection to contain only the result specified by
the index. This should be the last index of the model. Finally, update
the context menu.
The selectionChanged signal is used to trigger the update of
the Quanty dock widget and result details dialog.
:param index: Index of the last item of the model.
:type index: QModelIndex
"""
flags = (QItemSelectionModel.Clear | QItemSelectionModel.Rows |
QItemSelectionModel.Select)
self.resultsView.selectionModel().select(index, flags)
self.resultsView.resizeColumnsToContents()
self.resultsView.setFocus() |
Updating the plotting widget should not require any information
about the current state of the widget.
def updatePlotWidget(self):
"""Updating the plotting widget should not require any information
about the current state of the widget."""
pw = self.getPlotWidget()
pw.reset()
results = self.resultsModel.getCheckedItems()
for result in results:
if isinstance(result, ExperimentalData):
spectrum = result.spectra['Expt']
spectrum.legend = '{}-{}'.format(result.index, 'Expt')
spectrum.xLabel = 'X'
spectrum.yLabel = 'Y'
spectrum.plot(plotWidget=pw)
else:
if len(results) > 1 and result.experiment in ['RIXS', ]:
continue
for spectrum in result.spectra.processed:
spectrum.legend = '{}-{}'.format(
result.index, spectrum.shortName)
if spectrum.name in result.spectra.toPlotChecked:
spectrum.plot(plotWidget=pw) |
Return the row of the child.
def row(self):
"""Return the row of the child."""
if self.parent is not None:
children = self.parent.getChildren()
# The index method of the list object.
return children.index(self)
else:
return 0 |
Return the index of the item in the model specified by the
given row, column, and parent index.
def index(self, row, column, parent=QModelIndex()):
"""Return the index of the item in the model specified by the
given row, column, and parent index.
"""
if parent is not None and not parent.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parent)
childItem = parentItem.child(row)
if childItem:
index = self.createIndex(row, column, childItem)
else:
index = QModelIndex()
return index |
Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex would avoid
confusion about what parent actually is - an index or an item.
def parent(self, index):
"""Return the index of the parent for a given index of the
child. Unfortunately, the name of the method has to be parent,
even though a more verbose name like parentIndex would avoid
confusion about what parent actually is - an index or an item.
"""
childItem = self.item(index)
parentItem = childItem.parent
if parentItem == self.rootItem:
parentIndex = QModelIndex()
else:
parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex |
Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses the item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
def rowCount(self, parentIndex):
"""Return the number of rows under the given parent. When the
parentIndex is valid, rowCount() returns the number of children
of the parent. For this it uses the item() method to extract the
parentItem from the parentIndex, and calls the childCount() of
the item to get number of children.
"""
if parentIndex.column() > 0:
return 0
if not parentIndex.isValid():
parentItem = self.rootItem
else:
parentItem = self.item(parentIndex)
return parentItem.childCount() |
Return role-specific data for the item referred to by
index.column().
def data(self, index, role):
"""Return role specific data for the item referred by
index.column()."""
if not index.isValid():
return
item = self.item(index)
column = index.column()
value = item.getItemData(column)
if role == Qt.DisplayRole:
try:
if column == 1:
# Display small values using scientific notation.
if abs(float(value)) < 1e-3 and float(value) != 0.0:
return '{0:8.1e}'.format(value)
else:
return '{0:8.3f}'.format(value)
else:
return '{0:8.2f}'.format(value)
except ValueError:
return value
elif role == Qt.EditRole:
try:
value = float(value)
if abs(value) < 1e-3 and value != 0.0:
return str('{0:8.1e}'.format(value))
else:
return str('{0:8.3f}'.format(value))
except ValueError:
return str(value)
elif role == Qt.CheckStateRole:
if item.parent == self.rootItem and column == 0:
return item.getCheckState()
elif role == Qt.TextAlignmentRole:
if column > 0:
return Qt.AlignRight |
Set the role data for the item at index to value.
def setData(self, index, value, role):
"""Set the role data for the item at index to value."""
if not index.isValid():
return False
item = self.item(index)
column = index.column()
if role == Qt.EditRole:
items = list()
items.append(item)
if self.sync:
parentIndex = self.parent(index)
# Iterate over the siblings of the parent index.
for sibling in self.siblings(parentIndex):
siblingNode = self.item(sibling)
for child in siblingNode.children:
if child.getItemData(0) == item.getItemData(0):
items.append(child)
for item in items:
columnData = str(item.getItemData(column))
if columnData and columnData != value:
try:
item.setItemData(column, float(value))
except ValueError:
return False
else:
return False
elif role == Qt.CheckStateRole:
item.setCheckState(value)
if value == Qt.Unchecked or value == Qt.Checked:
state = value
self.itemCheckStateChanged.emit(index, state)
self.dataChanged.emit(index, index)
return True |
Return the active flags for the given index. Add editable
flag to items other than the first column.
def flags(self, index):
"""Return the active flags for the given index. Add editable
flag to items other than the first column.
"""
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable)
item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
activeFlags = activeFlags | Qt.ItemIsEditable
return activeFlags |
Return the data contained in the model.
def _getModelData(self, modelData, parentItem=None):
"""Return the data contained in the model."""
if parentItem is None:
parentItem = self.rootItem
for item in parentItem.getChildren():
key = item.getItemData(0)
if item.childCount():
modelData[key] = odict()
self._getModelData(modelData[key], item)
else:
if isinstance(item.getItemData(2), float):
modelData[key] = [item.getItemData(1), item.getItemData(2)]
else:
modelData[key] = item.getItemData(1) |
Return the check state (disabled, tristate, enabled) of all items
belonging to a parent.
def getNodesCheckState(self, parentItem=None):
"""Return the check state (disabled, tristate, enable) of all items
belonging to a parent.
"""
if parentItem is None:
parentItem = self.rootItem
checkStates = odict()
children = parentItem.getChildren()
for child in children:
checkStates[child.itemData[0]] = child.getCheckState()
return checkStates |
Calculate the hexadecimal version number from the tuple version_info:
:param major: integer
:param minor: integer
:param micro: integer
:param releaselevel: integer or string
:param serial: integer
:return: integer, always increasing with revision numbers
def calc_hexversion(major=0, minor=0, micro=0, releaselevel='dev', serial=0):
"""Calculate the hexadecimal version number from the tuple version_info:
:param major: integer
:param minor: integer
:param micro: integer
:param releaselevel: integer or string
:param serial: integer
:return: integer, always increasing with revision numbers
"""
try:
releaselevel = int(releaselevel)
except ValueError:
releaselevel = RELEASE_LEVEL_VALUE.get(releaselevel, 0)
hex_version = int(serial)
hex_version |= releaselevel << 4
hex_version |= int(micro) << 8
hex_version |= int(minor) << 16
hex_version |= int(major) << 24
return hex_version |
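A worked example with a numeric release level (sidestepping the RELEASE_LEVEL_VALUE mapping, which is defined elsewhere): the nibbles pack as major.minor.micro.releaselevel.serial.
print(hex(calc_hexversion(1, 2, 3, 15, 4)))  # -> 0x10203f4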
Handle plot area customContextMenuRequested signal.
:param QPoint pos: Mouse position relative to plot area
def _contextMenu(self, pos):
"""Handle plot area customContextMenuRequested signal.
:param QPoint pos: Mouse position relative to plot area
"""
# Create the context menu.
menu = QMenu(self)
menu.addAction(self._zoomBackAction)
# Displaying the context menu at the mouse position requires
# a global position.
# The position received as argument is relative to PlotWidget's
# plot area, and thus needs to be converted.
plotArea = self.getWidgetHandle()
globalPosition = plotArea.mapToGlobal(pos)
menu.exec_(globalPosition) |
Convolve an array with a kernel using FFT.
Implementation based on the convolve_fft function from astropy.
https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py
def convolve_fft(array, kernel):
"""
Convolve an array with a kernel using FFT.
Implementation based on the convolve_fft function from astropy.
https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py
"""
array = np.asarray(array, dtype=complex)
kernel = np.asarray(kernel, dtype=complex)
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
array_shape = array.shape
kernel_shape = kernel.shape
new_shape = np.array(array_shape) + np.array(kernel_shape)
array_slices = []
kernel_slices = []
for (new_dimsize, array_dimsize, kernel_dimsize) in zip(
new_shape, array_shape, kernel_shape):
center = new_dimsize - (new_dimsize + 1) // 2
array_slices += [slice(center - array_dimsize // 2,
center + (array_dimsize + 1) // 2)]
kernel_slices += [slice(center - kernel_dimsize // 2,
center + (kernel_dimsize + 1) // 2)]
array_slices = tuple(array_slices)
kernel_slices = tuple(kernel_slices)
if not np.all(new_shape == array_shape):
big_array = np.zeros(new_shape, dtype=complex)
big_array[array_slices] = array
else:
big_array = array
if not np.all(new_shape == kernel_shape):
big_kernel = np.zeros(new_shape, dtype=complex)
big_kernel[kernel_slices] = kernel
else:
big_kernel = kernel
array_fft = np.fft.fftn(big_array)
kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))
rifft = np.fft.ifftn(array_fft * kernel_fft)
return rifft[array_slices].real |
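A quick identity check: convolving with a centered delta kernel should return the input, up to floating-point error.
import numpy as np
arr = np.array([1.0, 2.0, 3.0, 4.0])
kernel = np.array([0.0, 1.0, 0.0])  # delta at the kernel center
print(np.round(convolve_fft(arr, kernel), 6))  # -> [1. 2. 3. 4.]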
Diagonalize the tensor.
def diagonalize(self):
'''Diagonalize the tensor.'''
self.eigvals, self.eigvecs = np.linalg.eig(
(self.tensor.transpose() + self.tensor) / 2.0)
self.eigvals = np.diag(np.dot(
np.dot(self.eigvecs.transpose(), self.tensor), self.eigvecs)) |
Calculate the Euler angles only if the rotation matrix
(eigenframe) has positive determinant.
def euler_angles_and_eigenframes(self):
'''Calculate the Euler angles only if the rotation matrix
(eigenframe) has positive determinant.'''
signs = np.array([[1, 1, 1], [-1, 1, 1], [1, -1, 1],
[1, 1, -1], [-1, -1, 1], [-1, 1, -1],
[1, -1, -1], [-1, -1, -1]])
eulangs = []
eigframes = []
for i, sign in enumerate(signs):
eigframe = np.dot(self.eigvecs, np.diag(sign))
if np.linalg.det(eigframe) > 1e-4:
eigframes.append(np.array(eigframe))
eulangs.append(np.array(
transformations.euler_from_matrix(eigframe, axes='szyz')))
self.eigframes = np.array(eigframes)
# The sign has to be inverted to be consistent with ORCA and EasySpin.
self.eulangs = -np.array(eulangs) |
Skip a number of lines from the output.
def _skip_lines(self, n):
'''Skip a number of lines from the output.'''
for i in range(n):
self.line = next(self.output)
return self.line |