| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
from behave import *


@given(u'main page loaded')
def given_main_page_loaded(context):
    context.browser.visit('https://google.com/')


@when(u'type "{query_string}" and press enter')
def typed_and_pressed_enter(context, query_string):
    context.browser.fill('q', query_string)
    button = context.browser.find_by_name('btnG')
    button.click()


@then(u'results page should have "{link_text}" link')
def first_link_should_be(context, link_text):
    links = context.browser.find_link_by_text(link_text)
    assert links.is_empty() is False
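# A matching Gherkin feature file is not shown here; a minimal sketch
# (hypothetical scenario wording and query strings) that would exercise
# these steps could look like:
#
#   Feature: Google search
#     Scenario: Searching shows a relevant result link
#       Given main page loaded
#       When type "behave python bdd" and press enter
#       Then results page should have "behave" link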
| danielgimenes/bdd_python_tools_test | behave+splinter/steps/google_test.py | Python | mit | 545 | ["VisIt"] | 07fd2aab49766b0442522a1581a9d2f80607c15bd11517587473ec2709391da3 |
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import (absolute_import, division,
print_function, with_statement)
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
try:
from urllib.parse import urlparse # py3
except ImportError:
from urlparse import urlparse # py2
try:
xrange # py2
except NameError:
xrange = range # py3
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
Here is an example WebSocket handler that echoes all received
messages back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should
implement the `open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
tornado.web.RequestHandler.__init__(self, application, request,
**kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.ws_connection.accept_connection()
else:
if not self.stream.closed():
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 7, 8, 13\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
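# For example (sketch): a handler's on_message could reply with
# self.write_message({"echo": message}); the dict is JSON-encoded
# above before being sent as a text frame.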
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the memory and CPU usage of the compression,
but no such options are currently implemented.
.. versionadded:: 4.1
"""
return None
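# Sketch (hypothetical subclass name): returning an empty dict from this
# method is enough to enable permessage-deflate with default settings:
#
#     class CompressedEchoWebSocket(EchoWebSocket):
#         def get_compression_options(self):
#             return {}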
def open(self, *args, **kwargs):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self):
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
return WebSocketProtocol13(
self, compression_options=self.get_compression_options())
def _wrap_method(method):
def _disallow_for_websocket(self, *args, **kwargs):
if self.stream is None:
method(self, *args, **kwargs)
else:
raise RuntimeError("Method not supported for Web Sockets")
return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method,
_wrap_method(getattr(WebSocketHandler, method)))
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
On error, aborts the websocket connection and returns False.
"""
try:
callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(tornado.web.GZipContentEncoding.GZIP_LEVEL,
zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None):
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
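# For example, the sample key from RFC 6455 section 1.3,
# "dGhlIHNhbXBsZSBub25jZQ==", yields the accept value
# "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".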
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = ("Sec-WebSocket-Protocol: %s\r\n"
% selected)
extension_header = ''
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1])
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
httputil._encode_header(
'permessage-deflate', ext[1]))
break
if self.stream.closed():
self._abort()
return
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s%s"
"\r\n" % (self._challenge_response(),
subprotocol_header, extension_header)))
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
def _create_compressors(self, side, agreed_parameters):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
try:
return self.stream.write(frame)
except StreamClosedError:
self._abort()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length,
self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self.stream.read_bytes(self._frame_length,
self._on_masked_frame_data)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
self._handle_message(opcode, data)
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, on_message_callback=None,
compression_options=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = self.close_reason = None
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
initialization, this function will never return messages
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def get_websocket_protocol(self):
return WebSocketProtocol13(self, mask_outgoing=True,
compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request,
on_message_callback=on_message_callback,
compression_options=compression_options)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
| Batterfii/tornado | tornado/websocket.py | Python | apache-2.0 | 40,818 | ["VisIt"] | cbdeb2cfb635b14e80e18611f9861348ecb0f0ca78348bb1b96b3938e890d0ac |
#!/usr/bin/env python
""" Ping a list of services and show the result
"""
__RCSID__ = "$Id$"
import sys
from DIRAC import S_OK, S_ERROR, gLogger, exit
from DIRAC.Core.Base import Script
# Define a simple class to hold the script parameters
class Params:
def __init__( self ):
self.raw = False
self.pingsToDo = 1
def setRawResult( self, value ):
self.raw = True
return S_OK()
def setNumOfPingsToDo( self, value ):
try:
self.pingsToDo = max( 1, int( value ) )
except ValueError:
return S_ERROR( "Number of pings to do has to be a number" )
return S_OK()
# Instantiate the params class
cliParams = Params()
# Register accepted switches and their callbacks
Script.registerSwitch( "r", "showRaw", "show raw result from the query", cliParams.setRawResult )
Script.registerSwitch( "p:", "numPings=", "Number of pings to do (by default 1)", cliParams.setNumOfPingsToDo )
# Define a help message
Script.setUsageMessage( '\n'.join( [ __doc__,
'Usage:',
' %s [option|cfgfile] <system name to ping>+' % Script.scriptName,
' Specifying a system is mandatory' ] ) )
# Parse the command line and initialize DIRAC
Script.parseCommandLine( ignoreErrors = False )
# Get the list of services
servicesList = Script.getPositionalArgs()
# Check and process the command line switches and options
if len( servicesList ) == 0:
Script.showHelp()
exit(1)
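# Example invocation (hypothetical system/service names), following the usage
# message above:
#
#   dirac-ping-info -p 3 WorkloadManagement/JobManager DataManagement/FileCatalog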
| DIRACGrid/DIRACDocs | source/DeveloperGuide/AddingNewComponents/DevelopingCommands/dirac-ping-info.py | Python | gpl-3.0 | 1,504 | ["DIRAC"] | 4399ec893c84ca1d299d1b3c1e83b066975fd7fe492832cf490dc781fd89a25f |
#!/usr/bin/env python
##########################################################################
#
# Copyright (C) 2015-2018 Sam Westreich
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation;
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##########################################################################
#
# DIAMOND_analysis_counter.py
# Created 8/16/2016, this version created 1/30/2018
# Sam Westreich, stwestreich@ucdavis.edu, github.com/transcript
#
# This program parses through the results file from a DIAMOND annotation run
# (in BLAST m8 format) to get the results into something more compressed
# and readable.
#
# Usage:
#
# -I infile specifies the infile (a DIAMOND results file
# in m8 format)
# -D database specifies a reference database to search against
# for results
# -O organism returns organism results
# -F function returns functional results
# -SO specific org creates a separate outfile for results that hit
# a specific organism
# -E error file generates an error file for the database used
#
##########################################################################
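# Example invocation (hypothetical file names), combining the flags documented
# above to produce organism counts plus a database error log:
#
#   python DIAMOND_analysis_counter.py -I sample.m8 -D RefSeq_db.fa -O -E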
# imports
import operator, sys, time, gzip, re
# String searching function:
def string_find(usage_term):
for idx, elem in enumerate(sys.argv):
this_elem = elem
next_elem = sys.argv[(idx + 1) % len(sys.argv)]
if elem == usage_term:
return next_elem
t0 = time.time()
# checking for an option (organism or function) to be specified
if "-O" not in sys.argv:
if "-F" not in sys.argv:
sys.exit("WARNING: need to specify either organism results (with -O flag in command) or functional results (with -F flag in command).")
# loading starting file
if "-I" in sys.argv:
infile_name = string_find("-I")
else:
sys.exit ("WARNING: infile must be specified using '-I' flag.")
# checking to make sure database is specified
if "-D" in sys.argv:
db_name = string_find("-D")
else:
sys.exit( "No database file indicated; skipping database search step.")
infile = open (infile_name, "r")
# setting up databases
RefSeq_hit_count_db = {}
unique_seq_db = {}
line_counter = 0
# reading through the infile - the DIAMOND results m8 format
print ("\nReading through the m8 results infile...")
for line in infile:
line_counter += 1
splitline = line.split("\t")
if line_counter % 1000000 == 0:
t99 = time.time()
print(str(line_counter)[:-6] + "M lines processed so far in " + str(t99-t0) + " seconds.")
unique_seq_db[splitline[0]] = 1
try:
RefSeq_hit_count_db[splitline[1]] += 1
except KeyError:
RefSeq_hit_count_db[splitline[1]] = 1
continue
t1 = time.time()
print ("\nAnalysis of " + infile_name + " complete.")
print ("Number of total lines: " + str(line_counter))
print ("Number of unique sequences: " + str(len(unique_seq_db)))
print ("Time elapsed: " + str(t1-t0) + " seconds.")
infile.close()
# time to search for these in the reference database
db = open (db_name, "r")
print ("\nReading in reference database...")
t2 = time.time()
# optional outfile of specific organism results
if "-SO" in sys.argv:
target_org = string_find("-SO")
db_SO_dictionary = {}
# building a dictionary of the reference database
if "-F" in sys.argv:
db_func_dictionary = {}
if "-O" in sys.argv:
db_org_dictionary = {}
db_line_counter = 0
db_error_counter = 0
# optional error file for the database assembly
if "-E" in sys.argv:
error_file = open("database_error_list.txt", "w")
for line in db:
if line.startswith(">") == True:
db_line_counter += 1
splitline = line.split(" ", 1)
# ID, the hit returned in DIAMOND results
db_id = str(splitline[0])[1:]
# name and functional description
db_entry = line.strip().rsplit("[")
db_entry = db_entry[0].split(" ", 1)
db_entry = db_entry[1][:-1]
# organism name
if line.count("[") != 1:
splitline = line.split("[")
db_org = splitline[line.count("[")].strip()[:-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
try:
if split_db_org[1] == "sp.":
db_org = split_db_org[0] + " " + split_db_org[1] + " " + split_db_org[2]
else:
db_org = split_db_org[1] + " " + split_db_org[2]
except IndexError:
try:
db_org = split_db_org[1]
except IndexError:
db_org = splitline[line.count("[")-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
db_org = split_db_org[1] + " " + split_db_org[2]
else:
db_org = line.rsplit("[")
db_org = db_org[1].split(" ")
try:
db_org = str(db_org[0]) + " " + str(db_org[1])
except IndexError:
db_org = line.strip().split("[", 1)
db_org = db_org[1][:-1]
db_error_counter += 1
if "-E" in sys.argv:
error_file.write(line)
db_org = re.sub('[^a-zA-Z0-9-_*. ]', '', db_org)
# add to dictionaries
if "-F" in sys.argv:
db_func_dictionary[db_id] = db_entry
if "-O" in sys.argv:
db_org_dictionary[db_id] = db_org
if "-SO" in sys.argv:
if target_org in db_org:
db_SO_dictionary[db_id] = db_entry
# line counter to show progress
if db_line_counter % 1000000 == 0: # each million
t95 = time.time()
print (str(db_line_counter)[:-6] + "M lines processed so far in " + str(t95-t2) + " seconds.")
db.close()
t3 = time.time()
print ("Database read successfully.\n")
print ("Time elapsed: " + str(t3-t2) + " seconds.")
print ("Number of lines: " + str(db_line_counter))
print ("Number of errors: " + str(db_error_counter))
if "-SO" in sys.argv:
print ("Number of unique entries for specific organism: " + str(len(db_SO_dictionary.keys())))
# condensing down the identical matches
condensed_RefSeq_hit_db = {}
for entry in RefSeq_hit_count_db.keys():
try:
if "-O" in sys.argv:
org = db_org_dictionary[entry]
if "-F" in sys.argv:
org = db_func_dictionary[entry]
if org in condensed_RefSeq_hit_db.keys():
condensed_RefSeq_hit_db[org] += RefSeq_hit_count_db[entry]
else:
condensed_RefSeq_hit_db[org] = RefSeq_hit_count_db[entry]
except KeyError:
print ("KeyError:\t" + entry)
continue
if "-SO" in sys.argv:
condensed_RefSeq_SO_hit_db = {}
for entry in RefSeq_hit_count_db.keys():
try:
func = db_SO_dictionary[entry]
try:
condensed_RefSeq_SO_hit_db[func] += RefSeq_hit_count_db[entry]
except KeyError:
condensed_RefSeq_SO_hit_db[func] = RefSeq_hit_count_db[entry]
continue
except KeyError:
continue
# dictionary output and summary
print ("\nDictionary database assembled.")
print ("Time elapsed: " + str(t3-t2) + " seconds.")
print ("Number of errors: " + str(db_error_counter))
# creating the outfiles
if "-O" in sys.argv:
outfile_name = infile_name[:-3] + "_organism.tsv"
if "-F" in sys.argv:
outfile_name = infile_name[:-3] + "_function.tsv"
outfile = open (outfile_name, "w")
# writing the output
error_counter = 0
for k, v in sorted(condensed_RefSeq_hit_db.items(), key=lambda kv: -kv[1]):
try:
q = v * 100 / float(line_counter)
outfile.write (str(q) + "\t" + str(v) + "\t" + k + "\n")
except KeyError:
outfile.write (str(q) + "\t" + str(v) + "\tWARNING: Key not found for " + k + "\n")
error_counter += 1
continue
outfile.close()
print ("\nAnnotations saved to file: '" + outfile_name + "'.")
print ("Number of errors: " + str(error_counter))
# writing the output if optional specific organism flag is active
if "-SO" in sys.argv:
target_org_outfile = open(infile_name[:-3] + "_" + target_org + ".tsv", "w")
for k, v in sorted(condensed_RefSeq_SO_hit_db.items(), key=lambda kv: -kv[1]):
try:
q = v * 100 / float(line_counter)
target_org_outfile.write (str(q) + "\t" + str(v) + "\t" + k + "\n")
except KeyError:
target_org_outfile.write (str(q) + "\t" + str(v) + "\tWARNING: Key not found for " + k + "\n")
error_counter += 1
continue
print ("Specific organism annotations saved to file: " + infile_name[:-3] + "_" + target_org + ".tsv")
target_org_outfile.close()
| transcript/samsa_v2 | python_scripts/DIAMOND_general_RefSeq_analysis_counter.py | Python | gpl-3.0 | 8,393 | ["BLAST"] | 40f55e4680b6a9d0dec9a9aefe739001e1c1c9940cdacc61e406b47db94db02a |
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_backupconfiguration
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of BackupConfiguration Avi RESTful Object
description:
- This module is used to configure BackupConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
backup_file_prefix:
description:
- Prefix of the exported configuration file.
- Field introduced in 17.1.1.
backup_passphrase:
description:
- Passphrase of backup configuration.
maximum_backups_stored:
description:
- Rotate the backup files based on this count.
- Allowed values are 1-20.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
name:
description:
- Name of backup configuration.
required: true
remote_directory:
description:
- Directory at remote destination with write permission for ssh user.
remote_hostname:
description:
- Remote destination.
save_local:
description:
- Local backup.
type: bool
ssh_user_ref:
description:
- Access credentials for remote destination.
- It is a reference to an object of type cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
upload_to_remote_host:
description:
- Remote backup.
type: bool
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create BackupConfiguration object
avi_backupconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_backupconfiguration
"""
RETURN = '''
obj:
description: BackupConfiguration (api/backupconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
backup_file_prefix=dict(type='str',),
backup_passphrase=dict(type='str', no_log=True,),
maximum_backups_stored=dict(type='int',),
name=dict(type='str', required=True),
remote_directory=dict(type='str',),
remote_hostname=dict(type='str',),
save_local=dict(type='bool',),
ssh_user_ref=dict(type='str',),
tenant_ref=dict(type='str',),
upload_to_remote_host=dict(type='bool',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'backupconfiguration',
set(['backup_passphrase']))
if __name__ == '__main__':
main()
| sgerhart/ansible | lib/ansible/modules/network/avi/avi_backupconfiguration.py | Python | mit | 4,821 | ["VisIt"] | 52370a22d8bf163ddacd05a9edd39999a32a43861158c8c6622fecf316a5c663 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
import re
import math
from decimal import Decimal
from collections import defaultdict
from .exceptions import *
from .pdict import PreservingDict
from . import qcformat
from . import molpro_basissets
from . import options
def harvest_output(outtext):
"""Function to read MRCC output file *outtext* and parse important
quantum chemical information from it in
"""
psivar = PreservingDict()
psivar_coord = None
psivar_grad = None
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
# <<< Process NRE >>>
mobj = re.search(
r'^\s*' + r'(?:NUCLEAR REPULSION ENERGY)' + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched nrc')
psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
# <<< Process SCF >>>
#mobj = re.search(
# r'^\s*' + r'(?:Energy of reference determinant (?:\[au\]|/au/):)' + r'\s+' + NUMBER + r'\s*$',
# outtext, re.MULTILINE)
#if mobj:
# print('matched scf')
# psivar['SCF TOTAL ENERGY'] = mobj.group(1)
# <<< Process MP2 >>>
mobj = re.search(
r'^\s*' + r'Reference energy[:]?\s+' + NUMBER + r'\s*' +
r'^\s*' + r'MP2 singlet pair energy[:]?\s+' + NUMBER + r'\s*' +
r'^\s*' + r'MP2 triplet pair energy[:]?\s+' + NUMBER + r'\s*' +
r'^\s*' + r'MP2 correlation energy[:]?\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched mp2')
psivar['HF TOTAL ENERGY'] = mobj.group(1)
psivar['MP2 CORRELATION ENERGY'] = mobj.group(4)
psivar['MP2 TOTAL ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(4))
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(3)) * \
Decimal(2) / Decimal(3)
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(4)) - \
psivar['MP2 SAME-SPIN CORRELATION ENERGY']
# <<< Process SAPT-like >>>
mobj = re.search(
#r'^\s+' + r'E1pol\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E1exch\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E1exch\(S2\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2ind\(unc\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2ind\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2ind-exch\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2disp\(unc\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
# r'^\s+' + r'E2disp\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r'\)\s+' + NUMBER + r'\s+' + NUMBER + '\s*',
r'^\s+' + r'E2disp\s+' + NUMBER + r'.*$',
#r'^\s+' + r'E2disp-exch\(unc\)\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
#r'^\s+' + r'E2disp-exc\s+' + NUMBER + r'\s+\(\s*' + NUMBER + r')\s+' + NUMBER + r'\s+' + NUMBER + '\s*'
outtext, re.MULTILINE)
if mobj:
#print('matched sapt-like')
psivar['MP2C DISP20 ENERGY'] = Decimal(mobj.group(1)) / Decimal(1000)
# <<< Process SCF-F12 >>>
mobj = re.search(
r'^\s+' + r'CABS-singles contribution of\s+' + NUMBER + r'\s+patched into reference energy.\s*' +
r'^\s+' + r'New reference energy\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched scff12')
psivar['SCF TOTAL ENERGY'] = Decimal(mobj.group(2)) - Decimal(mobj.group(1))
psivar['HF-CABS TOTAL ENERGY'] = mobj.group(2)
# <<< Process MP2-F12 >>>
# DF-MP2-F12 correlation energies:
# --------------------------------
# Approx. Singlet Triplet Ecorr Total Energy
# DF-MP2 -0.261035854033 -0.140514056591 -0.401549910624 -112.843952380305
# DF-MP2-F12/3*C(DX,FIX) -0.367224875485 -0.163178266500 -0.530403141984 -112.972805611666
# DF-MP2-F12/3*C(FIX) -0.358294348708 -0.164988061549 -0.523282410258 -112.965684879939
# DF-MP2-F12/3C(FIX) -0.357375628783 -0.165176490386 -0.522552119169 -112.964954588851
#
# DF-MP2-F12 correlation energies:
# ================================
# Approx. Singlet Triplet Ecorr Total Energy
# DF-MP2 -0.357960885582 -0.185676627667 -0.543637513249 -132.841755020796
# DF-MP2-F12/3*C(DX,FIX) -0.381816069559 -0.188149510095 -0.569965579654 -132.868083087202
# DF-MP2-F12/3*C(FIX) -0.379285470419 -0.187468208608 -0.566753679027 -132.864871186575
# DF-MP2-F12/3C(FIX) -0.379246010149 -0.187531433611 -0.566777443760 -132.864894951307
mobj = re.search(
r'^\s*' + r'DF-MP2-F12 correlation energies:\s*' +
r'^\s*(?:[=-]+)\s*' +
r'^\s+' + r'Approx.\s+Singlet\s+Triplet\s+Ecorr\s+Total Energy\s*' +
r'^\s+' + r'DF-MP2\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'DF-MP2-F12/3\*C\(DX,FIX\)\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'DF-MP2-F12/3\*C\(FIX\)\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' +
r'^\s+' + r'DF-MP2-F12/3C\(FIX\)\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched mp2f12')
psivar['MP2 CORRELATION ENERGY'] = mobj.group(3)
psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) * \
Decimal(2) / Decimal(3)
psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(3)) - \
psivar['MP2 SAME-SPIN CORRELATION ENERGY']
psivar['MP2 TOTAL ENERGY'] = mobj.group(4)
psivar['MP2-F12 CORRELATION ENERGY'] = mobj.group(15)
psivar['MP2-F12 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(14)) * \
Decimal(2) / Decimal(3)
psivar['MP2-F12 OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(15)) - \
psivar['MP2-F12 SAME-SPIN CORRELATION ENERGY']
psivar['MP2-F12 TOTAL ENERGY'] = mobj.group(16)
# <<< Process CC >>>
mobj = re.search(
r'^\s*' + r'CCSD triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)')
psivar['CCSD CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD SAME-SPIN CORRELATION ENERGY']
psivar['CCSD TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF TOTAL ENERGY']
psivar['(T) CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T) TOTAL ENERGY'] = psivar['CCSD(T) CORRELATION ENERGY'] + psivar['HF TOTAL ENERGY']
# <<< Process CC-F12 >>>
mobj = re.search(
r'^\s*' + r'CCSD-F12a triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD-F12a correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)-f12a')
psivar['CCSD-F12A CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD-F12A SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD-F12A OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD-F12A SAME-SPIN CORRELATION ENERGY']
psivar['CCSD-F12A TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF-CABS TOTAL ENERGY']
psivar['(T)-F12AB CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T)-F12A CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T)-F12A TOTAL ENERGY'] = psivar['CCSD(T)-F12A CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
psivar['(T*)-F12AB CORRECTION ENERGY'] = Decimal(mobj.group(3)) * \
psivar['MP2-F12 CORRELATION ENERGY'] / psivar['MP2 CORRELATION ENERGY']
psivar['CCSD(T*)-F12A CORRELATION ENERGY'] = Decimal(mobj.group(2)) + psivar['(T*)-F12AB CORRECTION ENERGY']
psivar['CCSD(T*)-F12A TOTAL ENERGY'] = psivar['CCSD(T*)-F12A CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
mobj = re.search(
r'^\s*' + r'CCSD-F12b triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD-F12b correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)-f12b')
psivar['CCSD-F12B CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD-F12B SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD-F12B OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD-F12B SAME-SPIN CORRELATION ENERGY']
psivar['CCSD-F12B TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF-CABS TOTAL ENERGY']
psivar['(T)-F12AB CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T)-F12B CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T)-F12B TOTAL ENERGY'] = psivar['CCSD(T)-F12B CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
psivar['(T*)-F12AB CORRECTION ENERGY'] = Decimal(mobj.group(3)) * \
psivar['MP2-F12 CORRELATION ENERGY'] / psivar['MP2 CORRELATION ENERGY']
psivar['CCSD(T*)-F12B CORRELATION ENERGY'] = Decimal(mobj.group(2)) + psivar['(T*)-F12AB CORRECTION ENERGY']
psivar['CCSD(T*)-F12B TOTAL ENERGY'] = psivar['CCSD(T*)-F12B CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
mobj = re.search(
r'^\s*' + r'CCSD-F12c triplet pair energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'CCSD-F12c correlation energy\s+' + NUMBER + '\s*' +
r'^\s*' + r'Triples \(T\) contribution\s+' + NUMBER + '\s*$',
outtext, re.MULTILINE)
if mobj:
#print('matched ccsd(t)-f12c')
psivar['CCSD-F12C CORRELATION ENERGY'] = mobj.group(2)
psivar['CCSD-F12C SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) * \
Decimal(2) / Decimal(3)
psivar['CCSD-F12C OPPOSITE-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(2)) - \
psivar['CCSD-F12C SAME-SPIN CORRELATION ENERGY']
psivar['CCSD-F12C TOTAL ENERGY'] = Decimal(mobj.group(2)) + psivar['HF-CABS TOTAL ENERGY']
psivar['(T)-F12C CORRECTION ENERGY'] = mobj.group(3)
psivar['CCSD(T)-F12C CORRELATION ENERGY'] = Decimal(mobj.group(2)) + Decimal(mobj.group(3))
psivar['CCSD(T)-F12C TOTAL ENERGY'] = psivar['CCSD(T)-F12C CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
psivar['(T*)-F12C CORRECTION ENERGY'] = Decimal(mobj.group(3)) * \
psivar['MP2-F12 CORRELATION ENERGY'] / psivar['MP2 CORRELATION ENERGY']
psivar['CCSD(T*)-F12C CORRELATION ENERGY'] = Decimal(mobj.group(2)) + psivar['(T*)-F12C CORRECTION ENERGY']
psivar['CCSD(T*)-F12C TOTAL ENERGY'] = psivar['CCSD(T*)-F12C CORRELATION ENERGY'] + psivar['HF-CABS TOTAL ENERGY']
# Process Completion
mobj = re.search(
r'^\s*' + r'Variable memory released' + r'\s+$',
outtext, re.MULTILINE)
if mobj:
psivar['SUCCESS'] = True
# Process CURRENT energies (TODO: needs better way)
if 'HF TOTAL ENERGY' in psivar:
psivar['CURRENT REFERENCE ENERGY'] = psivar['HF TOTAL ENERGY']
psivar['CURRENT ENERGY'] = psivar['HF TOTAL ENERGY']
if 'HF-CABS TOTAL ENERGY' in psivar:
psivar['CURRENT REFERENCE ENERGY'] = psivar['HF-CABS TOTAL ENERGY']
psivar['CURRENT ENERGY'] = psivar['HF-CABS TOTAL ENERGY']
if 'MP2 TOTAL ENERGY' in psivar and 'MP2 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP2 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP2 TOTAL ENERGY']
if 'MP2-F12 TOTAL ENERGY' in psivar and 'MP2-F12 CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['MP2-F12 CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['MP2-F12 TOTAL ENERGY']
if 'CCSD TOTAL ENERGY' in psivar and 'CCSD CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD TOTAL ENERGY']
if 'CCSD-F12A TOTAL ENERGY' in psivar and 'CCSD-F12A CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD-F12A CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD-F12A TOTAL ENERGY']
if 'CCSD-F12B TOTAL ENERGY' in psivar and 'CCSD-F12B CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD-F12B CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD-F12B TOTAL ENERGY']
if 'CCSD-F12C TOTAL ENERGY' in psivar and 'CCSD-F12C CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD-F12C CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD-F12C TOTAL ENERGY']
if 'CCSD(T) TOTAL ENERGY' in psivar and 'CCSD(T) CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T) CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T) TOTAL ENERGY']
if 'CCSD(T)-F12A TOTAL ENERGY' in psivar and 'CCSD(T)-F12A CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T)-F12A CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T)-F12A TOTAL ENERGY']
if 'CCSD(T)-F12B TOTAL ENERGY' in psivar and 'CCSD(T)-F12B CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T)-F12B CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T)-F12B TOTAL ENERGY']
if 'CCSD(T)-F12C TOTAL ENERGY' in psivar and 'CCSD(T)-F12C CORRELATION ENERGY' in psivar:
psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T)-F12C CORRELATION ENERGY']
psivar['CURRENT ENERGY'] = psivar['CCSD(T)-F12C TOTAL ENERGY']
return psivar, psivar_coord, psivar_grad
class Infile(qcformat.InputFormat2):
def __init__(self, mem, mol, mtd, der, opt):
qcformat.InputFormat2.__init__(self, mem, mol, mtd, der, opt)
#print self.method, self.molecule.nactive_fragments()
if ('sapt' in self.method or 'mp2c' in self.method) and self.molecule.nactive_fragments() != 2:
raise FragmentCountError("""Requested molecule has %d, not 2, fragments.""" % (self.molecule.nactive_fragments()))
# # memory in MB --> MW
# self.memory = int(math.ceil(mem / 8.0))
# auxiliary basis sets
[self.unaugbasis, self.augbasis, self.auxbasis] = self.corresponding_aux_basis()
def muster_basis_options(self):
text = ''
lowername = self.method.lower()
options = defaultdict(lambda: defaultdict(dict))
options['BASIS']['ORBITAL']['value'] = self.basis
# this f12 basis setting may be totally messed up
if self.method in ['ccsd(t)-f12-optri']:
if self.basis == 'cc-pvdz-f12':
options['BASIS']['JKFIT']['value'] = 'aug-cc-pvtz/jkfit'
options['BASIS']['JKFITC']['value'] = self.basis + '/optri'
options['BASIS']['MP2FIT']['value'] = 'aug-cc-pvtz/mp2fit'
elif self.method in ['ccsd(t)-f12-cabsfit']:
if self.unaugbasis and self.auxbasis:
#options['BASIS']['JKFIT']['value'] = self.auxbasis + '/jkfit'
#options['BASIS']['JKFITB']['value'] = self.unaugbasis + '/jkfit'
#options['BASIS']['MP2FIT']['value'] = self.auxbasis + '/mp2fit'
#options['BASIS']['DFLHF']['value'] = self.auxbasis + '/jkfit'
options['BASIS']['JKFITC']['value'] = 'aug-cc-pv5z/mp2fit'
else:
raise ValidationError("""Auxiliary basis not predictable from orbital basis '%s'""" % (self.basis))
elif ('df-' in self.method) or ('f12' in self.method) or (self.method in ['mp2c', 'dft-sapt', 'dft-sapt-pbe0acalda']):
if self.unaugbasis and self.auxbasis:
options['BASIS']['JKFIT']['value'] = self.auxbasis + '/jkfit'
options['BASIS']['JKFITB']['value'] = self.unaugbasis + '/jkfit'
options['BASIS']['MP2FIT']['value'] = self.auxbasis + '/mp2fit'
options['BASIS']['DFLHF']['value'] = self.auxbasis + '/jkfit'
else:
raise ValidationError("""Auxiliary basis not predictable from orbital basis '%s'""" % (self.basis))
return text, options
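    # Illustrative sketch (not executed): for a df-/f12 method with an orbital
    # basis of, say, aug-cc-pvtz, and assuming corresponding_aux_basis() yields
    # auxbasis='aug-cc-pvtz' and unaugbasis='cc-pvtz', the returned options
    # would be populated roughly as
    #     options['BASIS']['JKFIT']['value']  = 'aug-cc-pvtz/jkfit'
    #     options['BASIS']['JKFITB']['value'] = 'cc-pvtz/jkfit'
    #     options['BASIS']['MP2FIT']['value'] = 'aug-cc-pvtz/mp2fit'
    #     options['BASIS']['DFLHF']['value']  = 'aug-cc-pvtz/jkfit'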
def prepare_basis_for_molpro(self):
text = ''
for opt, val in self.options['BASIS'].items():
#print opt, val['value']
#print molpro_basissets.altbasis.keys()
if not text:
text += """basis={\n"""
try:
# jaxz, maxz, etc.
for line in molpro_basissets.altbasis[val['value']]:
text += """%s\n""" % (line)
text += '\n'
except KeyError:
# haxz
if val['value'].startswith('heavy-aug-'):
text += """set,%s; default,%s,H=%s\n""" % (opt.lower(), self.augbasis, self.unaugbasis)
# xz, axz, 6-31g*
else:
text += """set,%s; default,%s\n""" % (opt.lower(), val['value'])
if text:
text += """}\n\n"""
return text
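    # Illustrative sketch (not executed): with ORBITAL='aug-cc-pvtz' and
    # JKFIT='aug-cc-pvtz/jkfit' collected in self.options['BASIS'], the text
    # emitted above would look roughly like
    #     basis={
    #     set,orbital; default,aug-cc-pvtz
    #     set,jkfit; default,aug-cc-pvtz/jkfit
    #     }
    # with heavy-aug-* sets expanded to 'default,<aug basis>,H=<unaug basis>'.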
def format_infile_string(self):
"""
"""
# Handle memory and comment
memcmd, _memkw = """***, %s\nmemory,%d,m\n""" % (self.molecule.tagline, int(math.ceil(self.memory / 8.0))), {}
# Handle molecule and basis set
molcmd, _molkw = self.molecule.format_molecule_for_molpro(), {}
# format global convergence directions
# text += self.format_global_parameters()
_cdscmd, cdskw = muster_cdsgroup_options(self.method)
# Handle calc type and quantum chemical method
mdccmd, mdckw, mdcls = procedures['energy'][self.method](self.method, self.dertype, self.molecule)
_bascmd, baskw = self.muster_basis_options()
# # format options
# optcmd = qcdb.options.prepare_options_for_psi4(mdckw)
# make options from imdb only user options (currently non-existent). set basis and castup from here.
# Handle driver vs input/default keyword reconciliation
userkw = self.options
# userkw = p4util.prepare_options_for_modules()
#userkw = qcdb.options.reconcile_options(userkw, memkw)
#userkw = qcdb.options.reconcile_options(userkw, molkw)
userkw = options.reconcile_options2(userkw, cdskw)
userkw = options.reconcile_options2(userkw, baskw)
#userkw = qcdb.options.reconcile_options(userkw, psikw)
userkw = options.reconcile_options2(userkw, mdckw)
# Handle conversion of psi4 keyword structure into cfour format
#optcmdB = options.prepare_options_for_psi4(userkw)
optcmd = prepare_options_for_molpro(userkw, mdcls)
bascmd, _baskw = self.prepare_basis_for_molpro(), {} #self.options['BASIS']), {}
# Handle text to be passed untouched
litcmd = """\nshow[1,20f20.12],ee*,ce*,te*\nshow[1,60f20.12],_E*\n\n"""
# Assemble infile pieces
return memcmd + molcmd + bascmd + optcmd + mdccmd + litcmd
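# Illustrative sketch (not executed): format_infile_string() above assembles a
# complete Molpro deck; for, e.g., a ccsd(t)-f12 energy it is ordered roughly as
#     ***, <molecule tagline>
#     memory,<MW>,m
#     <geometry block>
#     basis={...}
#     gthresh,ZERO=1e-14,...
#     rhf
#     {ccsd(t)-f12,df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb}
#     show[1,20f20.12],ee*,ce*,te*
#     show[1,60f20.12],_E*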
def muster_cdsgroup_options(name):
text = ''
lowername = name.lower()
options = defaultdict(lambda: defaultdict(dict))
options['GTHRESH']['ZERO']['value'] = 1.0e-14
options['GTHRESH']['ONEINT']['value'] = 1.0e-14
options['GTHRESH']['TWOINT']['value'] = 1.0e-14
options['GTHRESH']['ENERGY']['value'] = 1.0e-9
if name in ['mp2c', 'dft-sapt-shift', 'dft-sapt', 'dft-sapt-pbe0ac', 'dft-sapt-pbe0acalda']:
options['GTHRESH']['ENERGY']['value'] = 1.0e-8
options['GTHRESH']['ORBITAL']['value'] = 1.0e-8
options['GTHRESH']['GRID']['value'] = 1.0e-8
elif name in ['b3lyp', 'b3lyp-d', 'df-b3lyp', 'df-b3lyp-d']:
options['GTHRESH']['ENERGY']['value'] = 1.0e-8
options['GTHRESH']['ORBITAL']['value'] = 1.0e-7
options['GTHRESH']['GRID']['value'] = 1.0e-8
else:
pass
return text, options
def prepare_options_for_molpro(options, proc):
"""Function to take the full snapshot of the liboptions object
encoded in dictionary *options*, find the options directable toward
Cfour (options['CFOUR']['CFOUR_**']) that aren't default, then write
a CFOUR deck with those options.
Note that unlike the cfour version, this uses complete options deck.
"""
text = ''
if len(options['GTHRESH']) > 0:
text += 'gthresh'
for opt, val in options['GTHRESH'].items():
text += """,%s=%s""" % (opt, val['value'])
text += '\n\n'
for item in proc:
if len(options[item.upper()]) > 0:
text += """{%s%s}\n""" % (item, options[item.upper()]['OPTIONS']['value'])
else:
text += """%s\n""" % (item)
if text:
text += '\n'
return text
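# Illustrative sketch (not executed): with the GTHRESH defaults from
# muster_cdsgroup_options() and proc=['rhf', 'ccsd(t)-f12'], the text built
# above would come out roughly as
#     gthresh,ZERO=1e-14,ONEINT=1e-14,TWOINT=1e-14,ENERGY=1e-09
#
#     rhf
#     {ccsd(t)-f12,df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb}
# (the gthresh keyword order depends on dict iteration order).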
def muster_modelchem(name, dertype, mol):
"""Transform calculation method *name* and derivative level *dertype*
    into options for molpro. While deliberately requested pieces,
generally |cfour__cfour_deriv_level| and |cfour__cfour_calc_level|,
are set to complain if contradicted ('clobber' set to True), other
'recommended' settings, like |cfour__cfour_cc_program|, can be
countermanded by keywords in input file ('clobber' set to False).
Occasionally, want these pieces to actually overcome keywords in
input file ('superclobber' set to True).
"""
text = ''
lowername = name.lower()
options = defaultdict(lambda: defaultdict(dict))
proc = []
if dertype == 0:
pass
else:
raise ValidationError("""Requested Psi4 dertype %d is not available.""" % (dertype))
    if lowername == 'mp2':
        proc.append('rhf')
        proc.append('mp2')
elif lowername == 'ccsd(t)-f12':
proc.append('rhf')
proc.append('ccsd(t)-f12')
options['CCSD(T)-F12']['OPTIONS']['value'] = ',df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb'
elif lowername == 'ccsd(t)-f12c':
proc.append('rhf')
proc.append('ccsd(t)-f12c')
options['CCSD(T)-F12C']['OPTIONS']['value'] = ',df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb'
elif lowername == 'ccsd(t)-f12-optri':
proc.append('rhf')
proc.append('ccsd(t)-f12')
options['CCSD(T)-F12']['OPTIONS']['value'] = ',df_basis=mp2fit,df_basis_exch=jkfit,ri_basis=jkfitc'
elif lowername == 'ccsd(t)-f12-cabsfit':
proc.append('rhf')
proc.append('ccsd(t)-f12')
options['CCSD(T)-F12']['OPTIONS']['value'] = ',df_basis=jkfitc,df_basis_exch=jkfitc,ri_basis=jkfitc'
elif lowername == 'mp2c':
proc.append('gdirect')
proc.append(mol.extract_fragments(1, 2).format_molecule_for_molpro())
proc.append('df-hf,')
proc.append('df-ks,')
proc.append('sapt; monomerA')
options['DF-HF,']['OPTIONS']['value'] = """basis=jkfit,locorb=0; start,atdens; save,1101.2"""
options['DF-KS,']['OPTIONS']['value'] = """lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,1.0; start,1101.2; save,2101.2"""
proc.append(mol.extract_fragments(2, 1).format_molecule_for_molpro())
proc.append('df-hf')
proc.append('df-ks')
proc.append('sapt; monomerB')
options['DF-HF']['OPTIONS']['value'] = """,basis=jkfit,locorb=0; start,atdens; save,1102.2"""
options['DF-KS']['OPTIONS']['value'] = """,lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,1.0; start,1102.2; save,2102.2"""
proc.append(mol.format_molecule_for_molpro())
proc.append('sapt; intermol')
options['SAPT; INTERMOL']['OPTIONS']['value'] = """,saptlevel=3,ca=2101.2,cb=2102.2,icpks=0,fitlevel=3,nlexfac=0.0,cfac=0.0; dfit,basis_coul=jkfit,basis_exch=jkfit,cfit_scf=3"""
else:
raise ValidationError("""Requested Cfour computational methods %d is not available.""" % (lowername))
# # Set clobbering
# if 'CFOUR_DERIV_LEVEL' in options['CFOUR']:
# options['CFOUR']['CFOUR_DERIV_LEVEL']['clobber'] = True
# options['CFOUR']['CFOUR_DERIV_LEVEL']['superclobber'] = True
# if 'CFOUR_CALC_LEVEL' in options['CFOUR']:
# options['CFOUR']['CFOUR_CALC_LEVEL']['clobber'] = True
# options['CFOUR']['CFOUR_CALC_LEVEL']['superclobber'] = True
# if 'CFOUR_CC_PROGRAM' in options['CFOUR']:
# options['CFOUR']['CFOUR_CC_PROGRAM']['clobber'] = False
return text, options, proc
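# Illustrative sketch (not executed): for name='ccsd(t)-f12' and dertype=0 the
# function above returns roughly
#     text    = ''
#     proc    = ['rhf', 'ccsd(t)-f12']
#     options['CCSD(T)-F12']['OPTIONS']['value'] = \
#         ',df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb'
# which prepare_options_for_molpro() then renders into the method lines.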
procedures = {
'energy': {
'mp2c' : muster_modelchem,
'ccsd(t)-f12' : muster_modelchem,
'ccsd(t)-f12c' : muster_modelchem,
'ccsd(t)-f12-optri' : muster_modelchem,
'ccsd(t)-f12-cabsfit' : muster_modelchem,
#'sapt0' : muster_modelchem,
#'sapt2+' : muster_modelchem,
#'sapt2+(3)' : muster_modelchem,
#'sapt2+3(ccd)' : muster_modelchem,
}
}
qcmtdIN = procedures['energy']
def psi4_list():
"""Return an array of Psi4 methods with energies.
"""
return procedures['energy'].keys()
|
kannon92/psi4
|
psi4/driver/qcdb/molpro2.py
|
Python
|
gpl-2.0
| 27,551
|
[
"CFOUR",
"Psi4"
] |
9d30cb78aa127c59aca63afd2838e348b2d1b0568342f42dd42bdc23e8c9ca14
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te, topi
from tvm.testing import assert_allclose
from tvm.topi.utils import get_const_tuple
def check_grad(
out, inputs, args=[], data_range=(-10, 10), desired_grads=None, assert_no_jacobian=True
):
inputs = inputs if isinstance(inputs, list) else [inputs]
def check_device(device, host="llvm"):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(host):
return
sout = te.create_schedule(out.op)
mout = tvm.build(sout, [out] + inputs + args)
out_shape = get_const_tuple(out.shape)
l, h = data_range
input_data = [
tvm.nd.array(
np.random.uniform(l, h, size=get_const_tuple(input.shape)).astype(input.dtype)
)
for input in inputs
]
arg_vals = [
tvm.nd.array(np.random.uniform(l, h, size=get_const_tuple(arg.shape)).astype(arg.dtype))
for arg in args
]
ones = topi.full_like(out, 1.0)
        # we provide head to sum and reduce the output dimension,
        # which is equivalent to grad(out.sum(), inputs)
grads = te.gradient(out, inputs, head=ones)
grad_sched = te.create_schedule([grad.op for grad in grads])
mgrad = tvm.build(grad_sched, list(grads) + inputs + args)
if assert_no_jacobian:
# TODO(yzhliu): it is better to visit the expression and do assertion
lowered_ir = str(tvm.lower(grad_sched, list(grads) + inputs + args, simple_mode=True))
assert "jacobian" not in lowered_ir, lowered_ir
grad_data = [tvm.nd.empty(get_const_tuple(i.shape), g.dtype) for i, g in zip(inputs, grads)]
mgrad(*grad_data, *input_data, *arg_vals)
g_res = [g.numpy() for g in grad_data]
if desired_grads:
assert isinstance(desired_grads, list)
for actual, desired in zip(g_res, desired_grads):
assert_allclose(actual, desired, rtol=0.1, atol=1e-2)
else:
def forward(*in_data):
out_data = tvm.nd.empty(out_shape, out.dtype)
mout(out_data, *[tvm.nd.array(d) for d in list(in_data)])
return out_data.numpy().sum()
tvm.testing.check_numerical_grads(
forward, [d.numpy() for d in input_data + arg_vals], g_res
)
check_device("cpu")
def test_basic_operation():
np.random.seed(0)
shape = (10, 10)
x = te.var("x", dtype="float32")
k = te.reduce_axis((0, 10), name="k")
l = te.reduce_axis((0, 10), name="l")
A0 = te.placeholder(shape, name="A0")
A1 = te.placeholder(shape, name="A1")
zeros = np.zeros(shape)
B = te.compute(shape, lambda i, j: A0[i, j], name="B")
check_grad(B, [A0])
B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name="B")
check_grad(B, [A0, A1])
B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0, data_range=(0.1, 10))
B = te.compute(shape, lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]), name="B")
check_grad(B, A0, data_range=(-4, 4))
B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name="B")
check_grad(B, A0)
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]), name="B")
check_grad(B, [A0, A1])
B = te.compute(
shape, lambda i, j: te.sum(A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k), name="B"
)
check_grad(B, A0)
def fcombine(x, y):
return x * y
def fidentity(t0):
return tvm.tir.const(1, t0)
prod = te.comm_reducer(fcombine, fidentity, name="prod")
B = te.compute((10, 10), lambda i, j: prod(A0[i, k] + A0[k, i], axis=k), name="B")
check_grad(B, A0)
X = te.placeholder((10,), name="X")
A = te.compute((10,), lambda i: X[i] + X[9 - i])
B = te.compute((10,), lambda i: X[i] * X[9 - i])
Y = topi.tensordot(A, B, 1)
check_grad(Y, X)
X = te.placeholder((3, 3), name="X")
Y = topi.einsum("ii->i", (X))
check_grad(Y, X)
def test_topi():
X = te.placeholder((1, 2, 4, 4), name="X")
W = te.placeholder((5, 2, 3, 3), name="W")
W1 = te.placeholder((2, 5, 3, 3), name="W1")
W2 = te.placeholder((1,), name="W2")
R = topi.nn.conv2d(X, W, 1, 1, 1)
check_grad(R, [X, W])
R1 = topi.nn.conv2d(topi.nn.relu(R), W1, 1, 0, 1)
check_grad(R1, [X, W, W1])
R = topi.broadcast_to(W2, (5, 2, 3, 3))
check_grad(R, [W2])
R = topi.nn.conv2d(X, topi.broadcast_to(W2, (5, 2, 3, 3)), 1, 1, 1)
check_grad(R, [X, W2])
R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "avg")
check_grad(R, X)
R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(R, X)
    X = te.placeholder((1, 2, 4, 4), name="X")
R = topi.reshape(X, (1, 32))
check_grad(R, [X])
X = te.placeholder((1, 2, 5, 5), name="X")
W = te.placeholder((2, 2, 3, 3), name="W")
S = topi.reshape(X, (1, 50))
check_grad(S, [X])
R = X + topi.nn.conv2d(X + topi.nn.conv2d(X, W, 1, 1, 1), W, 1, 1, 1)
check_grad(R, [X, W])
S = topi.nn.softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.sigmoid(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.tanh(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.nn.log_softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
check_grad(S, [W], [X])
X = te.placeholder((1, 2, 3, 5), name="X")
Y = te.placeholder((1, 2, 7, 5), name="Y")
S = topi.concatenate((X, Y), 2)
check_grad(S, [X, Y])
X = te.placeholder((1, 2, 6, 5), name="X")
(S, R) = topi.split(X, 2, 2)
check_grad(S, [X])
check_grad(R, [X])
R1 = topi.concatenate((S, R), 2)
check_grad(R1, [X])
R2 = topi.concatenate((R, S), 2)
check_grad(R2, [X])
X = te.placeholder((4, 5), name="X")
I = te.placeholder((100,), name="I", dtype="int32")
R = topi.take(X, topi.abs(I))
check_grad(R, [X], [I])
W = te.placeholder((5, 5), name="W")
exps = topi.exp(topi.nn.dense(X, W))
sumexps = topi.sum(exps, axis=-1, keepdims=True)
R = exps / sumexps
check_grad(R, [X, W], data_range=(-1, 1))
def test_stride_dilation():
X = te.placeholder((1, 2, 10, 10), name="X")
W = te.placeholder((2, 2, 1, 1), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 2, 2), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 3, 3), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [1, 1], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [2, 2], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool2d(X, [3, 3], [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
@pytest.mark.xfail
def test_reduction_init():
np.random.seed(0)
shape = (10, 10)
k = te.reduce_axis((0, 10), name="k")
A0 = te.placeholder(shape, name="A0")
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k, init=0.0), name="B")
check_grad(B, A0)
if __name__ == "__main__":
test_basic_operation()
test_topi()
test_stride_dilation()
|
Laurawly/tvm-1
|
tests/python/unittest/test_te_autodiff.py
|
Python
|
apache-2.0
| 11,590
|
[
"VisIt"
] |
8c1fc8f07f4ea93618f9b93a8fe81614c25a4d97c42c2c5051d1c5cfe6d9ca50
|
# Adafruit BNO055 Absolute Orientation Sensor Library
# Copyright (c) 2015 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import binascii
import logging
import struct
import time
import serial
# I2C addresses
BNO055_ADDRESS_A = 0x28
BNO055_ADDRESS_B = 0x29
BNO055_ID = 0xA0
# Page id register definition
BNO055_PAGE_ID_ADDR = 0X07
# PAGE0 REGISTER DEFINITION START
BNO055_CHIP_ID_ADDR = 0x00
BNO055_ACCEL_REV_ID_ADDR = 0x01
BNO055_MAG_REV_ID_ADDR = 0x02
BNO055_GYRO_REV_ID_ADDR = 0x03
BNO055_SW_REV_ID_LSB_ADDR = 0x04
BNO055_SW_REV_ID_MSB_ADDR = 0x05
BNO055_BL_REV_ID_ADDR = 0X06
# Accel data register
BNO055_ACCEL_DATA_X_LSB_ADDR = 0X08
BNO055_ACCEL_DATA_X_MSB_ADDR = 0X09
BNO055_ACCEL_DATA_Y_LSB_ADDR = 0X0A
BNO055_ACCEL_DATA_Y_MSB_ADDR = 0X0B
BNO055_ACCEL_DATA_Z_LSB_ADDR = 0X0C
BNO055_ACCEL_DATA_Z_MSB_ADDR = 0X0D
# Mag data register
BNO055_MAG_DATA_X_LSB_ADDR = 0X0E
BNO055_MAG_DATA_X_MSB_ADDR = 0X0F
BNO055_MAG_DATA_Y_LSB_ADDR = 0X10
BNO055_MAG_DATA_Y_MSB_ADDR = 0X11
BNO055_MAG_DATA_Z_LSB_ADDR = 0X12
BNO055_MAG_DATA_Z_MSB_ADDR = 0X13
# Gyro data registers
BNO055_GYRO_DATA_X_LSB_ADDR = 0X14
BNO055_GYRO_DATA_X_MSB_ADDR = 0X15
BNO055_GYRO_DATA_Y_LSB_ADDR = 0X16
BNO055_GYRO_DATA_Y_MSB_ADDR = 0X17
BNO055_GYRO_DATA_Z_LSB_ADDR = 0X18
BNO055_GYRO_DATA_Z_MSB_ADDR = 0X19
# Euler data registers
BNO055_EULER_H_LSB_ADDR = 0X1A
BNO055_EULER_H_MSB_ADDR = 0X1B
BNO055_EULER_R_LSB_ADDR = 0X1C
BNO055_EULER_R_MSB_ADDR = 0X1D
BNO055_EULER_P_LSB_ADDR = 0X1E
BNO055_EULER_P_MSB_ADDR = 0X1F
# Quaternion data registers
BNO055_QUATERNION_DATA_W_LSB_ADDR = 0X20
BNO055_QUATERNION_DATA_W_MSB_ADDR = 0X21
BNO055_QUATERNION_DATA_X_LSB_ADDR = 0X22
BNO055_QUATERNION_DATA_X_MSB_ADDR = 0X23
BNO055_QUATERNION_DATA_Y_LSB_ADDR = 0X24
BNO055_QUATERNION_DATA_Y_MSB_ADDR = 0X25
BNO055_QUATERNION_DATA_Z_LSB_ADDR = 0X26
BNO055_QUATERNION_DATA_Z_MSB_ADDR = 0X27
# Linear acceleration data registers
BNO055_LINEAR_ACCEL_DATA_X_LSB_ADDR = 0X28
BNO055_LINEAR_ACCEL_DATA_X_MSB_ADDR = 0X29
BNO055_LINEAR_ACCEL_DATA_Y_LSB_ADDR = 0X2A
BNO055_LINEAR_ACCEL_DATA_Y_MSB_ADDR = 0X2B
BNO055_LINEAR_ACCEL_DATA_Z_LSB_ADDR = 0X2C
BNO055_LINEAR_ACCEL_DATA_Z_MSB_ADDR = 0X2D
# Gravity data registers
BNO055_GRAVITY_DATA_X_LSB_ADDR = 0X2E
BNO055_GRAVITY_DATA_X_MSB_ADDR = 0X2F
BNO055_GRAVITY_DATA_Y_LSB_ADDR = 0X30
BNO055_GRAVITY_DATA_Y_MSB_ADDR = 0X31
BNO055_GRAVITY_DATA_Z_LSB_ADDR = 0X32
BNO055_GRAVITY_DATA_Z_MSB_ADDR = 0X33
# Temperature data register
BNO055_TEMP_ADDR = 0X34
# Status registers
BNO055_CALIB_STAT_ADDR = 0X35
BNO055_SELFTEST_RESULT_ADDR = 0X36
BNO055_INTR_STAT_ADDR = 0X37
BNO055_SYS_CLK_STAT_ADDR = 0X38
BNO055_SYS_STAT_ADDR = 0X39
BNO055_SYS_ERR_ADDR = 0X3A
# Unit selection register
BNO055_UNIT_SEL_ADDR = 0X3B
BNO055_DATA_SELECT_ADDR = 0X3C
# Mode registers
BNO055_OPR_MODE_ADDR = 0X3D
BNO055_PWR_MODE_ADDR = 0X3E
BNO055_SYS_TRIGGER_ADDR = 0X3F
BNO055_TEMP_SOURCE_ADDR = 0X40
# Axis remap registers
BNO055_AXIS_MAP_CONFIG_ADDR = 0X41
BNO055_AXIS_MAP_SIGN_ADDR = 0X42
# Axis remap values
AXIS_REMAP_X = 0x00
AXIS_REMAP_Y = 0x01
AXIS_REMAP_Z = 0x02
AXIS_REMAP_POSITIVE = 0x00
AXIS_REMAP_NEGATIVE = 0x01
# SIC registers
BNO055_SIC_MATRIX_0_LSB_ADDR = 0X43
BNO055_SIC_MATRIX_0_MSB_ADDR = 0X44
BNO055_SIC_MATRIX_1_LSB_ADDR = 0X45
BNO055_SIC_MATRIX_1_MSB_ADDR = 0X46
BNO055_SIC_MATRIX_2_LSB_ADDR = 0X47
BNO055_SIC_MATRIX_2_MSB_ADDR = 0X48
BNO055_SIC_MATRIX_3_LSB_ADDR = 0X49
BNO055_SIC_MATRIX_3_MSB_ADDR = 0X4A
BNO055_SIC_MATRIX_4_LSB_ADDR = 0X4B
BNO055_SIC_MATRIX_4_MSB_ADDR = 0X4C
BNO055_SIC_MATRIX_5_LSB_ADDR = 0X4D
BNO055_SIC_MATRIX_5_MSB_ADDR = 0X4E
BNO055_SIC_MATRIX_6_LSB_ADDR = 0X4F
BNO055_SIC_MATRIX_6_MSB_ADDR = 0X50
BNO055_SIC_MATRIX_7_LSB_ADDR = 0X51
BNO055_SIC_MATRIX_7_MSB_ADDR = 0X52
BNO055_SIC_MATRIX_8_LSB_ADDR = 0X53
BNO055_SIC_MATRIX_8_MSB_ADDR = 0X54
# Accelerometer Offset registers
ACCEL_OFFSET_X_LSB_ADDR = 0X55
ACCEL_OFFSET_X_MSB_ADDR = 0X56
ACCEL_OFFSET_Y_LSB_ADDR = 0X57
ACCEL_OFFSET_Y_MSB_ADDR = 0X58
ACCEL_OFFSET_Z_LSB_ADDR = 0X59
ACCEL_OFFSET_Z_MSB_ADDR = 0X5A
# Magnetometer Offset registers
MAG_OFFSET_X_LSB_ADDR = 0X5B
MAG_OFFSET_X_MSB_ADDR = 0X5C
MAG_OFFSET_Y_LSB_ADDR = 0X5D
MAG_OFFSET_Y_MSB_ADDR = 0X5E
MAG_OFFSET_Z_LSB_ADDR = 0X5F
MAG_OFFSET_Z_MSB_ADDR = 0X60
# Gyroscope Offset registers
GYRO_OFFSET_X_LSB_ADDR = 0X61
GYRO_OFFSET_X_MSB_ADDR = 0X62
GYRO_OFFSET_Y_LSB_ADDR = 0X63
GYRO_OFFSET_Y_MSB_ADDR = 0X64
GYRO_OFFSET_Z_LSB_ADDR = 0X65
GYRO_OFFSET_Z_MSB_ADDR = 0X66
# Radius registers
ACCEL_RADIUS_LSB_ADDR = 0X67
ACCEL_RADIUS_MSB_ADDR = 0X68
MAG_RADIUS_LSB_ADDR = 0X69
MAG_RADIUS_MSB_ADDR = 0X6A
# Power modes
POWER_MODE_NORMAL = 0X00
POWER_MODE_LOWPOWER = 0X01
POWER_MODE_SUSPEND = 0X02
# Operation mode settings
OPERATION_MODE_CONFIG = 0X00
OPERATION_MODE_ACCONLY = 0X01
OPERATION_MODE_MAGONLY = 0X02
OPERATION_MODE_GYRONLY = 0X03
OPERATION_MODE_ACCMAG = 0X04
OPERATION_MODE_ACCGYRO = 0X05
OPERATION_MODE_MAGGYRO = 0X06
OPERATION_MODE_AMG = 0X07
OPERATION_MODE_IMUPLUS = 0X08
OPERATION_MODE_COMPASS = 0X09
OPERATION_MODE_M4G = 0X0A
OPERATION_MODE_NDOF_FMC_OFF = 0X0B
OPERATION_MODE_NDOF = 0X0C
logger = logging.getLogger(__name__)
class BNO055(object):
def __init__(self, rst=None, address=BNO055_ADDRESS_A, i2c=None, gpio=None,
serial_port=None, serial_timeout_sec=5, **kwargs):
# If reset pin is provided save it and a reference to provided GPIO
# bus (or the default system GPIO bus if none is provided).
self._rst = rst
if self._rst is not None:
if gpio is None:
import Adafruit_GPIO as GPIO
gpio = GPIO.get_platform_gpio()
self._gpio = gpio
# Setup the reset pin as an output at a high level.
self._gpio.setup(self._rst, GPIO.OUT)
self._gpio.set_high(self._rst)
            # Wait 650 milliseconds in case driving the reset pin high just reset the chip.
time.sleep(0.65)
self._serial = None
self.serial_attempt_delay = 0.0
self._i2c_device = None
if serial_port is not None:
# Use serial communication if serial_port name is provided.
# Open the serial port at 115200 baud, 8N1. Add a 5 second timeout
# to prevent hanging if device is disconnected.
self._serial = serial.Serial(serial_port, 115200, timeout=serial_timeout_sec,
writeTimeout=serial_timeout_sec)
else:
# Use I2C if no serial port is provided.
# Assume we're using platform's default I2C bus if none is specified.
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
# Save a reference to the I2C device instance for later communication.
self._i2c_device = i2c.get_i2c_device(address, **kwargs)
def _serial_send(self, command, ack=True, max_attempts=5):
# Send a serial command and automatically handle if it needs to be resent
        # because of a bus error. If ack is True then an acknowledgement is
# expected and only up to the maximum specified attempts will be made
# to get a good acknowledgement (default is 5). If ack is False then
# no acknowledgement is expected (like when resetting the device).
attempts = 0
while True:
# Flush any pending received data to get into a clean state.
self._serial.flushInput()
# Send the data.
self._serial.write(command)
logger.debug('Serial send: 0x{0}'.format(binascii.hexlify(command)))
# Stop if no acknowledgment is expected.
if not ack:
return
# Read acknowledgement response (2 bytes).
resp = bytearray(self._serial.read(2))
logger.debug('Serial receive: 0x{0}'.format(binascii.hexlify(resp)))
if resp is None or len(resp) != 2:
raise RuntimeError('Timeout waiting for serial acknowledge, is the BNO055 connected?')
# Stop if there's no bus error (0xEE07 response) and return response bytes.
if not (resp[0] == 0xEE and resp[1] == 0x07):
return resp
# Else there was a bus error so resend, as recommended in UART app
# note at:
# http://ae-bst.resource.bosch.com/media/products/dokumente/bno055/BST-BNO055-AN012-00.pdf
time.sleep(self.serial_attempt_delay)
attempts += 1
if attempts >= max_attempts:
raise RuntimeError('Exceeded maximum attempts to acknowledge serial command without bus error!')
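    # For reference, the UART frames built by the helpers below follow the
    # BNO055 protocol: register writes are 0xAA 0x00 <addr> <len> <data...>,
    # register reads are 0xAA 0x01 <addr> <len>; the chip replies 0xEE <status>
    # to writes and 0xBB <len> <data...> to reads (see the app note linked above).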
def _write_bytes(self, address, data, ack=True):
# Write a list of 8-bit values starting at the provided register address.
if self._i2c_device is not None:
# I2C write.
self._i2c_device.writeList(address, data)
else:
# Build and send serial register write command.
command = bytearray(4 + len(data))
command[0] = 0xAA # Start byte
command[1] = 0x00 # Write
command[2] = address & 0xFF
command[3] = len(data) & 0xFF
command[4:] = map(lambda x: x & 0xFF, data)
resp = self._serial_send(command, ack=ack)
# Verify register write succeeded if there was an acknowledgement.
            if ack and not (resp[0] == 0xEE and resp[1] == 0x01):
raise RuntimeError('Register write error: 0x{0}'.format(binascii.hexlify(resp)))
def _write_byte(self, address, value, ack=True):
# Write an 8-bit value to the provided register address. If ack is True
# then expect an acknowledgement in serial mode, otherwise ignore any
# acknowledgement (necessary when resetting the device).
if self._i2c_device is not None:
# I2C write.
self._i2c_device.write8(address, value)
else:
# Build and send serial register write command.
command = bytearray(5)
command[0] = 0xAA # Start byte
command[1] = 0x00 # Write
command[2] = address & 0xFF
command[3] = 1 # Length (1 byte)
command[4] = value & 0xFF
resp = self._serial_send(command, ack=ack)
# Verify register write succeeded if there was an acknowledgement.
            if ack and not (resp[0] == 0xEE and resp[1] == 0x01):
raise RuntimeError('Register write error: 0x{0}'.format(binascii.hexlify(resp)))
def _read_bytes(self, address, length):
# Read a number of unsigned byte values starting from the provided address.
if self._i2c_device is not None:
# I2C read.
return bytearray(self._i2c_device.readList(address, length))
else:
# Build and send serial register read command.
command = bytearray(4)
command[0] = 0xAA # Start byte
command[1] = 0x01 # Read
command[2] = address & 0xFF
command[3] = length & 0xFF
resp = self._serial_send(command)
# Verify register read succeeded.
if resp[0] != 0xBB:
raise RuntimeError('Register read error: 0x{0}'.format(binascii.hexlify(resp)))
# Read the returned bytes.
length = resp[1]
resp = bytearray(self._serial.read(length))
logger.debug('Received: 0x{0}'.format(binascii.hexlify(resp)))
if resp is None or len(resp) != length:
raise RuntimeError('Timeout waiting to read data, is the BNO055 connected?')
return resp
def _read_byte(self, address):
# Read an 8-bit unsigned value from the provided register address.
if self._i2c_device is not None:
# I2C read.
return self._i2c_device.readU8(address)
else:
return self._read_bytes(address, 1)[0]
def _read_signed_byte(self, address):
# Read an 8-bit signed value from the provided register address.
data = self._read_byte(address)
if data > 127:
return data - 256
else:
return data
def _config_mode(self):
# Enter configuration mode.
self.set_mode(OPERATION_MODE_CONFIG)
def _operation_mode(self):
# Enter operation mode to read sensor data.
self.set_mode(self._mode)
def begin(self, mode=OPERATION_MODE_NDOF):
"""Initialize the BNO055 sensor. Must be called once before any other
BNO055 library functions. Will return True if the BNO055 was
successfully initialized, and False otherwise.
"""
# Save the desired normal operation mode.
self._mode = mode
        # First send a throw-away command and ignore any response or I2C errors
# just to make sure the BNO is in a good state and ready to accept
# commands (this seems to be necessary after a hard power down).
try:
self._write_byte(BNO055_PAGE_ID_ADDR, 0, ack=False)
except IOError:
# Swallow an IOError that might be raised by an I2C issue. Only do
# this for this very first command to help get the BNO and board's
# I2C into a clear state ready to accept the next commands.
pass
# Make sure we're in config mode and on page 0.
self._config_mode()
self._write_byte(BNO055_PAGE_ID_ADDR, 0)
# Check the chip ID
bno_id = self._read_byte(BNO055_CHIP_ID_ADDR)
logger.debug('Read chip ID: 0x{0:02X}'.format(bno_id))
if bno_id != BNO055_ID:
return False
# Reset the device.
if self._rst is not None:
# Use the hardware reset pin if provided.
# Go low for a short period, then high to signal a reset.
self._gpio.set_low(self._rst)
time.sleep(0.01) # 10ms
self._gpio.set_high(self._rst)
else:
# Else use the reset command. Note that ack=False is sent because
# the chip doesn't seem to ack a reset in serial mode (by design?).
self._write_byte(BNO055_SYS_TRIGGER_ADDR, 0x20, ack=False)
# Wait 650ms after reset for chip to be ready (as suggested
# in datasheet).
time.sleep(0.65)
# Set to normal power mode.
self._write_byte(BNO055_PWR_MODE_ADDR, POWER_MODE_NORMAL)
# Default to internal oscillator.
self._write_byte(BNO055_SYS_TRIGGER_ADDR, 0x0)
# Enter normal operation mode.
self._operation_mode()
return True
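    # Typical usage sketch (illustrative, not executed here; the serial port
    # and reset pin are assumptions that depend on the host wiring):
    #     bno = BNO055(serial_port='/dev/serial0', rst=18)
    #     if not bno.begin():
    #         raise RuntimeError('Failed to initialize BNO055!')
    #     heading, roll, pitch = bno.read_euler()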
def set_mode(self, mode):
"""Set operation mode for BNO055 sensor. Mode should be a value from
table 3-3 and 3-5 of the datasheet:
http://www.adafruit.com/datasheets/BST_BNO055_DS000_12.pdf
"""
self._write_byte(BNO055_OPR_MODE_ADDR, mode & 0xFF)
        # Delay for 25 milliseconds (datasheet recommends 19ms, but a little more
# can't hurt and the kernel is going to spend some unknown amount of time
# too).
time.sleep(0.025)
def get_revision(self):
"""Return a tuple with revision information about the BNO055 chip. Will
return 5 values:
- Software revision
- Bootloader version
- Accelerometer ID
- Magnetometer ID
- Gyro ID
"""
# Read revision values.
accel = self._read_byte(BNO055_ACCEL_REV_ID_ADDR)
mag = self._read_byte(BNO055_MAG_REV_ID_ADDR)
gyro = self._read_byte(BNO055_GYRO_REV_ID_ADDR)
bl = self._read_byte(BNO055_BL_REV_ID_ADDR)
sw_lsb = self._read_byte(BNO055_SW_REV_ID_LSB_ADDR)
sw_msb = self._read_byte(BNO055_SW_REV_ID_MSB_ADDR)
sw = ((sw_msb << 8) | sw_lsb) & 0xFFFF
# Return the results as a tuple of all 5 values.
return (sw, bl, accel, mag, gyro)
def set_external_crystal(self, external_crystal):
"""Set if an external crystal is being used by passing True, otherwise
use the internal oscillator by passing False (the default behavior).
"""
# Switch to configuration mode.
self._config_mode()
# Set the clock bit appropriately in the SYS_TRIGGER register.
if external_crystal:
self._write_byte(BNO055_SYS_TRIGGER_ADDR, 0x80)
else:
self._write_byte(BNO055_SYS_TRIGGER_ADDR, 0x00)
# Go back to normal operation mode.
self._operation_mode()
def get_system_status(self, run_self_test=True):
"""Return a tuple with status information. Three values will be returned:
- System status register value with the following meaning:
0 = Idle
1 = System Error
2 = Initializing Peripherals
3 = System Initialization
4 = Executing Self-Test
5 = Sensor fusion algorithm running
6 = System running without fusion algorithms
- Self test result register value with the following meaning:
Bit value: 1 = test passed, 0 = test failed
Bit 0 = Accelerometer self test
Bit 1 = Magnetometer self test
Bit 2 = Gyroscope self test
Bit 3 = MCU self test
Value of 0x0F = all good!
- System error register value with the following meaning:
0 = No error
1 = Peripheral initialization error
2 = System initialization error
3 = Self test result failed
4 = Register map value out of range
5 = Register map address out of range
6 = Register map write error
7 = BNO low power mode not available for selected operation mode
8 = Accelerometer power mode not available
9 = Fusion algorithm configuration error
10 = Sensor configuration error
If run_self_test is passed in as False then no self test is performed and
None will be returned for the self test result. Note that running a
self test requires going into config mode which will stop the fusion
engine from running.
"""
self_test = None
if run_self_test:
# Switch to configuration mode if running self test.
self._config_mode()
# Perform a self test.
sys_trigger = self._read_byte(BNO055_SYS_TRIGGER_ADDR)
self._write_byte(BNO055_SYS_TRIGGER_ADDR, sys_trigger | 0x1)
# Wait for self test to finish.
time.sleep(1.0)
# Read test result.
self_test = self._read_byte(BNO055_SELFTEST_RESULT_ADDR)
# Go back to operation mode.
self._operation_mode()
# Now read status and error registers.
status = self._read_byte(BNO055_SYS_STAT_ADDR)
error = self._read_byte(BNO055_SYS_ERR_ADDR)
# Return the results as a tuple of all 3 values.
return (status, self_test, error)
def get_calibration_status(self):
"""Read the calibration status of the sensors and return a 4 tuple with
calibration status as follows:
- System, 3=fully calibrated, 0=not calibrated
- Gyroscope, 3=fully calibrated, 0=not calibrated
- Accelerometer, 3=fully calibrated, 0=not calibrated
- Magnetometer, 3=fully calibrated, 0=not calibrated
"""
# Return the calibration status register value.
cal_status = self._read_byte(BNO055_CALIB_STAT_ADDR)
sys = (cal_status >> 6) & 0x03
gyro = (cal_status >> 4) & 0x03
accel = (cal_status >> 2) & 0x03
mag = cal_status & 0x03
        # Return the results as a tuple of all 4 values.
return (sys, gyro, accel, mag)
def get_calibration(self):
"""Return the sensor's calibration data and return it as an array of
22 bytes. Can be saved and then reloaded with the set_calibration function
to quickly calibrate from a previously calculated set of calibration data.
"""
# Switch to configuration mode, as mentioned in section 3.10.4 of datasheet.
self._config_mode()
# Read the 22 bytes of calibration data and convert it to a list (from
# a bytearray) so it's more easily serialized should the caller want to
# store it.
cal_data = list(self._read_bytes(ACCEL_OFFSET_X_LSB_ADDR, 22))
# Go back to normal operation mode.
self._operation_mode()
return cal_data
def set_calibration(self, data):
"""Set the sensor's calibration data using a list of 22 bytes that
represent the sensor offsets and calibration data. This data should be
a value that was previously retrieved with get_calibration (and then
perhaps persisted to disk or other location until needed again).
"""
# Check that 22 bytes were passed in with calibration data.
if data is None or len(data) != 22:
raise ValueError('Expected a list of 22 bytes for calibration data.')
# Switch to configuration mode, as mentioned in section 3.10.4 of datasheet.
self._config_mode()
# Set the 22 bytes of calibration data.
self._write_bytes(ACCEL_OFFSET_X_LSB_ADDR, data)
# Go back to normal operation mode.
self._operation_mode()
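    # Illustrative persistence sketch (not executed; the file name is an
    # assumption): the 22 calibration bytes are plain ints, so they round-trip
    # through JSON without conversion:
    #     import json
    #     with open('calibration.json', 'w') as f:
    #         json.dump(bno.get_calibration(), f)
    #     ...
    #     with open('calibration.json') as f:
    #         bno.set_calibration(json.load(f))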
def get_axis_remap(self):
"""Return a tuple with the axis remap register values. This will return
6 values with the following meaning:
        - X axis remap (a value of AXIS_REMAP_X, AXIS_REMAP_Y, or AXIS_REMAP_Z,
which indicates that the physical X axis of the chip
is remapped to a different axis)
- Y axis remap (see above)
- Z axis remap (see above)
- X axis sign (a value of AXIS_REMAP_POSITIVE or AXIS_REMAP_NEGATIVE
which indicates if the X axis values should be positive/
normal or negative/inverted. The default is positive.)
- Y axis sign (see above)
- Z axis sign (see above)
Note that by default the axis orientation of the BNO chip looks like
the following (taken from section 3.4, page 24 of the datasheet). Notice
the dot in the corner that corresponds to the dot on the BNO chip:
| Z axis
|
| / X axis
____|__/____
Y axis / * | / /|
_________ /______|/ //
/___________ //
|____________|/
"""
# Get the axis remap register value.
map_config = self._read_byte(BNO055_AXIS_MAP_CONFIG_ADDR)
z = (map_config >> 4) & 0x03
y = (map_config >> 2) & 0x03
x = map_config & 0x03
# Get the axis remap sign register value.
sign_config = self._read_byte(BNO055_AXIS_MAP_SIGN_ADDR)
x_sign = (sign_config >> 2) & 0x01
y_sign = (sign_config >> 1) & 0x01
z_sign = sign_config & 0x01
        # Return the results as a tuple of all 6 values.
return (x, y, z, x_sign, y_sign, z_sign)
def set_axis_remap(self, x, y, z,
x_sign=AXIS_REMAP_POSITIVE, y_sign=AXIS_REMAP_POSITIVE,
z_sign=AXIS_REMAP_POSITIVE):
"""Set axis remap for each axis. The x, y, z parameter values should
be set to one of AXIS_REMAP_X, AXIS_REMAP_Y, or AXIS_REMAP_Z and will
        change the BNO's axis to represent another axis. Note that two axes
cannot be mapped to the same axis, so the x, y, z params should be a
unique combination of AXIS_REMAP_X, AXIS_REMAP_Y, AXIS_REMAP_Z values.
The x_sign, y_sign, z_sign values represent if the axis should be positive
or negative (inverted).
See the get_axis_remap documentation for information on the orientation
        of the axes on the chip, and consult section 3.4 of the datasheet.
"""
# Switch to configuration mode.
self._config_mode()
# Set the axis remap register value.
map_config = 0x00
map_config |= (z & 0x03) << 4
map_config |= (y & 0x03) << 2
map_config |= x & 0x03
self._write_byte(BNO055_AXIS_MAP_CONFIG_ADDR, map_config)
# Set the axis remap sign register value.
sign_config = 0x00
sign_config |= (x_sign & 0x01) << 2
sign_config |= (y_sign & 0x01) << 1
sign_config |= z_sign & 0x01
self._write_byte(BNO055_AXIS_MAP_SIGN_ADDR, sign_config)
# Go back to normal operation mode.
self._operation_mode()
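    # Illustrative sketch (not executed): for a board mounted with its X and Y
    # axes swapped and Z pointing the opposite way, the remap would look like
    #     bno.set_axis_remap(AXIS_REMAP_Y, AXIS_REMAP_X, AXIS_REMAP_Z,
    #                        z_sign=AXIS_REMAP_NEGATIVE)
    # subject to the constraint that each axis value is used exactly once.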
def _read_vector(self, address, count=3):
# Read count number of 16-bit signed values starting from the provided
# address. Returns a tuple of the values that were read.
data = self._read_bytes(address, count * 2)
result = [0] * count
for i in range(count):
result[i] = ((data[i * 2 + 1] << 8) | data[i * 2]) & 0xFFFF
if result[i] > 32767:
result[i] -= 65536
return result
def read_euler(self):
"""Return the current absolute orientation as a tuple of heading, roll,
and pitch euler angles in degrees.
"""
heading, roll, pitch = self._read_vector(BNO055_EULER_H_LSB_ADDR)
return (heading / 16.0, roll / 16.0, pitch / 16.0)
def read_magnetometer(self):
"""Return the current magnetometer reading as a tuple of X, Y, Z values
in micro-Teslas.
"""
x, y, z = self._read_vector(BNO055_MAG_DATA_X_LSB_ADDR)
return (x / 16.0, y / 16.0, z / 16.0)
def read_gyroscope(self):
"""Return the current gyroscope (angular velocity) reading as a tuple of
        X, Y, Z values in radians per second (the registers hold 900 LSB per rad/s).
"""
x, y, z = self._read_vector(BNO055_GYRO_DATA_X_LSB_ADDR)
return (x / 900.0, y / 900.0, z / 900.0)
def read_accelerometer(self):
"""Return the current accelerometer reading as a tuple of X, Y, Z values
in meters/second^2.
"""
x, y, z = self._read_vector(BNO055_ACCEL_DATA_X_LSB_ADDR)
return (x / 100.0, y / 100.0, z / 100.0)
def read_linear_acceleration(self):
"""Return the current linear acceleration (acceleration from movement,
not from gravity) reading as a tuple of X, Y, Z values in meters/second^2.
"""
x, y, z = self._read_vector(BNO055_LINEAR_ACCEL_DATA_X_LSB_ADDR)
return (x / 100.0, y / 100.0, z / 100.0)
def read_gravity(self):
"""Return the current gravity acceleration reading as a tuple of X, Y, Z
values in meters/second^2.
"""
x, y, z = self._read_vector(BNO055_GRAVITY_DATA_X_LSB_ADDR)
return (x / 100.0, y / 100.0, z / 100.0)
def read_quaternion(self):
"""Return the current orientation as a tuple of X, Y, Z, W quaternion
values.
"""
w, x, y, z = self._read_vector(BNO055_QUATERNION_DATA_W_LSB_ADDR, 4)
# Scale values, see 3.6.5.5 in the datasheet.
scale = (1.0 / (1 << 14))
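        # 1 quaternion unit = 2**14 LSB, so scale = 1 / 16384 ~= 6.1e-5 per count.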
return (x * scale, y * scale, z * scale, w * scale)
def read_temp(self):
"""Return the current temperature in Celsius."""
return self._read_signed_byte(BNO055_TEMP_ADDR)
|
francisc0garcia/autonomous_bicycle
|
src/classes/BNO055.py
|
Python
|
apache-2.0
| 28,496
|
[
"CRYSTAL"
] |
d572ae30d0e8efb22624c0aa37f4d1871b37ffea2ca39a0f75757fe319d30b1b
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
import mock
from stoqlib.domain.returnedsale import ReturnedSale
from stoqlib.domain.views import PendingReturnedSalesView, ReturnedSalesView
from stoqlib.gui.dialogs.returnedsaledialog import (ReturnedSaleDialog,
ReturnedSaleUndoDialog)
from stoqlib.gui.test.uitestutils import GUITest
class TestReturnedSaleDialog(GUITest):
def test_show_pending(self):
pending_return = self.create_pending_returned_sale()
pending_return.sale.identifier = 336
pending_return.identifier = 60
pending_return.reason = u'Teste'
model = self.store.find(PendingReturnedSalesView).one()
dialog = ReturnedSaleDialog(self.store, model)
self.check_dialog(dialog, 'dialog-receive-pending-returned-sale')
def test_show_undone(self):
rsale = self.create_returned_sale()
rsale.sale.identifier = 336
rsale.identifier = 60
rsale.reason = u'Teste'
rsale.undo_reason = u'Foo bar'
rsale.status = ReturnedSale.STATUS_CANCELLED
rsale.confirm_responsible = rsale.responsible
rsale.confirm_date = rsale.return_date
model = self.store.find(ReturnedSalesView).one()
dialog = ReturnedSaleDialog(self.store, model)
self.check_dialog(dialog, 'dialog-returned-sale-undone')
@mock.patch('stoqlib.gui.dialogs.returnedsaledialog.yesno')
def test_receive_pending_returned_sale(self, yesno):
self.create_pending_returned_sale()
model = self.store.find(PendingReturnedSalesView).one()
dialog = ReturnedSaleDialog(self.store, model)
self.assertEquals(dialog.receive_button.get_property('visible'), True)
self.assertEquals(model.returned_sale.status, ReturnedSale.STATUS_PENDING)
with mock.patch.object(self.store, 'commit'):
self.click(dialog.receive_button)
yesno.assert_called_once_with(u'Receive pending returned sale?',
gtk.RESPONSE_NO,
u'Receive', u"Don't receive")
self.assertEquals(model.returned_sale.status, ReturnedSale.STATUS_CONFIRMED)
@mock.patch('stoqlib.gui.dialogs.returnedsaledialog.print_report')
def test_print_button(self, print_report):
self.create_pending_returned_sale()
model = self.store.find(PendingReturnedSalesView).one()
dialog = ReturnedSaleDialog(self.store, model)
self.click(dialog.print_button)
print_report.assert_called_once_with(dialog.report_class, dialog.model)
@mock.patch('stoqlib.gui.dialogs.returnedsaledialog.run_dialog')
@mock.patch('stoqlib.gui.dialogs.saledetails.api.new_store')
def test_undo(self, new_store, run_dialog):
new_store.return_value = self.store
rsale = self.create_returned_sale()
rsale.status = ReturnedSale.STATUS_CONFIRMED
rsale.confirm_responsible = rsale.responsible
rsale.confirm_date = rsale.return_date
model = self.store.find(ReturnedSalesView).one()
dialog = ReturnedSaleDialog(self.store, model)
with mock.patch.object(self.store, 'commit'):
with mock.patch.object(self.store, 'close'):
self.click(dialog.undo_button)
run_dialog.assert_called_once_with(ReturnedSaleUndoDialog, dialog,
self.store, model.returned_sale)
class TestReturnedSaleUndoDialog(GUITest):
def test_show(self):
rsale = self.create_pending_returned_sale()
rsale.sale.identifier = 336
rsale.identifier = 60
rsale.status = ReturnedSale.STATUS_CONFIRMED
rsale.reason = u'Teste'
dialog = ReturnedSaleUndoDialog(self.store, rsale)
self.check_dialog(dialog, 'dialog-returned-sale-undo')
def test_cancel(self):
rsale = self.create_pending_returned_sale()
rsale.status = ReturnedSale.STATUS_CONFIRMED
dialog = ReturnedSaleUndoDialog(self.store, rsale)
self.assertFalse(rsale.is_undone())
self.click(dialog.main_dialog.cancel_button)
self.assertFalse(rsale.is_undone())
def test_undo(self):
rsale = self.create_pending_returned_sale()
rsale.status = ReturnedSale.STATUS_CONFIRMED
dialog = ReturnedSaleUndoDialog(self.store, rsale)
self.assertNotSensitive(dialog.main_dialog, ['ok_button'])
dialog.undo_reason.update('foo')
self.assertSensitive(dialog.main_dialog, ['ok_button'])
self.assertFalse(rsale.is_undone())
self.click(dialog.main_dialog.ok_button)
self.assertTrue(rsale.is_undone())
@mock.patch('stoqlib.gui.dialogs.returnedsaledialog.warning')
def test_undo_without_stock(self, warning):
product = self.create_product(storable=True)
rsale = self.create_pending_returned_sale(product=product)
rsale.status = ReturnedSale.STATUS_CONFIRMED
dialog = ReturnedSaleUndoDialog(self.store, rsale)
self.assertNotSensitive(dialog.main_dialog, ['ok_button'])
dialog.undo_reason.update('foo')
self.assertSensitive(dialog.main_dialog, ['ok_button'])
self.assertFalse(rsale.is_undone())
self.click(dialog.main_dialog.ok_button)
self.assertFalse(rsale.is_undone())
warning.assert_called_once_with(
'It was not possible to undo this returned sale. Some of the '
'returned products are out of stock.')
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_returnedsaledialog.py
|
Python
|
gpl-2.0
| 6,397
|
[
"VisIt"
] |
fb152a13260479d479f025a873cd6f5d625ef270db1702febe2efbfa653c8512
|
# encoding: UTF-8
#
# Copyright 2012-2013 Alejandro Autalán
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://pygubu.web.here
from __future__ import unicode_literals
import logging
try:
import tkinter as tk
except ImportError:
import Tkinter as tk
from pygubu.builder import builderobject
logger = logging.getLogger(__name__)
# translator marker
def _(x):
return x
TK_BITMAPS = (
'error', 'gray75', 'gray50', 'gray25', 'gray12',
'hourglass', 'info', 'questhead', 'question', 'warning',
    'document', 'stationery', 'edition', 'application', 'accessory',
    'folder', 'pfolder', 'trash', 'floppy', 'ramdisk', 'cdrom',
'preferences', 'querydoc', 'stop', 'note', 'caution'
)
TK_CURSORS = (
'arrow', 'based_arrow_down', 'based_arrow_up', 'boat',
'bogosity', 'bottom_left_corner', 'bottom_right_corner',
'bottom_side', 'bottom_tee', 'box_spiral', 'center_ptr',
'circle', 'clock', 'coffee_mug', 'cross', 'cross_reverse',
'crosshair', 'diamond_cross', 'dot', 'dotbox', 'double_arrow',
'draft_large', 'draft_small', 'draped_box', 'exchange', 'fleur',
'gobbler', 'gumby', 'hand1', 'hand2', 'heart', 'icon',
'iron_cross', 'left_ptr', 'left_side', 'left_tee', 'leftbutton',
'll_angle', 'lr_angle', 'man', 'middlebutton', 'mouse', 'none',
'pencil', 'pirate', 'plus', 'question_arrow', 'right_ptr',
'right_side', 'right_tee', 'rightbutton', 'rtl_logo',
'sailboat', 'sb_down_arrow', 'sb_h_double_arrow',
'sb_left_arrow', 'sb_right_arrow', 'sb_up_arrow',
'sb_v_double_arrow', 'shuttle', 'sizing', 'spider', 'spraycan',
'star', 'target', 'tcross', 'top_left_arrow', 'top_left_corner',
'top_right_corner', 'top_side', 'top_tee', 'trek', 'ul_angle',
'umbrella', 'ur_angle', 'watch', 'xterm', 'X_cursor')
TK_RELIEFS = (tk.FLAT, tk.RAISED, tk.SUNKEN, tk.GROOVE, tk.RIDGE)
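# Each entry below maps a Tk/ttk option name to the property editor the
# designer should use: an 'editor' key, optional 'params' forwarded to that
# editor, an optional 'default', and optional per-widget overrides keyed by
# class name. A hypothetical entry (for illustration only) looks like
#     'someoption': {
#         'editor': 'choice',
#         'params': {'values': ('', 'a', 'b'), 'state': 'readonly'},
#         'tk.Button': {'default': 'a'}},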
TK_WIDGET_OPTIONS = {
'accelerator': {
'editor': 'entry'},
'activerelief': {
'editor': 'choice',
'params': {
'values': ('', tk.FLAT, tk.RAISED, tk.SUNKEN,
tk.GROOVE, tk.RIDGE),
'state': 'readonly'}},
'activestyle': {
'editor': 'choice',
'params': {
'values': ('', 'underline', 'dotbox', 'none'),
'state': 'readonly'}},
'activebackground': {
'editor': 'colorentry'},
'activeborderwidth': {
'editor': 'entry'},
'activeforeground': {
'editor': 'colorentry'},
'after': {
'editor': 'entry'},
# ttk.Label
'anchor': {
'editor': 'choice',
'params': {'values': ('', tk.W, tk.CENTER, tk.E),
'state': 'readonly'},
'tk.Button': {
'params': {
'values': (
'', 'n', 'ne', 'nw', 'e', 'w', 's', 'se', 'sw', 'center'),
'state': 'readonly'}},
},
'aspect': {
'editor': 'entry'},
'autoseparators': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Label
'background': {
'editor': 'colorentry'},
# ttk.Frame, ttk.Label
'borderwidth': {
'editor': 'entry'},
'bigincrement': {
'editor': 'entry'},
'bitmap': {
'editor': 'choice',
'params': {'values': ('',) + TK_BITMAPS, 'state': 'readonly'}},
'blockcursor': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'buttonbackground': {
'editor': 'colorentry'},
'buttoncursor': {
'editor': 'choice',
'params': {'values': ('',) + TK_CURSORS, 'state': 'readonly'}},
'buttondownrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'buttonuprelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'class_': {
'editor': 'entry'},
'closeenough': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
# ttk.Treeview.Column
'column_anchor': {
'editor': 'choice',
'params': {'values': ('', tk.W, tk.CENTER, tk.E), 'state': 'readonly'},
'default': tk.W},
'command': {
'editor': 'entry'},
# ttk.Label
'compound': {
'editor': 'choice',
'params': {
'values': ('', tk.TOP, tk.BOTTOM, tk.LEFT, tk.RIGHT),
'state': 'readonly'}},
# ttk.Button
'confine': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'container': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'cursor': {
'editor': 'choice',
'params': {'values': ('',) + TK_CURSORS, 'state': 'readonly'}},
# ttk.Button
'default': {
'editor': 'choice',
'params': {'values': ('', 'normal', 'active', 'disabled')}},
'digits': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
'direction': {
'editor': 'choice',
'tk.Menubutton': {
'params': {'values': ('', tk.LEFT, tk.RIGHT, 'above'),
'state': 'readonly'}},
'ttk.Menubutton': {
'params': {
'values': ('', 'above', 'below', 'flush',
tk.LEFT, tk.RIGHT),
'state': 'readonly'}},
},
'disabledbackground': {
'editor': 'colorentry'},
'disabledforeground': {
'editor': 'colorentry'},
'elementborderwidth': {
'editor': 'entry'},
'endline': {
'editor': 'entry'},
# ttk.Checkbutton, ttk.Entry
'exportselection': {
'editor': 'choice',
'params': {'values': ('', 'true', 'false'), 'state': 'readonly'}},
# ttk.Label
'font': { 'editor': 'fontentry'},
# ttk.Label
'foreground': {
'editor': 'colorentry'},
# ttk.Spinbox
'format': {
'editor': 'entry'},
# ttk.Scale, ttk.Spinbox
'from_': {
'editor': 'spinbox',
'params': {'from_': -999, 'to': 999},
},
'handlepad': {
'editor': 'entry'},
'handlesize': {
'editor': 'entry'},
# ttk.Treeview.Column
'heading_anchor': {
'editor': 'choice',
'params': {
'values': ('', tk.W, tk.CENTER, tk.E), 'state': 'readonly'},
'default': tk.W},
# ttk.Frame,
'height': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
'validator': 'number_integer',
'tk.Toplevel': {'default': 200},
'tk.Frame': {'default': 200},
'ttk.Frame': {'default': 200},
'tk.LabelFrame': {'default': 200},
'ttk.Labelframe': {'default': 200},
'tk.PanedWindow': {'default': 200},
'ttk.Panedwindow': {'default': 200},
'ttk.Notebook': {'default': 200},
'tk.Text': {'default': 10},
'pygubu.builder.widgets.dialog': {'default': 100}},
'highlightbackground': {
'editor': 'colorentry'},
'highlightcolor': {
'editor': 'colorentry'},
'highlightthickness': {
'editor': 'entry'},
# ttk.Label
'image': {
'editor': 'imageentry'},
'inactiveselectbackground': {
'editor': 'colorentry'},
# ttk.Spinbox
'increment': {
'editor': 'spinbox',
'params': {'from_': -999, 'to': 999}
},
'indicatoron': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'insertbackground': {
'editor': 'colorentry'},
'insertborderwidth': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
'insertofftime': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100},
},
'insertontime': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100},
},
'insertunfocussed': {
'editor': 'choice',
'params': {
'values': ('', 'none', 'hollow', 'solid'),
'state': 'readonly'}},
'insertwidth': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
# ttk.Entry
'invalidcommand': {
'editor': 'entry'},
'jump': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Label
'justify': {
'editor': 'choice',
'params': {'values': ('', 'left', 'center', 'right'),
'state': 'readonly'}},
'label': {
'editor': 'entry'},
# ttk.Labelframe
'labelanchor': {
'editor': 'choice',
'params': {
'values': ('', 'nw', 'n', 'ne', 'en', 'e', 'es',
'se', 's', 'sw', 'ws', 'w'),
'state': 'readonly'}},
# ttk.Progressbar
'length': {
'editor': 'entry'},
'listvariable': {
'editor': 'tkvarentry'},
# ttk.Progressbar
'maximum': {
'editor': 'entry'},
'maxundo': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
'minsize': {
'editor': 'entry'},
# ttk.Treeview.Column
'minwidth': {
'editor': 'spinbox',
'params': {'from_': 5, 'to': 999},
'default': '20'},
# ttk.Progressbar
'mode': {
'editor': 'choice',
'params': {
'values': ('', 'determinate', 'indeterminate'),
'state': 'readonly'}},
'offrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
# ttk.Checkbutton
'offvalue': {
'editor': 'entry',
'help': _('offvalue_help')},
# ttk.Checkbutton
'onvalue': {
'editor': 'entry'},
'opaqueresize': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Panedwindow
'orient': {
'editor': 'choice',
'params': {'values': (tk.VERTICAL, tk.HORIZONTAL),
'state': 'readonly'},
'default': tk.HORIZONTAL
},
'overrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}
},
# ttk.Frame, ttk.Label
'padding': {
'editor': 'entry'},
'padx': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
'pady': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999},
},
# ttk.Checkbutton
'postcommand': {
'editor': 'entry'},
'readonlybackground': {
'editor': 'colorentry'},
# ttk.Frame,
'relief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'repeatdelay': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100},
},
'repeatinterval': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 9999, 'increment': 100}},
'resolution': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'increment': 0.5},
},
'sliderlength': {
'editor': 'entry'},
'sliderrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'sashcursor': {
'editor': 'choice',
'params': {'values': ('',) + TK_CURSORS, 'state': 'readonly'}},
'sashpad': {
'editor': 'entry'},
'sashrelief': {
'editor': 'choice',
'params': {'values': ('',) + TK_RELIEFS, 'state': 'readonly'}},
'sashwidth': {
'editor': 'entry'},
'selectbackground': {
'editor': 'colorentry'},
'selectborderwidth': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}},
'selectforeground': {
'editor': 'colorentry'},
'scrollregion': {
'editor': 'entry'},
'selectcolor': {
'editor': 'colorentry'},
'selectimage': {
'editor': 'imageentry'},
# ttk.Treeview
'selectmode': {
'editor': 'choice',
'params': {
'values': ('', tk.BROWSE, tk.SINGLE, tk.MULTIPLE, tk.EXTENDED),
'state': 'readonly'},
'ttk.Treeview': {
'params': {
'values': (tk.EXTENDED, tk.BROWSE, tk.NONE),
'state': 'readonly'},
'default': tk.EXTENDED}
},
'setgrid': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
# ttk.Entry
'show': {
'editor': 'choice',
'tk.Entry': {
'params': {'values': ('', '•'), 'state': 'normal'},
},
'ttk.Entry': {
'params': {'values': ('', '•'), 'state': 'normal'},
},
'ttk.Treeview': {
'params': {
'values': ('', 'tree', 'headings'), 'state': 'readonly'}
},
'pygubu.builder.widgets.editabletreeview': {
'params': {
'values': ('', 'tree', 'headings'), 'state': 'readonly'}
},
},
'showhandle': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'showvalue': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'spacing1': {
'editor': 'entry'},
'spacing2': {
'editor': 'entry'},
'spacing3': {
'editor': 'entry'},
'startline': {
'editor': 'entry'},
'state': {
'editor': 'choice',
'params': {'values': ('', tk.NORMAL, tk.DISABLED),
'state': 'readonly'},
'tk.Button': {
'params': {
'values': ('', tk.NORMAL, tk.ACTIVE, tk.DISABLED),
'state': 'readonly'}},
'tk.Entry': {
'params': {
'values': ('', tk.NORMAL, tk.DISABLED, 'readonly'),
'state': 'readonly'}},
'tk.Combobox': {
'params': {
'values': ('', 'readonly'), 'state': 'readonly'}},
'ttk.Entry': {
'params': {
'values': ('', tk.NORMAL, tk.DISABLED, 'readonly'),
'state': 'readonly'}},
'ttk.Combobox': {
'params': {
'values': ('', 'normal', 'readonly', 'disabled'),
'state': 'readonly'}},
'ttk.Button': {
'params': {
'values': ('', 'normal', 'disabled'),
'state': 'readonly'}},
'ttk.Notebook.Tab': {
'params': {
'values': ('', 'normal', 'disabled', 'hidden'),
'state': 'readonly'}}},
# ttk.Notebook.Tab
'sticky': {
'editor': 'stickyentry',
'params': {}},
# ttk.Treeview.Column
'stretch': {
'editor': 'choice',
'ttk.Treeview.Column': {
'params': {'values': ('true', 'false'), 'state': 'readonly'},
'default': 'true'},
'tk.PanedWindow.Pane': {
'params': {
'values': ('', 'always', 'first', 'last', 'middle', 'never'),
'state': 'readonly'}}},
'style': {
'editor': 'choice'},
'tabs': {
'editor': 'entry'}, # FIXME see tk.Text tab property
'tabstyle': {
'editor': 'choice',
'params': {
'values': ('', 'tabular', 'wordprocessor'),
'state': 'readonly'}},
'takefocus': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'tearoff': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'tearoffcommand': {
'editor': 'entry' },
# ttk.Label
'text': {
'editor': 'text'},
# ttk.Label
'textvariable': {
'editor': 'tkvarentry'},
'tickinterval': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'increment': 0.5},
},
# ttk.Scale, ttk.Spinbox
'to': {
'editor': 'spinbox',
'params': {'from_': -999, 'to': 999},
},
'tristateimage': {
'editor': 'imageentry'},
'tristatevalue': {
'editor': 'entry'},
'troughcolor': {
'editor': 'colorentry'},
# ttk.Label
'underline': {
'editor': 'spinbox'},
'undo': {
'editor': 'choice',
'params': {'values': ('', 'false', 'true'), 'state': 'readonly'}},
'value': {
'editor': 'entry'},
# ttk.Checkbutton
'values': {
'editor': 'entry'},
'validate': {
'editor': 'choice',
'params': {
'values': ('', 'none', 'focus', 'focusin',
'focusout', 'key', 'all'),
'state': 'readonly'}},
'validatecommand': {
'editor': 'entry'},
# ttk.Checkbutton
'variable': {
'editor': 'tkvarentry'},
# ttk.Panedwindow.Pane
'weight': {
'editor': 'spinbox', 'params': {'from_': 0, 'to': 999}},
# ttk.Frame, ttk.Label
'width': {
'editor': 'dynamic',
'params': {'mode': 'spinbox', 'from_': 0, 'to': 999},
'tk.Button': {
'params': {'mode': 'spinbox', 'from_': -999, 'to': 999}},
'ttk.Button': {
'params': {'mode': 'spinbox', 'from_': -999, 'to': 999}},
'tk.Canvas': {
'params': {'mode': 'entry'}
},
'tk.Toplevel': {
'default': 200},
'tk.Frame': {
'default': 200},
'ttk.Frame': {
'default': 200},
'tk.LabelFrame': {
'default': 200},
'ttk.Labelframe': {
'default': 200},
'tk.PanedWindow': {
'default': 200},
'ttk.Panedwindow': {
'default': 200},
'ttk.Notebook': {
'default': 200},
'tk.Text': {
'default': 50},
'ttk.Treeview.Column': {
'params': {'mode': 'spinbox', 'from_': 5},
'default': 200},
'pygubu.builder.widgets.dialog': {
'default': 200}},
# ttk.Spinbox
'wrap': {
'editor': 'choice',
'params': {
'values': ('', 'false', 'true'),
'state': 'readonly'},
'tk.Text': {
'params': {
'values': ('', tk.CHAR, tk.WORD, tk.NONE),
'state': 'readonly'}}
},
# ttk.Label
'wraplength': {
'editor': 'entry'},
# ttk.Entry
'xscrollcommand': {
'editor': 'entry'},
'xscrollincrement': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}
},
# ttk.Treeview
'yscrollcommand': {
'editor': 'entry'},
'yscrollincrement': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999}
},
}
REQUIRED_OPTIONS = {
'class': {
'editor': 'entry',
'params': {'state': 'readonly'}},
'id': {
'editor': 'entry'},
}
CUSTOM_OPTIONS = {
'command_id_arg': {
'editor': 'choice',
'params': {
'values': ('true', 'false'),
'state': 'readonly'},
'default': 'false'},
'geometry': {
'editor': 'entry'},
'invalidcommand_args': {
'editor': 'entry'},
'maxsize': {
'editor': 'whentry'},
'minsize': {
'editor': 'whentry'},
'overrideredirect': {
'editor': 'choice',
'params': {'values': ('', 'True', 'False'), 'state': 'readonly'}},
'resizable': {
'editor': 'choice',
'params': {
'values': ('', 'both', 'horizontally', 'vertically', 'none'),
'state': 'readonly'}},
'scrolltype': {
'editor': 'choice',
'params': {
'values': ('both', 'vertical', 'horizontal'),
'state': 'readonly'},
'default': 'both'},
'text': {
'editor': 'text'},
'title': {
'editor': 'entry'},
'tree_column': {
'editor': 'choice',
'params': {'values': ('true', 'false'), 'state': 'readonly'},
'default': 'false'},
'validatecommand_args': {
'editor': 'entry'},
'visible': {
'editor': 'choice',
'params': {'values': ('true', 'false'), 'state': 'readonly'},
'default': 'true'},
}
WIDGET_REQUIRED_OPTIONS = ('class', 'id')
WIDGET_STANDARD_OPTIONS = (
'accelerator', 'activerelief', 'activestyle', 'activebackground',
'activeborderwidth', 'activeforeground', 'after',
'anchor', 'background', 'bitmap', 'borderwidth',
'class_', 'compound', 'cursor', 'disabledforeground',
'exportselection',
'font', 'foreground', 'jump', 'highlightbackground',
'highlightcolor', 'highlightthickness', 'image',
'indicatoron', 'insertbackground',
'insertborderwidth', 'insertofftime', 'insertontime', 'insertwidth',
'justify', 'orient', 'padx', 'pady', 'relief',
'repeatdelay', 'repeatinterval', 'selectbackground', 'selectborderwidth',
'selectforeground', 'setgrid', 'state', 'style', 'takefocus', 'text',
'textvariable', 'troughcolor', 'underline', 'width', 'wraplength',
'xscrollcommand', 'yscrollcommand')
WIDGET_SPECIFIC_OPTIONS = (
'activestyle', 'activerelief', 'anchor', 'aspect',
'autoseparators', 'background', 'bigincrement',
'blockcursor', 'borderwidth', 'buttonbackground', 'buttoncursor',
'buttondownrelief', 'buttonuprelief',
'class_', 'column_anchor', 'command', 'compound', 'container',
'closeenough', 'confine', 'default', 'digits', 'direction',
'disabledbackground', 'disabledforeground', 'elementborderwidth',
'endline', 'exportselection', 'font',
'foreground', 'format', 'from_', 'to',
'inactiveselectbackground', 'increment', 'insertunfocussed',
'invalidcommand', 'justify', 'handlepad', 'handlesize',
'heading_anchor', 'height', 'image', 'indicatoron',
'label', 'labelanchor', 'listvariable', 'length',
'maximum', 'maxundo',
'minsize', 'minwidth', 'mode', 'offrelief', 'offvalue',
'onvalue', 'opaqueresize', 'orient', 'overrelief',
'padding', 'padx', 'pady',
'postcommand', 'readonlybackground', 'relief', 'resolution',
'scrollregion', 'sashcursor', 'sashpad', 'sashrelief', 'sashwidth',
'selectcolor', 'selectimage', 'selectmode', 'show',
'showhandle', 'showvalue', 'sliderlength', 'sliderrelief',
'spacing1', 'spacing2', 'spacing3', 'startline',
'state', 'sticky', 'stretch', 'tabs', 'tabstyle',
'text', 'textvariable', 'tickinterval', 'tristateimage',
'tristatevalue', 'underline', 'validate', 'undo', 'validatecommand',
'value', 'values', 'variable', 'weight', 'width', 'wrap',
'wraplength', 'xscrollincrement', 'yscrollincrement',
'tearoff', 'tearoffcommand'
)
WIDGET_CUSTOM_OPTIONS = [
'command_id_arg', 'invalidcommand_args', 'tree_column',
'validatecommand_args', 'visible', 'scrolltype', 'text',
'title', 'geometry', 'overrideredirect', 'resizable',
'minsize', 'maxsize'
]
WIDGET_PROPERTIES = wp = dict(TK_WIDGET_OPTIONS)
wp.update(REQUIRED_OPTIONS)
wp.update(CUSTOM_OPTIONS)
LAYOUT_OPTIONS = {
# grid packing properties
    'row': {
        'editor': 'spinbox',
        'params': {'from_': 0, 'to': 50},
        'validator': 'number_integer'},
    'column': {
        'editor': 'spinbox',
        'params': {'from_': 0, 'to': 50},
        'validator': 'number_integer'},
    'sticky': {
        'editor': 'stickyentry',
        'params': {}},
    'rowspan': {
        'editor': 'spinbox',
        'params': {'from_': 1, 'to': 50},
        'validator': 'number_integer'},
    'columnspan': {
        'editor': 'spinbox',
        'params': {'from_': 1, 'to': 50},
        'validator': 'number_integer'},
    'padx': {'editor': 'entry', 'validator': 'tkpadding2'},
    'pady': {'editor': 'entry', 'validator': 'tkpadding2'},
    'ipadx': {
        'editor': 'spinbox',
        'params': {'from_': 0, 'to': 999},
        'validator': 'number_integer'},
    'ipady': {
        'editor': 'spinbox',
        'params': {'from_': 0, 'to': 999},
        'validator': 'number_integer'},
'propagate': {
'editor': 'choice',
'params': {'values': ('True', 'False'), 'state': 'readonly'},
'default': 'True'},
#
# grid row and column properties (can be applied to each row or column)
#
'minsize': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'state': 'readonly', 'width': 3}},
'pad': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'state': 'readonly', 'width': 3}},
'weight': {
'editor': 'spinbox',
'params': {'from_': 0, 'to': 999, 'state': 'readonly', 'width': 3}}
}
GRID_PROPERTIES = [
'row', 'column', 'sticky', 'rowspan', 'columnspan', 'padx', 'pady',
'ipadx', 'ipady', 'propagate']
GRID_RC_PROPERTIES = ['minsize', 'pad', 'weight']
TRANSLATABLE_PROPERTIES = [
'label', 'text', 'title',
]
def _register_custom(name, descr):
if name not in CUSTOM_OPTIONS:
CUSTOM_OPTIONS[name] = descr
WIDGET_PROPERTIES.update(CUSTOM_OPTIONS)
WIDGET_CUSTOM_OPTIONS.append(name)
WIDGET_CUSTOM_OPTIONS.sort()
        logger.debug('Registered property: {0}'.format(name))
def register_property(name, descr):
_register_custom(name, descr)
builderobject._old_register_property(name, descr)
if not hasattr(builderobject, '_register_fixed_'):
for name, descr in builderobject.CUSTOM_PROPERTIES.items():
_register_custom(name, descr)
builderobject._register_fixed_ = True
builderobject._old_register_property = builderobject.register_property
builderobject.register_property = register_property
logger.debug('Installed custom register_property function')
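# --- Illustrative usage sketch (not part of the original pygubu module) ---
# With the hook installed above, a widget plugin can expose an additional
# designer property through the patched register_property; the property
# name and descriptor below are hypothetical examples.
if __name__ == '__main__':  # example only
    register_property('examplecustomoption',
                      {'editor': 'entry', 'default': 'example'})
    logger.debug('Example property registered: %s',
                 'examplecustomoption' in WIDGET_CUSTOM_OPTIONS)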
|
mhcrnl/pygubu
|
pygubudesigner/properties.py
|
Python
|
gpl-3.0
| 26,413
|
[
"FLEUR"
] |
f4090d08d5e9828194bddae5606a9746f6a696b8ec5d7dddc893e372639cd937
|
#!/usr/bin/env python
'''
$Id: drawMIweb.py 1390 2013-04-16 00:42:22Z apandini $
Copyright (C) 2009-13 Alessandro Pandini
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import os
import Tkinter
from Tkinter import *
import Pmw
from pymol import cmd
from pymol import stored
def __init__(self):
self.menuBar.addmenuitem( 'Plugin', 'command',
'drawMIweb',
label = 'drawMIweb',
command = lambda s=self : DistMatTool(s))
class DistMatTool:
def __init__(self,app):
parent = app.root
self.parent = parent
try:
self.min = stored.minValue
except:
self.min = 0.0
try:
self.mid = stored.midValue
except:
self.mid = 0.0
try:
self.max = stored.maxValue
except:
self.max = 0.0
self.incr = 0.1
try:
self.filename = stored.filename
except:
self.filename = 'MI.mat'
self.dialog = Pmw.Dialog(parent,
buttons = ('Show edges', 'Remove edges', 'Update Cutoff Range', 'Exit drawMIweb Tool'),
title = 'PyMOL drawMIweb Tool',
command = self.execute)
Pmw.setbusycursorattributes(self.dialog.component('hull'))
w = Tkinter.Label(self.dialog.interior(),
text = 'PyMOL drawMIweb Tool\nAlessandro Pandini (C) 2009-13',
background = 'black',
foreground = 'white',
)
w.pack(expand = 1, fill = 'both', padx = 4, pady = 4)
group = Pmw.Group(self.dialog.interior(),tag_text='Main options')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 5)
self.object = Pmw.EntryField(group.interior(),
labelpos='w',
label_text='Object: ',
value='all',
)
self.distfilename = Pmw.EntryField(group.interior(),
labelpos='w',
label_text='MI matrix filename',
value= self.filename,
)
self.cutoff = Pmw.Counter(group.interior(),
labelpos = 'w',
label_text = 'MI Cutoff',
label_justify = 'left',
entryfield_value = self.mid,
datatype = {'counter' : 'real', 'separator' : '.'},
entryfield_validate = {'validator' : 'real',
'min' : self.min, 'max' : self.max,
'separator' : '.'},
increment = self.incr)
self.reptype = Pmw.OptionMenu(group.interior(),
labelpos = 'w',
label_text = 'Representation type',
items = ['cartoon', 'ribbon', 'sticks', 'spheres'],
)
self.protpart = Pmw.OptionMenu(group.interior(),
labelpos = 'w',
label_text = 'Protein part',
items = ['all', 'backbone', 'C Alpha'],
)
self.colorscale = Pmw.OptionMenu(group.interior(),
labelpos = 'w',
label_text = 'color scale',
items = ['none', 'greyscale', 'colorscale'],
)
self.dashproportional = Pmw.OptionMenu(group.interior(),
labelpos = 'w',
label_text = 'proportional dashes',
items = ['no', 'yes'],
)
for entry in (self.object,self.distfilename,self.cutoff,self.reptype,self.protpart,self.colorscale,self.dashproportional):
entry.pack(fill='x',padx=4,pady=1)
self.showAppModal()
def showAppModal(self):
self.dialog.show()
def changeRepresentation(self,name):
cmd.bg_color('white')
cmd.hide('everything', name)
cmd.color('green', name)
if self.reptype.getvalue() == 'sticks':
part = ''
if self.protpart.getvalue() == 'backbone':
part = ' and name c+n+o+ca'
cmd.show(self.reptype.getvalue(), name + part)
else:
if self.reptype.getvalue() == 'spheres':
part = ''
if self.protpart.getvalue() == 'backbone':
part = ' and name c+n+o+ca'
if self.protpart.getvalue() == 'C Alpha':
part = ' and name ca'
cmd.show(self.reptype.getvalue(), name + part)
else:
cmd.show(self.reptype.getvalue(), name)
def read_distfile(self, filename):
fin = open(filename.getvalue())
lines = fin.readlines()
fin.close()
if len(lines) != len(lines[0].split()):
print "Error: file %s is not a square matrix!" % filename
else:
mat = []
nrow = len(lines)
ncol = nrow
for lidx in range(nrow):
values = [float(x) for x in lines[lidx].split()]
mat.append(values)
return(mat)
def create_FMN_colors(self):
cmd.set_color("FMN001", [ 0.00, 0.00, 1.00])
cmd.set_color("FMN002", [ 0.04, 0.02, 0.96])
cmd.set_color("FMN003", [ 0.08, 0.05, 0.92])
cmd.set_color("FMN004", [ 0.12, 0.08, 0.87])
cmd.set_color("FMN005", [ 0.16, 0.10, 0.84])
cmd.set_color("FMN006", [ 0.20, 0.13, 0.79])
cmd.set_color("FMN007", [ 0.24, 0.16, 0.75])
cmd.set_color("FMN008", [ 0.28, 0.18, 0.71])
cmd.set_color("FMN009", [ 0.33, 0.21, 0.67])
cmd.set_color("FMN010", [ 0.36, 0.24, 0.63])
cmd.set_color("FMN011", [ 0.41, 0.26, 0.59])
cmd.set_color("FMN012", [ 0.45, 0.29, 0.55])
cmd.set_color("FMN013", [ 0.49, 0.31, 0.51])
cmd.set_color("FMN014", [ 0.53, 0.34, 0.47])
cmd.set_color("FMN015", [ 0.57, 0.37, 0.43])
cmd.set_color("FMN016", [ 0.61, 0.40, 0.38])
cmd.set_color("FMN017", [ 0.65, 0.42, 0.35])
cmd.set_color("FMN018", [ 0.69, 0.45, 0.31])
cmd.set_color("FMN019", [ 0.73, 0.47, 0.26])
cmd.set_color("FMN020", [ 0.77, 0.50, 0.22])
cmd.set_color("FMN021", [ 0.82, 0.53, 0.18])
cmd.set_color("FMN022", [ 0.85, 0.55, 0.14])
cmd.set_color("FMN023", [ 0.89, 0.58, 0.10])
cmd.set_color("FMN024", [ 0.94, 0.60, 0.06])
cmd.set_color("FMN025", [ 0.98, 0.63, 0.02])
cmd.set_color("FMN026", [ 1.00, 0.63, 0.00])
cmd.set_color("FMN027", [ 1.00, 0.60, 0.00])
cmd.set_color("FMN028", [ 1.00, 0.58, 0.00])
cmd.set_color("FMN029", [ 1.00, 0.55, 0.00])
cmd.set_color("FMN030", [ 1.00, 0.53, 0.00])
cmd.set_color("FMN031", [ 1.00, 0.50, 0.00])
cmd.set_color("FMN032", [ 1.00, 0.47, 0.00])
cmd.set_color("FMN033", [ 1.00, 0.45, 0.00])
cmd.set_color("FMN034", [ 1.00, 0.42, 0.00])
cmd.set_color("FMN035", [ 1.00, 0.40, 0.00])
cmd.set_color("FMN036", [ 1.00, 0.37, 0.00])
cmd.set_color("FMN037", [ 1.00, 0.34, 0.00])
cmd.set_color("FMN038", [ 1.00, 0.31, 0.00])
cmd.set_color("FMN039", [ 1.00, 0.29, 0.00])
cmd.set_color("FMN040", [ 1.00, 0.26, 0.00])
cmd.set_color("FMN041", [ 1.00, 0.24, 0.00])
cmd.set_color("FMN042", [ 1.00, 0.21, 0.00])
cmd.set_color("FMN043", [ 1.00, 0.18, 0.00])
cmd.set_color("FMN044", [ 1.00, 0.16, 0.00])
cmd.set_color("FMN045", [ 1.00, 0.13, 0.00])
cmd.set_color("FMN046", [ 1.00, 0.10, 0.00])
cmd.set_color("FMN047", [ 1.00, 0.08, 0.00])
cmd.set_color("FMN048", [ 1.00, 0.05, 0.00])
cmd.set_color("FMN049", [ 1.00, 0.02, 0.00])
cmd.set_color("FMN050", [ 1.00, 0.00, 0.00])
def execute(self, result):
self.create_FMN_colors()
if result == 'Show edges':
mat = self.read_distfile(self.distfilename)
nrow = len(mat)
ncol = nrow
cutoff = float(self.cutoff.getvalue())
try:
self.changeRepresentation(self.object.getvalue())
except:
print "Error: cannot change representation of object %s!" % self.object.getvalue()
try:
for d in stored.distancelist:
cmd.delete(d)
except:
pass
try:
stored.distancelist = []
for i in range(nrow):
for j in range(i,ncol):
if mat[i][j] > cutoff:
cmd.distance('%s_%s' % (i + 1,j + 1), "name CA and resi %s" % (i + 1), "name CA and resi %s" % (j + 1))
cmd.hide('label', '%s_%s' % (i + 1,j + 1))
cmd.set('dash_gap', 0, '%s_%s' % (i + 1,j + 1))
cmd.set('dash_width', 4, '%s_%s' % (i + 1,j + 1))
if self.colorscale.getvalue() == 'greyscale':
cmd.color("grey%02d" % int((mat[i][j] - cutoff)/(self.max - cutoff) * 99), '%s_%s' % (i + 1,j + 1))
if self.colorscale.getvalue() == 'colorscale':
cmd.color("FMN%03d" % (int((mat[i][j] / self.max * 49)) + 1), '%s_%s' % (i + 1,j + 1))
if self.dashproportional.getvalue() == 'yes':
cmd.set('dash_radius', (mat[i][j] / self.max) * 0.25, '%s_%s' % (i + 1,j + 1))
stored.distancelist.append('%s_%s' % (i + 1,j + 1))
except:
print "Error: inconsistency in the residue numbering!"
else:
if result == 'Update Cutoff Range':
mat = self.read_distfile(self.distfilename)
nrow = len(mat)
ncol = nrow
minV = self.min
maxV = self.max
for i in range(nrow):
for j in range(i,ncol):
if self.min > mat[i][j]:
stored.minValue = mat[i][j] - self.incr
self.min = stored.minValue
minV = mat[i][j]
if self.max < mat[i][j]:
stored.maxValue = mat[i][j] + self.incr
self.max = stored.maxValue
maxV = mat[i][j]
if minV >= 0.0 and self.min < 0.0:
self.min = 0.0
stored.midValue = self.max - (self.max - self.min) / 2
self.mid = stored.midValue
print "max %8.3f" % maxV
print "min %8.3f" % minV
stored.filename = self.distfilename.getvalue()
self.dialog.withdraw()
else:
if result == 'Remove edges':
try:
for d in stored.distancelist:
cmd.delete(d)
except:
pass
else:
if __name__ == '__main__':
self.parent.destroy()
else:
self.dialog.withdraw()
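# --- Illustrative helper (not part of the original plugin) ---
# The greyscale branch in execute() maps an MI value in (cutoff, max] onto
# PyMOL's built-in grey00..grey99 colors; this standalone sketch applies the
# same mapping with the index clamped to the valid range.
def mi_to_grey_name(value, cutoff, max_value):
    index = int((value - cutoff) / (max_value - cutoff) * 99)
    return "grey%02d" % max(0, min(99, index))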
|
jkleinj/GSAtools
|
Rscripts/pymolscripts/drawMIweb.py
|
Python
|
gpl-3.0
| 13,337
|
[
"PyMOL"
] |
4a4b65f7966894812f417702ccf09ac9a907a81427a6c142ade151d8195422f7
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import time
import itertools
import numpy as np
import mdtraj as md
from mdtraj.testing import eq, assert_allclose
from mdtraj.geometry.distance import compute_distances, compute_displacements, find_closest_contact
from mdtraj.geometry.distance import _displacement_mic, _displacement
N_FRAMES = 20
N_ATOMS = 20
xyz = np.asarray(np.random.randn(N_FRAMES, N_ATOMS, 3), dtype=np.float32)
pairs = np.array(list(itertools.combinations(range(N_ATOMS), 2)), dtype=np.int32)
ptraj = md.Trajectory(xyz=xyz, topology=None)
ptraj.unitcell_vectors = np.ascontiguousarray(np.random.randn(N_FRAMES, 3, 3) + 2 * np.eye(3, 3), dtype=np.float32)
def test_generator():
pairs2 = itertools.combinations(range(N_ATOMS), 2)
a = compute_distances(ptraj, pairs)
b = compute_distances(ptraj, pairs2)
eq(a, b)
def test_0():
a = compute_distances(ptraj, pairs, periodic=False, opt=True)
b = compute_distances(ptraj, pairs, periodic=False, opt=False)
eq(a, b)
def test_1():
a = compute_displacements(ptraj, pairs, periodic=False, opt=True)
b = compute_displacements(ptraj, pairs, periodic=False, opt=False)
eq(a, b)
def test_2():
a = compute_distances(ptraj, pairs, periodic=False, opt=False)
b = compute_displacements(ptraj, pairs, periodic=False, opt=False)
eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_3():
a = compute_distances(ptraj, pairs, periodic=False, opt=True)
b = compute_displacements(ptraj, pairs, periodic=False, opt=True)
eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_0p():
a = compute_distances(ptraj, pairs, periodic=True, opt=True)
b = compute_distances(ptraj, pairs, periodic=True, opt=False)
print(a, b)
eq(a, b, decimal=3)
def test_1p():
a = compute_displacements(ptraj, pairs, periodic=True, opt=True)
b = compute_displacements(ptraj, pairs, periodic=True, opt=False)
eq(a, b, decimal=3)
def test_2p():
a = compute_distances(ptraj, pairs, periodic=True, opt=False)
b = compute_displacements(ptraj, pairs, periodic=True, opt=False)
assert a.shape == (len(ptraj), len(pairs))
assert b.shape == (len(ptraj), len(pairs), 3), str(b.shape)
b = np.sqrt(np.sum(np.square(b), axis=2))
eq(a, b, decimal=5)
def test_3p():
a = compute_distances(ptraj, pairs, periodic=True, opt=True)
b = compute_displacements(ptraj, pairs, periodic=True, opt=True)
print(a, b)
eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_4():
# using a really big box, we should get the same results with and without
# pbcs
box = np.array([[100, 0, 0], [0, 200, 0], [0, 0, 300]])
box = np.zeros((N_FRAMES, 3, 3)) + box # broadcast it out
a = _displacement_mic(xyz, pairs, box, False)
b = _displacement(xyz, pairs)
eq(a, b, decimal=3)
def test_5():
# simple wrap around along the z axis.
xyz = np.array([[[0.0, 0.0, 0.0], [0.0, 0.0, 2.2]]])
box = np.eye(3, 3).reshape(1, 3, 3)
result = _displacement_mic(xyz, np.array([[0, 1]]), box, True)
eq(result, np.array([[[0, 0, 0.2]]]))
def test_6(get_fn):
ext_ref = np.array([17.4835, 22.2418, 24.2910, 22.5505, 12.8686, 22.1090,
7.4472, 22.4253, 19.8283, 20.6935]) / 10
traj = md.load(get_fn('test_good.nc'), top=get_fn('test.parm7'))
_run_amber_traj(traj, ext_ref)
def test_7(get_fn):
ext_ref = np.array([30.9184, 23.9040, 25.3869, 28.0060, 25.9704, 24.6836,
23.0508, 27.1983, 24.4954, 26.7448]) / 10
traj = md.load(get_fn('test_bad.nc'), top=get_fn('test.parm7'))
_run_amber_traj(traj, ext_ref)
def _run_amber_traj(traj, ext_ref):
# Test triclinic case where simple approach in Tuckerman text does not
# always work
distopt = md.compute_distances(traj, [[0, 9999]], opt=True)
distslw = md.compute_distances(traj, [[0, 9999]], opt=False)
dispopt = md.compute_displacements(traj, [[0, 9999]], opt=True)
dispslw = md.compute_displacements(traj, [[0, 9999]], opt=False)
eq(distopt, distslw, decimal=5)
eq(dispopt, dispslw, decimal=5)
assert_allclose(distopt.flatten(), ext_ref, atol=2e-5)
# Make sure distances from displacements are the same
eq(np.sqrt((dispopt.squeeze() ** 2).sum(axis=1)), distopt.squeeze())
eq(np.sqrt((dispslw.squeeze() ** 2).sum(axis=1)), distslw.squeeze())
eq(dispopt, dispslw, decimal=5)
def test_closest_contact():
box_size = np.array([3.0, 4.0, 5.0])
traj = md.Trajectory(xyz=xyz * box_size, topology=None)
_verify_closest_contact(traj)
traj.unitcell_lengths = np.array([box_size for i in range(N_FRAMES)])
traj.unitcell_angles = np.array([[90.0, 90.0, 90.0] for i in range(N_FRAMES)])
_verify_closest_contact(traj)
traj.unitcell_angles = np.array([[80.0, 90.0, 100.0] for i in range(N_FRAMES)])
_verify_closest_contact(traj)
def _verify_closest_contact(traj):
group1 = np.array([i for i in range(N_ATOMS // 2)], dtype=np.int)
group2 = np.array([i for i in range(N_ATOMS // 2, N_ATOMS)], dtype=np.int)
contact = find_closest_contact(traj, group1, group2)
pairs = np.array([(i, j) for i in group1 for j in group2], dtype=np.int)
dists = md.compute_distances(traj, pairs, True)[0]
dists2 = md.compute_distances(traj, pairs, False)[0]
nearest = np.argmin(dists)
eq(float(dists[nearest]), contact[2], decimal=5)
assert ((pairs[nearest, 0] == contact[0] and pairs[nearest, 1] == contact[1]) or (
pairs[nearest, 0] == contact[1] and pairs[nearest, 1] == contact[0]))
def test_distance_nan():
xyz = np.array([[1, 1, 1], [2, 1, 1], [np.nan, np.nan, np.nan]]).reshape(1, 3, 3)
dists = md.compute_distances(md.Trajectory(xyz=xyz, topology=None), [[0, 1]])
assert np.isfinite(dists).all()
def test_closest_contact_nan_pos():
box_size = np.array([3.0, 4.0, 5.0])
xyz = np.asarray(np.random.randn(2, 20, 3), dtype=np.float32)
xyz *= box_size
# Set the last frame to nan
xyz[-1] = np.nan
    # Slice off the last frame, so the NaNs should not cause trouble.
xyz = xyz[:-1]
traj = md.Trajectory(xyz=xyz, topology=None)
_verify_closest_contact(traj)
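# --- Illustrative sketch (not part of the original test module) ---
# For the non-periodic case, compute_distances should agree with a plain
# numpy pairwise norm over the module-level xyz/pairs arrays defined above.
def _naive_distances(positions, atom_pairs):
    # positions: (n_frames, n_atoms, 3); atom_pairs: (n_pairs, 2)
    diff = positions[:, atom_pairs[:, 0], :] - positions[:, atom_pairs[:, 1], :]
    return np.sqrt((diff ** 2).sum(axis=2))
def test_naive_reference():
    a = compute_distances(ptraj, pairs, periodic=False)
    b = _naive_distances(xyz, pairs)
    eq(a, np.asarray(b, dtype=np.float32), decimal=4)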
|
mattwthompson/mdtraj
|
tests/test_distance.py
|
Python
|
lgpl-2.1
| 7,156
|
[
"MDTraj"
] |
1bc8f4db7d12188d05626c7e0b342954f3f9b601184db4dd098e2078279ea102
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
# code based on http://gvallver.perso.univ-pau.fr/?p=587
import sys
import numpy as np
import os
import argparse
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.gridspec import GridSpec
from pymatgen.io.vasp.outputs import Vasprun, Procar
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType
def rgbline(ax, k, e, red, green, blue, alpha=1.):
# creation of segments based on
# http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb
pts = np.array([k, e]).T.reshape(-1, 1, 2)
seg = np.concatenate([pts[:-1], pts[1:]], axis=1)
nseg = len(k) - 1
r = [0.5 * (red[i] + red[i + 1]) for i in range(nseg)]
g = [0.5 * (green[i] + green[i + 1]) for i in range(nseg)]
b = [0.5 * (blue[i] + blue[i + 1]) for i in range(nseg)]
a = np.ones(nseg, np.float) * alpha
lc = LineCollection(seg, colors=list(zip(r, g, b, a)), linewidth=2)
ax.add_collection(lc)
def select_title():
cur_dir_name = os.getcwd().split('/')[-1]
if 'mp-' in cur_dir_name:
try:
formula = matproj.get_data(cur_dir_name, prop="pretty_formula")[0]["pretty_formula"]
cur_dir_name = formula + '_' + cur_dir_name
except:
            print('If the current folder name is only a Materials Project id, the formula would be added to the figure title.')
return cur_dir_name
def find_bs_labels_from_file(nself_dir = 'nself'):
if not nself_dir.endswith('/'):
nself_dir += '/'
try:
with open(nself_dir + 'KPOINTS', 'r') as kpts:
counter = 0
klist = []
for l in kpts:
counter += 1
if counter > 4:
if counter in [5, 6]:
ordered = True
line = l.split()
if len(line) > 0:
if line[-1] in klist[-1:]:
ordered = True
continue
if ordered:
klist.append(line[-1])
ordered = False
else:
klist[-1] = klist[-1] + '|' + line[-1]
ordered = True
except IOError:
IOError('KPOINTS (line-mode) file must be present')
labels = [r"$%s$" % lab for lab in klist]
return labels
def find_e_range(bands, range_override = 10):
emin = 1e100
emax = -1e100
for spin in bands.bands.keys():
for b in range(bands.nb_bands):
emin = min(emin, min(bands.bands[spin][b]))
emax = max(emax, max(bands.bands[spin][b]))
emin -= bands.efermi + 1
emax -= bands.efermi - 1
ax1.set_ylim(emin, emax)
ax2.set_ylim(emin, emax)
return max(emin, -range_override), min(emax, range_override)
if __name__ == "__main__":
### Check the input arguments
parser = argparse.ArgumentParser()
parser.add_argument("-mpk", "--mpapikey",
help="Your Materials Project API key",
required=False, default='fDJKEZpxSyvsXdCt')
parser.add_argument("-p", "--bs_projected_atom",
help="Projection of the atom's orbitals on the band structure e.g. default: -p O",
required=False, default = 'O')
args = parser.parse_args()
from pymatgen.matproj.rest import MPRester
matproj = MPRester(args.mpapikey)
# read data
# ---------
# kpoints labels
# labels = [r"$L$", r"$\Gamma$", r"$X$", r"$U,K$", r"$\Gamma$"]
# path = HighSymmKpath(mg.Structure.from_file("./nself/POSCAR")).kpath["path"]
# labels = [r"$%s$" % lab for lab in path[0]]
cur_dir_name = select_title()
labels = find_bs_labels_from_file('nself')
# density of states
dosrun = Vasprun("./self/vasprun.xml")
spd_dos = dosrun.complete_dos.get_spd_dos()
# bands
run = Vasprun("./nself/vasprun.xml", parse_projected_eigen=True)
bands = run.get_band_structure("./nself/KPOINTS",
line_mode=True,
efermi=dosrun.efermi)
# print(bands.__dict__)
print(dir(bands))
# set up matplotlib plot
# ----------------------
# general options for plot
font = {'family': 'serif', 'size': 24}
plt.rc('font', **font)
# set up 2 graph with aspec ration 2/1
# plot 1: bands diagram
# plot 2: Density of States
gs = GridSpec(1, 2, width_ratios=[2, 1])
fig = plt.figure(figsize=(11.69, 8.27))
fig.suptitle('Band Structure of ' + cur_dir_name)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1]) # , sharey=ax1)
# set ylim for the plot
# ---------------------
emin, emax = find_e_range(bands)
# Band Diagram
# ------------
name = args.bs_projected_atom
pbands = bands.get_projections_on_elts_and_orbitals({name: ["s", "p", "d"]})
# pbands = bands.get_projection_on_elements
# compute s, p, d normalized contributions
contrib = np.zeros((bands.nb_bands, len(bands.kpoints), 3))
for b in range(bands.nb_bands):
for k in range(len(bands.kpoints)):
sc = pbands[Spin.up][b][k][name]["s"]**2
pc = pbands[Spin.up][b][k][name]["p"]**2
dc = pbands[Spin.up][b][k][name]["d"]**2
tot = sc + pc + dc
if tot != 0.0:
contrib[b, k, 0] = sc / tot
contrib[b, k, 1] = pc / tot
contrib[b, k, 2] = dc / tot
# plot bands using rgb mapping
for b in range(bands.nb_bands):
rgbline(ax1,
range(len(bands.kpoints)),
[e - bands.efermi for e in bands.bands[Spin.up][b]],
contrib[b, :, 0],
contrib[b, :, 1],
contrib[b, :, 2])
# style
ax1.set_xlabel("k-points")
ax1.set_ylabel(r"$E - E_f$ / eV")
ax1.grid()
# fermi level at 0
ax1.hlines(y=0, xmin=0, xmax=len(bands.kpoints), color="k", lw=2)
# labels
nlabs = len(labels)
step = len(bands.kpoints) / (nlabs - 1)
for i, lab in enumerate(labels):
ax1.vlines(i * step, emin, emax, "k")
ax1.set_xticks([i * step for i in range(nlabs)])
ax1.set_xticklabels(labels)
ax1.set_xlim(0, len(bands.kpoints))
ax1.set_ylim(emin, emax)
#ax1.set_title("Bands diagram")
# Density of states
# -----------------
ax2.set_yticklabels([])
ax2.grid()
ax2.set_xlim(1e-6, 3)
ax2.set_xticklabels([])
ax2.hlines(y=0, xmin=0, xmax=8, color="k", lw=2)
ax2.set_xlabel("Density of States", labelpad=28)
#ax2.set_title("Density of States")
# spd contribution
ax2.plot(spd_dos[OrbitalType.s].densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
"r-", label="s", lw=2)
ax2.plot(spd_dos[OrbitalType.p].densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
"g-", label="p", lw=2)
ax2.plot(spd_dos[OrbitalType.d].densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
"b-", label="d", lw=2)
# total dos
ax2.fill_between(dosrun.tdos.densities[Spin.up],
0,
dosrun.tdos.energies - dosrun.efermi,
color=(0.7, 0.7, 0.7),
facecolor=(0.7, 0.7, 0.7))
ax2.plot(dosrun.tdos.densities[Spin.up],
dosrun.tdos.energies - dosrun.efermi,
color=(0.3, 0.3, 0.3),
label="total DOS")
# plot format style
# -----------------
ax2.legend(fancybox=True, shadow=True, prop={'size': 18})
ax2.set_ylim(emin, emax)
plt.subplots_adjust(wspace=0)
# plt.show()
# plt.savefig(sys.argv[0].strip(".py") + ".pdf", format="pdf")
plt.savefig(cur_dir_name + ".png", format="png")
|
albalu/dekode
|
plot_BSDOS.py
|
Python
|
mit
| 8,006
|
[
"VASP",
"pymatgen"
] |
6a6a3c06bfbbad6b369a36bb4d39fd05ea9d00bb19f542fde6702c03870ed629
|
def find_closest_point_pd(pourbaix_diagram_object):
from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter
import matplotlib.pyplot as plt
import numpy as np
from pymatgen.util.coord_utils import in_coord_list
from pymatgen.analysis.pourbaix.maker import PREFAC
lw = 1
limits = [[-2, 16],[-3, 3]]
fig1 = plt.figure()
ax = fig1.gca()
plotter = PourbaixPlotter(pourbaix_diagram_object)
(stable, unstable) = plotter.pourbaix_plot_data(limits)
#start_fold - Ambar's Region Filter Check Function
def screening_check_desirable(entry,criteria='only-solid'):
is_desired = False
if criteria not in ['only-solid']:
print "Not implemented"
            import sys  # local import; sys is not imported at the top of this file
            sys.exit()
if criteria == 'only-solid':
if entry.nH2O == 0.0 and entry.npH == 0.0 and entry.nPhi == 0.0:
is_desired = True
print "Desired entry", entry.name
        if not is_desired:
print "Not desired entry", entry.name
return is_desired
#end_fold
#start_fold - Ambar's Function for Water and Hydrogen Lines
def get_water_stability_lines(limits):
from pymatgen.analysis.pourbaix.maker import PREFAC
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
return (h_line, o_line)
#end_fold
#start_fold - Returns the Desirable Regions of PD in "vertices"
vertices = []
import time
for entry, lines in stable.items():
print entry.name
is_desired = screening_check_desirable(entry,criteria='only-solid')
if is_desired:
desired_entry = entry
for line in lines:
(x, y) = line
#print "points", x, y
plt.plot(x, y, "k-", linewidth=lw)
point1 = [x[0],y[0]]
point2 = [x[1],y[1]]
if point1 not in vertices and is_desired:
vertices.append(point1)
if point2 not in vertices and is_desired:
vertices.append(point2)
#end_fold
#start_fold - Placing the desired phase's name in the diagram
center_x=0
center_y=0
count = 0
for point in vertices:
x,y = point
count = count + 1
center_x = center_x + x
center_y = center_y + y
plt.plot(x,y,'ro')
center_x = center_x /count
center_y = center_y /count
plt.annotate(str(desired_entry.name), xy=(center_x, center_y))
#end_fold
#start_fold - Plotting Water and Hydrogen Equilibrium Lines
# Get water line
h_line, o_line = get_water_stability_lines(limits)
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
#end_fold
# Getting distances
print "Getting distances of vertices"
reference_line = o_line
p1 = np.array([reference_line[0][0], reference_line[1][0]])
p2 = np.array([reference_line[0][1], reference_line[1][1]])
min_d = 1000.0
min_vertex = []
d_and_vert_lst =[]
for p3 in vertices:
        p3 = np.array(p3)
d = np.linalg.norm(np.cross(p2-p1, p1-p3))/np.linalg.norm(p2-p1)
d_and_vert = [d, p3]
d_and_vert_lst.append(d_and_vert)
#https://stackoverflow.com/questions/39840030/distance-between-point-and-a-line-from-two-points
#http://www.fundza.com/vectors/point2line/index.html
print "Vertex: ", p3, "Distance: ", d
if d <= min_d:
min_d = d
min_vertex = p3
fin_lst = []
for i in d_and_vert_lst:
if round(i[0], 4) == round(min_d, 4):
fin_lst.append(i)
# Plotting the star on highest stability vertices
for i in fin_lst:
plt.plot(i[1][0],i[1][1],'*b',ms=16)
###########################################
V_RHE = 1.23 - min_d
pH_0 = fin_lst[0][1][0]
V_SHE = V_RHE - PREFAC*pH_0
###########################################
# plt.annotate('d = '+ str(round(min_d,2)),xy=(center_x, center_y-0.3))
plt.annotate('V_crit = ' + str(round(V_RHE,2))+' VvsRHE',xy=(center_x, center_y-0.3))
plt.annotate('V_crit = ' + str(round(V_SHE,2))+' VvsSHE',xy=(center_x, center_y-0.6))
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.show()
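# --- Illustrative helper (not part of the original script) ---
# The V_SHE value computed above follows the Nernst shift between the RHE
# and SHE scales at 25 C, V_SHE = V_RHE - 0.0591 * pH; 0.0591 V per pH unit
# is the same constant pymatgen exposes as PREFAC.
def rhe_to_she(v_rhe, ph, prefac=0.0591):
    """Convert a potential from the RHE scale to the SHE scale at 25 C."""
    return v_rhe - prefac * ph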
|
raulf2012/pourbaix_pymatgen
|
find_closest_point_pd.py
|
Python
|
mit
| 4,017
|
[
"pymatgen"
] |
bfeed11efd5777cf69b41413716f897ba22fdf5345426e61911997b1121bb50e
|
import os
import sys
import vtkAll as vtk
from ddapp import botpy
import math
import time
import types
import functools
import numpy as np
from ddapp import transformUtils
from ddapp import lcmUtils
from ddapp.timercallback import TimerCallback
from ddapp.asynctaskqueue import AsyncTaskQueue
from ddapp import objectmodel as om
from ddapp import visualization as vis
from ddapp import applogic as app
from ddapp.debugVis import DebugData
from ddapp import ik
from ddapp import ikplanner
from ddapp import ioUtils
from ddapp.simpletimer import SimpleTimer
from ddapp.utime import getUtime
from ddapp.pointpicker import ImagePointPicker
from ddapp import affordanceitems
from ddapp import affordanceupdater
from ddapp import robotstate
from ddapp import robotplanlistener
from ddapp import cameraview
from ddapp import segmentation
from ddapp import planplayback
from ddapp import propertyset
from ddapp import asynctaskqueue as atq
import ddapp.tasks.robottasks as rt
import ddapp.tasks.taskmanagerwidget as tmw
import PythonQt
from PythonQt import QtCore, QtGui, QtUiTools
import drc as lcmdrc
import traceback
from PythonQt import QtCore, QtGui
def addWidgetsToDict(widgets, d):
for widget in widgets:
if widget.objectName:
d[str(widget.objectName)] = widget
addWidgetsToDict(widget.children(), d)
class WidgetDict(object):
def __init__(self, widgets):
addWidgetsToDict(widgets, self.__dict__)
class TaskUserPanel(object):
def __init__(self, windowTitle='Task Panel'):
loader = QtUiTools.QUiLoader()
uifile = QtCore.QFile(':/ui/ddTaskUserPanel.ui')
assert uifile.open(uifile.ReadOnly)
self.widget = loader.load(uifile)
self.ui = WidgetDict(self.widget.children())
self.widget.setWindowTitle(windowTitle)
self.manualButtons = {}
self.imageViewLayout = QtGui.QHBoxLayout(self.ui.imageFrame)
self._setupParams()
self._setupPropertiesPanel()
self._initTaskPanel()
def addManualButton(self, text, callback):
b = QtGui.QPushButton(text)
b.connect('clicked()', callback)
self.manualButtons[text] = b
self.addManualWidget(b)
return b
def addManualSpacer(self):
line = QtGui.QFrame()
line.setFrameShape(QtGui.QFrame.HLine)
line.setFrameShadow(QtGui.QFrame.Sunken)
self.addManualWidget(line)
def addManualWidget(self, widget):
self.ui.manualButtonsLayout.insertWidget(self.ui.manualButtonsLayout.count()-1, widget)
def initImageView(self, imageView, activateAffordanceUpdater=True):
if activateAffordanceUpdater:
self.affordanceUpdater = affordanceupdater.AffordanceInCameraUpdater(segmentation.affordanceManager, imageView)
self.affordanceUpdater.timer.start()
self.imageViewLayout.addWidget(self.fitter.imageView.view)
def _setupParams(self):
self.params = om.ObjectModelItem('Task Params')
self.params.properties.connectPropertyChanged(self.onPropertyChanged)
def _setupPropertiesPanel(self):
l = QtGui.QVBoxLayout(self.ui.propertyFrame)
l.setMargin(0)
self.propertiesPanel = PythonQt.dd.ddPropertiesPanel()
self.propertiesPanel.setBrowserModeToWidget()
l.addWidget(self.propertiesPanel)
self.panelConnector = propertyset.PropertyPanelConnector(self.params.properties, self.propertiesPanel)
def onPropertyChanged(self, propertySet, propertyName):
pass
def getNextTasks(self):
return self.taskTree.getTasks(fromSelected=True)
def onContinue(self):
self._activatePrompts()
self.completedTasks = []
self.taskQueue.reset()
for obj in self.getNextTasks():
self.taskQueue.addTask(obj.task)
self.taskQueue.start()
def _activatePrompts(self):
rt.UserPromptTask.promptFunction = self.onTaskPrompt
rt.PrintTask.printFunction = self.appendMessage
def onStep(self):
assert not self.taskQueue.isRunning
self._activatePrompts()
tasks = self.getNextTasks()
if not tasks:
return
task = tasks[0].task
self.nextStepTask = tasks[1].task if len(tasks) > 1 else None
self.completedTasks = []
self.taskQueue.reset()
self.taskQueue.addTask(task)
self.taskQueue.start()
def updateTaskButtons(self):
self.ui.taskStepButton.setEnabled(not self.taskQueue.isRunning)
self.ui.taskContinueButton.setEnabled(not self.taskQueue.isRunning)
self.ui.taskPauseButton.setEnabled(self.taskQueue.isRunning)
def onPause(self):
if not self.taskQueue.isRunning:
return
self.nextStepTask = None
currentTask = self.taskQueue.currentTask
self.taskQueue.stop()
if currentTask:
currentTask.stop()
self.appendMessage('<font color="red">paused</font>')
def onQueueStarted(self, taskQueue):
self.updateTaskButtons()
def onQueueStopped(self, taskQueue):
self.clearPrompt()
self.updateTaskButtons()
def onTaskStarted(self, taskQueue, task):
msg = task.properties.getProperty('Name') + ' ... <font color="green">start</font>'
self.appendMessage(msg)
self.taskTree.selectTask(task)
item = self.taskTree.findTaskItem(task)
if len(self.completedTasks) and item.getProperty('Visible'):
self.appendMessage('<font color="red">paused</font>')
raise atq.AsyncTaskQueue.PauseException()
def onTaskEnded(self, taskQueue, task):
msg = task.properties.getProperty('Name') + ' ... <font color="green">end</font>'
self.appendMessage(msg)
self.completedTasks.append(task)
if self.taskQueue.tasks:
self.taskTree.selectTask(self.taskQueue.tasks[0])
elif self.nextStepTask:
self.taskTree.selectTask(self.nextStepTask)
#else:
# self.taskTree.selectTask(self.completedTasks[0])
def onTaskFailed(self, taskQueue, task):
msg = task.properties.getProperty('Name') + ' ... <font color="red">failed: %s</font>' % task.failReason
self.appendMessage(msg)
def onTaskPaused(self, taskQueue, task):
msg = task.properties.getProperty('Name') + ' ... <font color="red">paused</font>'
self.appendMessage(msg)
def onTaskException(self, taskQueue, task):
msg = task.properties.getProperty('Name') + ' ... <font color="red">exception:\n\n%s</font>' % traceback.format_exc()
self.appendMessage(msg)
def appendMessage(self, msg):
if msg == self.lastStatusMessage:
return
self.lastStatusMessage = msg
self.ui.outputConsole.append(msg.replace('\n', '<br/>'))
def updateTaskStatus(self):
currentTask = self.taskQueue.currentTask
if not currentTask or not currentTask.statusMessage:
return
name = currentTask.properties.getProperty('Name')
status = currentTask.statusMessage
msg = name + ': ' + status
self.appendMessage(msg)
def clearPrompt(self):
self.promptTask = None
self.ui.promptLabel.text = ''
self.ui.promptAcceptButton.enabled = False
self.ui.promptRejectButton.enabled = False
def onAcceptPrompt(self):
self.promptTask.accept()
self.clearPrompt()
def onRejectPrompt(self):
self.promptTask.reject()
self.clearPrompt()
def onTaskPrompt(self, task, message):
self.promptTask = task
self.ui.promptLabel.text = message
self.ui.promptAcceptButton.enabled = True
self.ui.promptRejectButton.enabled = True
def _initTaskPanel(self):
self.lastStatusMessage = ''
self.nextStepTask = None
self.completedTasks = []
self.taskQueue = atq.AsyncTaskQueue()
self.taskQueue.connectQueueStarted(self.onQueueStarted)
self.taskQueue.connectQueueStopped(self.onQueueStopped)
self.taskQueue.connectTaskStarted(self.onTaskStarted)
self.taskQueue.connectTaskEnded(self.onTaskEnded)
self.taskQueue.connectTaskPaused(self.onTaskPaused)
self.taskQueue.connectTaskFailed(self.onTaskFailed)
self.taskQueue.connectTaskException(self.onTaskException)
self.timer = TimerCallback(targetFps=2)
self.timer.callback = self.updateTaskStatus
self.timer.start()
self.taskTree = tmw.TaskTree()
self.ui.taskFrame.layout().insertWidget(0, self.taskTree.treeWidget)
l = QtGui.QVBoxLayout(self.ui.taskPropertiesGroupBox)
l.addWidget(self.taskTree.propertiesPanel)
PythonQt.dd.ddGroupBoxHider(self.ui.taskPropertiesGroupBox)
self.ui.taskStepButton.connect('clicked()', self.onStep)
self.ui.taskContinueButton.connect('clicked()', self.onContinue)
self.ui.taskPauseButton.connect('clicked()', self.onPause)
self.ui.promptAcceptButton.connect('clicked()', self.onAcceptPrompt)
self.ui.promptRejectButton.connect('clicked()', self.onRejectPrompt)
self.clearPrompt()
self.updateTaskButtons()
class ImageBasedAffordanceFit(object):
def __init__(self, imageView=None, numberOfPoints=1):
self.imageView = imageView or cameraview.CameraImageView(cameraview.imageManager, 'CAMERA_LEFT', 'image view')
self.imagePicker = ImagePointPicker(self.imageView, numberOfPoints=numberOfPoints)
self.imagePicker.connectDoubleClickEvent(self.onImageViewDoubleClick)
self.imagePicker.annotationFunc = self.onImageAnnotation
self.imagePicker.start()
self.pointCloudSource = 'lidar'
self.pickLineRadius = 0.05
self.pickNearestToCamera = True
def getPointCloud(self):
assert self.pointCloudSource in ('lidar', 'stereo')
if self.pointCloudSource == 'stereo':
return segmentation.getDisparityPointCloud(decimation=1, removeOutliers=False)
else:
return segmentation.getCurrentRevolutionData()
def onImageAnnotation(self, *points):
polyData = self.getPointCloud()
points = [self.getPointCloudLocationFromImage(p, self.imageView, polyData) for p in points]
self.fit(polyData, points)
def getPointCloudLocationFromImage(self, imagePixel, imageView, polyData):
cameraPos, ray = imageView.getWorldPositionAndRay(imagePixel)
return segmentation.extractPointsAlongClickRay(cameraPos, ray, polyData, distanceToLineThreshold=self.pickLineRadius, nearestToCamera=self.pickNearestToCamera)
def onImageViewDoubleClick(self, displayPoint, modifiers, imageView):
pass
def fit(self, pointData, points):
pass
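# --- Illustrative sketch (not part of the original module) ---
# A concrete panel typically subclasses TaskUserPanel, adds manual buttons
# and exposes parameters through self.params; the class and property names
# below are hypothetical examples rather than part of the director API.
class ExampleTaskPanel(TaskUserPanel):
    def __init__(self):
        TaskUserPanel.__init__(self, windowTitle='Example Task Panel')
        self.addManualButton('Say hello', self.onHello)
        self.addManualSpacer()
        self.params.addProperty('Greeting', 'hello')
    def onHello(self):
        self.appendMessage(str(self.params.getProperty('Greeting')))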
|
rdeits/director
|
src/python/ddapp/tasks/taskuserpanel.py
|
Python
|
bsd-3-clause
| 10,831
|
[
"VTK"
] |
6c05a4736038025841b992cb05a33880cc12ad650f733f128d6a61039e36319d
|
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import errno
from node import nullid
from i18n import _
import scmutil, util, ignore, osutil, parsers, encoding, pathutil
import os, stat, errno, gc
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7fffffff
class repocache(filecache):
"""filecache for files in .hg/"""
def join(self, obj, fname):
return obj._opener.join(fname)
class rootcache(filecache):
"""filecache for files in the repository root"""
def join(self, obj, fname):
return obj._join(fname)
class dirstate(object):
def __init__(self, opener, ui, root, validate):
'''Create a new dirstate object.
opener is an open()-like callable that can be used to open the
dirstate file; root is the root of the directory tracked by
the dirstate.
'''
self._opener = opener
self._validate = validate
self._root = root
self._rootdir = os.path.join(root, '')
self._dirty = False
self._dirtypl = False
self._lastnormaltime = 0
self._ui = ui
self._filecache = {}
@propertycache
def _map(self):
'''Return the dirstate contents as a map from filename to
(state, mode, size, time).'''
self._read()
return self._map
@propertycache
def _copymap(self):
self._read()
return self._copymap
@propertycache
def _foldmap(self):
f = {}
for name, s in self._map.iteritems():
if s[0] != 'r':
f[util.normcase(name)] = name
for name in self._dirs:
f[util.normcase(name)] = name
f['.'] = '.' # prevents useless util.fspath() invocation
return f
@repocache('branch')
def _branch(self):
try:
return self._opener.read("branch").strip() or "default"
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
return "default"
@propertycache
def _pl(self):
try:
fp = self._opener("dirstate")
st = fp.read(40)
fp.close()
l = len(st)
if l == 40:
return st[:20], st[20:40]
elif l > 0 and l < 40:
raise util.Abort(_('working directory state appears damaged!'))
except IOError, err:
if err.errno != errno.ENOENT:
raise
return [nullid, nullid]
@propertycache
def _dirs(self):
return scmutil.dirs(self._map, 'r')
def dirs(self):
return self._dirs
@rootcache('.hgignore')
def _ignore(self):
files = [self._join('.hgignore')]
for name, path in self._ui.configitems("ui"):
if name == 'ignore' or name.startswith('ignore.'):
files.append(util.expandpath(path))
return ignore.ignore(self._root, files, self._ui.warn)
@propertycache
def _slash(self):
return self._ui.configbool('ui', 'slash') and os.sep != '/'
@propertycache
def _checklink(self):
return util.checklink(self._root)
@propertycache
def _checkexec(self):
return util.checkexec(self._root)
@propertycache
def _checkcase(self):
return not util.checkcase(self._join('.hg'))
def _join(self, f):
# much faster than os.path.join()
# it's safe because f is always a relative path
return self._rootdir + f
def flagfunc(self, buildfallback):
if self._checklink and self._checkexec:
def f(x):
try:
st = os.lstat(self._join(x))
if util.statislink(st):
return 'l'
if util.statisexec(st):
return 'x'
except OSError:
pass
return ''
return f
fallback = buildfallback()
if self._checklink:
def f(x):
if os.path.islink(self._join(x)):
return 'l'
if 'x' in fallback(x):
return 'x'
return ''
return f
if self._checkexec:
def f(x):
if 'l' in fallback(x):
return 'l'
if util.isexec(self._join(x)):
return 'x'
return ''
return f
else:
return fallback
def getcwd(self):
cwd = os.getcwd()
if cwd == self._root:
return ''
# self._root ends with a path separator if self._root is '/' or 'C:\'
rootsep = self._root
if not util.endswithsep(rootsep):
rootsep += os.sep
if cwd.startswith(rootsep):
return cwd[len(rootsep):]
else:
# we're outside the repo. return an absolute path.
return cwd
def pathto(self, f, cwd=None):
if cwd is None:
cwd = self.getcwd()
path = util.pathto(self._root, cwd, f)
if self._slash:
return util.pconvert(path)
return path
def __getitem__(self, key):
'''Return the current state of key (a filename) in the dirstate.
States are:
n normal
m needs merging
r marked for removal
a marked for addition
? not tracked
'''
return self._map.get(key, ("?",))[0]
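# Illustrative note (added): e.g. dirstate['foo.py'] yields 'n' for a tracked
# file, 'a' right after 'hg add foo.py', and '?' for a path the dirstate does
# not know about.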
def __contains__(self, key):
return key in self._map
def __iter__(self):
for x in sorted(self._map):
yield x
def iteritems(self):
return self._map.iteritems()
def parents(self):
return [self._validate(p) for p in self._pl]
def p1(self):
return self._validate(self._pl[0])
def p2(self):
return self._validate(self._pl[1])
def branch(self):
return encoding.tolocal(self._branch)
def setparents(self, p1, p2=nullid):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, 'm' merged entries are
adjusted to normal and previous copy records discarded and
returned by the call.
See localrepo.setparents()
"""
self._dirty = self._dirtypl = True
oldp2 = self._pl[1]
self._pl = p1, p2
copies = {}
if oldp2 != nullid and p2 == nullid:
# Discard 'm' markers when moving away from a merge state
for f, s in self._map.iteritems():
if s[0] == 'm':
if f in self._copymap:
copies[f] = self._copymap[f]
self.normallookup(f)
return copies
def setbranch(self, branch):
self._branch = encoding.fromlocal(branch)
f = self._opener('branch', 'w', atomictemp=True)
try:
f.write(self._branch + '\n')
f.close()
# make sure filecache has the correct stat info for _branch after
# replacing the underlying file
ce = self._filecache['_branch']
if ce:
ce.refresh()
except: # re-raises
f.discard()
raise
def _read(self):
self._map = {}
self._copymap = {}
try:
st = self._opener.read("dirstate")
except IOError, err:
if err.errno != errno.ENOENT:
raise
return
if not st:
return
# Python's garbage collector triggers a GC each time a certain number
# of container objects (the number being defined by
# gc.get_threshold()) are allocated. parse_dirstate creates a tuple
# for each file in the dirstate. The C version then immediately marks
# them as not to be tracked by the collector. However, this has no
# effect on when GCs are triggered, only on what objects the GC looks
# into. This means that O(number of files) GCs are unavoidable.
# Depending on when in the process's lifetime the dirstate is parsed,
# this can get very expensive. As a workaround, disable GC while
# parsing the dirstate.
gcenabled = gc.isenabled()
gc.disable()
try:
p = parsers.parse_dirstate(self._map, self._copymap, st)
finally:
if gcenabled:
gc.enable()
if not self._dirtypl:
self._pl = p
def invalidate(self):
for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
"_ignore"):
if a in self.__dict__:
delattr(self, a)
self._lastnormaltime = 0
self._dirty = False
def copy(self, source, dest):
"""Mark dest as a copy of source. Unmark dest if source is None."""
if source == dest:
return
self._dirty = True
if source is not None:
self._copymap[dest] = source
elif dest in self._copymap:
del self._copymap[dest]
def copied(self, file):
return self._copymap.get(file, None)
def copies(self):
return self._copymap
def _droppath(self, f):
if self[f] not in "?r" and "_dirs" in self.__dict__:
self._dirs.delpath(f)
def _addpath(self, f, state, mode, size, mtime):
oldstate = self[f]
if state == 'a' or oldstate == 'r':
scmutil.checkfilename(f)
if f in self._dirs:
raise util.Abort(_('directory %r already in dirstate') % f)
# shadows
for d in scmutil.finddirs(f):
if d in self._dirs:
break
if d in self._map and self[d] != 'r':
raise util.Abort(
_('file %r in dirstate clashes with %r') % (d, f))
if oldstate in "?r" and "_dirs" in self.__dict__:
self._dirs.addpath(f)
self._dirty = True
self._map[f] = (state, mode, size, mtime)
def normal(self, f):
'''Mark a file normal and clean.'''
s = os.lstat(self._join(f))
mtime = int(s.st_mtime)
self._addpath(f, 'n', s.st_mode,
s.st_size & _rangemask, mtime & _rangemask)
if f in self._copymap:
del self._copymap[f]
if mtime > self._lastnormaltime:
# Remember the most recent modification timeslot for status(),
# to make sure we won't miss future size-preserving file content
# modifications that happen within the same timeslot.
self._lastnormaltime = mtime
def normallookup(self, f):
'''Mark a file normal, but possibly dirty.'''
if self._pl[1] != nullid and f in self._map:
# if there is a merge going on and the file was either
# in state 'm' (-1) or coming from other parent (-2) before
# being removed, restore that state.
entry = self._map[f]
if entry[0] == 'r' and entry[2] in (-1, -2):
source = self._copymap.get(f)
if entry[2] == -1:
self.merge(f)
elif entry[2] == -2:
self.otherparent(f)
if source:
self.copy(source, f)
return
if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
return
self._addpath(f, 'n', 0, -1, -1)
if f in self._copymap:
del self._copymap[f]
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
if self._pl[1] == nullid:
raise util.Abort(_("setting %r to other parent "
"only allowed in merges") % f)
self._addpath(f, 'n', 0, -2, -1)
if f in self._copymap:
del self._copymap[f]
def add(self, f):
'''Mark a file added.'''
self._addpath(f, 'a', 0, -1, -1)
if f in self._copymap:
del self._copymap[f]
def remove(self, f):
'''Mark a file removed.'''
self._dirty = True
self._droppath(f)
size = 0
if self._pl[1] != nullid and f in self._map:
# backup the previous state
entry = self._map[f]
if entry[0] == 'm': # merge
size = -1
elif entry[0] == 'n' and entry[2] == -2: # other parent
size = -2
self._map[f] = ('r', 0, size, 0)
if size == 0 and f in self._copymap:
del self._copymap[f]
def merge(self, f):
'''Mark a file merged.'''
if self._pl[1] == nullid:
return self.normallookup(f)
s = os.lstat(self._join(f))
self._addpath(f, 'm', s.st_mode,
s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
if f in self._copymap:
del self._copymap[f]
def drop(self, f):
'''Drop a file from the dirstate'''
if f in self._map:
self._dirty = True
self._droppath(f)
del self._map[f]
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
normed = util.normcase(path)
folded = self._foldmap.get(normed, None)
if folded is None:
if isknown:
folded = path
else:
if exists is None:
exists = os.path.lexists(os.path.join(self._root, path))
if not exists:
# Maybe a path component exists
if not ignoremissing and '/' in path:
d, f = path.rsplit('/', 1)
d = self._normalize(d, isknown, ignoremissing, None)
folded = d + "/" + f
else:
# No path components, preserve original case
folded = path
else:
# recursively normalize leading directory components
# against dirstate
if '/' in normed:
d, f = normed.rsplit('/', 1)
d = self._normalize(d, isknown, ignoremissing, True)
r = self._root + "/" + d
folded = d + "/" + util.fspath(f, r)
else:
folded = util.fspath(normed, self._root)
self._foldmap[normed] = folded
return folded
def normalize(self, path, isknown=False, ignoremissing=False):
'''
normalize the case of a pathname when on a casefolding filesystem
isknown specifies whether the filename came from walking the
disk, to avoid extra filesystem access.
If ignoremissing is True, missing paths are returned
unchanged. Otherwise, we try harder to normalize possibly
existing path components.
The normalized case is determined based on the following precedence:
- version of name already stored in the dirstate
- version of name stored on disk
- version provided via command arguments
'''
if self._checkcase:
return self._normalize(path, isknown, ignoremissing)
return path
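# Illustrative note (added): on a case-folding filesystem, normalize('README.TXT')
# returns the spelling already recorded in the dirstate (or on disk), e.g.
# 'ReadMe.txt', rather than the case the caller supplied.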
def clear(self):
self._map = {}
if "_dirs" in self.__dict__:
delattr(self, "_dirs")
self._copymap = {}
self._pl = [nullid, nullid]
self._lastnormaltime = 0
self._dirty = True
def rebuild(self, parent, allfiles, changedfiles=None):
changedfiles = changedfiles or allfiles
oldmap = self._map
self.clear()
for f in allfiles:
if f not in changedfiles:
self._map[f] = oldmap[f]
else:
if 'x' in allfiles.flags(f):
self._map[f] = ('n', 0777, -1, 0)
else:
self._map[f] = ('n', 0666, -1, 0)
self._pl = (parent, nullid)
self._dirty = True
def write(self):
if not self._dirty:
return
st = self._opener("dirstate", "w", atomictemp=True)
def finish(s):
st.write(s)
st.close()
self._lastnormaltime = 0
self._dirty = self._dirtypl = False
# use the modification time of the newly created temporary file as the
# filesystem's notion of 'now'
now = util.fstat(st).st_mtime
finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
def _dirignore(self, f):
if f == '.':
return False
if self._ignore(f):
return True
for p in scmutil.finddirs(f):
if self._ignore(p):
return True
return False
def _walkexplicit(self, match, subrepos):
'''Get stat data about the files explicitly specified by match.
Return a triple (results, dirsfound, dirsnotfound).
- results is a mapping from filename to stat result. It also contains
listings mapping subrepos and .hg to None.
- dirsfound is a list of files found to be directories.
- dirsnotfound is a list of files that the dirstate thinks are
directories and that were not found.'''
def badtype(mode):
kind = _('unknown')
if stat.S_ISCHR(mode):
kind = _('character device')
elif stat.S_ISBLK(mode):
kind = _('block device')
elif stat.S_ISFIFO(mode):
kind = _('fifo')
elif stat.S_ISSOCK(mode):
kind = _('socket')
elif stat.S_ISDIR(mode):
kind = _('directory')
return _('unsupported file type (type is %s)') % kind
matchedir = match.explicitdir
badfn = match.bad
dmap = self._map
normpath = util.normpath
lstat = os.lstat
getkind = stat.S_IFMT
dirkind = stat.S_IFDIR
regkind = stat.S_IFREG
lnkkind = stat.S_IFLNK
join = self._join
dirsfound = []
foundadd = dirsfound.append
dirsnotfound = []
notfoundadd = dirsnotfound.append
if match.matchfn != match.exact and self._checkcase:
normalize = self._normalize
else:
normalize = None
files = sorted(match.files())
subrepos.sort()
i, j = 0, 0
while i < len(files) and j < len(subrepos):
subpath = subrepos[j] + "/"
if files[i] < subpath:
i += 1
continue
while i < len(files) and files[i].startswith(subpath):
del files[i]
j += 1
if not files or '.' in files:
files = ['']
results = dict.fromkeys(subrepos)
results['.hg'] = None
for ff in files:
if normalize:
nf = normalize(normpath(ff), False, True)
else:
nf = normpath(ff)
if nf in results:
continue
try:
st = lstat(join(nf))
kind = getkind(st.st_mode)
if kind == dirkind:
if nf in dmap:
#file deleted on disk but still in dirstate
results[nf] = None
if matchedir:
matchedir(nf)
foundadd(nf)
elif kind == regkind or kind == lnkkind:
results[nf] = st
else:
badfn(ff, badtype(kind))
if nf in dmap:
results[nf] = None
except OSError, inst:
if nf in dmap: # does it exactly match a file?
results[nf] = None
else: # does it match a directory?
prefix = nf + "/"
for fn in dmap:
if fn.startswith(prefix):
if matchedir:
matchedir(nf)
notfoundadd(nf)
break
else:
badfn(ff, inst.strerror)
return results, dirsfound, dirsnotfound
def walk(self, match, subrepos, unknown, ignored, full=True):
'''
Walk recursively through the directory tree, finding all files
matched by match.
If full is False, maybe skip some known-clean files.
Return a dict mapping filename to stat-like object (either
mercurial.osutil.stat instance or return value of os.stat()).
'''
# full is a flag that extensions that hook into walk can use -- this
# implementation doesn't use it at all. This satisfies the contract
# because we only guarantee a "maybe".
def fwarn(f, msg):
self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
return False
ignore = self._ignore
dirignore = self._dirignore
if ignored:
ignore = util.never
dirignore = util.never
elif not unknown:
# if unknown and ignored are False, skip step 2
ignore = util.always
dirignore = util.always
matchfn = match.matchfn
matchalways = match.always()
matchtdir = match.traversedir
dmap = self._map
listdir = osutil.listdir
lstat = os.lstat
dirkind = stat.S_IFDIR
regkind = stat.S_IFREG
lnkkind = stat.S_IFLNK
join = self._join
exact = skipstep3 = False
if matchfn == match.exact: # match.exact
exact = True
dirignore = util.always # skip step 2
elif match.files() and not match.anypats(): # match.match, no patterns
skipstep3 = True
if not exact and self._checkcase:
normalize = self._normalize
skipstep3 = False
else:
normalize = None
# step 1: find all explicit files
results, work, dirsnotfound = self._walkexplicit(match, subrepos)
skipstep3 = skipstep3 and not (work or dirsnotfound)
work = [d for d in work if not dirignore(d)]
wadd = work.append
# step 2: visit subdirectories
while work:
nd = work.pop()
skip = None
if nd == '.':
nd = ''
else:
skip = '.hg'
try:
entries = listdir(join(nd), stat=True, skip=skip)
except OSError, inst:
if inst.errno in (errno.EACCES, errno.ENOENT):
fwarn(nd, inst.strerror)
continue
raise
for f, kind, st in entries:
if normalize:
nf = normalize(nd and (nd + "/" + f) or f, True, True)
else:
nf = nd and (nd + "/" + f) or f
if nf not in results:
if kind == dirkind:
if not ignore(nf):
if matchtdir:
matchtdir(nf)
wadd(nf)
if nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
elif kind == regkind or kind == lnkkind:
if nf in dmap:
if matchalways or matchfn(nf):
results[nf] = st
elif (matchalways or matchfn(nf)) and not ignore(nf):
results[nf] = st
elif nf in dmap and (matchalways or matchfn(nf)):
results[nf] = None
for s in subrepos:
del results[s]
del results['.hg']
# step 3: report unseen items in the dmap hash
if not skipstep3 and not exact:
if not results and matchalways:
visit = dmap.keys()
else:
visit = [f for f in dmap if f not in results and matchfn(f)]
visit.sort()
if unknown:
# unknown == True means we walked the full directory tree above.
# So if a file is not seen it was either a) not matching matchfn
# b) ignored, c) missing, or d) under a symlink directory.
audit_path = pathutil.pathauditor(self._root)
for nf in iter(visit):
# Report ignored items in the dmap as long as they are not
# under a symlink directory.
if audit_path.check(nf):
try:
results[nf] = lstat(join(nf))
except OSError:
# file doesn't exist
results[nf] = None
else:
# It's either missing or under a symlink directory
results[nf] = None
else:
# We may not have walked the full directory tree above,
# so stat everything we missed.
nf = iter(visit).next
for st in util.statfiles([join(i) for i in visit]):
results[nf()] = st
return results
def status(self, match, subrepos, ignored, clean, unknown):
'''Determine the status of the working copy relative to the
dirstate and return a tuple of lists (unsure, modified, added,
removed, deleted, unknown, ignored, clean), where:
unsure:
files that might have been modified since the dirstate was
written, but need to be read to be sure (size is the same
but mtime differs)
modified:
files that have definitely been modified since the dirstate
was written (different size or mode)
added:
files that have been explicitly added with hg add
removed:
files that have been explicitly removed with hg remove
deleted:
files that have been deleted through other means ("missing")
unknown:
files not in the dirstate that are not ignored
ignored:
files not in the dirstate that are ignored
(by _dirignore())
clean:
files that have definitely not been modified since the
dirstate was written
'''
listignored, listclean, listunknown = ignored, clean, unknown
lookup, modified, added, unknown, ignored = [], [], [], [], []
removed, deleted, clean = [], [], []
dmap = self._map
ladd = lookup.append # aka "unsure"
madd = modified.append
aadd = added.append
uadd = unknown.append
iadd = ignored.append
radd = removed.append
dadd = deleted.append
cadd = clean.append
mexact = match.exact
dirignore = self._dirignore
checkexec = self._checkexec
copymap = self._copymap
lastnormaltime = self._lastnormaltime
# We need to do full walks when either
# - we're listing all clean files, or
# - match.traversedir does something, because match.traversedir should
# be called for every dir in the working dir
full = listclean or match.traversedir is not None
for fn, st in self.walk(match, subrepos, listunknown, listignored,
full=full).iteritems():
if fn not in dmap:
if (listignored or mexact(fn)) and dirignore(fn):
if listignored:
iadd(fn)
else:
uadd(fn)
continue
state, mode, size, time = dmap[fn]
if not st and state in "nma":
dadd(fn)
elif state == 'n':
mtime = int(st.st_mtime)
if (size >= 0 and
((size != st.st_size and size != st.st_size & _rangemask)
or ((mode ^ st.st_mode) & 0100 and checkexec))
or size == -2 # other parent
or fn in copymap):
madd(fn)
elif time != mtime and time != mtime & _rangemask:
ladd(fn)
elif mtime == lastnormaltime:
# fn may have been changed in the same timeslot without
# changing its size. This can happen if we quickly do
# multiple commits in a single transaction.
# Force lookup, so we don't miss such a racy file change.
ladd(fn)
elif listclean:
cadd(fn)
elif state == 'm':
madd(fn)
elif state == 'a':
aadd(fn)
elif state == 'r':
radd(fn)
return (lookup, modified, added, removed, deleted, unknown, ignored,
clean)
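# Illustrative note (added): the return value unpacks as
#   lookup, modified, added, removed, deleted, unknown, ignored, clean = ...
# where the 'lookup' ("unsure") entries still need their contents read before
# they can be classified as clean or modified.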
|
spraints/for-example
|
mercurial/dirstate.py
|
Python
|
gpl-2.0
| 29,458
|
[
"VisIt"
] |
065e3962f30171b06f39363c1c5f9e5d5e8ca517cb077d286d1ad7ff1c4c25cd
|
import os
import sys
import pytest
import numba
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import functools
import time
from numba import jit, njit
from PIL import Image
import pandas as pd
import seaborn as sns
sns.set()
import math
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor, LinearRegression, LogisticRegression, Lasso, Ridge
from sklearn import datasets
from sklearn.metrics import confusion_matrix, mean_squared_error, r2_score
from sklearn.utils import resample
# Bootstrap
def Bootstrap(x1,x2, y, N_boot=500, method = 'ols', degrees = 5, random_state = 42):
"""
Computes bias^2, variance and the mean squared error using bootstrap resampling method
for the provided data and the method.
Arguments:
x1: 1D numpy array, covariate
x2: 1D numpy array, covariate
y: 1D numpy array, outcome variable
N_boot: integer type, the number of bootstrap samples
method: string type, accepts 'ols', 'ridge' or 'lasso' as arguments
degrees: integer type, polynomial degree for generating the design matrix
random_state: integer, ensures the same split when using the train_test_split functionality
Returns: Bias_vec, Var_vec, MSE_vec, betaVariance_vec
numpy arrays. Bias, Variance, MSE and the variance of beta for the predicted model
"""
##split x1, x2 and y arrays as a train and test data and generate design matrix
x1_train, x1_test,x2_train, x2_test, y_train, y_test = train_test_split(x1,x2, y, test_size=0.2, random_state = random_state)
y_pred_test = np.zeros((y_test.shape[0], N_boot))
X_test = designMatrix(x1_test, x2_test, degrees)
betaMatrix = np.zeros((X_test.shape[1], N_boot))
##resample and fit the corresponding method on the train data
for i in range(N_boot):
x1_,x2_, y_ = resample(x1_train, x2_train, y_train)
X_train = designMatrix(x1_, x2_, degrees)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_train[:, 0] = 1
X_test = designMatrix(x1_test, x2_test, degrees)
X_test = scaler.transform(X_test)
X_test[:, 0] = 1
if method == 'ols':
manual_regression = linregOwn(method = 'ols')
beta = manual_regression.fit(X_train, y_)
if method == 'ridge':
manual_regression = linregOwn(method = 'ridge')
beta = manual_regression.fit(X_train, y_, lambda_ = 0.05)
if method == 'lasso':
manual_regression = linregOwn(method = 'lasso')
beta = manual_regression.fit(X_train, y_, lambda_ = 0.05)
##predict on the same test data
y_pred_test[:, i] = np.dot(X_test, beta)
betaMatrix[:, i] = beta
y_test = y_test.reshape(len(y_test),1)
Bias_vec = []
Var_vec = []
MSE_vec = []
betaVariance_vec = []
R2_score = []
y_test = y_test.reshape(len(y_test),1)
MSE = np.mean( np.mean((y_test - y_pred_test)**2, axis=1, keepdims=True) )
bias = np.mean( (y_test - np.mean(y_pred_test, axis=1, keepdims=True))**2 )
variance = np.mean( np.var(y_pred_test, axis=1, keepdims=True) )
betaVariance = np.var(betaMatrix, axis=1)
print("-------------------------------------------------------------")
print("Degree: %d" % degrees)
print('MSE:', np.round(MSE, 3))
print('Bias^2:', np.round(bias, 3))
print('Var:', np.round(variance,3))
print('{} >= {} + {} = {}'.format(MSE, bias, variance, bias+variance))
print("-------------------------------------------------------------")
Bias_vec.append(bias)
Var_vec.append(variance)
MSE_vec.append(MSE)
betaVariance_vec.append(betaVariance)
return Bias_vec, Var_vec, MSE_vec, betaVariance_vec
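# Hedged usage sketch (added; not part of the original analysis): Bootstrap()
# relies on designMatrix() and linregOwn, which are defined further down in this
# module. The helper below shows a minimal call on synthetic Franke data; it is
# defined but never invoked, so the behaviour of the script is unchanged.
def _bootstrap_demo(n=200, degree=5):
    rng = np.random.RandomState(0)
    x1_demo, x2_demo = rng.rand(n), rng.rand(n)
    y_demo = franke(x1_demo, x2_demo)
    # returns (Bias_vec, Var_vec, MSE_vec, betaVariance_vec) for an OLS fit
    return Bootstrap(x1_demo, x2_demo, y_demo, N_boot=50, method='ols', degrees=degree)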
class CrossValidation:
"""
A class of cross-validation technique. Performs cross-validation with shuffling.
"""
def __init__(self, LinearRegression, DesignMatrix):
"""
Initialization
Arguments:
LinearRegression: Instance of the class created by either linregOwn or linregSKL
DesignMatrix: Function that generates design matrix
"""
self.LinearRegression = LinearRegression
self.DesignMatrix = DesignMatrix
def kFoldCV(self, x1, x2, y, k = 10, lambda_ = 0, degree = 5):
"""
Performs shuffling of the data, holds a split of the data as a test set at each split and evaluates the model
on the rest of the data.
Calculates the MSE, R2 score, variance and bias on the test data and the MSE on the train data.
Arguments:
x1: 1D numpy array
x2: 1D numpy array
y: 1D numpy array
k: integer, the number of splits
lambda_: float type, shrinkage parameter for ridge and lasso methods.
degree: integer type, the number of polynomials, complexity parameter
"""
self.lambda_ = lambda_
M = x1.shape[0]//k ## Split input data x in k folds of size M
##save the statistic in the list
MSE_train = []
MSE_k = []
R2_k = []
var_k = []
bias_k = []
##shuffle the data randomly
shf = np.random.permutation(x1.size)
x1_shuff = x1[shf]
x2_shuff = x2[shf]
y_shuff = y[shf]
for i in range(k):
# x_k and y_k are the hold out data for fold k
x1_k = x1_shuff[i*M:(i+1)*M]
x2_k = x2_shuff[i*M:(i+1)*M]
y_k = y_shuff[i*M:(i+1)*M]
## Generate train data and then scale both train and test
index_true = np.array([True for i in range(x1.shape[0])])
index_true[i*M:(i+1)*M] = False
X_train = self.DesignMatrix(x1_shuff[index_true], x2_shuff[index_true], degree)
y_train = y_shuff[index_true]
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_train[:, 0] = 1
### Fit the regression on the train data
beta = self.LinearRegression.fit(X_train, y_train, lambda_)
y_predict_train = np.dot(X_train, beta)
MSE_train.append(np.sum( (y_train-y_predict_train)**2)/len(y_train))
## Predict on the hold out data and calculate statistic of interest
X_k = self.DesignMatrix(x1_k, x2_k, degree)
X_k = scaler.transform(X_k)
X_k[:, 0] = 1
y_predict = np.dot(X_k,beta)
MSE_k.append(np.sum((y_k-y_predict)**2, axis=0, keepdims=True)/len(y_predict))
R2_k.append(1.0 - np.sum((y_k - y_predict)**2, axis=0, keepdims=True) / np.sum((y_k - np.mean(y_k))**2, axis=0, keepdims=True) )
var_k.append(np.var(y_predict,axis=0, keepdims=True))
bias_k.append((y_k - np.mean(y_predict, axis=0, keepdims=True))**2 )
means = [np.mean(MSE_k), np.mean(R2_k), np.mean(var_k),
np.mean(bias_k),np.mean(MSE_train)]
#print('MSE_test: {}' .format(np.round(np.mean(MSE_k),3)))
#print('R2: {}' .format(np.round(np.mean(R2_k),3)))
#print('Variance of the predicted outcome: {}' .format(np.round(np.mean(var_k),3)))
#print('Bias: {}' .format(np.round(np.mean(bias_k),3)))
#print('MSE_train {}' .format(np.round(np.mean(MSE_train),3)))
return means
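# Hedged usage sketch (added): kFoldCV expects a regression object from
# linregOwn/linregSKL (defined below) plus the designMatrix helper. Example,
# defined here but never called:
def _kfold_demo(x1, x2, y, degree=5):
    cv = CrossValidation(linregOwn(method='ridge'), designMatrix)
    # means = [MSE_test, R2, variance, bias, MSE_train], averaged over the k folds
    return cv.kFoldCV(x1, x2, y, k=10, lambda_=0.05, degree=degree)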
# Franke Function
def franke(x, y):
"""
Computes Franke function.
Franke's function has two Gaussian peaks of different heights, and a smaller dip.
It is used as a test function in interpolation problems.
Franke's function is normally defined on the grid [0, 1] for each x, y.
Arguments of the function:
x : numpy array
y : numpy array
Output of the function:
f : Franke function values at specific coordinate points of x and y
"""
f = (0.75 * np.exp(-((9*x - 2)**2)/4 - ((9*y - 2)**2)/4 )
+ 0.75 * np.exp(-((9*x + 1)**2)/49 - (9*y + 1) /10)
+ 0.5 * np.exp(-((9*x - 7)**2)/4 - ((9*y - 3)**2)/4 )
- 0.2 * np.exp(-((9*x - 4)**2) - ((9*y - 7)**2) ))
return f
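# Illustration (added): franke() is fully vectorised, so it can be evaluated on
# a meshgrid directly, e.g. to sample the surface on [0, 1] x [0, 1]:
def _franke_surface(n=50):
    xg, yg = np.meshgrid(np.linspace(0, 1, n), np.linspace(0, 1, n))
    return xg, yg, franke(xg, yg)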
class linregOwn:
"""
A class of linear regressions. Performs ordinary least squares (OLS) and Ridge regression manually. Lasso
is performed using scikit-learn functionality.
"""
def __init__(self, method = 'ols'):
"""
Constructor
Determines the method used in the fitting
Arguments:
method: string type. Accepts either 'ols', 'ridge' or 'lasso'.
"""
self.method = method
self.yHat = None
self.X = None
self.y = None
self.beta = None
self._MSE = None
self._R2 = None
self._betaVariance = None
self.lambda_ = None
def fit(self, X_train, y_train, lambda_ = 0):
"""
Performs the fit of OLS, Ridge or Lasso, depending on the argument provided initially.
Arguments:
X_train: Covariate matrix of the train data set, i.e. design matrix of
the shape m x p where m is the number of rows and p is the number of columns
(i.e. p is the complexity parameter).
y_train: Outcome variable, 1D numpy array
lambda_: float type. Shrinkage parameter for ridge and lasso methods. The higher value, higher shrinkage.
lambda_ is set to 0 for the OLS regression
"""
self.X_train = X_train
self.y_train = y_train
self.lambda_ = lambda_
if self.method == 'ols':
self._olsFit(X_train, y_train)
if self.method == 'ridge':
self._ridgeFit(X_train, y_train, lambda_)
if self.method == 'lasso':
self._lassoFitSKL(X_train, y_train, lambda_)
return self.beta
def _olsFit(self, X_train, y_train):
"""
Performs the ordinary least squares (OLS) fit on the provided data using singular value decomposition(SVD).
Arguments:
X_train: Covariate matrix of the train data set, i.e. design matrix of
the shape m x p where m is the number of rows and p is the number of columns
(i.e. p is the complexity parameter).
y_train: Outcome variable, 1D numpy array
Returns:
beta : numpy.array
The beta parameters from the performed fit
"""
self.X_train = X_train
self.y_train = y_train
U, S, VT = np.linalg.svd(self.X_train, full_matrices=True)
S_inverse = np.zeros(shape=self.X_train.shape)
##S is a vector, with shape of the number of columns
S_inverse[:S.shape[0], :S.shape[0]] = np.diag(1/S)
self.beta = np.dot(VT.T, np.dot(S_inverse.T, np.dot(U.T, self.y_train)))
#self.beta = np.linalg.inv(np.dot(X.T,X)).dot(X.T, y)
def _ridgeFit(self, X_train, y_train, lambda_):
"""
Performs the ridge regression fit
Arguments:
X_train: Covariate matrix of the train data set, design matrix of
the shape m x p (m_train_rows, p_columns).
y_train: Outcome variable, 1D numpy array, dimension m x 1
lambda_: float type. The shrinkage parameter
Returns:
beta : numpy.array
The beta parameters from the performed fit
"""
self.X_train = X_train
self.y_train = y_train
self.lambda_ = lambda_
self.beta = np.dot(np.linalg.inv(np.dot(X_train.T,X_train) + self.lambda_ * np.eye(X_train.shape[1])), np.dot(X_train.T,y_train))
def _lassoFitSKL(self, X_train, y_train, lambda_):
"""
Performs lasso fit using scikit-learn functionality.
Arguments:
X_train: Covariate matrix of the train data set, design matrix of
the shape m x p (m_train_datapoints, p_parameters).
y_train: Outcome variable, 1D numpy array, dimension m x 1
lambda_: float type. The shrinkage parameter
Returns:
self.beta : numpy.array
The beta parameters from the performed fit
"""
self.regression = Lasso(fit_intercept=True, max_iter=1000000, alpha=self.lambda_)
self.regression.fit(X_train,y_train)
self.beta = self.regression.coef_
self.beta[0] = self.regression.intercept_
def predict(self, X_test):
"""
Performs prediction of the fitted model on the provided test data set.
Arguments:
X_test: Design matrix, covariate matrix, dimension k x p (k_test_rows, p_columns)
Returns: self.yHat
numpy 1D array, prediction values of dimension k x 1
"""
self.X_test = X_test
self._predictOwntest(X_test)
return self.yHat
def _predictOwntest(self, X_test):
"""
Performs manual prediction of the fitted model on the test data.
"""
self.X_test = X_test
self.yHat = np.dot(self.X_test, self.beta)
def MSE(self, y_test):
"""
Calculates the mean squared error (MSE) manually after the fit and prediction have been implemented.
Arguments:
y_test: Outcome variable, 1D numpy array, dimension k x 1 (k_test_rows, 1_column)
Returns: self._MSE
The mean squared error of the predicted model
"""
self.y_test = y_test
if self.yHat is None :
self._predictOwntest(self.X_test)
N = self.yHat.size
self._MSE = (np.sum((self.y_test - self.yHat)**2))/N
return self._MSE
def R2(self, y_test):
"""
Calculates R2 score manually after the fit and prediction have been implemented.
Arguments:
y_test: Outcome variable, 1D numpy array, dimension k x 1 (k_test_rows, 1_column)
Returns: self._R2
The R2 score of the predicted model
"""
self.y_test = y_test
if self.yHat is None:
self._predictOwntest(self.X_test)
yMean = (1.0 / self.y_test.size) * np.sum(self.y_test)
self._R2 = 1.0 - np.sum((self.y_test - self.yHat)**2) / np.sum((self.y_test - yMean)**2)
return self._R2
def CI(self, y_test):
"""
Calculates confidence intervals manually after the fit and prediction have been implemented.
Arguments:
y_test: Outcome variable, 1D numpy array, dimension k x 1 (k_test_rows, 1_column)
Returns: var, Lower, Upper
Variance, Lower and Upper bounds of the confidence intervals for the parameter self.beta
"""
self.y_test = y_test
if self.yHat is None:
self._predictOwntest(self.X_test)
sigma2 = np.sum(((self.y_test - self.yHat)**2))/(self.y_test.size - self.beta.size)
var = np.diag(np.linalg.inv(np.dot(self.X_test.T, self.X_test))) * sigma2
Lower = self.beta - 1.96*np.sqrt(var)
Upper = self.beta + 1.96*np.sqrt(var)
return var, Lower, Upper
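# Hedged usage sketch (added): the intended flow for linregOwn is to fit on the
# (scaled) training design matrix, predict on the test matrix, then query the
# error metrics. Defined for illustration only, never executed:
def _linreg_own_demo(X_train, y_train, X_test, y_test, lambda_=0.05):
    model = linregOwn(method='ridge')
    model.fit(X_train, y_train, lambda_=lambda_)
    y_hat = model.predict(X_test)
    return y_hat, model.MSE(y_test), model.R2(y_test)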
###Implementation through scikitlearn
class linregSKL:
def __init__(self, method = 'ols'):
"""
A class of linear regressions. Performs ordinary least squares (OLS), Ridge and Lasso
using scikit-learn functionality.
"""
self.method = method
self.yHat = None
self.X = None
self.y = None
self.beta = None
self._MSE = None
self._R2 = None
self._betaVariance = None
def fit(self, X_train, y_train, lambda_ = 0):
self.X_train = X_train
self.y_train = y_train
if self.method == 'ols':
self._olsSKLfit(X_train, y_train)
if self.method == 'ridge':
self._sklRidgeFit(X_train, y_train, lambda_)
if self.method == 'lasso':
self._SKLlassoFit(X_train, y_train, lambda_)
return self.beta
def _olsSKLfit(self, X_train, y_train):
self.X_train = X_train
self.y_train = y_train
##We already have standardized data from design matrix
self.ols = LinearRegression().fit(self.X_train, self.y_train)
self.beta = self.ols.coef_
self.beta[0] = self.ols.intercept_
def _SKLlassoFit(self, X_train, y_train, lambda_):
self.regression = Lasso(fit_intercept=True, max_iter=100000, alpha=lambda_)
self.regression.fit(X_train,y_train)
self.beta = self.regression.coef_
self.beta[0] = self.regression.intercept_
def _sklRidgeFit(self, X_train, y_train, lambda_):
self.regression = Ridge(fit_intercept=True, alpha=self.lambda_)
self.regression.fit(X_train, y_train)
self.beta = self.regression.coef_
self.beta[0] = self.regression.intercept_
def predict(self, X_test):
self.X_test = X_test
if self.method == 'ols':
self._sklPredict(X_test)
return self.yHat
def _sklPredict(self, X_test):
self.X_test = X_test
## The design matrix already contains a column of ones, so subtract the stored
## intercept here because scikit-learn fits and adds its own intercept term
self.yHat = self.ols.predict(self.X_test) - self.beta[0]
def MSE(self, y_test):
self.y_test = y_test
if self.yHat is None :
self._sklPredict(self.X_test)
self._MSE = mean_squared_error(self.y_test, self.yHat)
return self._MSE
def R2(self, y_test):
self.y_test = y_test
if self.yHat is None :
self._sklPredict(self.X_test)
self._R2 = r2_score(self.y_test, self.yHat)
return self._R2
def designMatrix(x, y, k=5):
"""
Generates the design matrix (covariates of polynomial degree k).
Intercept is included in the design matrix.
Scaling does not apply to the intercept term.
if k = 2, generated column vectors: 1, x, y, x^2, xy, y^2
if k = 3, generated column vectors: 1, x, y, x^2, xy, y^2, x^3, x^2y, xy^2, y^3
...
Arguments:
x: 1D numpy array
y: 1D numpy array
k: integer type. complexity parameter (i.e polynomial degree)
"""
xb = np.ones((x.size, 1))
for i in range(1, k+1):
for j in range(i+1):
xb = np.c_[xb, (x**(i-j))*(y**j)]
xb[:, 0] = 1
return xb
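# Illustration (added): for k = 2 the columns are [1, x, y, x^2, xy, y^2], i.e.
# (k + 1)(k + 2)/2 = 6 columns in total (21 columns for the default k = 5), e.g.
#   designMatrix(np.array([0.1, 0.2]), np.array([0.3, 0.4]), k=2).shape == (2, 6)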
# Stochastic Gradient Descent
from matplotlib.ticker import LinearLocator, FormatStrFormatter
def compute_square_loss(X, y, theta):
loss = 0 #Initialize the average square loss
m = len(y)
loss = (1.0/m)*(np.linalg.norm((X.dot(theta) - y)) ** 2)
return loss
def gradient_ridge(X, y, beta, lambda_):
return 2*(np.dot(X.T, (X.dot(beta) - y))) + 2*lambda_*beta
def gradient_ols(X, y, beta):
m = X.shape[0]
grad = 2/m * X.T.dot(X.dot(beta) - y)
return grad
def learning_schedule(t):
t0, t1 = 5, 50
return t0/(t+t1)
def iterate_minibatches(inputs, targets, batchsize, shuffle=True):
assert inputs.shape[0] == targets.shape[0]
if shuffle:
indices = np.random.permutation(inputs.shape[0])
for start_idx in range(0, inputs.shape[0], batchsize):
end_idx = min(start_idx + batchsize, inputs.shape[0])
if shuffle:
excerpt = indices[start_idx:end_idx]
else:
excerpt = slice(start_idx, end_idx)
yield inputs[excerpt], targets[excerpt]
###sgd
def SGD(X, y, learning_rate = 0.02, n_epochs = 100, lambda_ = 0.01, batch_size = 20, method = 'ols'):
num_instances, num_features = X.shape[0], X.shape[1]
beta = np.random.randn(num_features) ##initialize beta
for epoch in range(n_epochs+1):
for batch in iterate_minibatches(X, y, batch_size, shuffle=True):
X_batch, y_batch = batch
# for i in range(batch_size):
# learning_rate = learning_schedule(n_epochs*epoch + i)
if method == 'ols':
gradient = gradient_ols(X_batch, y_batch, beta)
beta = beta - learning_rate*gradient
if method == 'ridge':
gradient = gradient_ridge(X_batch, y_batch, beta, lambda_ = lambda_)
beta = beta - learning_rate*gradient
mse_ols_train = compute_square_loss(X, y, beta)
mse_ridge_train = compute_square_loss(X, y, beta) + lambda_*np.dot(beta.T, beta)
return beta
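# Note (added): each minibatch step above applies the plain gradient-descent
# update beta <- beta - eta * grad(beta), with grad taken from gradient_ols()
# or gradient_ridge(); the learning rate eta stays fixed across all epochs here
# (learning_schedule() is defined above but left commented out / unused).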
def compute_test_mse(X_test, y_test, beta, lambda_ = 0.01):
mse_ols_test = compute_square_loss(X_test, y_test, beta)
mse_ridge_test = compute_square_loss(X_test, y_test, beta) + lambda_*np.dot(beta.T, beta)
return mse_ols_test, mse_ridge_test
# # Part A
# In[10]:
# a
##Make synthetic data
n = 1000
np.random.seed(20)
x1 = np.random.rand(n)
x2 = np.random.rand(n)
X = designMatrix(x1, x2, 4)
y = franke(x1, x2)
##Train-validation-test samples.
# We choose / play with hyper-parameters on the validation data and then test predictions on the test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=1) # 0.25 x 0.8 = 0.2
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_val = scaler.transform(X_val)
X_train[:, 0] = 1
X_test[:, 0] = 1
X_val[:, 0] = 1
linreg = linregOwn(method='ols')
#print('Invert OLS:', linreg.fit(X_train, y_train))
beta = SGD(X_train, y_train, learning_rate=0.07)
#print('SGD OLS:', beta)
linreg = linregOwn(method='ridge')
#print('Invert Ridge:', linreg.fit(X_train, y_train, lambda_= 0.01))
beta = SGD(X_train, y_train, learning_rate=0.0004, method='ridge')
#print('SGD Ridge:', beta)
sgdreg = SGDRegressor(max_iter = 100, penalty=None, eta0=0.1)
sgdreg.fit(X_train[:, 1:],y_train.ravel())
#print('sklearn:', sgdreg.coef_)
#print('sklearn intercept:', sgdreg.intercept_)
def plot_MSE(method = 'ridge', scheme = None):
eta = np.logspace(-5, -3, 10)
lambda_ = np.logspace(-5, -1, 10)
MSE_ols = []
MSE_ridge = []
if scheme == 'joint':
if method == 'ridge':
for lmbd in lambda_:
for i in eta:
beta = SGD(X_train, y_train, learning_rate=i, lambda_ = lmbd, method = method)
mse_ols_test, mse_ridge_test = compute_test_mse(X_val, y_val, lambda_ = lmbd, beta = beta)
MSE_ridge.append(mse_ridge_test)
fig = plt.figure()
ax = fig.gca(projection='3d') ##get current axis
## build a lambda/eta grid so the raveled lengths match the 100 MSE values collected above
lambda_grid, eta_grid = np.meshgrid(lambda_, eta, indexing='ij')
lambda_ = np.ravel(lambda_grid)
eta = np.ravel(eta_grid)
ax.zaxis.set_major_locator(LinearLocator(5))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.03f'))
ax.plot_trisurf(lambda_, eta, MSE_ridge, cmap='viridis', edgecolor='none')
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$\eta$')
ax.set_title(r'MSE Ridge')
ax.view_init(30, 60)
plt.show()
if scheme == 'separate':
if method == 'ols':
eta = np.logspace(-5, 0, 10)
for i in eta:
beta = SGD(X_train, y_train, learning_rate=i, lambda_ = 0.01, method = method)
mse_ols_test, mse_ridge_test = compute_test_mse(X_val, y_val, beta = beta)
MSE_ols.append(mse_ols_test)
print('The learning rate {} performs best for the OLS' .format(eta[MSE_ols.index(min(MSE_ols))]))
print('Corresponding minimum MSE for OLS: {}'.format(min(MSE_ols)))
plt.semilogx(eta, MSE_ols)
plt.xlabel(r'Learning rate, $\eta$')
plt.ylabel('MSE OLS')
plt.title('Stochastic Gradient Descent')
plt.show()
if scheme == 'separate':
if method == 'ridge':
eta = np.logspace(-5, 0, 10)
for i in eta:
beta = SGD(X_train, y_train, learning_rate=i, lambda_ = 0.01, method = method)
mse_ols_test, mse_ridge_test = compute_test_mse(X_val, y_val, beta = beta)
MSE_ols.append(mse_ridge_test)
print('The learning rate {} performs best for Ridge' .format(eta[MSE_ols.index(min(MSE_ols))]))
print('Corresponding minimum MSE for Ridge: {}'.format(min(MSE_ols)))
plt.plot(eta, MSE_ols)
plt.xlabel(r'Learning rate, $\eta$')
plt.ylabel('MSE Ridge')
plt.title('Stochastic Gradient Descent')
plt.show()
####Predict OLS, Ridge on test data after tuning learning rate and lambda on validation data
def plot_scatter(y_true, method = 'ols'):
if method == 'ols':
beta = SGD(X_train, y_train, learning_rate=0.07, lambda_ = 0, method = method, n_epochs=300)
if method == 'ridge':
beta = SGD(X_train, y_train, learning_rate=0.0001, lambda_ = 0, method = method, n_epochs=300)
y_pred = np.dot(X_test, beta)
mse_ols_test, mse_ridge_test = compute_test_mse(X_test, y_true, beta = beta)
print('Test MSE OLS: {}' .format(mse_ols_test))
print('Test MSE Ridge: {}' .format(mse_ridge_test))
a = plt.axes(aspect='equal')
plt.scatter(y_pred, y_pred, color= 'blue', label = "Perfect prediction (y = y_pred)")
plt.scatter(y_pred, y_true, color = 'red', label = "True values")
plt.xlabel('Predicted y')
plt.ylabel('True y')
plt.title(f"Prediction - {method}")
plt.legend()
# if method == 'ols':
# plt.savefig(os.path.join(os.path.dirname(__file__), 'Plots', 'ols_reg_pred.png'), transparent=True, bbox_inches='tight')
# if method == 'ridge':
# plt.savefig(os.path.join(os.path.dirname(__file__), 'Plots', 'ridge_reg_pred.png'), transparent=True, bbox_inches='tight')
plt.show()
plot_scatter(y_test, method='ols')
plot_scatter(y_test, method='ridge')
|
CompPhysics/MachineLearning
|
doc/src/week39/codes/test12.py
|
Python
|
cc0-1.0
| 26,978
|
[
"Gaussian"
] |
d5f2924253f29991827f138b561514c81af16fc89c8d9b04efc0338ea0569b47
|
"""
Acceptance tests for the teams feature.
"""
import json
import random
import time
from uuid import uuid4
import ddt
from dateutil.parser import parse
from selenium.common.exceptions import TimeoutException
from common.test.acceptance.fixtures import LMS_BASE_URL
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.fixtures.discussion import ForumsConfigMixin, MultipleThreadFixture, Thread
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.utils import confirm_prompt
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.teams import (
BrowseTeamsPage,
BrowseTopicsPage,
EditMembershipPage,
MyTeamsPage,
TeamManagementPage,
TeamPage,
TeamsPage
)
from common.test.acceptance.tests.helpers import EventsTestMixin, UniqueCourseTest, get_modal_alert
from openedx.core.lib.tests import attr
TOPICS_PER_PAGE = 12
class TeamsTabBase(EventsTestMixin, ForumsConfigMixin, UniqueCourseTest):
"""Base class for Teams Tab tests"""
def setUp(self):
super(TeamsTabBase, self).setUp()
self.tab_nav = TabNavPage(self.browser)
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.teams_page = TeamsPage(self.browser, self.course_id)
# TODO: Refactor so resetting events database is not necessary
self.reset_event_tracking()
self.enable_forums()
def create_topics(self, num_topics):
"""Create `num_topics` test topics."""
return [{u"description": i, u"name": i, u"id": i} for i in map(str, xrange(num_topics))]
def create_teams(self, topic, num_teams, time_between_creation=0):
"""Create `num_teams` teams belonging to `topic`."""
teams = []
for i in xrange(num_teams):
team = {
'course_id': self.course_id,
'topic_id': topic['id'],
'name': 'Team {}'.format(i),
'description': 'Description {}'.format(i),
'language': 'aa',
'country': 'AF'
}
teams.append(self.post_team_data(team))
# Sadly, this sleep is necessary in order to ensure that
# sorting by last_activity_at works correctly when running
# in Jenkins.
# THIS IS AN ANTI-PATTERN - DO NOT COPY.
time.sleep(time_between_creation)
return teams
def post_team_data(self, team_data):
"""Given a JSON representation of a team, post it to the server."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/teams/',
data=json.dumps(team_data),
headers=self.course_fixture.headers
)
self.assertEqual(response.status_code, 200)
return json.loads(response.text)
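# Illustrative note (added): team_data is the dict built in create_teams(), e.g.
# {'course_id': ..., 'topic_id': ..., 'name': 'Team 0', 'description': ...,
#  'language': 'aa', 'country': 'AF'}; the Teams API responds with the created
# team serialized as JSON, which is what this helper returns.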
def create_memberships(self, num_memberships, team_id):
"""Create `num_memberships` users and assign them to `team_id`. The
last user created becomes the current user."""
memberships = []
for __ in xrange(num_memberships):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
memberships.append(user_info)
self.create_membership(user_info['username'], team_id)
#pylint: disable=attribute-defined-outside-init
self.user_info = memberships[-1]
return memberships
def create_membership(self, username, team_id):
"""Assign `username` to `team_id`."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/team_membership/',
data=json.dumps({'username': username, 'team_id': team_id}),
headers=self.course_fixture.headers
)
return json.loads(response.text)
def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
"""
Sets team configuration on the course and calls auto-auth on the user.
"""
#pylint: disable=attribute-defined-outside-init
self.course_fixture = CourseFixture(**self.course_info)
if configuration:
self.course_fixture.add_advanced_settings(
{u"teams_configuration": {u"value": configuration}}
)
self.course_fixture.install()
enroll_course_id = self.course_id if enroll_in_course else None
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit().user_info
self.course_home_page.visit()
def verify_teams_present(self, present):
"""
Verifies whether or not the teams tab is present. If it should be present, also
checks the text on the page (to ensure view is working).
"""
if present:
self.assertIn("Teams", self.tab_nav.tab_names)
self.teams_page.visit()
self.assertEqual(self.teams_page.active_tab(), 'browse')
else:
self.assertNotIn("Teams", self.tab_nav.tab_names)
def verify_teams(self, page, expected_teams):
"""Verify that the list of team cards on the current page match the expected teams in order."""
def assert_team_equal(expected_team, team_card_name, team_card_description):
"""
Helper to assert that a single team card has the expected name and
description.
"""
self.assertEqual(expected_team['name'], team_card_name)
self.assertEqual(expected_team['description'], team_card_description)
team_card_names = page.team_names
team_card_descriptions = page.team_descriptions
map(assert_team_equal, expected_teams, team_card_names, team_card_descriptions)
def verify_my_team_count(self, expected_number_of_teams):
""" Verify the number of teams shown on "My Team". """
# We are doing these operations on this top-level page object to avoid reloading the page.
self.teams_page.verify_my_team_count(expected_number_of_teams)
def only_team_events(self, event):
"""Filter out all non-team events."""
return event['event_type'].startswith('edx.team.')
@ddt.ddt
@attr(shard=5)
class TeamsTabTest(TeamsTabBase):
"""
Tests verifying when the Teams tab is present.
"""
def test_teams_not_enabled(self):
"""
Scenario: teams tab should not be present if no team configuration is set
Given I am enrolled in a course without team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(None)
self.verify_teams_present(False)
def test_teams_not_enabled_no_topics(self):
"""
Scenario: teams tab should not be present if team configuration does not specify topics
Given I am enrolled in a course with no topics in the team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": []})
self.verify_teams_present(False)
def test_teams_enabled(self):
"""
Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
Given I am enrolled in a course with team configuration and topics
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(1)})
self.verify_teams_present(True)
def test_teams_enabled_global_staff(self):
"""
Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
Given there is a course with team configuration
And I am not enrolled in that course, but am global staff
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False,
global_staff=True
)
self.verify_teams_present(True)
@ddt.data(
'topics/{topic_id}',
'topics/{topic_id}/search',
'teams/{topic_id}/{team_id}/edit-team',
'teams/{topic_id}/{team_id}'
)
def test_unauthorized_error_message(self, route):
"""Ensure that an error message is shown to the user if they attempt
to take an action which makes an AJAX request while not signed
in.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration(
{u'max_team_size': 10, u'topics': topics},
global_staff=True
)
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
self.browser.delete_cookie('sessionid')
url = self.browser.current_url.split('#')[0]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
)
)
)
self.teams_page.wait_for_ajax()
self.assertEqual(
self.teams_page.warning_message,
u"Your request could not be completed. Reload the page and try again."
)
@ddt.data(
('browse', '.topics-list'),
# TODO: find a reliable way to match the "My Teams" tab
# ('my-teams', 'div.teams-list'),
('teams/{topic_id}/{team_id}', 'div.discussion-module'),
('topics/{topic_id}/create-team', 'div.create-team-instructions'),
('topics/{topic_id}', '.teams-list'),
('not-a-real-route', 'div.warning')
)
@ddt.unpack
def test_url_routing(self, route, selector):
"""Ensure that navigating to a URL route correctly updates the page
content.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration({
u'max_team_size': 10,
u'topics': topics
})
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
# Get the base URL (the URL without any trailing fragment)
url = self.browser.current_url
fragment_index = url.find('#')
if fragment_index >= 0:
url = url[0:fragment_index]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
))
)
self.teams_page.wait_for_page()
self.teams_page.wait_for_ajax()
self.assertTrue(self.teams_page.q(css=selector).present)
self.assertTrue(self.teams_page.q(css=selector).visible)
@attr(shard=5)
class MyTeamsTest(TeamsTabBase):
"""
Tests for the "My Teams" tab of the Teams page.
"""
def setUp(self):
super(MyTeamsTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.my_teams_page = MyTeamsPage(self.browser, self.course_id)
self.page_viewed_event = {
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'my-teams',
'topic_id': None,
'team_id': None
}
}
def test_not_member_of_any_teams(self):
"""
Scenario: Visiting the My Teams page when user is not a member of any team should not display any teams.
Given I am enrolled in a course with a team configuration and a topic but am not a member of a team
When I visit the My Teams page
And I should see no teams
And I should see a message that I belong to no teams.
"""
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.assertEqual(len(self.my_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertEqual(
self.my_teams_page.q(css='.page-content-main').text,
[u'You are not currently a member of any team.']
)
def test_member_of_a_team(self):
"""
Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
Given I am enrolled in a course with a team configuration and a topic and am a member of a team
When I visit the My Teams page
Then I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should not see a pagination footer
"""
teams = self.create_teams(self.topic, 1)
self.create_membership(self.user_info['username'], teams[0]['id'])
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.verify_teams(self.my_teams_page, teams)
def test_multiple_team_members(self):
"""
Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
Given I am a member of a team with multiple members
When I visit the My Teams page
Then I should see the correct number of team members on my membership
"""
teams = self.create_teams(self.topic, 1)
self.create_memberships(4, teams[0]['id'])
self.my_teams_page.visit()
self.assertEqual(self.my_teams_page.team_memberships[0], '4 / 10 Members')
@attr(shard=5)
@ddt.ddt
class BrowseTopicsTest(TeamsTabBase):
"""
Tests for the Browse tab of the Teams page.
"""
def setUp(self):
super(BrowseTopicsTest, self).setUp()
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
@ddt.data(('name', False), ('team_count', True))
@ddt.unpack
def test_sort_topics(self, sort_order, reverse):
"""
Scenario: the user should be able to sort the list of topics by name or team count
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
When I choose a sort order
Then I should see the paginated list of topics in that order
"""
topics = self.create_topics(TOPICS_PER_PAGE + 1)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
for i, topic in enumerate(random.sample(topics, len(topics))):
self.create_teams(topic, i)
topic['team_count'] = i
self.topics_page.visit()
self.topics_page.sort_topics_by(sort_order)
topic_names = self.topics_page.topic_names
self.assertEqual(len(topic_names), TOPICS_PER_PAGE)
self.assertEqual(
topic_names,
[t['name'] for t in sorted(topics, key=lambda t: t[sort_order], reverse=reverse)][:TOPICS_PER_PAGE]
)
def test_sort_topics_update(self):
"""
Scenario: the list of topics should remain sorted after updates
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics and choose a sort order
Then I should see the paginated list of topics in that order
When I create a team in one of those topics
And I return to the topics list
Then I should see the topics in the correct sorted order
"""
topics = self.create_topics(3)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
self.topics_page.visit()
self.topics_page.sort_topics_by('team_count')
topic_name = self.topics_page.topic_names[-1]
topic = [t for t in topics if t['name'] == topic_name][0]
self.topics_page.browse_teams_for_topic(topic_name)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
browse_teams_page.wait_for_page()
browse_teams_page.click_create_team_link()
create_team_page = TeamManagementPage(self.browser, self.course_id, topic)
create_team_page.create_team()
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
team_page.click_all_topics()
self.topics_page.wait_for_page()
self.topics_page.wait_for_ajax()
self.assertEqual(topic_name, self.topics_page.topic_names[0])
def test_list_topics(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(2)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 2)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-2 out of 2 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_topic_pagination(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab, paginated 12 per page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see only the first 12 topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(20)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 20 total'))
self.assertTrue(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertTrue(self.topics_page.is_next_page_button_enabled())
def test_go_to_numbered_page(self):
"""
Scenario: topics should be able to be navigated by page number
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter a valid page number in the page number input
Then I should see that page of topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(25)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_go_to_invalid_page(self):
"""
Scenario: browsing topics should not respond to invalid page numbers
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter an invalid page number in the page number input
Then I should stay on the current page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(self.topics_page.get_current_page_number(), 1)
def test_page_navigation_buttons(self):
"""
Scenario: topics should be navigable using the next and previous page buttons
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
When I press the next page button
Then I should move to the next page
When I press the previous page button
Then I should move to the previous page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.press_next_page_button()
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 13-13 out of 13 total'))
self.topics_page.press_previous_page_button()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 13 total'))
def test_topic_pagination_one_page(self):
"""
Scenario: Browsing topics when there are fewer topics than the page size (i.e. 12),
all topics should show on one page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I should see the correct number of topic cards
And I should see the correct page header
And I should not see a pagination footer
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(10)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 10)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
def test_topic_description_truncation(self):
"""
Scenario: excessively long topic descriptions should be truncated so
as to fit within a topic card.
Given I am enrolled in a course with a team configuration and a topic
with a long description
When I visit the Teams page
And I browse topics
Then I should see a truncated topic description
"""
initial_description = "A" + " really" * 50 + " long description"
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [{"name": "", "id": "", "description": initial_description}]}
)
self.topics_page.visit()
truncated_description = self.topics_page.topic_descriptions[0]
self.assertLess(len(truncated_description), len(initial_description))
self.assertTrue(truncated_description.endswith('...'))
self.assertIn(truncated_description.split('...')[0], initial_description)
def test_go_to_teams_list(self):
"""
Scenario: Clicking on a Topic Card should take you to the
teams list for that Topic.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
And I browse topics
And I click on the arrow link to view teams for the first topic
Then I should be on the browse teams page
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
self.topics_page.visit()
self.topics_page.browse_teams_for_topic('Example Topic')
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
browse_teams_page.wait_for_page()
self.assertEqual(browse_teams_page.header_name, 'Example Topic')
self.assertEqual(browse_teams_page.header_description, 'Description')
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse topics page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the browse topics page
Then my browser should post a page viewed event
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'browse',
'topic_id': None,
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.topics_page.visit()
@attr(shard=5)
@ddt.ddt
class BrowseTeamsWithinTopicTest(TeamsTabBase):
"""
Tests for browsing Teams within a Topic on the Teams page.
"""
TEAMS_PAGE_SIZE = 10
def setUp(self):
super(BrowseTeamsWithinTopicTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.max_team_size = 10
self.set_team_configuration({
'course_id': self.course_id,
'max_team_size': self.max_team_size,
'topics': [self.topic]
})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
def teams_with_default_sort_order(self, teams):
"""Return a list of teams sorted according to the default ordering
(last_activity_at, with a secondary sort by open slots).
"""
return sorted(
sorted(teams, key=lambda t: len(t['membership']), reverse=True),
key=lambda t: parse(t['last_activity_at']).replace(microsecond=0),
reverse=True
)
def verify_page_header(self):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(self.browse_teams_page.header_name, self.topic['name'])
self.assertEqual(self.browse_teams_page.header_description, self.topic['description'])
def verify_search_header(self, search_results_page, search_query):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(search_results_page.header_name, 'Team Search')
self.assertEqual(
search_results_page.header_description,
'Showing results for "{search_query}"'.format(search_query=search_query)
)
def verify_on_page(self, teams_page, page_num, total_teams, pagination_header_text, footer_visible):
"""
Verify that we are on the correct team list page.
Arguments:
teams_page (BaseTeamsPage): The teams page object that should be the current page.
page_num (int): The one-indexed page number that we expect to be on
total_teams (list): An unsorted list of all the teams for the
current topic
pagination_header_text (str): Text we expect to see in the
pagination header.
footer_visible (bool): Whether we expect to see the pagination
footer controls.
"""
sorted_teams = self.teams_with_default_sort_order(total_teams)
self.assertTrue(teams_page.get_pagination_header_text().startswith(pagination_header_text))
self.verify_teams(
teams_page,
sorted_teams[(page_num - 1) * self.TEAMS_PAGE_SIZE:page_num * self.TEAMS_PAGE_SIZE]
)
self.assertEqual(
teams_page.pagination_controls_visible(),
footer_visible,
msg='Expected paging footer to be ' + ('visible' if footer_visible else 'invisible')
)
@ddt.data(
('open_slots', 'last_activity_at', True),
('last_activity_at', 'open_slots', True)
)
@ddt.unpack
def test_sort_teams(self, sort_order, secondary_sort_order, reverse):
"""
Scenario: the user should be able to sort the list of teams by open slots or last activity
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic
When I choose a sort order
Then I should see the paginated list of teams in that order
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
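# Give each team a distinct number of members (team i gets i members, in a
# shuffled team order) so that sorting by open slots produces an unambiguous,
# verifiable ordering.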
for i, team in enumerate(random.sample(teams, len(teams))):
for _ in range(i):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
self.create_membership(user_info['username'], team['id'])
team['open_slots'] = self.max_team_size - i
# Re-authenticate as staff after creating users
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=True
).visit()
self.browse_teams_page.visit()
self.browse_teams_page.sort_teams_by(sort_order)
team_names = self.browse_teams_page.team_names
self.assertEqual(len(team_names), self.TEAMS_PAGE_SIZE)
sorted_teams = [
team['name']
for team in sorted(
sorted(teams, key=lambda t: t[secondary_sort_order], reverse=reverse),
key=lambda t: t[sort_order],
reverse=reverse
)
][:self.TEAMS_PAGE_SIZE]
self.assertEqual(team_names, sorted_teams)
def test_default_sort_order(self):
"""
Scenario: the list of teams should be sorted by last activity by default
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic, sorted by last activity
"""
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
self.browse_teams_page.visit()
self.assertEqual(self.browse_teams_page.sort_order, 'last activity')
def test_no_teams(self):
"""
Scenario: Visiting a topic with no teams should not display any teams.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing no teams
And I should see no teams
And I should see a button to add a team
And I should not see a pagination footer
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.assertEqual(len(self.browse_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_one_page(self):
"""
Scenario: Visiting a topic with fewer teams than the page size should show
all those teams on one page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should see a button to add a team
And I should not see a pagination footer
"""
teams = self.teams_with_default_sort_order(
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE, time_between_creation=1)
)
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.verify_teams(self.browse_teams_page, teams)
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_navigation_buttons(self):
"""
Scenario: The user should be able to page through a topic's team list
using navigation buttons when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I click on the next page button
Then I should see that I am on the second page of results
And when I click on the previous page button
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
self.browse_teams_page.press_next_page_button()
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-11 out of 11 total', True)
self.browse_teams_page.press_previous_page_button()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
def test_teams_page_input(self):
"""
Scenario: The user should be able to page through a topic's team list
using the page input when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I input the second page
Then I should see that I am on the second page of results
When I input the first page
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 10, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
self.browse_teams_page.go_to_page(2)
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-20 out of 20 total', True)
self.browse_teams_page.go_to_page(1)
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
def test_browse_team_topics(self):
"""
Scenario: User should be able to navigate to "browse all teams" and "search team description" links.
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic
Then I should see the correct page header
And I should see the link to "browse teams in other topics"
When I navigate to that link
Then I should see the topic browse page
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.browse_teams_page.click_browse_all_teams_link()
self.topics_page.wait_for_page()
def test_search(self):
"""
Scenario: User should be able to search for a team
Given I am enrolled in a course with teams enabled
When I visit the Teams page for that topic
And I search for 'banana'
Then I should see the search result page
And the search header should be shown
And 0 results should be shown
And my browser should fire a page viewed event for the search page
And a searched event should have been fired
"""
# Note: all searches will return 0 results with the mock search server
# used by Bok Choy.
search_text = 'banana'
self.create_teams(self.topic, 5)
self.browse_teams_page.visit()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'search-teams',
'topic_id': self.topic['id'],
'team_id': None
}
}, {
'event_type': 'edx.team.searched',
'event': {
'search_text': search_text,
'topic_id': self.topic['id'],
'number_of_results': 0
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events, in_order=False):
search_results_page = self.browse_teams_page.search(search_text)
self.verify_search_header(search_results_page, search_text)
self.assertTrue(search_results_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
Then my browser should post a page viewed event for the teams page
"""
self.create_teams(self.topic, 5)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-topic',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.browse_teams_page.visit()
def test_team_name_xss(self):
"""
Scenario: Team names should be HTML-escaped on the teams page
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic, with a team name containing JS code
Then I should not see any alerts
"""
self.post_team_data({
'course_id': self.course_id,
'topic_id': self.topic['id'],
'name': '<script>alert("XSS")</script>',
'description': 'Description',
'language': 'aa',
'country': 'AF'
})
with self.assertRaises(TimeoutException):
self.browser.get(self.browse_teams_page.url)
alert = get_modal_alert(self.browser)
alert.accept()
class TeamFormActions(TeamsTabBase):
"""
Base class for the create, edit, and delete team tests.
"""
TEAM_DESCRIPTION = 'The Avengers are a fictional team of superheroes.'
topic = {'name': 'Example Topic', 'id': 'example_topic', 'description': 'Description'}
TEAMS_NAME = 'Avengers'
def setUp(self):
super(TeamFormActions, self).setUp()
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
def verify_page_header(self, title, description, breadcrumbs):
"""
Verify that the page header correctly reflects the
expected title, description, and breadcrumbs.
"""
self.assertEqual(self.team_management_page.header_page_name, title)
self.assertEqual(self.team_management_page.header_page_description, description)
self.assertEqual(self.team_management_page.header_page_breadcrumbs, breadcrumbs)
def verify_and_navigate_to_create_team_page(self):
"""Navigates to the create team page and verifies."""
self.browse_teams_page.click_create_team_link()
self.verify_page_header(
title='Create a New Team',
description='Create a new team if you can\'t find an existing team to join, '
'or if you would like to learn with friends you know.',
breadcrumbs='All Topics {topic_name}'.format(topic_name=self.topic['name'])
)
def verify_and_navigate_to_edit_team_page(self):
"""Navigates to the edit team page and verifies."""
self.assertEqual(self.team_page.team_name, self.team['name'])
self.assertTrue(self.team_page.edit_team_button_present)
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
# Edit page header.
self.verify_page_header(
title='Edit Team',
description='If you make significant changes, make sure you notify '
'members of the team before making these changes.',
breadcrumbs='All Topics {topic_name} {team_name}'.format(
topic_name=self.topic['name'],
team_name=self.team['name']
)
)
def verify_team_info(self, name, description, location, language):
"""Verify the team information on team page."""
self.assertEqual(self.team_page.team_name, name)
self.assertEqual(self.team_page.team_description, description)
self.assertEqual(self.team_page.team_location, location)
self.assertEqual(self.team_page.team_language, language)
def fill_create_or_edit_form(self):
"""Fill the create/edit team form fields with appropriate values."""
self.team_management_page.value_for_text_field(
field_id='name',
value=self.TEAMS_NAME,
press_enter=False
)
self.team_management_page.set_value_for_textarea_field(
field_id='description',
value=self.TEAM_DESCRIPTION
)
self.team_management_page.value_for_dropdown_field(field_id='language', value='English')
self.team_management_page.value_for_dropdown_field(field_id='country', value='Pakistan')
def verify_all_fields_exist(self):
"""
Verify the fields for create/edit page.
"""
self.assertEqual(
self.team_management_page.message_for_field('name'),
'A name that identifies your team (maximum 255 characters).'
)
self.assertEqual(
self.team_management_page.message_for_textarea_field('description'),
'A short description of the team to help other learners understand '
'the goals or direction of the team (maximum 300 characters).'
)
self.assertEqual(
self.team_management_page.message_for_field('country'),
'The country that team members primarily identify with.'
)
self.assertEqual(
self.team_management_page.message_for_field('language'),
'The language that team members primarily use to communicate with each other.'
)
@attr(shard=5)
@ddt.ddt
class CreateTeamTest(TeamFormActions):
"""
Tests for creating a new Team within a Topic on the Teams page.
"""
def setUp(self):
super(CreateTeamTest, self).setUp()
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.browse_teams_page.visit()
def test_user_can_see_create_team_page(self):
"""
Scenario: The user should be able to see the create team page via teams list page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the Create Team page link at the bottom
And When I click create team link
Then I should see the create team page.
And I should see the create team header
And I should also see the help messages for fields.
"""
self.verify_and_navigate_to_create_team_page()
self.verify_all_fields_exist()
def test_user_can_see_error_message_for_missing_data(self):
"""
Scenario: The user should be able to see an error message in case of a missing required field.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
And When I click create team button without filling required fields
Then I should see the error message and highlighted fields.
"""
self.verify_and_navigate_to_create_team_page()
# `submit_form` clicks on a button, but that button doesn't always
# have the click event handler registered on it in time. That's why
# this test is flaky. Unfortunately, I don't know of a straightforward
# way to write something that waits for that event handler to be bound
# to the button element. So I used time.sleep as well, even though
# the bok choy docs explicitly ask us not to:
# https://bok-choy.readthedocs.io/en/latest/guidelines.html
# Sorry! For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
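# A hypothetical alternative (not part of the original test): poll with
# bok-choy's EmptyPromise for a condition that implies the form is ready,
# which narrows the race without a fixed sleep. The CSS selector below is
# only illustrative; because the page exposes no explicit "handler bound"
# signal, the sleep above is what the test actually relies on.
#
#   from bok_choy.promise import EmptyPromise
#   EmptyPromise(
#       lambda: self.team_management_page.q(css='.create-team .action-primary').present,
#       "create team form rendered"
#   ).fulfill()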
self.team_management_page.submit_form()
self.team_management_page.wait_for(
lambda: self.team_management_page.validation_message_text,
"Validation message text never loaded."
)
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
self.assertTrue(self.team_management_page.error_for_field(field_id='description'))
def test_user_can_see_error_message_for_incorrect_data(self):
"""
Scenario: The user should be able to see an error message when a required field exceeds its maximum length.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I add text longer than 255 characters to the name field
And I click Create button
Then I should see the error message for exceeding length.
"""
self.verify_and_navigate_to_create_team_page()
# Fill the name field with >255 characters to see validation message.
self.team_management_page.value_for_text_field(
field_id='name',
value='EdX is a massive open online course (MOOC) provider and online learning platform. '
'It hosts online university-level courses in a wide range of disciplines to a worldwide '
'audience, some at no charge. It also conducts research into learning based on how '
'people use its platform. EdX was created for students and institutions that seek to'
'transform themselves through cutting-edge technologies, innovative pedagogy, and '
'rigorous courses. More than 70 schools, nonprofits, corporations, and international'
'organizations offer or plan to offer courses on the edX website. As of 22 October 2014,'
'edX has more than 4 million users taking more than 500 courses online.',
press_enter=False
)
self.team_management_page.submit_form()
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
def test_user_can_create_new_team_successfully(self):
"""
Scenario: The user should be able to create new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I fill all the fields present with appropriate data
And I click Create button
Then I expect analytics events to be emitted
And I should see the page for my team
And I should see the message that says "You are a member of this team"
And the new team should be added to the list of teams within the topic
And the number of teams should be updated on the topic card
And if I switch to "My Team", the newly created team is displayed
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.browse_teams_page.visit()
self.verify_and_navigate_to_create_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.created'
},
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'added_on_create',
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_management_page.submit_form()
# Verify that the page is shown for the new team
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
self.assertEqual(team_page.team_name, self.TEAMS_NAME)
self.assertEqual(team_page.team_description, self.TEAM_DESCRIPTION)
self.assertEqual(team_page.team_user_membership_text, 'You are a member of this team.')
# Verify the new team was added to the topic list
self.teams_page.click_specific_topic("Example Topic")
self.teams_page.verify_topic_team_count(1)
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(1)
# Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
self.verify_my_team_count(1)
def test_user_can_cancel_the_team_creation(self):
"""
Scenario: The user should be able to cancel the creation of new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I click Cancel button
Then I should see teams list page without any new team.
And if I switch to "My Team", it shows no teams
"""
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.verify_and_navigate_to_create_team_page()
# We add a sleep here to allow time for the click event handler to bind
# to the cancel button. Using time.sleep in bok-choy tests is,
# generally, an anti-pattern. So don't copy this :).
# For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_management_page.cancel_team()
self.browse_teams_page.wait_for_page()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(0)
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the create team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the create team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'new-team',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_create_team_page()
@attr(shard=21)
@ddt.ddt
class DeleteTeamTest(TeamFormActions):
"""
Tests for deleting teams.
"""
def setUp(self):
super(DeleteTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
# Need to have a membership to confirm it gets deleted as well.
self.create_membership(self.user_info['username'], self.team['id'])
self.team_page.visit()
def test_cancel_delete(self):
"""
Scenario: The user should be able to cancel the Delete Team dialog
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I cancel the prompt
And I refresh the page
Then I should still see the team
"""
self.delete_team(cancel=True)
self.team_management_page.wait_for_page()
self.browser.refresh()
self.team_management_page.wait_for_page()
self.assertEqual(
' '.join(('All Topics', self.topic['name'], self.team['name'])),
self.team_management_page.header_page_breadcrumbs
)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_delete_team(self, role):
"""
Scenario: The user should be able to see and navigate to the delete team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I confirm the prompt
Then I should see the browse teams page
And the team should not be present
"""
# If role is None, remain logged in as global staff
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.delete_team(require_notification=False)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
browse_teams_page.wait_for_page()
self.assertNotIn(self.team['name'], browse_teams_page.team_names)
def delete_team(self, **kwargs):
"""
Delete a team. Passes `kwargs` to `confirm_prompt`.
Expects edx.team.deleted event to be emitted, with correct course_id.
Also expects edx.team.learner_removed event to be emitted for the
membership that is removed as a part of the delete operation.
"""
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.team_management_page.delete_team_button.click()
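# Only a confirmed deletion emits events; when cancelling we just dismiss
# the prompt, otherwise the confirmation is wrapped in the event assertions below.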
if 'cancel' in kwargs and kwargs['cancel'] is True:
confirm_prompt(self.team_management_page, **kwargs)
else:
expected_events = [
{
'event_type': 'edx.team.deleted',
'event': {
'team_id': self.team['id']
}
},
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'team_deleted',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
confirm_prompt(self.team_management_page, **kwargs)
def test_delete_team_updates_topics(self):
"""
Scenario: Deleting a team should update the team count on the topics page
Given I am staff user for a course with a team
And I delete a team
When I navigate to the browse topics page
Then the team count for the deleted team's topic should be updated
"""
self.delete_team(require_notification=False)
BrowseTeamsPage(self.browser, self.course_id, self.topic).click_all_topics()
topics_page = BrowseTopicsPage(self.browser, self.course_id)
topics_page.wait_for_page()
self.teams_page.verify_topic_team_count(0)
@attr(shard=17)
@ddt.ddt
class EditTeamTest(TeamFormActions):
"""
Tests for editing the team.
"""
def setUp(self):
super(EditTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
self.team_page.visit()
def test_staff_can_navigate_to_edit_team_page(self):
"""
Scenario: The user should be able to see and navigate to the edit team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And I should see the edit team header
And I should also see the help messages for fields
"""
self.verify_and_navigate_to_edit_team_page()
self.verify_all_fields_exist()
def test_staff_can_edit_team_successfully(self):
"""
Scenario: The staff should be able to edit team successfully.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And an analytics event should be fired
When I edit all the fields with appropriate data
And I click Update button
Then I should see the page for my team with updated data
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'country',
'old': 'AF',
'new': 'PK',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'name',
'old': self.team['name'],
'new': self.TEAMS_NAME,
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'language',
'old': 'aa',
'new': 'en',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'description',
'old': self.team['description'],
'new': self.TEAM_DESCRIPTION,
'truncated': [],
}
},
]
with self.assert_events_match_during(
event_filter=self.only_team_events,
expected_events=expected_events,
):
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_staff_can_cancel_the_team_edit(self):
"""
Scenario: The user should be able to cancel the editing of team.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
Then I should see the Edit Team header
When I click Cancel button
Then I should see the team page without changes.
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.cancel_team()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
def test_student_cannot_see_edit_button(self):
"""
Scenario: The student should not see the edit team button.
Given I am a student in a course with a team
When I visit the Team profile page
Then I should not see the Edit Team button
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page.visit()
self.assertFalse(self.team_page.edit_team_button_present)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged_user_can_edit_team(self, role):
"""
Scenario: The user with specified role should see the edit team button.
Given I am a user with a privileged role in a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
"""
kwargs = {
'course_id': self.course_id,
'staff': False
}
if role is not None:
kwargs['roles'] = role
AutoAuthPage(self.browser, **kwargs).visit()
self.team_page.visit()
self.teams_page.wait_for_page()
self.assertTrue(self.team_page.edit_team_button_present)
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_page_viewed_event(self):
"""
Scenario: Visiting the edit team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the edit team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'edit-team',
'topic_id': self.topic['id'],
'team_id': self.team['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_edit_team_page()
@attr(shard=17)
@ddt.ddt
class EditMembershipTest(TeamFormActions):
"""
Tests for administering memberships from the team membership page
"""
def setUp(self):
super(EditMembershipTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
self.team = self.create_teams(self.topic, num_teams=1)[0]
# Make sure a user exists on this team so we can edit the membership.
self.create_membership(self.user_info['username'], self.team['id'])
self.edit_membership_page = EditMembershipPage(self.browser, self.course_id, self.team)
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
def edit_membership_helper(self, role, cancel=False):
"""
Helper for common functionality in edit membership tests.
Checks for all relevant assertions about membership being removed,
including verifying that edx.team.learner_removed events are emitted.
"""
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.assertTrue(
self.team_management_page.membership_button_present
)
self.team_management_page.click_membership_button()
self.edit_membership_page.wait_for_page()
self.edit_membership_page.click_first_remove()
if cancel:
self.edit_membership_page.cancel_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 1)
else:
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'removed_by_admin',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
self.edit_membership_page.confirm_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 0)
self.edit_membership_page.wait_for_page()
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_remove_membership(self, role):
"""
Scenario: The user should be able to remove a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and confirm the dialog
Then my membership should be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=False)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_cancel_remove_membership(self, role):
"""
Scenario: The user should be able to cancel removing a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and cancel the dialog
Then my membership should not be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=True)
@attr(shard=17)
@ddt.ddt
class TeamPageTest(TeamsTabBase):
"""Tests for viewing a specific team"""
SEND_INVITE_TEXT = 'Send this link to friends so that they can join too.'
def setUp(self):
super(TeamPageTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
def _set_team_configuration_and_membership(
self,
max_team_size=10,
membership_team_index=0,
visit_team_index=0,
create_membership=True,
another_user=False):
"""
Set team configuration.
Arguments:
max_team_size (int): number of users a team can have
membership_team_index (int): index of team user will join
visit_team_index (int): index of team user will visit
create_membership (bool): whether to create membership or not
another_user (bool): whether to sign in as a different (non-member) user before visiting the team
"""
#pylint: disable=attribute-defined-outside-init
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': max_team_size, 'topics': [self.topic]}
)
self.teams = self.create_teams(self.topic, 2)
if create_membership:
self.create_membership(self.user_info['username'], self.teams[membership_team_index]['id'])
if another_user:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page = TeamPage(self.browser, self.course_id, self.teams[visit_team_index])
def setup_thread(self):
"""
Create and return a thread for this test's discussion topic.
"""
thread = Thread(
id="test_thread_{}".format(uuid4().hex),
commentable_id=self.teams[0]['discussion_topic_id'],
body="Dummy text body.",
context="standalone",
)
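# Push the thread to the (stubbed) comments service so that it appears in
# the team's discussion when the page is visited.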
thread_fixture = MultipleThreadFixture([thread])
thread_fixture.push()
return thread
def setup_discussion_user(self, role=None, staff=False):
"""Set this test's user to have the given role in its
discussions. Role is one of 'Community TA', 'Moderator',
'Administrator', or 'Student'.
"""
kwargs = {
'course_id': self.course_id,
'staff': staff
}
if role is not None:
kwargs['roles'] = role
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, **kwargs).visit().user_info
def verify_teams_discussion_permissions(self, should_have_permission):
"""Verify that the teams discussion component is in the correct state
for the test user. If `should_have_permission` is True, assert that
the user can see controls for posting replies, voting, editing, and
deleting. Otherwise, assert that those controls are hidden.
"""
thread = self.setup_thread()
self.team_page.visit()
self.assertEqual(self.team_page.discussion_id, self.teams[0]['discussion_topic_id'])
discussion_page = self.team_page.discussion_page
discussion_page.wait_for_page()
self.assertTrue(discussion_page.is_discussion_expanded())
self.assertEqual(discussion_page.get_num_displayed_threads(), 1)
discussion_page.show_thread(thread['id'])
thread_page = discussion_page.thread_page
assertion = self.assertTrue if should_have_permission else self.assertFalse
assertion(thread_page.q(css='.post-header-actions').present)
assertion(thread_page.q(css='.add-response').present)
def test_discussion_on_my_team_page(self):
"""
Scenario: Team Page renders a discussion for a team to which I belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the existing thread
And I should see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership()
self.verify_teams_discussion_permissions(True)
@ddt.data(True, False)
def test_discussion_on_other_team_page(self, is_staff):
"""
Scenario: Team Page renders a team discussion for a team to which I do
not belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the team's thread
And I should not see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(staff=is_staff)
self.verify_teams_discussion_permissions(False)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged(self, role):
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(role=role)
self.verify_teams_discussion_permissions(True)
def assert_team_details(self, num_members, is_member=True, max_size=10):
"""
Verifies that the user can see all the information present on the detail page, according to their membership status.
Arguments:
num_members (int): number of users in a team
is_member (bool): True if the requesting user is a member of the team (default True)
max_size (int): number of users a team can have
"""
self.assertEqual(
self.team_page.team_capacity_text,
self.team_page.format_capacity_text(num_members, max_size)
)
self.assertEqual(self.team_page.team_location, 'Afghanistan')
self.assertEqual(self.team_page.team_language, 'Afar')
self.assertEqual(self.team_page.team_members, num_members)
if num_members > 0:
self.assertTrue(self.team_page.team_members_present)
else:
self.assertFalse(self.team_page.team_members_present)
if is_member:
self.assertEqual(self.team_page.team_user_membership_text, 'You are a member of this team.')
self.assertTrue(self.team_page.team_leave_link_present)
self.assertTrue(self.team_page.new_post_button_present)
else:
self.assertEqual(self.team_page.team_user_membership_text, '')
self.assertFalse(self.team_page.team_leave_link_present)
self.assertFalse(self.team_page.new_post_button_present)
def test_team_member_can_see_full_team_details(self):
"""
Scenario: Team member can see full info for team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see the full team detail
And I should see the team members
And I should see my team membership text
And I should see the language & country
And I should see the Leave Team and Invite Team
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assert_team_details(
num_members=1,
)
def test_other_users_can_see_limited_team_details(self):
"""
Scenario: Users who are not member of this team can only see limited info for this team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When I visit the Team page for that team
Then I should not see full team detail
And I should see the team members
And I should not see my team membership text
And I should not see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership(create_membership=False)
self.team_page.visit()
self.assert_team_details(is_member=False, num_members=0)
def test_user_can_navigate_to_members_profile_page(self):
"""
Scenario: User can navigate to profile page via team member profile image.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see profile images for the team members
When I click on the first profile image
Then I should be taken to the user's profile page
And I should see the username on profile page
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
learner_name = self.team_page.first_member_username
self.team_page.click_first_profile_image()
learner_profile_page = LearnerProfilePage(self.browser, learner_name)
learner_profile_page.wait_for_page()
learner_profile_page.wait_for_field('username')
self.assertTrue(learner_profile_page.field_is_visible('username'))
def test_join_team(self):
"""
Scenario: User can join a team if they are not already a member.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I visit the Team page for that team
Then I should see Join Team button
And I should not see New Post button
When I click on Join Team button
Then there should be no Join Team button and no message
And an analytics event should be emitted
And I should see the updated information under Team Details
And I should see New Post button
And if I switch to "My Team", the team I have joined is displayed
"""
self._set_team_configuration_and_membership(create_membership=False)
teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
teams_page.visit()
teams_page.view_first_team()
self.assertTrue(self.team_page.join_team_button_present)
expected_events = [
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'joined_from_team_view'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_page.click_join_team_button()
self.assertFalse(self.team_page.join_team_button_present)
self.assertFalse(self.team_page.join_team_message_present)
self.assert_team_details(num_members=1, is_member=True)
# Verify that if one switches to "My Team" without reloading the page, the newly joined team is shown.
self.teams_page.click_all_topics()
self.verify_my_team_count(1)
def test_already_member_message(self):
"""
Scenario: User should see `You are already in a team` if user is a
member of another team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am already a member of a team
And I visit a team other than mine
Then I should see `You are already in a team` message
"""
self._set_team_configuration_and_membership(membership_team_index=0, visit_team_index=1)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'You already belong to another team.')
self.assert_team_details(num_members=0, is_member=False)
def test_team_full_message(self):
"""
Scenario: User should see `Team is full` message when team is full.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And team has no space left
And I am not a member of any team
And I visit the team
Then I should see `Team is full` message
"""
self._set_team_configuration_and_membership(
create_membership=True,
max_team_size=1,
membership_team_index=0,
visit_team_index=0,
another_user=True
)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'This team is full.')
self.assert_team_details(num_members=1, is_member=False, max_size=1)
def test_leave_team(self):
"""
Scenario: User can leave a team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am a member of team
And I visit the team
And I should not see Join Team button
And I should see New Post button
Then I should see Leave Team link
When I click on Leave Team link
Then user should be removed from team
And an analytics event should be emitted
And I should see Join Team button
And I should not see New Post button
And if I switch to "My Team", the team I have left is not displayed
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assertFalse(self.team_page.join_team_button_present)
self.assert_team_details(num_members=1)
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'remove_method': 'self_removal'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
# I think we're seeing the same problem that we're seeing in
# CreateTeamTest.test_user_can_see_error_message_for_missing_data.
# We click on the "leave team" link after it's loaded, but before
# its JavaScript event handler is added. Adding this sleep gives
# enough time for that event handler to bind to the link. Sorry!
# For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_page.click_leave_team_link()
self.assert_team_details(num_members=0, is_member=False)
self.assertTrue(self.team_page.join_team_button_present)
# Verify that if one switches to "My Team" without reloading the page, the old team no longer shows.
self.teams_page.click_all_topics()
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the team profile page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the team profile page
Then my browser should post a page viewed event
"""
self._set_team_configuration_and_membership()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-team',
'topic_id': self.topic['id'],
'team_id': self.teams[0]['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.team_page.visit()
|
ahmedaljazzar/edx-platform
|
common/test/acceptance/tests/lms/test_teams.py
|
Python
|
agpl-3.0
| 84,274
|
[
"VisIt"
] |
994003c5fbb6e6e9dbb2159c4a1f0a535756dcbe4f81e4428838e7e1bdcd8aff
|
"""Tests for next/prev message navigation."""
import os
import re
from rust_test_common import *
# Test data for message order tests.
#
# 'command': The command to run.
# 'path': The path to open.
# 'messages': List of expected messages. Tuples of:
# (sequence, path, level, rowcol, inline_highlight, raw_highlight)
#
# sequence: The order the messages should be visited. The messages should
# be listed in the order that they are emitted by rustc. The number
# indicates the order that the Rust Enhanced plugin will visit them.
# This is different because we visit based by level (warnings first).
# path: The file that this message is for.
# level: The message level ('WARN', 'ERR', etc.)
# rowcol: The 0-based row/col where the cursor should appear.
# inline_highlight: The message that is displayed in the build output when
# `show_inline_messages` is True.
# raw_highlight: The message that is displayed in the build output when
# `show_inline_messages` is False.
#
# The highlight messages are regular expressions.
TEST_DATA = [
{'command': 'build',
'path': 'examples/ex_warning1.rs',
'messages': [
(1, 'examples/warning1.rs', 'WARN', (0, 11), 'examples/warning1.rs:1', ' --> examples/warning1.rs:1:4'),
(2, 'examples/warning1.rs', 'WARN', (4, 11), 'examples/warning1.rs:5', ' --> examples/warning1.rs:5:4'),
(3, 'examples/warning2.rs', 'WARN', (81, 14), 'examples/warning2.rs:82', ' --> examples/warning2.rs:82:4'),
]
},
{'command': 'build',
'path': 'tests/test_all_levels.rs',
'messages': [
(2, 'tests/test_all_levels.rs', 'WARN', (3, 17), 'tests/test_all_levels.rs:4', ' --> tests/test_all_levels.rs:4:7'),
(1, 'tests/test_all_levels.rs', 'ERR', (8, 25), 'tests/test_all_levels.rs:9', ' --> tests/test_all_levels.rs:9:25'),
]
},
{'command': 'test',
'path': 'tests/test_test_output.rs',
'messages': [
(1, 'tests/test_test_output.rs', 'ERR', (8, 4), 'tests/test_test_output.rs:9:5', 'tests/test_test_output.rs:9:5'),
(2, 'tests/test_test_output.rs', 'ERR', (13, 4), 'tests/test_test_output.rs:14:5', 'tests/test_test_output.rs:14:5'),
(3, 'tests/test_test_output.rs', 'ERR', (18, 4), 'tests/test_test_output.rs:19:5', 'tests/test_test_output.rs:19:5'),
(4, 'tests/test_test_output.rs', 'ERR', (23, 4), 'tests/test_test_output.rs:24:5', 'tests/test_test_output.rs:24:5'),
(5, 'tests/test_test_output.rs', 'ERR', (28, 4), 'tests/test_test_output.rs:29:5', 'tests/test_test_output.rs:29:5'),
(6, 'tests/test_test_output.rs', 'ERR', (59, 28), 'tests/test_test_output.rs:60:29', 'tests/test_test_output.rs:60:29'),
]
}
]
class TestMessageOrder(TestBase):
def setUp(self):
super(TestMessageOrder, self).setUp()
# Set a base version for these tests.
version = util.get_rustc_version(sublime.active_window(), plugin_path)
if semver.match(version, '<1.46.0-beta'):
self.skipTest('Tests require rust 1.46 or newer.')
# Make it so that the build target is automatically determined from
# the active view so each test doesn't have to specify it.
window = sublime.active_window()
pkg = os.path.normpath(os.path.join(plugin_path,
'tests/message-order'))
window.run_command('cargo_set_target', {'target': 'auto',
'variant': 'build',
'package': pkg})
window.run_command('cargo_set_target', {'target': 'auto',
'variant': 'test',
'package': pkg})
def test_message_order(self):
"""Test message order.
This opens a file and runs the build command on it. It then verifies
that next/prev message goes to the correct message in order.
"""
for data in TEST_DATA:
path = os.path.join('tests/message-order', data['path'])
# rust_next_message sorts based on error level.
inline_sort = [x[1:] for x in sorted(data['messages'])]
# Sublime's built-in next/prev message goes in source order.
unsorted = [x[1:] for x in data['messages']]
self._with_open_file(path, self._test_message_order,
messages=inline_sort, inline=True, command=data['command'])
self._with_open_file(path, self._test_message_order,
messages=unsorted, inline=False, command=data['command'])
def _test_message_order(self, view, messages, inline, command):
self._override_setting('show_errors_inline', inline)
self._cargo_clean(view)
window = view.window()
self._run_build_wait(command)
to_close = []
def check_sequence(direction):
omsgs = messages if direction == 'next' else list(reversed(messages))
levels = ('all', 'error', 'warning') if inline else ('all',)
times = 2 if inline else 1
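# Level filtering is only meaningful for the plugin's own navigation; with
# inline messages disabled it presumably defers to Sublime's built-in
# next/prev result (see the rowcol check below), so a single pass over
# 'all' suffices there.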
for level in levels:
# Run through all messages twice to verify it starts again.
for _ in range(times):
for (next_filename, next_level, next_row_col,
inline_highlight, raw_highlight) in omsgs:
next_filename = os.path.join(plugin_path,
'tests', 'message-order', next_filename)
if inline and (
(level == 'error' and next_level != 'ERR') or
(level == 'warning' and next_level != 'WARN')):
continue
window.run_command('rust_' + direction + '_message',
{'levels': level})
# Sublime doesn't always immediately move the active
# view when 'next_result' is called, so give it a
# moment to update.
time.sleep(0.1)
next_view = window.active_view()
to_close.append(next_view)
self.assertEqual(next_view.file_name(), next_filename)
region = next_view.sel()[0]
rowcol = next_view.rowcol(region.begin())
if inline:
self.assertEqual(rowcol, next_row_col)
else:
# When inline is disabled, we use Sublime's
# built-in next/prev, which goes to the beginning.
# Just validate the row is correct.
self.assertEqual(rowcol[0], next_row_col[0])
# Verify the output panel is highlighting the correct
# thing.
build_panel = window.find_output_panel(
plugin.rust.opanel.PANEL_NAME)
panel_text = build_panel.substr(build_panel.sel()[0])
if inline:
self.assertRegex(panel_text, inline_highlight)
else:
self.assertRegex(panel_text, raw_highlight)
check_sequence('next')
if inline:
# Reset back to first.
window.run_command('rust_next_message')
# Run backwards twice, too.
check_sequence('prev')
# Test starting backwards.
window.focus_view(view)
self._cargo_clean(view)
self._run_build_wait(command)
check_sequence('prev')
for close_view in to_close:
if close_view.window():
window.focus_view(close_view)
window.run_command('close_file')
def test_no_messages(self):
self._with_open_file('tests/message-order/examples/ex_no_messages.rs',
self._test_no_messages)
def _test_no_messages(self, view):
self._cargo_clean(view)
window = view.window()
self._run_build_wait()
# Verify command does nothing.
for direction in ('next', 'prev'):
window.run_command('rust_' + direction + '_message')
active = window.active_view()
self.assertEqual(active, view)
sel = active.sel()[0]
self.assertEqual((sel.a, sel.b), (0, 0))
|
rust-lang/sublime-rust
|
tests/test_message_order.py
|
Python
|
mit
| 8,543
|
[
"VisIt"
] |
b87b9459b64aca2fcd9531d03adc755a2309a7779282b581de28a4fdb09daa85
|
# This file is part of MyPaint.
# Copyright (C) 2012 by Andrew Chadwick <andrewc-git@piffle.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Hue/Relative chroma/Luma adjuster widgets, with an editable gamut mask.
"""
import math
from copy import deepcopy
from random import random
import re
import os.path
import gtk
from gtk import gdk
import cairo
from gettext import gettext as _
from bases import CachedBgDrawingArea
from adjbases import ColorAdjuster
from adjbases import ColorAdjusterWidget
from adjbases import HueSaturationWheelMixin
from adjbases import HueSaturationWheelAdjuster
from sliders import HCYLumaSlider
from combined import CombinedAdjusterPage
from uicolor import *
from util import *
from palette import Palette
import geom
from uimisc import borderless_button
PREFS_MASK_KEY = "colors.hcywheel.mask.gamuts"
PREFS_ACTIVE_KEY = "colors.hcywheel.mask.active"
MASK_EDITOR_HELP=_("""<b>Gamut mask editor</b>
Edit the gamut mask here, or turn it off or on. Gamut masks are like a piece of
tracing paper with cut-out holes, placed over the color wheel to limit the
range of colors you can select. This allows you to plan your color schemes in
advance, which is useful for color scripting or to create specific moods. The
theory is that the corners of each mask shape represent a <i>subjective</i>
primary color, and that each shape contains all the colors which can be mixed
using those corner primaries. Subjective secondary colors lie at the midpoints
of the shape edges, and the center of the shape is the subjective neutral tone
for the shape.
Click to add shapes if the wheel is blank. Shapes can be dragged around and
their outlines can be adjusted by adding or moving the control points. Make a
shape too small to be useful to remove it: dragging a shape to the edge of the
disc is a quick way of doing this. You can delete shapes by dragging them
inside other shapes too. The entire mask can be rotated by turning the edge of
the disc to generate new and unexpected color schemes.
Gamut masks can be saved to GIMP-format palette files, and loaded from them.
The New button lets you choose one of several templates as a starting point.
""")
class MaskableWheelMixin:
"""Provides wheel widgets with maskable areas.
For use with implementations of `HueSaturationWheelAdjusterMixin`.
Concrete implementations can be masked so that they ignore clicks outside
certain colour areas. If the mask is active, clicks inside the mask
shapes are treated as normal, but clicks outside them are remapped to a
point on the nearest edge of the nearest shape. This can be useful for
artists who wish to plan the colour gamut of their artwork in advance.
http://gurneyjourney.blogspot.com/2011/09/part-1-gamut-masking-method.html
http://gurneyjourney.blogspot.com/2008/01/color-wheel-masking-part-1.html
"""
# Class-level variables: drawing constants etc.
min_shape_size = 0.15 #: Smallest useful shape: fraction of radius
# Instance variables (defaults / documentation)
__mask = None
    mask_toggle = None #: gtk.ToggleAction controlling whether the mask is used
mask_observers = None #: List of no-argument mask change observer callbacks
def __init__(self):
"""Instantiate instance vars and bind actions.
"""
self.__mask = []
self.mask_observers = []
action_name = "wheel%s_masked" % (id(self),)
self.mask_toggle = gtk.ToggleAction(action_name,
_("Gamut mask active"),
_("Limit your palette for specific moods using a gamut mask"),
None)
self.mask_toggle.connect("toggled", self.__mask_toggled_cb)
def __mask_toggled_cb(self, action):
active = action.get_active()
prefs = self._get_prefs()
prefs[PREFS_ACTIVE_KEY] = active
self.queue_draw()
def set_color_manager(self, manager):
"""Sets the color manager, and reads an initial mask from prefs.
Extends `ColorAdjuster`'s implementation.
"""
ColorAdjuster.set_color_manager(self, manager)
prefs = self._get_prefs()
mask_flat = prefs.get(PREFS_MASK_KEY, None)
mask_active = prefs.get(PREFS_ACTIVE_KEY, False)
if mask_flat is not None:
self.set_mask(self._unflatten_mask(mask_flat))
self.mask_toggle.set_active(mask_active)
@staticmethod
def _flatten_mask(mask):
flat_mask = []
for shape_colors in mask:
shape_flat = [c.to_hex_str() for c in shape_colors]
flat_mask.append(shape_flat)
return flat_mask
@staticmethod
def _unflatten_mask(flat_mask):
mask = []
for shape_flat in flat_mask:
shape_colors = [RGBColor.new_from_hex_str(s) for s in shape_flat]
mask.append(shape_colors)
return mask
def set_mask_from_palette(self, pal):
"""Sets the mask from a palette.
Any `palette.Palette` can be loaded into the wheel widget, and colour
names are used for distinguishing mask shapes. If a colour name
matches the pattern "``mask #<decimal-int>``", it will be associated
with the shape having the ID ``<decimal-int>``.
"""
if pal is None:
return
mask_id_re = re.compile(r'\bmask\s*#?\s*(\d+)\b')
mask_shapes = {}
for i in xrange(len(pal)):
color = pal.get_color(i)
if color is None:
continue
shape_id = 0
color_name = pal.get_color_name(i)
if color_name is not None:
mask_id_match = mask_id_re.search(color_name)
if mask_id_match:
shape_id = int(mask_id_match.group(1))
if shape_id not in mask_shapes:
mask_shapes[shape_id] = []
mask_shapes[shape_id].append(color)
mask_list = []
shape_ids = mask_shapes.keys()
shape_ids.sort()
for shape_id in shape_ids:
mask_list.append(mask_shapes[shape_id])
self.set_mask(mask_list)
def set_mask(self, mask):
"""Sets the mask (a list of lists of `UIColor`s).
"""
mgr = self.get_color_manager()
prefs = self._get_prefs()
if mask is None:
self.__mask = None
self.mask_toggle.set_active(False)
prefs[PREFS_MASK_KEY] = None
else:
self.mask_toggle.set_active(True)
self.__mask = mask
prefs[PREFS_MASK_KEY] = self._flatten_mask(mask)
for func in self.mask_observers:
func()
self.queue_draw()
def get_mask(self):
"""Returns the current mask.
"""
return self.__mask
def get_mask_voids(self):
"""Returns the current mask as a list of lists of (x, y) pairs.
"""
voids = []
if not self.__mask:
return voids
for shape in self.__mask:
if len(shape) >= 3:
void = self.colors_to_mask_void(shape)
voids.append(void)
return voids
def colors_to_mask_void(self, colors):
"""Converts a set of colours to a mask void (convex hull).
Mask voids are the convex hulls of the (x, y) positions for the
colours making up the mask, so mask shapes with fewer than 3 colours
are returned as the empty list.
"""
points = []
if len(colors) < 3:
return points
for col in colors:
points.append(self.get_pos_for_color(col))
return geom.convex_hull(points)
def get_color_at_position(self, x, y, ignore_mask=False):
"""Converts an `x`, `y` position to a colour.
        Ordinarily, this implementation uses any active mask to limit the
colours which can be clicked on. Set `ignore_mask` to disable this
added behaviour.
"""
sup = HueSaturationWheelMixin
if ignore_mask or not self.mask_toggle.get_active():
return sup.get_color_at_position(self, x, y)
voids = self.get_mask_voids()
if not voids:
return sup.get_color_at_position(self, x, y)
isects = []
for vi, void in enumerate(voids):
# If we're inside a void, use the unchanged value
if geom.point_in_convex_poly((x, y), void):
return sup.get_color_at_position(self, x, y)
# If outside, find the nearest point on the nearest void's edge
for p1, p2 in geom.pairwise(void):
isect = geom.nearest_point_in_segment(p1,p2, (x,y))
if isect is not None:
d = math.sqrt((isect[0]-x)**2 + (isect[1]-y)**2)
isects.append((d, isect))
# Above doesn't include segment ends, so add those
d = math.sqrt((p1[0]-x)**2 + (p1[1]-y)**2)
isects.append((d, p1))
# Determine the closest point.
if isects:
isects.sort()
x, y = isects[0][1]
return sup.get_color_at_position(self, x, y)
@staticmethod
def _get_void_size(void):
"""Size metric for a mask void (list of x,y points; convex hull)
"""
area = geom.poly_area(void)
return math.sqrt(area)
def _get_mask_fg(self):
"""Returns the mask edge drawing colour as an rgb triple.
"""
state = self.get_state()
style = self.get_style()
c = style.fg[state]
return RGBColor.new_from_gdk_color(c).get_rgb()
def _get_mask_bg(self):
"""Returns the mask area drawing colour as an rgb triple.
"""
state = self.get_state()
style = self.get_style()
c = style.bg[state]
return RGBColor.new_from_gdk_color(c).get_rgb()
def draw_mask(self, cr, wd, ht):
"""Draws the mask, if enabled and if it has any usable voids.
For the sake of the editor subclass, this doesn't draw any voids
which are smaller than `self.min_shape_size` times the wheel radius.
"""
if not self.mask_toggle.get_active():
return
if self.__mask is None or self.__mask == []:
return
cr.save()
radius = self.get_radius(wd=wd, ht=ht)
cx, cy = self.get_center(wd=wd, ht=ht)
cr.arc(cx, cy, radius+self.border, 0, 2*math.pi)
cr.clip()
bg_rgb = self._get_mask_bg()
fg_rgb = self._get_mask_fg()
cr.push_group()
cr.set_operator(cairo.OPERATOR_OVER)
cr.set_source_rgb(*bg_rgb)
cr.rectangle(0, 0, wd, ht)
cr.fill()
voids = []
min_size = radius * self.min_shape_size
for void in self.get_mask_voids():
if len(void) < 3:
continue
size = self._get_void_size(void)
if size >= min_size:
voids.append(void)
cr.set_source_rgb(*fg_rgb)
for void in voids:
cr.new_sub_path()
cr.move_to(*void[0])
for x, y in void[1:]:
cr.line_to(x, y)
cr.close_path()
cr.set_line_width(2.0)
cr.stroke_preserve()
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.set_source_rgba(1,1,1,0)
cr.fill()
cr.set_operator(cairo.OPERATOR_OVER)
cr.pop_group_to_source()
cr.paint_with_alpha(0.666)
cr.restore()
def paint_foreground_cb(self, cr, wd, ht):
"""Paints the foreground items: mask, then marker.
"""
self.draw_mask(cr, wd, ht)
HueSaturationWheelMixin.paint_foreground_cb(self, cr, wd, ht)
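# --- Illustrative sketch (not part of the original module) -----------------
# set_mask_from_palette above groups palette entries into mask shapes using
# the colour-name pattern "mask #<decimal-int>".  The standalone function
# below shows that grouping on plain (name, value) pairs; the function name
# and its input format are assumptions for illustration - the real method
# works on palette.Palette objects and colour instances.
def _example_group_mask_shapes(named_colors):
    """Group colour values by the "mask #N" id embedded in their names.

    `named_colors` is an iterable of (name, value) pairs.  Entries whose
    name carries no mask id fall into shape 0, matching the default used by
    MaskableWheelMixin.set_mask_from_palette.  Relies on the module-level
    `re` import.
    """
    mask_id_re = re.compile(r'\bmask\s*#?\s*(\d+)\b')
    shapes = {}
    for name, value in named_colors:
        shape_id = 0
        if name:
            match = mask_id_re.search(name)
            if match:
                shape_id = int(match.group(1))
        shapes.setdefault(shape_id, []).append(value)
    return [shapes[k] for k in sorted(shapes)]
# ----------------------------------------------------------------------------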
class HCYHueChromaWheelMixin:
"""Mixin for wheel-style adjusters to display the H+C from the HCY model.
For use with implementations of `HueSaturationWheelAdjusterMixin`; make
sure this mixin comes before it in the MRO.
"""
def get_normalized_polar_pos_for_color(self, col):
col = HCYColor(color=col)
return col.c, col.h
def color_at_normalized_polar_pos(self, r, theta):
col = HCYColor(color=self.get_managed_color())
col.h = theta
col.c = r
return col
class HCYHueChromaWheel (MaskableWheelMixin,
HCYHueChromaWheelMixin,
HueSaturationWheelAdjuster):
"""Circular mapping of the H and C terms of the HCY model.
"""
tooltip_text = _("HCY Hue and Chroma")
def __init__(self):
"""Instantiate, binding events.
"""
MaskableWheelMixin.__init__(self)
HueSaturationWheelAdjuster.__init__(self)
self.connect("scroll-event", self.__scroll_cb)
self.add_events(gdk.SCROLL_MASK)
def __scroll_cb(self, widget, event):
# Scrolling controls luma.
d = self.scroll_delta
if event.direction in (gdk.SCROLL_DOWN, gdk.SCROLL_LEFT):
d *= -1
col = HCYColor(color=self.get_managed_color())
y = clamp(col.y+d, 0.0, 1.0)
if col.y != y:
col.y = y
self.set_managed_color(col)
return True
class HCYMaskEditorWheel (HCYHueChromaWheel):
"""HCY wheel specialized for mask editing.
"""
## Instance vars
is_editable = False
__last_cursor = None # previously set cursor (determines some actions)
# Objects which are active or being manipulated
__tmp_new_ctrlpoint = None # new control-point colour
__active_ctrlpoint = None # active point in active_void
__active_shape = None # list of colours or None
# Drag state
__drag_func = None
__drag_start_pos = None
## Class-level constants and variables
# Specialized cursors for different actions
__add_cursor = gdk.Cursor(gdk.PLUS)
__move_cursor = gdk.Cursor(gdk.FLEUR)
__move_point_cursor = gdk.Cursor(gdk.CROSSHAIR)
__rotate_cursor = gdk.Cursor(gdk.EXCHANGE)
# Drawing constraints and activity proximities
__ctrlpoint_radius = 2.5
__ctrlpoint_grab_radius = 10
__max_num_shapes = 6 # how many shapes are allowed
tooltip_text = _("Gamut mask editor. Click in the middle to create "
"or manipulate shapes, or rotate the mask using "
"the edges of the disc.")
def __init__(self):
"""Instantiate, and connect the editor events.
"""
HCYHueChromaWheel.__init__(self)
self.connect("button-press-event", self.__button_press_cb)
self.connect("button-release-event", self.__button_release_cb)
self.connect("motion-notify-event", self.__motion_cb)
self.connect("leave-notify-event", self.__leave_cb)
self.add_events(gdk.POINTER_MOTION_MASK|gdk.LEAVE_NOTIFY_MASK)
def __leave_cb(self, widget, event):
# Reset the active objects when the pointer leaves.
if self.__drag_func is not None:
return
self.__active_shape = None
self.__active_ctrlpoint = None
self.__tmp_new_ctrlpoint = None
self.queue_draw()
self.__set_cursor(None)
def __set_cursor(self, cursor):
# Sets the window cursor, retaining a record.
if cursor != self.__last_cursor:
self.get_window().set_cursor(cursor)
self.__last_cursor = cursor
def __update_active_objects(self, x, y):
# Decides what a click or a drag at (x, y) would do, and updates the
# mouse cursor and draw state to match.
assert self.__drag_func is None
self.__active_shape = None
self.__active_ctrlpoint = None
self.__tmp_new_ctrlpoint = None
self.queue_draw() # yes, always
# Possible mask void manipulations
mask = self.get_mask()
for mask_idx in xrange(len(mask)):
colors = mask[mask_idx]
if len(colors) < 3:
continue
# If the pointer is near an existing control point, clicking and
# dragging will move it.
void = []
for col_idx in xrange(len(colors)):
col = colors[col_idx]
px, py = self.get_pos_for_color(col)
dp = math.sqrt((x-px)**2 + (y-py)**2)
if dp <= self.__ctrlpoint_grab_radius:
mask.remove(colors)
mask.insert(0, colors)
self.__active_shape = colors
self.__active_ctrlpoint = col_idx
self.__set_cursor(None)
return
void.append((px, py))
# If within a certain distance of an edge, dragging will create and
# then move a new control point.
void = geom.convex_hull(void)
for p1, p2 in geom.pairwise(void):
isect = geom.nearest_point_in_segment(p1, p2, (x, y))
if isect is not None:
ix, iy = isect
di = math.sqrt((ix-x)**2 + (iy-y)**2)
if di <= self.__ctrlpoint_grab_radius:
newcol = self.get_color_at_position(ix, iy)
self.__tmp_new_ctrlpoint = newcol
mask.remove(colors)
mask.insert(0, colors)
self.__active_shape = colors
self.__set_cursor(None)
return
# If the mouse is within a mask void, then dragging would move that
# shape around within the mask.
if geom.point_in_convex_poly((x, y), void):
mask.remove(colors)
mask.insert(0, colors)
self.__active_shape = colors
self.__set_cursor(None)
return
# Away from shapes, clicks and drags manipulate the entire mask: adding
# cutout voids to it, or rotating the whole mask around its central
# axis.
alloc = self.get_allocation()
cx, cy = self.get_center(alloc=alloc)
radius = self.get_radius(alloc=alloc)
dx, dy = x-cx, y-cy
r = math.sqrt(dx**2 + dy**2)
if r < radius*(1.0-self.min_shape_size):
if len(mask) < self.__max_num_shapes:
d = self.__dist_to_nearest_shape(x, y)
minsize = radius * self.min_shape_size
if d is None or d > minsize:
# Clicking will result in a new void
self.__set_cursor(self.__add_cursor)
else:
# Click-drag to rotate the entire mask
self.__set_cursor(self.__rotate_cursor)
def __drag_active_shape(self, px, py):
# Updates the position of the active shape during drags.
sup = HCYHueChromaWheel
x0, y0 = self.__drag_start_pos
dx = px - x0
dy = py - y0
self.__active_shape[:] = []
for col in self.__active_shape_predrag:
cx, cy = self.get_pos_for_color(col)
cx += dx
cy += dy
col2 = sup.get_color_at_position(self, cx, cy, ignore_mask=True)
self.__active_shape.append(col2)
def __drag_active_ctrlpoint(self, px, py):
# Moves the highlighted control point during drags.
sup = HCYHueChromaWheel
x0, y0 = self.__drag_start_pos
dx = px - x0
dy = py - y0
col = self.__active_ctrlpoint_predrag
cx, cy = self.get_pos_for_color(col)
cx += dx
cy += dy
col = sup.get_color_at_position(self, cx, cy, ignore_mask=True)
self.__active_shape[self.__active_ctrlpoint] = col
def __rotate_mask(self, px, py):
# Rotates the entire mask around the grey axis during drags.
cx, cy = self.get_center()
x0, y0 = self.__drag_start_pos
theta0 = math.atan2(x0-cx, y0-cy)
theta = math.atan2(px-cx, py-cy)
dntheta = (theta0 - theta) / (2*math.pi)
while dntheta <= 0:
dntheta += 1.0
if self.__mask_predrag is None:
self.__mask_predrag = []
for shape in self.get_mask():
shape_hcy = [HCYColor(color=c) for c in shape]
self.__mask_predrag.append(shape_hcy)
newmask = []
for shape in self.__mask_predrag:
shape_rot = []
for col in shape:
col_r = HCYColor(color=col)
col_r.h += dntheta
col_r.h %= 1.0
shape_rot.append(col_r)
newmask.append(shape_rot)
self.set_mask(newmask)
def __button_press_cb(self, widget, event):
# Begins drags.
if self.__drag_func is None:
self.__update_active_objects(event.x, event.y)
self.__drag_start_pos = event.x, event.y
if self.__tmp_new_ctrlpoint is not None:
self.__active_ctrlpoint = len(self.__active_shape)
self.__active_shape.append(self.__tmp_new_ctrlpoint)
self.__tmp_new_ctrlpoint = None
if self.__active_ctrlpoint is not None:
self.__active_shape_predrag = self.__active_shape[:]
ctrlpt = self.__active_shape[self.__active_ctrlpoint]
self.__active_ctrlpoint_predrag = ctrlpt
self.__drag_func = self.__drag_active_ctrlpoint
self.__set_cursor(self.__move_point_cursor)
elif self.__active_shape is not None:
self.__active_shape_predrag = self.__active_shape[:]
self.__drag_func = self.__drag_active_shape
self.__set_cursor(self.__move_cursor)
elif self.__last_cursor is self.__rotate_cursor:
self.__mask_predrag = None
self.__drag_func = self.__rotate_mask
def __button_release_cb(self, widget, event):
# Ends the current drag & cleans up, or handle other clicks.
if self.__drag_func is None:
# Clicking when not in a drag adds a new shape
if self.__last_cursor is self.__add_cursor:
self.__add_void(event.x, event.y)
else:
# Cleanup when dragging ends
self.__drag_func = None
self.__drag_start_pos = None
self.__cleanup_mask()
self.__update_active_objects(event.x, event.y)
def __motion_cb(self, widget, event):
# Fire the current drag function if one's active.
if self.__drag_func is not None:
self.__drag_func(event.x, event.y)
self.queue_draw()
else:
self.__update_active_objects(event.x, event.y)
def __cleanup_mask(self):
mask = self.get_mask()
# Drop points from all shapes which are not part of the convex hulls.
for shape in mask:
if len(shape) <= 3:
continue
points = [self.get_pos_for_color(c) for c in shape]
edge_points = geom.convex_hull(points)
for col, point in zip(shape, points):
if point in edge_points:
continue
shape.remove(col)
# Drop shapes smaller than the minimum size.
newmask = []
min_size = self.get_radius() * self.min_shape_size
for shape in mask:
points = [self.get_pos_for_color(c) for c in shape]
void = geom.convex_hull(points)
size = self._get_void_size(void)
if size >= min_size:
newmask.append(shape)
mask = newmask
# Drop shapes whose points entirely lie within other shapes
newmask = []
maskvoids = [(shape, geom.convex_hull([self.get_pos_for_color(c)
for c in shape]))
for shape in mask]
for shape1, void1 in maskvoids:
shape1_subsumed = True
for p1 in void1:
p1_subsumed = False
for shape2, void2 in maskvoids:
if shape1 is shape2:
continue
if geom.point_in_convex_poly(p1, void2):
p1_subsumed = True
break
if not p1_subsumed:
shape1_subsumed = False
break
if not shape1_subsumed:
newmask.append(shape1)
mask = newmask
self.set_mask(mask)
self.queue_draw()
def __dist_to_nearest_shape(self, x, y):
# Distance from `x`, `y` to the nearest edge or vertex of any shape.
dists = []
for hull in self.get_mask_voids():
# cx, cy = geom.poly_centroid(hull)
for p1, p2 in geom.pairwise(hull):
np = geom.nearest_point_in_segment(p1,p2, (x,y))
if np is not None:
nx, ny = np
d = math.sqrt((x-nx)**2 + (y-ny)**2)
dists.append(d)
# Segment end too
d = math.sqrt((p1[0]-x)**2 + (p1[1]-y)**2)
dists.append(d)
if not dists:
return None
dists.sort()
return dists[0]
def __add_void(self, x, y):
# Adds a new shape into the empty space centred at `x`, `y`.
self.queue_draw()
# Pick a nice size for the new shape, taking care not to
# overlap any other shapes, at least initially.
alloc = self.get_allocation()
cx, cy = self.get_center(alloc=alloc)
radius = self.get_radius(alloc=alloc)
dx, dy = x-cx, y-cy
r = math.sqrt(dx**2 + dy**2)
d = self.__dist_to_nearest_shape(x, y)
if d is None:
d = radius
size = min((radius - r), d) * 0.95
minsize = radius * self.min_shape_size
if size < minsize:
return
# Create a regular polygon with one of its edges facing the
# middle of the wheel.
shape = []
nsides = 3 + len(self.get_mask())
psi = math.atan2(dy, dx) + (math.pi/nsides)
psi += math.pi
for i in xrange(nsides):
theta = 2.0 * math.pi * float(i)/nsides
theta += psi
px = int(x + size*math.cos(theta))
py = int(y + size*math.sin(theta))
col = self.get_color_at_position(px, py, ignore_mask=True)
shape.append(col)
mask = self.get_mask()
mask.append(shape)
self.set_mask(mask)
def draw_mask_control_points(self, cr, wd, ht):
# Draw active and inactive control points on the active shape.
if self.__active_shape is None:
return
cr.save()
active_rgb = 1, 1, 1
normal_rgb = 0, 0, 0
delete_rgb = 1, 0, 0
cr.set_line_width(1.0)
void = self.colors_to_mask_void(self.__active_shape)
# Highlight the objects that would be directly or indirectly affected
# if the shape were dragged, and how.
min_size = self.get_radius(wd=wd, ht=ht) * self.min_shape_size
void_rgb = normal_rgb
if self._get_void_size(void) < min_size:
# Shape will be deleted
void_rgb = delete_rgb
elif ( (self.__active_ctrlpoint is None) \
and (self.__tmp_new_ctrlpoint is None) ):
# The entire shape would be moved
void_rgb = active_rgb
# Outline the current shape
cr.set_source_rgb(*void_rgb)
for p_idx, p in enumerate(void):
if p_idx == 0:
cr.move_to(*p)
else:
cr.line_to(*p)
cr.close_path()
cr.stroke()
# Control points
colors = self.__active_shape
for col_idx, col in enumerate(colors):
px, py = self.get_pos_for_color(col)
if (px, py) not in void:
# not in convex hull (is it worth doing this fragile test?)
continue
point_rgb = void_rgb
if col_idx == self.__active_ctrlpoint:
point_rgb = active_rgb
cr.set_source_rgb(*point_rgb)
cr.arc(px, py, self.__ctrlpoint_radius, 0, 2*math.pi)
cr.fill()
if self.__tmp_new_ctrlpoint:
px, py = self.get_pos_for_color(self.__tmp_new_ctrlpoint)
cr.set_source_rgb(*active_rgb)
cr.arc(px, py, self.__ctrlpoint_radius, 0, 2*math.pi)
cr.fill()
# Centroid
cr.set_source_rgb(*void_rgb)
cx, cy = geom.poly_centroid(void)
cr.save()
cr.set_line_cap(cairo.LINE_CAP_SQUARE)
cr.set_line_width(0.5)
cr.translate(int(cx)+0.5, int(cy)+0.5)
cr.move_to(-2, 0)
cr.line_to(2, 0)
cr.stroke()
cr.move_to(0, -2)
cr.line_to(0, 2)
cr.stroke()
cr.restore()
def paint_foreground_cb(self, cr, wd, ht):
"""Foreground drawing override.
"""
self.draw_mask(cr, wd, ht)
self.draw_mask_control_points(cr, wd, ht)
class HCYMaskPreview (MaskableWheelMixin,
HCYHueChromaWheelMixin,
HueSaturationWheelAdjuster):
"""Mask preview widget; not scrollable.
These widgets can be used with `palette.Palette.load_via_dialog()` as
preview widgets during mask selection.
"""
def __init__(self, mask=None):
MaskableWheelMixin.__init__(self)
HueSaturationWheelAdjuster.__init__(self)
self.set_app_paintable(True)
self.set_has_window(False)
self.set_mask(mask)
self.mask_toggle.set_active(True)
self.set_size_request(64, 64)
def render_background_cb(self, cr, wd, ht):
sup = HueSaturationWheelAdjuster
sup.render_background_cb(self, cr, wd=wd, ht=ht)
self.draw_mask(cr, wd=wd, ht=ht)
def paint_foreground_cb(self, cr, wd, ht):
pass
def get_background_validity(self):
return deepcopy(self.get_mask())
def get_managed_color(self):
return HCYColor(0, 0, 0.5)
def set_palette(self, palette):
# Compatibility with Palette.load_via_dialog()
self.set_mask_from_palette(palette)
class HCYMaskTemplateDialog (gtk.Dialog):
"""Dialog for choosing a mask from a small set of templates.
http://gurneyjourney.blogspot.co.uk/2008/02/shapes-of-color-schemes.html
"""
@property
def __templates(self):
Y = 0.5
H = 1-0.05
# Reusable shapes...
atmos_triad = [HCYColor( H, 0.95, Y),
HCYColor(( H+0.275)%1, 0.55, Y),
HCYColor((1+H-0.275)%1, 0.55, Y)]
def __coffin(h):
# Hexagonal coffin shape with the foot end at the centre
# of the wheel.
shape = []
shape.append(HCYColor((h + 0.25)%1, 0.03, Y))
shape.append(HCYColor((h + 1 - 0.25)%1, 0.03, Y))
shape.append(HCYColor((h + 0.01)%1, 0.95, Y))
shape.append(HCYColor((h + 1 - 0.01)%1, 0.95, Y))
shape.append(HCYColor((h + 0.04)%1, 0.70, Y))
shape.append(HCYColor((h + 1 - 0.04)%1, 0.70, Y))
return shape
def __complement_blob(h):
# Small pentagonal blob at the given hue, used for an organic-
# looking dab of a complementary hue.
shape = []
shape.append(HCYColor((h+0.015)%1, 0.94, Y))
shape.append(HCYColor((h+0.985)%1, 0.94, Y))
shape.append(HCYColor((h+0.035)%1, 0.71, Y))
shape.append(HCYColor((h+0.965)%1, 0.71, Y))
shape.append(HCYColor((h )%1, 0.54, Y))
return shape
templates = []
templates.append((_("Atmospheric Triad"),
_("Moody and subjective, defined by one dominant primary and two "
"primaries which are less intense."),
[ deepcopy(atmos_triad) ]))
templates.append((_("Shifted Triad"),
_("Weighted more strongly towards the dominant colour."),
[[HCYColor( H, 0.95, Y),
HCYColor(( H+0.35)%1, 0.4, Y),
HCYColor((1+H-0.35)%1, 0.4, Y) ]] ))
templates.append((_("Complementary"),
_("Contrasting opposites, balanced by having central neutrals "
"between them on the colour wheel."),
[[HCYColor((H+0.005)%1, 0.9, Y),
HCYColor((H+0.995)%1, 0.9, Y),
HCYColor((H+0.25 )%1, 0.1, Y),
HCYColor((H+0.75 )%1, 0.1, Y),
HCYColor((H+0.505)%1, 0.9, Y),
HCYColor((H+0.495)%1, 0.9, Y),
]] ))
templates.append((_("Mood and Accent"),
_("One main range of colors, with a complementary accent for "
"variation and highlights."),
[ deepcopy(atmos_triad),
__complement_blob(H+0.5) ] ))
#[HCYColor((H+0.483)%1, 0.95, Y),
# HCYColor((H+0.517)%1, 0.95, Y),
# HCYColor((H+0.52)%1, 0.725, Y),
# HCYColor((H+0.48)%1, 0.725, Y) ]] ))
templates.append((_("Split Complementary"),
_("Two analogous colours and a complement to them, with no "
"secondary colours between them."),
[ __coffin(H+0.5), __coffin(1+H-0.1), __coffin(H+0.1) ] ))
return templates
def __init__(self, parent, target):
gtk.Dialog.__init__(self, _("New gamut mask from template"), parent,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT))
self.set_position(gtk.WIN_POS_MOUSE)
self.target = target
#self.vbox.set_spacing(6)
size = 64
for name, desc, mask in self.__templates:
mask = deepcopy(mask)
label = gtk.Label()
label.set_markup("<b>%s</b>\n\n%s" % (name, desc))
label.set_size_request(375, -1)
label.set_line_wrap(True)
label.set_alignment(0, 0.5)
preview = HCYMaskPreview(deepcopy(mask))
preview_frame = gtk.AspectFrame(obey_child=True)
preview_frame.add(preview)
preview_frame.set_shadow_type(gtk.SHADOW_NONE)
hbox = gtk.HBox()
hbox.set_spacing(6)
hbox.pack_start(preview_frame, False, False)
hbox.pack_start(label, True, True)
button = gtk.Button()
button.add(hbox)
button.set_relief(gtk.RELIEF_NONE)
button.connect("clicked", self.__button_clicked_cb, mask)
self.vbox.pack_start(button, True, True)
self.connect("response", self.__response_cb)
self.connect("show", self.__show_cb)
for w in self.vbox:
w.show_all()
def __button_clicked_cb(self, widget, mask):
self.target.set_mask(mask)
self.hide()
def __show_cb(self, widget, *a):
self.vbox.show_all()
def __response_cb(self, widget, response_id):
self.hide()
return True
class HCYMaskPropertiesDialog (gtk.Dialog):
"""Dialog for choosing, editing, or enabling/disabling masks.
"""
def __init__(self, parent, target):
gtk.Dialog.__init__(self, _("Gamut mask editor"), parent,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_HELP, gtk.RESPONSE_HELP,
gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
self.set_position(gtk.WIN_POS_MOUSE)
self.target = target
ed = HCYMaskEditorWheel()
self.editor = ed
ed.set_size_request(300, 300)
ed.mask_toggle.set_active(True)
self.mask_toggle_ctrl = gtk.CheckButton(_("Active"), use_underline=False)
self.mask_toggle_ctrl.set_tooltip_text(ed.mask_toggle.get_tooltip())
ed.mask_observers.append(self.__mask_changed_cb)
hbox = gtk.HBox()
hbox.set_spacing(3)
# Sidebar buttonbox
# On the right and packed to the top. This places its secondary
# control, a mask toggle button, next to the "OK" button so it's less
# likely to be missed.
bbox = gtk.VButtonBox()
new_btn = self.__new_button = gtk.Button(stock=gtk.STOCK_NEW)
load_btn = self.__load_button = gtk.Button(stock=gtk.STOCK_OPEN)
save_btn = self.__save_button = gtk.Button(stock=gtk.STOCK_SAVE)
clear_btn = self.__clear_button = gtk.Button(stock=gtk.STOCK_CLEAR)
new_btn.set_tooltip_text(_("Create mask from template"))
load_btn.set_tooltip_text(_("Load mask from a GIMP palette file"))
save_btn.set_tooltip_text(_("Save mask to a GIMP palette file"))
clear_btn.set_tooltip_text(_("Erase the mask"))
new_btn.connect("clicked", self.__new_clicked)
save_btn.connect("clicked", self.__save_clicked)
load_btn.connect("clicked", self.__load_clicked)
clear_btn.connect("clicked", self.__clear_clicked)
bbox.pack_start(new_btn)
bbox.pack_start(load_btn)
bbox.pack_start(save_btn)
bbox.pack_start(clear_btn)
bbox.pack_start(self.mask_toggle_ctrl)
bbox.set_child_secondary(self.mask_toggle_ctrl, True)
bbox.set_layout(gtk.BUTTONBOX_START)
hbox.pack_start(ed, True, True)
hbox.pack_start(bbox, False, False)
hbox.set_border_width(9)
self.vbox.pack_start(hbox, True, True)
self.connect("response", self.__response_cb)
self.connect("show", self.__show_cb)
for w in self.vbox:
w.show_all()
def __mask_changed_cb(self):
mask = self.editor.get_mask()
empty = mask == []
self.__save_button.set_sensitive(not empty)
self.__clear_button.set_sensitive(not empty)
def __new_clicked(self, widget):
mask = self.editor.get_mask()
dialog = HCYMaskTemplateDialog(self, self.editor)
dialog.run()
def __save_clicked(self, button):
pal = Palette()
mask = self.editor.get_mask()
for i, shape in enumerate(mask):
for j, col in enumerate(shape):
col_name = "mask#%d primary#%d" % (i, j) #NOT localised
pal.append(col, col_name)
preview = HCYMaskPreview()
pal.save_via_dialog(
title=_("Save mask as a Gimp palette"),
parent=self,
preview=preview)
def __load_clicked(self, button):
preview = HCYMaskPreview()
preview.set_size_request(128, 128)
pal = Palette.load_via_dialog(
title=_("Load mask from a Gimp palette"),
parent=self,
preview=preview)
if pal is None:
return
self.editor.set_mask_from_palette(pal)
def __clear_clicked(self, widget):
self.editor.set_mask([])
def __show_cb(self, widget, *a):
# When the dialog is shown, clone the target adjuster's mask for
# editing. Assume the user wants to turn on the mask if there
# is no mask on the target already (reduce the number of mouse clicks)
active = True
if self.target.get_mask():
active = self.target.mask_toggle.get_active()
self.mask_toggle_ctrl.set_active(active)
mask = deepcopy(self.target.get_mask())
self.editor.set_mask(mask)
self.vbox.show_all()
def __response_cb(self, widget, response_id):
if response_id == gtk.RESPONSE_ACCEPT:
self.target.set_mask(self.editor.get_mask())
mask_active = self.mask_toggle_ctrl.get_active()
self.target.mask_toggle.set_active(mask_active)
if response_id == gtk.RESPONSE_HELP:
# Sub-sub-sub dialog. Ugh. Still, we have a lot to say.
dialog = gtk.MessageDialog(
parent=self,
flags=gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
buttons=gtk.BUTTONS_CLOSE, )
markup_paras = re.split(r'\n[\040\t]*\n', MASK_EDITOR_HELP)
markup = "\n\n".join([s.replace("\n", " ") for s in markup_paras])
dialog.set_markup(markup)
dialog.set_title(_("Gamut mask editor help"))
dialog.connect("response", lambda *a: dialog.destroy())
dialog.run()
else:
self.hide()
return True
class HCYAdjusterPage (CombinedAdjusterPage):
"""Combined HCY adjuster.
"""
__mask_dialog = None
__hc_adj = None
__y_adj = None
__table = None
def __init__(self):
y_adj = HCYLumaSlider()
y_adj.vertical = True
hc_adj = HCYHueChromaWheel()
table = gtk.Table(rows=2, columns=2)
xopts = gtk.FILL|gtk.EXPAND
yopts = gtk.FILL|gtk.EXPAND
table.attach(y_adj, 0,1, 0,1, gtk.FILL, yopts, 3, 3)
table.attach(hc_adj, 1,2, 0,2, xopts, yopts, 3, 3)
self.__y_adj = y_adj
self.__hc_adj = hc_adj
self.__table = table
@classmethod
def get_properties_description(class_):
return _("Set gamut mask")
def show_properties(self):
if self.__mask_dialog is None:
toplevel = self.__hc_adj.get_toplevel()
dia = HCYMaskPropertiesDialog(toplevel, self.__hc_adj)
self.__mask_dialog = dia
self.__mask_dialog.run()
@classmethod
def get_page_icon_name(class_):
return 'mypaint-tool-hcywheel'
@classmethod
def get_page_title(class_):
return _('HCY Wheel')
@classmethod
def get_page_description(class_):
return _("Set the color using cylindrical hue/chroma/luma space. "
"The circular slices are equiluminant.")
def get_page_widget(self):
frame = gtk.AspectFrame(obey_child=True)
frame.set_shadow_type(gtk.SHADOW_NONE)
frame.add(self.__table)
return frame
def set_color_manager(self, manager):
ColorAdjuster.set_color_manager(self, manager)
self.__y_adj.set_property("color-manager", manager)
self.__hc_adj.set_property("color-manager", manager)
if __name__ == '__main__':
import os, sys
from adjbases import ColorManager
mgr = ColorManager()
mgr.set_color(HSVColor(0.0, 0.0, 0.55))
if len(sys.argv) > 1:
# Generate icons
wheel = HCYHueChromaWheel()
wheel.set_color_manager(mgr)
icon_name = HCYAdjusterPage.get_page_icon_name()
for dir_name in sys.argv[1:]:
wheel.save_icon_tree(dir_name, icon_name)
else:
# Interactive test
page = HCYAdjusterPage()
page.set_color_manager(mgr)
window = gtk.Window()
window.add(page.get_page_widget())
window.set_title(os.path.basename(sys.argv[0]))
window.set_border_width(6)
window.connect("destroy", lambda *a: gtk.main_quit())
window.show_all()
gtk.main()
|
kragniz/mypaint
|
gui/colors/hcywheel.py
|
Python
|
gpl-2.0
| 43,177
|
[
"FLEUR"
] |
ac9d072dcf0566d2e5c875c5f9c73ea85857dd7920063de4b390276d307f21ce
|
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Supplies a class for working with fingerprints from databases
#DOC
"""
from rdkit import DataStructs
from rdkit.VLib.Node import VLibNode
import pickle
class DbFpSupplier(VLibNode):
"""
new fps come back with all additional fields from the
database set in a "_fieldsFromDb" data member
"""
def __init__(self, dbResults, fpColName='AutoFragmentFp', usePickles=True):
"""
DbResults should be a subclass of Dbase.DbResultSet.DbResultBase
"""
VLibNode.__init__(self)
self._usePickles = usePickles
self._data = dbResults
self._fpColName = fpColName.upper()
self._colNames = [x.upper() for x in self._data.GetColumnNames()]
if self._fpColName not in self._colNames:
raise ValueError('fp column name "%s" not found in result set: %s' %
(self._fpColName, str(self._colNames)))
self.fpCol = self._colNames.index(self._fpColName)
del self._colNames[self.fpCol]
self._colNames = tuple(self._colNames)
self._numProcessed = 0
def GetColumnNames(self):
return self._colNames
def _BuildFp(self, data):
data = list(data)
pkl = bytes(data[self.fpCol], encoding='Latin1')
del data[self.fpCol]
self._numProcessed += 1
try:
if self._usePickles:
newFp = pickle.loads(pkl, encoding='bytes')
else:
newFp = DataStructs.ExplicitBitVect(pkl)
except Exception:
import traceback
traceback.print_exc()
newFp = None
if newFp:
newFp._fieldsFromDb = data
return newFp
def next(self):
itm = self.NextItem()
if itm is None:
raise StopIteration
return itm
__next__ = next # py3
class ForwardDbFpSupplier(DbFpSupplier):
""" DbFp supplier supporting only forward iteration
>>> from rdkit import RDConfig
>>> from rdkit.Dbase.DbConnection import DbConnect
>>> fName = RDConfig.RDTestDatabase
>>> conn = DbConnect(fName,'simple_combined')
>>> suppl = ForwardDbFpSupplier(conn.GetData())
we can loop over the supplied fingerprints:
>>> fps = []
>>> for fp in suppl:
... fps.append(fp)
>>> len(fps)
12
"""
def __init__(self, *args, **kwargs):
DbFpSupplier.__init__(self, *args, **kwargs)
self.reset()
def reset(self):
DbFpSupplier.reset(self)
self._dataIter = iter(self._data)
def NextItem(self):
"""
NOTE: this has side effects
"""
try:
d = next(self._dataIter)
except StopIteration:
d = None
if d is not None:
newFp = self._BuildFp(d)
else:
newFp = None
return newFp
class RandomAccessDbFpSupplier(DbFpSupplier):
""" DbFp supplier supporting random access:
>>> import os.path
>>> from rdkit import RDConfig
>>> from rdkit.Dbase.DbConnection import DbConnect
>>> fName = RDConfig.RDTestDatabase
>>> conn = DbConnect(fName,'simple_combined')
>>> suppl = RandomAccessDbFpSupplier(conn.GetData())
>>> len(suppl)
12
we can pull individual fingerprints:
>>> fp = suppl[5]
>>> fp.GetNumBits()
128
>>> fp.GetNumOnBits()
54
a standard loop over the fingerprints:
>>> fps = []
>>> for fp in suppl:
... fps.append(fp)
>>> len(fps)
12
or we can use an indexed loop:
>>> fps = [None]*len(suppl)
>>> for i in range(len(suppl)):
... fps[i] = suppl[i]
>>> len(fps)
12
"""
def __init__(self, *args, **kwargs):
DbFpSupplier.__init__(self, *args, **kwargs)
self.reset()
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
newD = self._data[idx]
return self._BuildFp(newD)
def reset(self):
self._pos = -1
def NextItem(self):
self._pos += 1
res = None
if self._pos < len(self):
res = self[self._pos]
return res
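# --- Illustrative sketch (not part of the original module) -----------------
# _BuildFp above rebuilds each fingerprint from the raw database column,
# either by unpickling it or by handing the bytes to ExplicitBitVect.  The
# small helper below shows the pickle round trip on a toy bit vector; it is
# an assumption added for illustration only, real rows come from a Dbase
# result set.
def _exampleFpPickleRoundTrip():
  bv = DataStructs.ExplicitBitVect(8)
  bv.SetBit(1)
  bv.SetBit(5)
  pkl = pickle.dumps(bv)
  restored = pickle.loads(pkl)
  return list(restored.GetOnBits()) == [1, 5]
# ----------------------------------------------------------------------------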
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
|
greglandrum/rdkit
|
rdkit/Chem/Fingerprints/DbFpSupplier.py
|
Python
|
bsd-3-clause
| 4,652
|
[
"RDKit"
] |
a7f5616e2e74b9e1397a0ad87eb00cbdc6042ee82f89ec4d4f8a36729ecb29a7
|
# The MIT License
#
# Copyright 2014, 2015 Piotr Dabkowski
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from __future__ import unicode_literals
from pyjsparserdata import *
from std_nodes import *
from pprint import pprint
ESPRIMA_VERSION = '2.2.0'
DEBUG = False
# Small naming convention changes
# len -> leng
# id -> d
# type -> typ
# str -> st
true = True
false = False
null = None
class PyJsParser:
""" Usage:
parser = PyJsParser()
parser.parse('var JavaScriptCode = 5.1')
"""
def __init__(self):
self.clean()
def test(self, code):
pprint(self.parse(code))
def clean(self):
self.strict = None
self.sourceType = None
self.index = 0
self.lineNumber = 1
self.lineStart = 0
self.hasLineTerminator = None
self.lastIndex = None
self.lastLineNumber = None
self.lastLineStart = None
self.startIndex = None
self.startLineNumber = None
self.startLineStart = None
self.scanning = None
self.lookahead = None
self.state = None
self.extra = None
self.isBindingElement = None
self.isAssignmentTarget = None
self.firstCoverInitializedNameError = None
# 7.4 Comments
def skipSingleLineComment(self, offset):
start = self.index - offset;
while self.index < self.length:
ch = self.source[self.index];
self.index += 1
if isLineTerminator(ch):
if (ord(ch) == 13 and ord(self.source[self.index]) == 10):
self.index += 1
self.lineNumber += 1
self.hasLineTerminator = True
self.lineStart = self.index
return
def skipMultiLineComment(self):
while self.index < self.length:
ch = ord(self.source[self.index])
if isLineTerminator(ch):
if (ch == 0x0D and ord(self.source[self.index+1]) == 0x0A):
self.index += 1
self.lineNumber += 1
self.index += 1
self.hasLineTerminator = True
self.lineStart = self.index
elif ch == 0x2A:
# Block comment ends with '*/'.
if ord(self.source[self.index+1]) == 0x2F:
self.index += 2
return
self.index += 1
else:
self.index += 1
self.tolerateUnexpectedToken()
def skipComment(self):
self.hasLineTerminator = False
start = (self.index==0)
while self.index < self.length:
ch = ord(self.source[self.index])
if isWhiteSpace(ch):
self.index += 1
elif isLineTerminator(ch):
self.hasLineTerminator = True
self.index += 1
if (ch == 0x0D and ord(self.source[self.index]) == 0x0A):
self.index += 1
self.lineNumber += 1
self.lineStart = self.index
start = True
elif (ch == 0x2F): # U+002F is '/'
ch = ord(self.source[self.index+1])
if (ch == 0x2F):
self.index += 2
self.skipSingleLineComment(2)
start = True
elif (ch == 0x2A): # U+002A is '*'
self.index += 2
self.skipMultiLineComment()
else:
break
elif (start and ch == 0x2D): # U+002D is '-'
# U+003E is '>'
if (ord(self.source[self.index+1]) == 0x2D) and (ord(self.source[self.index+2]) == 0x3E):
# '-->' is a single-line comment
self.index += 3
self.skipSingleLineComment(3)
else:
break
elif (ch == 0x3C): # U+003C is '<'
if self.source[self.index+1: self.index+4]=='!--':
# <!--
self.index += 4
self.skipSingleLineComment(4)
else:
break
else:
break
def scanHexEscape(self, prefix):
code = 0
leng = 4 if (prefix == 'u') else 2
for i in xrange(leng):
if self.index < self.length and isHexDigit(self.source[self.index]):
ch = self.source[self.index]
self.index += 1
code = code * 16 + HEX_CONV[ch]
else:
return ''
return unichr(code)
def scanUnicodeCodePointEscape(self):
ch = self.source[self.index]
code = 0
# At least, one hex digit is required.
if ch == '}':
self.throwUnexpectedToken()
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if not isHexDigit(ch):
break
code = code * 16 + HEX_CONV[ch]
if code > 0x10FFFF or ch != '}':
self.throwUnexpectedToken()
# UTF-16 Encoding
if (code <= 0xFFFF):
return unichr(code)
cu1 = ((code - 0x10000) >> 10) + 0xD800;
cu2 = ((code - 0x10000) & 1023) + 0xDC00;
return unichr(cu1)+unichr(cu2)
def ccode(self, offset=0):
return ord(self.source[self.index+offset])
def log_err_case(self):
if not DEBUG:
return
print 'INDEX', self.index
print self.source[self.index-10:self.index+10]
print
def at(self, loc):
return None if loc>=self.length else self.source[loc]
def substr(self, le, offset=0):
return self.source[self.index+offset:self.index+offset+le]
def getEscapedIdentifier(self):
d = self.source[self.index]
ch = ord(d)
self.index += 1
# '\u' (U+005C, U+0075) denotes an escaped character.
if (ch == 0x5C):
if (ord(self.source[self.index]) != 0x75):
self.throwUnexpectedToken()
self.index += 1
ch = self.scanHexEscape('u')
if not ch or ch == '\\' or not isIdentifierStart(ch[0]):
self.throwUnexpectedToken()
d = ch
while (self.index < self.length):
ch = self.ccode()
if not isIdentifierPart(ch):
break
self.index += 1
d += unichr(ch)
# '\u' (U+005C, U+0075) denotes an escaped character.
if (ch == 0x5C):
d = d[0: len(d)-1]
if (self.ccode() != 0x75):
self.throwUnexpectedToken()
self.index += 1
ch = self.scanHexEscape('u');
if (not ch or ch == '\\' or not isIdentifierPart(ch[0])):
self.throwUnexpectedToken()
d += ch
return d
def getIdentifier(self):
start = self.index
self.index += 1
while (self.index < self.length):
ch = self.ccode()
if (ch == 0x5C):
                # Backslash (U+005C) marks Unicode escape sequence.
self.index = start
return self.getEscapedIdentifier()
if (isIdentifierPart(ch)):
self.index += 1
else:
break
return self.source[start: self.index]
def scanIdentifier(self):
start = self.index
# Backslash (U+005C) starts an escaped character.
d = self.getEscapedIdentifier() if (self.ccode() == 0x5C) else self.getIdentifier()
# There is no keyword or literal with only one character.
# Thus, it must be an identifier.
if (len(d)==1):
type = Token.Identifier
elif (isKeyword(d)):
type = Token.Keyword
elif (d == 'null'):
type = Token.NullLiteral
        elif (d == 'true' or d == 'false'):
type = Token.BooleanLiteral
else:
type = Token.Identifier;
return {
'type': type,
'value': d,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index
}
# 7.7 Punctuators
def scanPunctuator(self):
token = {
'type': Token.Punctuator,
'value': '',
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': self.index,
'end': self.index
}
# Check for most common single-character punctuators.
st = self.source[self.index]
if st == '{':
self.state['curlyStack'].append('{')
self.index += 1
elif st == '}':
self.index += 1
self.state['curlyStack'].pop()
elif st in {'.', '(', ')', ';', ',', '[', ']', ':', '?', '~'}:
self.index += 1
else:
# 4-character punctuator.
st = self.substr(4)
if (st == '>>>='):
self.index += 4
else:
# 3-character punctuators.
st = st[0:3]
if st in {'===', '!==', '>>>', '<<=', '>>='}:
self.index += 3
else:
# 2-character punctuators.
st = st[0:2]
if st in {'&&','||','==','!=','+=','-=','*=' ,'/=' ,'++' , '--' , '<<', '>>', '&=', '|=', '^=', '%=', '<=', '>=', '=>'}:
self.index += 2
else:
# 1-character punctuators.
st = self.source[self.index]
if st in {'<', '>', '=', '!', '+', '-', '*', '%', '&', '|', '^', '/'}:
self.index += 1
if self.index == token['start']:
self.throwUnexpectedToken()
token['end'] = self.index;
token['value'] = st
return token
# 7.8.3 Numeric Literals
def scanHexLiteral(self, start):
number = ''
while (self.index < self.length):
if (not isHexDigit(self.source[self.index])):
break
number += self.source[self.index]
self.index += 1
if not number:
self.throwUnexpectedToken()
if isIdentifierStart(self.ccode()):
self.throwUnexpectedToken()
return {
'type': Token.NumericLiteral,
'value': int(number, 16),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def scanBinaryLiteral(self, start):
number = ''
while (self.index < self.length):
ch = self.source[self.index]
if (ch != '0' and ch != '1'):
break
number += self.source[self.index]
self.index += 1
if not number:
# only 0b or 0B
self.throwUnexpectedToken()
if (self.index < self.length):
ch = self.source[self.index]
# istanbul ignore else
if (isIdentifierStart(ch) or isDecimalDigit(ch)):
self.throwUnexpectedToken();
return {
'type': Token.NumericLiteral,
'value': int(number, 2),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def scanOctalLiteral(self, prefix, start):
if isOctalDigit(prefix):
octal = True
number = '0' + self.source[self.index]
self.index += 1
else:
octal = False
self.index += 1
number = ''
while (self.index < self.length):
if (not isOctalDigit(self.source[self.index])):
break
number += self.source[self.index]
self.index += 1
if (not octal and not number):
# only 0o or 0O
self.throwUnexpectedToken()
if (isIdentifierStart(self.ccode()) or isDecimalDigit(self.ccode())):
self.throwUnexpectedToken()
return {
'type': Token.NumericLiteral,
'value': int(number, 8),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def octalToDecimal(self, ch):
# \0 is not octal escape sequence
octal = (ch != '0')
code = int(ch, 8)
if (self.index < self.length and isOctalDigit(self.source[self.index])):
octal = True
code = code * 8 + int(self.source[self.index], 8)
self.index += 1
# 3 digits are only allowed when string starts
# with 0, 1, 2, 3
if (ch in '0123' and self.index < self.length and isOctalDigit(self.source[self.index])):
code = code * 8 + int((self.source[self.index]), 8)
self.index += 1
return {
'code': code,
'octal': octal}
def isImplicitOctalLiteral(self):
# Implicit octal, unless there is a non-octal digit.
# (Annex B.1.1 on Numeric Literals)
for i in xrange(self.index + 1, self.length):
ch = self.source[i];
if (ch == '8' or ch == '9'):
return False;
if (not isOctalDigit(ch)):
return True
return True
def scanNumericLiteral(self):
ch = self.source[self.index]
assert isDecimalDigit(ch) or (ch == '.'), 'Numeric literal must start with a decimal digit or a decimal point'
start = self.index
number = ''
if ch != '.':
number = self.source[self.index]
self.index += 1
ch = self.source[self.index]
# Hex number starts with '0x'.
# Octal number starts with '0'.
# Octal number in ES6 starts with '0o'.
# Binary number in ES6 starts with '0b'.
if (number == '0'):
if (ch == 'x' or ch == 'X'):
self.index += 1
return self.scanHexLiteral(start);
if (ch == 'b' or ch == 'B'):
self.index += 1
return self.scanBinaryLiteral(start)
if (ch == 'o' or ch == 'O'):
return self.scanOctalLiteral(ch, start)
if (isOctalDigit(ch)):
if (self.isImplicitOctalLiteral()):
return self.scanOctalLiteral(ch, start);
while (isDecimalDigit(self.ccode())):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index];
if (ch == '.'):
number += self.source[self.index]
self.index += 1
while (isDecimalDigit(self.source[self.index])):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index]
if (ch == 'e' or ch == 'E'):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index]
if (ch == '+' or ch == '-'):
number += self.source[self.index]
self.index += 1
if (isDecimalDigit(self.source[self.index])):
while (isDecimalDigit(self.source[self.index])):
number += self.source[self.index]
self.index += 1
else:
self.throwUnexpectedToken()
if (isIdentifierStart(self.source[self.index])):
self.throwUnexpectedToken();
return {
'type': Token.NumericLiteral,
'value': float(number),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
# 7.8.4 String Literals
def _unescape_string(self, string):
        '''Perform string escape - for regexp literals'''
self.index = 0
self.length = len(string)
self.source = string
self.lineNumber = 0
self.lineStart = 0
octal = False
st = ''
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if ch == '\\':
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
st += self.scanUnicodeCodePointEscape()
else:
unescaped = self.scanHexEscape(ch)
if (not unescaped):
                                self.throwUnexpectedToken() # with throw I don't know what's the difference
st += unescaped
elif ch=='n':
st += '\n';
elif ch=='r':
st += '\r';
elif ch=='t':
st += '\t';
# elif ch=='b':
# st += '\b';
elif ch=='f':
st += '\f';
elif ch=='v':
st += '\x0B'
elif ch in '89':
self.throwUnexpectedToken() # again with throw....
else:
if isOctalDigit(ch):
octToDec = self.octalToDecimal(ch)
octal = octToDec['octal'] or octal
st += unichr(octToDec['code'])
else:
st += '\\' + ch # DONT ESCAPE!!!
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
elif isLineTerminator(ch):
raise RuntimeError('Line terminator inside regexp. Did not expect that')
else:
st += ch
return st
def scanStringLiteral(self):
st = ''
octal = False
quote = self.source[self.index]
        assert quote == '\'' or quote == '"', 'String literal must start with a quote'
start = self.index;
self.index += 1
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if (ch == quote):
quote = ''
break
elif (ch == '\\'):
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
st += self.scanUnicodeCodePointEscape()
else:
unescaped = self.scanHexEscape(ch)
if (not unescaped):
                                self.throwUnexpectedToken() # with throw I don't know what's the difference
st += unescaped
elif ch=='n':
st += '\n';
elif ch=='r':
st += '\r';
elif ch=='t':
st += '\t';
elif ch=='b':
st += '\b';
elif ch=='f':
st += '\f';
elif ch=='v':
st += '\x0B'
elif ch in '89':
self.throwUnexpectedToken() # again with throw....
else:
if isOctalDigit(ch):
octToDec = self.octalToDecimal(ch)
octal = octToDec['octal'] or octal
st += unichr(octToDec['code'])
else:
st += ch
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
elif isLineTerminator(ch):
break
else:
st += ch;
if (quote != ''):
self.throwUnexpectedToken()
return {
'type': Token.StringLiteral,
'value': st,
'octal': octal,
'lineNumber': self.lineNumber,
'lineStart': self.startLineStart,
'start': start,
'end': self.index}
def scanTemplate(self):
cooked = ''
terminated = False
tail = False
start = self.index
head = (self.source[self.index]=='`')
rawOffset = 2
self.index += 1
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if (ch == '`'):
rawOffset = 1;
tail = True
terminated = True
break
elif (ch == '$'):
if (self.source[self.index] == '{'):
self.state['curlyStack'].append('${')
self.index += 1
terminated = True
break;
cooked += ch
elif (ch == '\\'):
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch=='n':
cooked += '\n'
elif ch=='r':
cooked += '\r'
elif ch=='t':
cooked += '\t'
elif ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
cooked += self.scanUnicodeCodePointEscape()
else:
restore = self.index
unescaped = self.scanHexEscape(ch)
if (unescaped):
cooked += unescaped
else:
self.index = restore
cooked += ch
elif ch=='b':
cooked += '\b'
elif ch=='f':
cooked += '\f'
elif ch=='v':
cooked += '\v'
else:
if (ch == '0'):
if isDecimalDigit(self.ccode()):
# Illegal: \01 \02 and so on
self.throwError(Messages.TemplateOctalLiteral)
cooked += '\0'
elif (isOctalDigit(ch)):
# Illegal: \1 \2
self.throwError(Messages.TemplateOctalLiteral)
else:
cooked += ch
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
elif (isLineTerminator(ch)):
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] =='\n'):
self.index += 1
self.lineStart = self.index
cooked += '\n'
else:
cooked += ch;
if (not terminated):
self.throwUnexpectedToken()
if (not head):
self.state['curlyStack'].pop();
return {
'type': Token.Template,
'value': {
'cooked': cooked,
'raw': self.source[start + 1:self.index - rawOffset]},
'head': head,
'tail': tail,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def testRegExp(self, pattern, flags):
#todo: you should return python regexp object
return (pattern, flags)
def scanRegExpBody(self):
ch = self.source[self.index]
assert ch == '/', 'Regular expression literal must start with a slash'
st = ch
self.index += 1
classMarker = False
terminated = False
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
st += ch
if (ch == '\\'):
ch = self.source[self.index]
self.index += 1
# ECMA-262 7.8.5
if (isLineTerminator(ch)):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
st += ch
elif (isLineTerminator(ch)):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
elif (classMarker):
if (ch == ']'):
classMarker = False
else:
if (ch == '/'):
terminated = True
break
elif (ch == '['):
classMarker = True;
if (not terminated):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
# Exclude leading and trailing slash.
body = st[1:-1]
return {
'value': body,
'literal': st}
def scanRegExpFlags(self):
st = ''
flags = ''
while (self.index < self.length):
ch = self.source[self.index]
if (not isIdentifierPart(ch)):
break
self.index += 1
if (ch == '\\' and self.index < self.length):
ch = self.source[self.index]
if (ch == 'u'):
self.index += 1
restore = self.index
ch = self.scanHexEscape('u')
if (ch):
flags += ch
st += '\\u'
while restore < self.index:
st += self.source[restore]
restore += 1
else:
self.index = restore
flags += 'u'
st += '\\u'
self.tolerateUnexpectedToken()
else:
st += '\\'
self.tolerateUnexpectedToken()
else:
flags += ch
st += ch
return {
'value': flags,
'literal': st}
def scanRegExp(self):
self.scanning = True
self.lookahead = None
self.skipComment()
start = self.index
body = self.scanRegExpBody()
flags = self.scanRegExpFlags()
value = self.testRegExp(body['value'], flags['value'])
        self.scanning = False
return {
'literal': body['literal'] + flags['literal'],
'value': value,
'regex': {
'pattern': body['value'],
'flags': flags['value']
},
'start': start,
'end': self.index}
def collectRegex(self):
self.skipComment();
return self.scanRegExp()
def isIdentifierName(self, token):
        return token['type'] in {1,3,4,5}  # Identifier, Keyword, Boolean or Null literal token types
#def advanceSlash(self): ???
def advance(self):
if (self.index >= self.length):
return {
'type': Token.EOF,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': self.index,
'end': self.index}
ch = self.ccode()
if isIdentifierStart(ch):
token = self.scanIdentifier()
if (self.strict and isStrictModeReservedWord(token['value'])):
token['type'] = Token.Keyword
return token
# Very common: ( and ) and ;
if (ch == 0x28 or ch == 0x29 or ch == 0x3B):
return self.scanPunctuator()
# String literal starts with single quote (U+0027) or double quote (U+0022).
if (ch == 0x27 or ch == 0x22):
return self.scanStringLiteral()
# Dot (.) U+002E can also start a floating-point number, hence the need
# to check the next character.
if (ch == 0x2E):
if (isDecimalDigit(self.ccode(1))):
return self.scanNumericLiteral()
return self.scanPunctuator();
if (isDecimalDigit(ch)):
return self.scanNumericLiteral()
# Slash (/) U+002F can also start a regex.
#if (extra.tokenize && ch == 0x2F):
# return advanceSlash();
# Template literals start with ` (U+0060) for template head
# or } (U+007D) for template middle or template tail.
if (ch == 0x60 or (ch == 0x7D and self.state['curlyStack'][len(self.state['curlyStack']) - 1] == '${')):
return self.scanTemplate()
return self.scanPunctuator();
#def collectToken(self):
# loc = {
# 'start': {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}}
#
# token = self.advance()
#
# loc['end'] = {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}
# if (token['type'] != Token.EOF):
# value = self.source[token['start']: token['end']]
# entry = {
# 'type': TokenName[token['type']],
# 'value': value,
# 'range': [token['start'], token['end']],
# 'loc': loc}
# if (token.get('regex')):
# entry['regex'] = {
# 'pattern': token['regex']['pattern'],
# 'flags': token['regex']['flags']}
# self.extra['tokens'].append(entry)
# return token;
def lex(self):
self.scanning = True
self.lastIndex = self.index
self.lastLineNumber = self.lineNumber
self.lastLineStart = self.lineStart
self.skipComment()
token = self.lookahead
self.startIndex = self.index
self.startLineNumber = self.lineNumber
self.startLineStart = self.lineStart
self.lookahead = self.advance()
self.scanning = False
return token
def peek(self):
self.scanning = True
self.skipComment()
self.lastIndex = self.index
self.lastLineNumber = self.lineNumber
self.lastLineStart = self.lineStart
self.startIndex = self.index
self.startLineNumber = self.lineNumber
self.startLineStart = self.lineStart
self.lookahead = self.advance()
self.scanning = False
def createError(self, line, pos, description):
self.log_err_case()
from resources.lib.libraries.js2py.base import ERRORS, Js, JsToPyException
error = ERRORS['SyntaxError']('Line ' + unicode(line) + ': ' + unicode(description))
error.put('index', Js(pos))
error.put('lineNumber', Js(line))
error.put('column', Js(pos - (self.lineStart if self.scanning else self.lastLineStart) + 1))
error.put('description', Js(description))
return JsToPyException(error)
# Throw an exception
def throwError(self, messageFormat, *args):
msg = messageFormat % tuple(unicode(e) for e in args)
raise self.createError(self.lastLineNumber, self.lastIndex, msg);
def tolerateError(self, messageFormat, *args):
return self.throwError(messageFormat, *args)
# Throw an exception because of the token.
def unexpectedTokenError(self, token={}, message=''):
msg = message or Messages.UnexpectedToken
if (token):
typ = token['type']
if (not message):
if typ == Token.EOF: msg = Messages.UnexpectedEOS
elif (typ == Token.Identifier): msg = Messages.UnexpectedIdentifier
elif (typ == Token.NumericLiteral): msg = Messages.UnexpectedNumber
elif (typ == Token.StringLiteral): msg = Messages.UnexpectedString
elif (typ == Token.Template): msg = Messages.UnexpectedTemplate
else: msg = Messages.UnexpectedToken;
if (typ == Token.Keyword):
if (isFutureReservedWord(token['value'])):
msg = Messages.UnexpectedReserved
elif (self.strict and isStrictModeReservedWord(token['value'])):
msg = Messages.StrictReservedWord
value = token['value']['raw'] if (typ == Token.Template) else token.get('value')
else:
value = 'ILLEGAL'
msg = msg.replace('%s', unicode(value))
return (self.createError(token['lineNumber'], token['start'], msg) if (token and token.get('lineNumber')) else
self.createError(self.lineNumber if self.scanning else self.lastLineNumber, self.index if self.scanning else self.lastIndex, msg))
def throwUnexpectedToken(self, token={}, message=''):
raise self.unexpectedTokenError(token, message)
def tolerateUnexpectedToken(self, token={}, message=''):
self.throwUnexpectedToken(token, message)
# Expect the next token to match the specified punctuator.
# If not, an exception will be thrown.
def expect(self, value):
token = self.lex()
if (token['type'] != Token.Punctuator or token['value'] != value):
self.throwUnexpectedToken(token)
#/**
# * @name expectCommaSeparator
# * @description Quietly expect a comma when in tolerant mode, otherwise delegates
# * to <code>expect(value)</code>
# * @since 2.0
# */
def expectCommaSeparator(self):
self.expect(',')
# Expect the next token to match the specified keyword.
# If not, an exception will be thrown.
def expectKeyword(self, keyword):
token = self.lex();
if (token['type'] != Token.Keyword or token['value'] != keyword):
self.throwUnexpectedToken(token)
# Return true if the next token matches the specified punctuator.
def match(self, value):
return self.lookahead['type'] == Token.Punctuator and self.lookahead['value'] == value
# Return true if the next token matches the specified keyword
def matchKeyword(self, keyword):
return self.lookahead['type'] == Token.Keyword and self.lookahead['value'] == keyword
# Return true if the next token matches the specified contextual keyword
# (where an identifier is sometimes a keyword depending on the context)
def matchContextualKeyword(self, keyword):
return self.lookahead['type'] == Token.Identifier and self.lookahead['value'] == keyword
# Return true if the next token is an assignment operator
def matchAssign(self):
if (self.lookahead['type'] != Token.Punctuator):
return False;
op = self.lookahead['value']
return op in {'=','*=', '/=','%=', '+=', '-=', '<<=', '>>=', '>>>=', '&=' , '^=' , '|='}
def consumeSemicolon(self):
# Catch the very common case first: immediately a semicolon (U+003B).
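        # Otherwise rely on automatic semicolon insertion: a preceding line
        # terminator, a closing '}' or EOF also ends the statement, e.g.
        # 'a = 1\nb = 2' parses as two statements.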
if (self.at(self.startIndex) == ';' or self.match(';')):
self.lex()
return
if (self.hasLineTerminator):
return
# TODO: FIXME(ikarienator): this is seemingly an issue in the previous location info convention.
self.lastIndex = self.startIndex
self.lastLineNumber = self.startLineNumber
self.lastLineStart = self.startLineStart
if (self.lookahead['type'] != Token.EOF and not self.match('}')):
self.throwUnexpectedToken(self.lookahead)
# // Cover grammar support.
# //
# // When an assignment expression position starts with an left parenthesis, the determination of the type
# // of the syntax is to be deferred arbitrarily long until the end of the parentheses pair (plus a lookahead)
# // or the first comma. This situation also defers the determination of all the expressions nested in the pair.
# //
# // There are three productions that can be parsed in a parentheses pair that needs to be determined
# // after the outermost pair is closed. They are:
# //
# // 1. AssignmentExpression
# // 2. BindingElements
# // 3. AssignmentTargets
# //
# // In order to avoid exponential backtracking, we use two flags to denote if the production can be
# // binding element or assignment target.
# //
# // The three productions have the relationship:
# //
# // BindingElements <= AssignmentTargets <= AssignmentExpression
# //
# // with a single exception that CoverInitializedName when used directly in an Expression, generates
# // an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the
# // first usage of CoverInitializedName and report it when we reached the end of the parentheses pair.
# //
# // isolateCoverGrammar function runs the given parser function with a new cover grammar context, and it does not
    # // affect the current flags. This means the production the parser parses is only used as an expression. Therefore
# // the CoverInitializedName check is conducted.
# //
# // inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates
# // the flags outside of the parser. This means the production the parser parses is used as a part of a potential
# // pattern. The CoverInitializedName check is deferred.
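    # For example, '(a, b)' on its own is a parenthesised sequence expression, but
    # followed by '=>' it must be reinterpreted as an arrow-function parameter list;
    # '({x = y})' contains a CoverInitializedName, which is only legal when the whole
    # parenthesised form ends up used as a binding pattern (e.g. '({x = y}) => x'),
    # otherwise firstCoverInitializedNameError is reported once the pair is closed.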
def isolateCoverGrammar(self, parser):
oldIsBindingElement = self.isBindingElement
oldIsAssignmentTarget = self.isAssignmentTarget
oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
self.isBindingElement = true
self.isAssignmentTarget = true
self.firstCoverInitializedNameError = null
result = parser()
if (self.firstCoverInitializedNameError != null):
self.throwUnexpectedToken(self.firstCoverInitializedNameError)
self.isBindingElement = oldIsBindingElement
self.isAssignmentTarget = oldIsAssignmentTarget
self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError
return result
def inheritCoverGrammar(self, parser):
oldIsBindingElement = self.isBindingElement
oldIsAssignmentTarget = self.isAssignmentTarget
oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
self.isBindingElement = true
self.isAssignmentTarget = true
self.firstCoverInitializedNameError = null
result = parser()
self.isBindingElement = self.isBindingElement and oldIsBindingElement
self.isAssignmentTarget = self.isAssignmentTarget and oldIsAssignmentTarget
self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError or self.firstCoverInitializedNameError
return result
def parseArrayPattern(self):
node = Node()
elements = []
self.expect('[');
while (not self.match(']')):
if (self.match(',')):
self.lex()
elements.append(null)
else:
if (self.match('...')):
restNode = Node()
self.lex()
rest = self.parseVariableIdentifier()
elements.append(restNode.finishRestElement(rest))
break
else:
elements.append(self.parsePatternWithDefault())
if (not self.match(']')):
self.expect(',')
self.expect(']')
return node.finishArrayPattern(elements)
def parsePropertyPattern(self):
node = Node()
computed = self.match('[')
if (self.lookahead['type'] == Token.Identifier):
key = self.parseVariableIdentifier()
if (self.match('=')):
self.lex();
init = self.parseAssignmentExpression()
return node.finishProperty(
'init', key, false, WrappingNode(key).finishAssignmentPattern(key, init), false, false)
elif (not self.match(':')):
return node.finishProperty('init', key, false, key, false, true)
else:
key = self.parseObjectPropertyKey()
self.expect(':')
init = self.parsePatternWithDefault()
return node.finishProperty('init', key, computed, init, false, false)
def parseObjectPattern(self):
node = Node()
properties = []
self.expect('{')
while (not self.match('}')):
properties.append(self.parsePropertyPattern())
if (not self.match('}')):
self.expect(',')
self.lex()
return node.finishObjectPattern(properties)
def parsePattern(self):
if (self.lookahead['type'] == Token.Identifier):
return self.parseVariableIdentifier()
elif (self.match('[')):
return self.parseArrayPattern()
elif (self.match('{')):
return self.parseObjectPattern()
self.throwUnexpectedToken(self.lookahead)
def parsePatternWithDefault(self):
startToken = self.lookahead
pattern = self.parsePattern()
if (self.match('=')):
self.lex()
right = self.isolateCoverGrammar(self.parseAssignmentExpression)
pattern = WrappingNode(startToken).finishAssignmentPattern(pattern, right)
return pattern
# 11.1.4 Array Initialiser
def parseArrayInitialiser(self):
elements = []
node = Node()
self.expect('[')
while (not self.match(']')):
if (self.match(',')):
self.lex()
elements.append(null)
elif (self.match('...')):
restSpread = Node()
self.lex()
restSpread.finishSpreadElement(self.inheritCoverGrammar(self.parseAssignmentExpression))
if (not self.match(']')):
self.isAssignmentTarget = self.isBindingElement = false
self.expect(',')
elements.append(restSpread)
else:
elements.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
if (not self.match(']')):
self.expect(',')
self.lex();
return node.finishArrayExpression(elements)
# 11.1.5 Object Initialiser
def parsePropertyFunction(self, node, paramInfo):
self.isAssignmentTarget = self.isBindingElement = false;
previousStrict = self.strict;
body = self.isolateCoverGrammar(self.parseFunctionSourceElements);
if (self.strict and paramInfo['firstRestricted']):
self.tolerateUnexpectedToken(paramInfo['firstRestricted'], paramInfo.get('message'))
        if (self.strict and paramInfo.get('stricted')):
self.tolerateUnexpectedToken(paramInfo['stricted'], paramInfo.get('message'));
self.strict = previousStrict;
return node.finishFunctionExpression(null, paramInfo['params'], paramInfo['defaults'], body)
def parsePropertyMethodFunction(self):
node = Node();
params = self.parseParams();
method = self.parsePropertyFunction(node, params);
return method;
def parseObjectPropertyKey(self):
node = Node()
token = self.lex();
# // Note: This function is called only from parseObjectProperty(), where
# // EOF and Punctuator tokens are already filtered out.
typ = token['type']
if typ in [Token.StringLiteral, Token.NumericLiteral]:
if self.strict and token['octal']:
self.tolerateUnexpectedToken(token, Messages.StrictOctalLiteral);
return node.finishLiteral(token);
elif typ in {Token.Identifier, Token.BooleanLiteral, Token.NullLiteral, Token.Keyword}:
return node.finishIdentifier(token['value']);
elif typ==Token.Punctuator:
if (token['value'] == '['):
expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
self.expect(']')
return expr
self.throwUnexpectedToken(token)
def lookaheadPropertyName(self):
typ = self.lookahead['type']
if typ in {Token.Identifier, Token.StringLiteral, Token.BooleanLiteral, Token.NullLiteral, Token.NumericLiteral, Token.Keyword}:
return true
if typ == Token.Punctuator:
return self.lookahead['value'] == '['
return false
# // This function is to try to parse a MethodDefinition as defined in 14.3. But in the case of object literals,
# // it might be called at a position where there is in fact a short hand identifier pattern or a data property.
# // This can only be determined after we consumed up to the left parentheses.
# //
# // In order to avoid back tracking, it returns `null` if the position is not a MethodDefinition and the caller
    # // is responsible for visiting the other options.
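    # For example, in '{ get: 1 }' the name 'get' is just a data property key (the
    # next token is ':'), so tryParseMethodDefinition returns null and the caller
    # parses an ordinary property; in '{ get x() { return 1; } }' a getter
    # MethodDefinition is parsed here instead.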
def tryParseMethodDefinition(self, token, key, computed, node):
if (token['type'] == Token.Identifier):
# check for `get` and `set`;
if (token['value'] == 'get' and self.lookaheadPropertyName()):
computed = self.match('[');
key = self.parseObjectPropertyKey()
methodNode = Node()
self.expect('(')
self.expect(')')
value = self.parsePropertyFunction(methodNode, {
'params': [],
'defaults': [],
'stricted': null,
'firstRestricted': null,
'message': null
})
return node.finishProperty('get', key, computed, value, false, false)
elif (token['value'] == 'set' and self.lookaheadPropertyName()):
computed = self.match('[')
key = self.parseObjectPropertyKey()
methodNode = Node()
self.expect('(')
options = {
'params': [],
'defaultCount': 0,
'defaults': [],
'firstRestricted': null,
'paramSet': {}
}
if (self.match(')')):
self.tolerateUnexpectedToken(self.lookahead);
else:
self.parseParam(options);
if (options['defaultCount'] == 0):
options['defaults'] = []
self.expect(')')
value = self.parsePropertyFunction(methodNode, options);
return node.finishProperty('set', key, computed, value, false, false);
if (self.match('(')):
value = self.parsePropertyMethodFunction();
return node.finishProperty('init', key, computed, value, true, false)
return null;
def checkProto(self, key, computed, hasProto):
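        # Reject duplicate '__proto__' data properties in an object literal,
        # e.g. {__proto__: 1, "__proto__": 2}; computed keys such as
        # {["__proto__"]: 1} do not count because computed is True for them.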
if (computed == false and (key['type'] == Syntax.Identifier and key.name == '__proto__' or
key['type'] == Syntax.Literal and key.value == '__proto__')):
if (hasProto.value):
self.tolerateError(Messages.DuplicateProtoProperty);
else:
hasProto.value = true;
def parseObjectProperty(self, hasProto):
token = self.lookahead
node = Node()
computed = self.match('[');
key = self.parseObjectPropertyKey();
maybeMethod = self.tryParseMethodDefinition(token, key, computed, node)
if (maybeMethod):
self.checkProto(maybeMethod.key, maybeMethod.computed, hasProto);
return maybeMethod;
#// init property or short hand property.
self.checkProto(key, computed, hasProto);
if (self.match(':')):
self.lex();
value = self.inheritCoverGrammar(self.parseAssignmentExpression)
return node.finishProperty('init', key, computed, value, false, false)
if (token['type'] == Token.Identifier):
if (self.match('=')):
self.firstCoverInitializedNameError = self.lookahead;
self.lex();
value = self.isolateCoverGrammar(self.parseAssignmentExpression);
return node.finishProperty('init', key, computed,
WrappingNode(token).finishAssignmentPattern(key, value), false, true)
return node.finishProperty('init', key, computed, key, false, true)
self.throwUnexpectedToken(self.lookahead)
def parseObjectInitialiser(self):
properties = []
hasProto = {'value': false}
node = Node();
self.expect('{');
while (not self.match('}')):
properties.append(self.parseObjectProperty(hasProto));
if (not self.match('}')):
self.expectCommaSeparator()
self.expect('}');
return node.finishObjectExpression(properties)
def reinterpretExpressionAsPattern(self, expr):
typ = (expr['type'])
if typ in {Syntax.Identifier, Syntax.MemberExpression, Syntax.RestElement, Syntax.AssignmentPattern}:
pass
elif typ == Syntax.SpreadElement:
expr['type'] = Syntax.RestElement
self.reinterpretExpressionAsPattern(expr.argument)
elif typ == Syntax.ArrayExpression:
expr['type'] = Syntax.ArrayPattern
for i in xrange(len(expr['elements'])):
if (expr['elements'][i] != null):
self.reinterpretExpressionAsPattern(expr['elements'][i])
elif typ == Syntax.ObjectExpression:
expr['type'] = Syntax.ObjectPattern
for i in xrange(len(expr['properties'])):
self.reinterpretExpressionAsPattern(expr['properties'][i]['value']);
        elif typ == Syntax.AssignmentExpression:
expr['type'] = Syntax.AssignmentPattern;
self.reinterpretExpressionAsPattern(expr['left'])
else:
#// Allow other node type for tolerant parsing.
return
def parseTemplateElement(self, option):
if (self.lookahead['type'] != Token.Template or (option['head'] and not self.lookahead['head'])):
self.throwUnexpectedToken()
node = Node();
token = self.lex();
return node.finishTemplateElement({ 'raw': token['value']['raw'], 'cooked': token['value']['cooked'] }, token['tail'])
def parseTemplateLiteral(self):
node = Node()
quasi = self.parseTemplateElement({ 'head': true })
quasis = [quasi]
expressions = []
while (not quasi['tail']):
expressions.append(self.parseExpression());
quasi = self.parseTemplateElement({ 'head': false });
quasis.append(quasi)
return node.finishTemplateLiteral(quasis, expressions)
# 11.1.6 The Grouping Operator
def parseGroupExpression(self):
self.expect('(');
if (self.match(')')):
self.lex();
if (not self.match('=>')):
self.expect('=>')
return {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': []}
startToken = self.lookahead
if (self.match('...')):
expr = self.parseRestElement();
self.expect(')');
if (not self.match('=>')):
self.expect('=>')
return {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': [expr]}
self.isBindingElement = true;
expr = self.inheritCoverGrammar(self.parseAssignmentExpression);
if (self.match(',')):
self.isAssignmentTarget = false;
expressions = [expr]
while (self.startIndex < self.length):
if (not self.match(',')):
break
self.lex();
if (self.match('...')):
if (not self.isBindingElement):
self.throwUnexpectedToken(self.lookahead)
expressions.append(self.parseRestElement())
self.expect(')');
if (not self.match('=>')):
self.expect('=>');
self.isBindingElement = false
for i in xrange(len(expressions)):
self.reinterpretExpressionAsPattern(expressions[i])
return {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': expressions}
expressions.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
expr = WrappingNode(startToken).finishSequenceExpression(expressions);
self.expect(')')
if (self.match('=>')):
if (not self.isBindingElement):
self.throwUnexpectedToken(self.lookahead);
if (expr['type'] == Syntax.SequenceExpression):
for i in xrange(len(expr.expressions)):
self.reinterpretExpressionAsPattern(expr['expressions'][i])
else:
self.reinterpretExpressionAsPattern(expr);
expr = {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': expr['expressions'] if expr['type'] == Syntax.SequenceExpression else [expr]}
self.isBindingElement = false
return expr
# 11.1 Primary Expressions
def parsePrimaryExpression(self):
if (self.match('(')):
self.isBindingElement = false;
return self.inheritCoverGrammar(self.parseGroupExpression)
if (self.match('[')):
return self.inheritCoverGrammar(self.parseArrayInitialiser)
if (self.match('{')):
return self.inheritCoverGrammar(self.parseObjectInitialiser)
typ = self.lookahead['type']
node = Node();
if (typ == Token.Identifier):
expr = node.finishIdentifier(self.lex()['value']);
elif (typ == Token.StringLiteral or typ == Token.NumericLiteral):
self.isAssignmentTarget = self.isBindingElement = false
if (self.strict and self.lookahead.get('octal')):
self.tolerateUnexpectedToken(self.lookahead, Messages.StrictOctalLiteral)
expr = node.finishLiteral(self.lex())
elif (typ == Token.Keyword):
self.isAssignmentTarget = self.isBindingElement = false
if (self.matchKeyword('function')):
return self.parseFunctionExpression()
if (self.matchKeyword('this')):
self.lex()
return node.finishThisExpression()
if (self.matchKeyword('class')):
return self.parseClassExpression()
self.throwUnexpectedToken(self.lex())
elif (typ == Token.BooleanLiteral):
            self.isAssignmentTarget = self.isBindingElement = false
token = self.lex();
token['value'] = (token['value'] == 'true')
expr = node.finishLiteral(token)
elif (typ == Token.NullLiteral):
self.isAssignmentTarget = self.isBindingElement = false
token = self.lex()
token['value'] = null;
expr = node.finishLiteral(token)
elif (self.match('/') or self.match('/=')):
self.isAssignmentTarget = self.isBindingElement = false;
self.index = self.startIndex;
            token = self.scanRegExp()  # rescan from startIndex: here '/' begins a regex literal, not a division operator
self.lex();
expr = node.finishLiteral(token);
elif (typ == Token.Template):
expr = self.parseTemplateLiteral()
else:
self.throwUnexpectedToken(self.lex());
return expr;
# 11.2 Left-Hand-Side Expressions
def parseArguments(self):
args = [];
self.expect('(');
if (not self.match(')')):
while (self.startIndex < self.length):
args.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
if (self.match(')')):
break
self.expectCommaSeparator()
self.expect(')')
return args;
def parseNonComputedProperty(self):
node = Node()
token = self.lex();
if (not self.isIdentifierName(token)):
self.throwUnexpectedToken(token)
return node.finishIdentifier(token['value'])
def parseNonComputedMember(self):
self.expect('.')
return self.parseNonComputedProperty();
def parseComputedMember(self):
self.expect('[')
expr = self.isolateCoverGrammar(self.parseExpression)
self.expect(']')
return expr
def parseNewExpression(self):
node = Node()
self.expectKeyword('new')
callee = self.isolateCoverGrammar(self.parseLeftHandSideExpression)
args = self.parseArguments() if self.match('(') else []
self.isAssignmentTarget = self.isBindingElement = false
return node.finishNewExpression(callee, args)
def parseLeftHandSideExpressionAllowCall(self):
previousAllowIn = self.state['allowIn']
startToken = self.lookahead;
self.state['allowIn'] = true;
if (self.matchKeyword('super') and self.state['inFunctionBody']):
expr = Node();
self.lex();
expr = expr.finishSuper()
if (not self.match('(') and not self.match('.') and not self.match('[')):
self.throwUnexpectedToken(self.lookahead);
else:
expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
while True:
if (self.match('.')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseNonComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('.', expr, property)
elif (self.match('(')):
self.isBindingElement = false;
self.isAssignmentTarget = false;
args = self.parseArguments();
expr = WrappingNode(startToken).finishCallExpression(expr, args)
elif (self.match('[')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
quasi = self.parseTemplateLiteral()
expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
else:
break
self.state['allowIn'] = previousAllowIn
return expr
def parseLeftHandSideExpression(self):
assert self.state['allowIn'], 'callee of new expression always allow in keyword.'
startToken = self.lookahead
if (self.matchKeyword('super') and self.state['inFunctionBody']):
expr = Node();
self.lex();
expr = expr.finishSuper();
if (not self.match('[') and not self.match('.')):
self.throwUnexpectedToken(self.lookahead)
else:
expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression);
while True:
if (self.match('[')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
elif (self.match('.')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseNonComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('.', expr, property);
elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
quasi = self.parseTemplateLiteral();
expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
else:
break
return expr
# 11.3 Postfix Expressions
def parsePostfixExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
if (not self.hasLineTerminator and self.lookahead['type'] == Token.Punctuator):
if (self.match('++') or self.match('--')):
# 11.3.1, 11.3.2
if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
self.tolerateError(Messages.StrictLHSPostfix)
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInAssignment);
self.isAssignmentTarget = self.isBindingElement = false;
token = self.lex();
expr = WrappingNode(startToken).finishPostfixExpression(token['value'], expr);
return expr;
# 11.4 Unary Operators
def parseUnaryExpression(self):
if (self.lookahead['type'] != Token.Punctuator and self.lookahead['type'] != Token.Keyword):
expr = self.parsePostfixExpression();
elif (self.match('++') or self.match('--')):
startToken = self.lookahead;
token = self.lex();
expr = self.inheritCoverGrammar(self.parseUnaryExpression);
# 11.4.4, 11.4.5
if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
self.tolerateError(Messages.StrictLHSPrefix)
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInAssignment)
expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
self.isAssignmentTarget = self.isBindingElement = false
elif (self.match('+') or self.match('-') or self.match('~') or self.match('!')):
startToken = self.lookahead;
token = self.lex();
expr = self.inheritCoverGrammar(self.parseUnaryExpression);
expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
self.isAssignmentTarget = self.isBindingElement = false;
elif (self.matchKeyword('delete') or self.matchKeyword('void') or self.matchKeyword('typeof')):
startToken = self.lookahead;
token = self.lex();
expr = self.inheritCoverGrammar(self.parseUnaryExpression);
expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr);
if (self.strict and expr.operator == 'delete' and expr.argument.type == Syntax.Identifier):
self.tolerateError(Messages.StrictDelete)
self.isAssignmentTarget = self.isBindingElement = false;
else:
expr = self.parsePostfixExpression()
return expr
def binaryPrecedence(self, token, allowIn):
prec = 0;
typ = token['type']
if (typ != Token.Punctuator and typ != Token.Keyword):
return 0;
val = token['value']
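        # 'in' is only a binary operator while allowIn is set; a for-statement
        # header clears allowIn, so 'for (x in obj)' is not folded into a
        # relational expression here.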
if val == 'in' and not allowIn:
return 0
return PRECEDENCE.get(val, 0)
# 11.5 Multiplicative Operators
# 11.6 Additive Operators
# 11.7 Bitwise Shift Operators
# 11.8 Relational Operators
# 11.9 Equality Operators
# 11.10 Binary Bitwise Operators
# 11.11 Binary Logical Operators
def parseBinaryExpression(self):
marker = self.lookahead;
left = self.inheritCoverGrammar(self.parseUnaryExpression);
token = self.lookahead;
prec = self.binaryPrecedence(token, self.state['allowIn']);
if (prec == 0):
return left
self.isAssignmentTarget = self.isBindingElement = false;
token['prec'] = prec
self.lex()
markers = [marker, self.lookahead];
right = self.isolateCoverGrammar(self.parseUnaryExpression);
stack = [left, token, right];
while True:
prec = self.binaryPrecedence(self.lookahead, self.state['allowIn'])
if not prec > 0:
break
# Reduce: make a binary expression from the three topmost entries.
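            # e.g. in '1 * 2 + 3' the incoming '+' has lower precedence than the
            # '*' already on the stack, so 1 * 2 is reduced first; in '1 + 2 * 3'
            # the '*' is simply shifted and the final clean-up below yields 1 + (2 * 3).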
while ((len(stack) > 2) and (prec <= stack[len(stack) - 2]['prec'])):
right = stack.pop();
operator = stack.pop()['value']
left = stack.pop()
markers.pop()
expr = WrappingNode(markers[len(markers) - 1]).finishBinaryExpression(operator, left, right)
stack.append(expr)
# Shift
token = self.lex();
token['prec'] = prec;
stack.append(token);
markers.append(self.lookahead);
expr = self.isolateCoverGrammar(self.parseUnaryExpression);
stack.append(expr);
# Final reduce to clean-up the stack.
i = len(stack) - 1;
expr = stack[i]
markers.pop()
while (i > 1):
expr = WrappingNode(markers.pop()).finishBinaryExpression(stack[i - 1]['value'], stack[i - 2], expr);
i -= 2
return expr
# 11.12 Conditional Operator
def parseConditionalExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseBinaryExpression);
if (self.match('?')):
self.lex()
previousAllowIn = self.state['allowIn']
self.state['allowIn'] = true;
consequent = self.isolateCoverGrammar(self.parseAssignmentExpression);
self.state['allowIn'] = previousAllowIn;
self.expect(':');
alternate = self.isolateCoverGrammar(self.parseAssignmentExpression)
expr = WrappingNode(startToken).finishConditionalExpression(expr, consequent, alternate);
self.isAssignmentTarget = self.isBindingElement = false;
return expr
# [ES6] 14.2 Arrow Function
def parseConciseBody(self):
if (self.match('{')):
return self.parseFunctionSourceElements()
return self.isolateCoverGrammar(self.parseAssignmentExpression)
def checkPatternParam(self, options, param):
typ = param.type
if typ == Syntax.Identifier:
self.validateParam(options, param, param.name);
elif typ == Syntax.RestElement:
self.checkPatternParam(options, param.argument)
elif typ == Syntax.AssignmentPattern:
self.checkPatternParam(options, param.left)
elif typ == Syntax.ArrayPattern:
for i in xrange(len(param.elements)):
if (param.elements[i] != null):
self.checkPatternParam(options, param.elements[i]);
else:
assert typ == Syntax.ObjectPattern, 'Invalid type'
for i in xrange(len(param.properties)):
self.checkPatternParam(options, param.properties[i].value);
def reinterpretAsCoverFormalsList(self, expr):
defaults = [];
defaultCount = 0;
params = [expr];
typ = expr.type
if typ == Syntax.Identifier:
pass
elif typ == PlaceHolders.ArrowParameterPlaceHolder:
params = expr.params
else:
return null
options = {
'paramSet': {}}
le = len(params)
for i in xrange(le):
param = params[i]
if param.type == Syntax.AssignmentPattern:
params[i] = param.left;
defaults.append(param.right);
defaultCount += 1
self.checkPatternParam(options, param.left);
else:
self.checkPatternParam(options, param);
params[i] = param;
defaults.append(null);
if (options.get('message') == Messages.StrictParamDupe):
token = options['stricted'] if self.strict else options['firstRestricted']
self.throwUnexpectedToken(token, options.get('message'));
if (defaultCount == 0):
defaults = []
return {
'params': params,
'defaults': defaults,
            'stricted': options.get('stricted'),
            'firstRestricted': options.get('firstRestricted'),
'message': options.get('message')}
def parseArrowFunctionExpression(self, options, node):
if (self.hasLineTerminator):
self.tolerateUnexpectedToken(self.lookahead)
self.expect('=>')
previousStrict = self.strict;
body = self.parseConciseBody();
if (self.strict and options['firstRestricted']):
self.throwUnexpectedToken(options['firstRestricted'], options.get('message'));
if (self.strict and options['stricted']):
self.tolerateUnexpectedToken(options['stricted'], options['message']);
self.strict = previousStrict
return node.finishArrowFunctionExpression(options['params'], options['defaults'], body, body.type != Syntax.BlockStatement)
# 11.13 Assignment Operators
def parseAssignmentExpression(self):
startToken = self.lookahead;
token = self.lookahead;
expr = self.parseConditionalExpression();
if (expr.type == PlaceHolders.ArrowParameterPlaceHolder or self.match('=>')):
self.isAssignmentTarget = self.isBindingElement = false;
lis = self.reinterpretAsCoverFormalsList(expr)
if (lis):
self.firstCoverInitializedNameError = null;
return self.parseArrowFunctionExpression(lis, WrappingNode(startToken))
return expr
if (self.matchAssign()):
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInAssignment)
# 11.13.1
if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
self.tolerateUnexpectedToken(token, Messages.StrictLHSAssignment);
if (not self.match('=')):
self.isAssignmentTarget = self.isBindingElement = false;
else:
self.reinterpretExpressionAsPattern(expr)
token = self.lex();
right = self.isolateCoverGrammar(self.parseAssignmentExpression)
expr = WrappingNode(startToken).finishAssignmentExpression(token['value'], expr, right);
self.firstCoverInitializedNameError = null
return expr
# 11.14 Comma Operator
def parseExpression(self):
startToken = self.lookahead
expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
if (self.match(',')):
expressions = [expr];
while (self.startIndex < self.length):
if (not self.match(',')):
break
self.lex();
expressions.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
expr = WrappingNode(startToken).finishSequenceExpression(expressions);
return expr
# 12.1 Block
def parseStatementListItem(self):
if (self.lookahead['type'] == Token.Keyword):
val = (self.lookahead['value'])
if val=='export':
if (self.sourceType != 'module'):
self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalExportDeclaration)
return self.parseExportDeclaration();
elif val == 'import':
if (self.sourceType != 'module'):
self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalImportDeclaration);
return self.parseImportDeclaration();
elif val == 'const' or val == 'let':
return self.parseLexicalDeclaration({'inFor': false});
elif val == 'function':
return self.parseFunctionDeclaration(Node());
elif val == 'class':
return self.parseClassDeclaration();
elif val == 'pyimport': # <<<<< MODIFIED HERE
return self.parsePyimportStatement()
return self.parseStatement();
def parsePyimportStatement(self):
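        # Non-standard js2py extension (see the 'pyimport' branch above): for
        # example 'pyimport math;' parses into a Pyimport node wrapping the
        # identifier 'math'.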
n = Node()
self.lex()
n.finishPyimport(self.parseVariableIdentifier())
self.consumeSemicolon()
return n
def parseStatementList(self):
list = [];
while (self.startIndex < self.length):
if (self.match('}')):
break
list.append(self.parseStatementListItem())
return list
def parseBlock(self):
node = Node();
self.expect('{');
block = self.parseStatementList()
self.expect('}');
return node.finishBlockStatement(block);
# 12.2 Variable Statement
def parseVariableIdentifier(self):
node = Node()
token = self.lex()
if (token['type'] != Token.Identifier):
if (self.strict and token['type'] == Token.Keyword and isStrictModeReservedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictReservedWord);
else:
self.throwUnexpectedToken(token)
return node.finishIdentifier(token['value'])
def parseVariableDeclaration(self):
init = null
node = Node();
d = self.parsePattern();
# 12.2.1
if (self.strict and isRestrictedWord(d.name)):
self.tolerateError(Messages.StrictVarName);
if (self.match('=')):
self.lex();
init = self.isolateCoverGrammar(self.parseAssignmentExpression);
elif (d.type != Syntax.Identifier):
self.expect('=')
return node.finishVariableDeclarator(d, init)
def parseVariableDeclarationList(self):
lis = []
while True:
lis.append(self.parseVariableDeclaration())
if (not self.match(',')):
break
self.lex();
if not (self.startIndex < self.length):
break
return lis;
def parseVariableStatement(self, node):
self.expectKeyword('var')
declarations = self.parseVariableDeclarationList()
self.consumeSemicolon()
return node.finishVariableDeclaration(declarations)
def parseLexicalBinding(self, kind, options):
init = null
node = Node()
d = self.parsePattern();
# 12.2.1
if (self.strict and d.type == Syntax.Identifier and isRestrictedWord(d.name)):
self.tolerateError(Messages.StrictVarName);
if (kind == 'const'):
if (not self.matchKeyword('in')):
self.expect('=')
init = self.isolateCoverGrammar(self.parseAssignmentExpression)
elif ((not options['inFor'] and d.type != Syntax.Identifier) or self.match('=')):
self.expect('=');
init = self.isolateCoverGrammar(self.parseAssignmentExpression);
        return node.finishVariableDeclarator(d, init)
def parseBindingList(self, kind, options):
list = [];
while True:
list.append(self.parseLexicalBinding(kind, options));
if (not self.match(',')):
break
self.lex();
if not (self.startIndex < self.length):
break
return list;
def parseLexicalDeclaration(self, options):
node = Node();
kind = self.lex()['value']
assert kind == 'let' or kind == 'const', 'Lexical declaration must be either let or const'
declarations = self.parseBindingList(kind, options);
self.consumeSemicolon();
return node.finishLexicalDeclaration(declarations, kind);
def parseRestElement(self):
node = Node();
self.lex();
if (self.match('{')):
self.throwError(Messages.ObjectPatternAsRestParameter)
param = self.parseVariableIdentifier();
if (self.match('=')):
self.throwError(Messages.DefaultRestParameter);
if (not self.match(')')):
self.throwError(Messages.ParameterAfterRestParameter);
return node.finishRestElement(param);
# 12.3 Empty Statement
def parseEmptyStatement(self, node):
self.expect(';');
return node.finishEmptyStatement()
# 12.4 Expression Statement
def parseExpressionStatement(self, node):
expr = self.parseExpression();
self.consumeSemicolon();
return node.finishExpressionStatement(expr);
# 12.5 If statement
def parseIfStatement(self, node):
self.expectKeyword('if');
self.expect('(');
test = self.parseExpression();
self.expect(')');
consequent = self.parseStatement();
if (self.matchKeyword('else')):
self.lex();
alternate = self.parseStatement();
else:
alternate = null;
return node.finishIfStatement(test, consequent, alternate)
# 12.6 Iteration Statements
def parseDoWhileStatement(self, node):
self.expectKeyword('do')
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true
body = self.parseStatement();
self.state['inIteration'] = oldInIteration;
self.expectKeyword('while');
self.expect('(');
test = self.parseExpression();
self.expect(')')
if (self.match(';')):
self.lex()
return node.finishDoWhileStatement(body, test)
def parseWhileStatement(self, node):
self.expectKeyword('while')
self.expect('(')
test = self.parseExpression()
self.expect(')')
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true
body = self.parseStatement()
self.state['inIteration'] = oldInIteration
return node.finishWhileStatement(test, body)
def parseForStatement(self, node):
previousAllowIn = self.state['allowIn']
init = test = update = null
self.expectKeyword('for')
self.expect('(')
if (self.match(';')):
self.lex()
else:
if (self.matchKeyword('var')):
init = Node()
self.lex()
self.state['allowIn'] = false;
init = init.finishVariableDeclaration(self.parseVariableDeclarationList())
self.state['allowIn'] = previousAllowIn
if (len(init.declarations) == 1 and self.matchKeyword('in')):
self.lex()
left = init
right = self.parseExpression()
init = null
else:
self.expect(';')
elif (self.matchKeyword('const') or self.matchKeyword('let')):
init = Node()
kind = self.lex()['value']
self.state['allowIn'] = false
declarations = self.parseBindingList(kind, {'inFor': true})
self.state['allowIn'] = previousAllowIn
if (len(declarations) == 1 and declarations[0].init == null and self.matchKeyword('in')):
init = init.finishLexicalDeclaration(declarations, kind);
self.lex();
left = init;
right = self.parseExpression();
init = null;
else:
self.consumeSemicolon();
init = init.finishLexicalDeclaration(declarations, kind);
else:
initStartToken = self.lookahead
self.state['allowIn'] = false
init = self.inheritCoverGrammar(self.parseAssignmentExpression);
self.state['allowIn'] = previousAllowIn;
if (self.matchKeyword('in')):
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInForIn)
self.lex();
self.reinterpretExpressionAsPattern(init);
left = init;
right = self.parseExpression();
init = null;
else:
if (self.match(',')):
initSeq = [init];
while (self.match(',')):
self.lex();
initSeq.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
init = WrappingNode(initStartToken).finishSequenceExpression(initSeq)
self.expect(';');
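        # 'left' is only bound above when a for-in head was recognised; its
        # absence below means this is a classic three-part for loop.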
if ('left' not in locals()):
if (not self.match(';')):
test = self.parseExpression();
self.expect(';');
if (not self.match(')')):
update = self.parseExpression();
self.expect(')');
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true;
body = self.isolateCoverGrammar(self.parseStatement)
self.state['inIteration'] = oldInIteration;
return node.finishForStatement(init, test, update, body) if ('left' not in locals()) else node.finishForInStatement(left, right, body);
# 12.7 The continue statement
def parseContinueStatement(self, node):
label = null
self.expectKeyword('continue');
# Optimize the most common form: 'continue;'.
if ord(self.source[self.startIndex]) == 0x3B:
self.lex();
if (not self.state['inIteration']):
self.throwError(Messages.IllegalContinue)
return node.finishContinueStatement(null)
if (self.hasLineTerminator):
if (not self.state['inIteration']):
self.throwError(Messages.IllegalContinue);
return node.finishContinueStatement(null);
if (self.lookahead['type'] == Token.Identifier):
label = self.parseVariableIdentifier();
key = '$' + label.name;
            if key not in self.state['labelSet']:  # the label must have been declared by an enclosing labelled statement
self.throwError(Messages.UnknownLabel, label.name);
self.consumeSemicolon()
if (label == null and not self.state['inIteration']):
self.throwError(Messages.IllegalContinue)
return node.finishContinueStatement(label)
# 12.8 The break statement
def parseBreakStatement(self, node):
label = null
self.expectKeyword('break');
# Catch the very common case first: immediately a semicolon (U+003B).
if (ord(self.source[self.lastIndex]) == 0x3B):
self.lex();
if (not (self.state['inIteration'] or self.state['inSwitch'])):
self.throwError(Messages.IllegalBreak)
return node.finishBreakStatement(null)
if (self.hasLineTerminator):
if (not (self.state['inIteration'] or self.state['inSwitch'])):
self.throwError(Messages.IllegalBreak);
return node.finishBreakStatement(null);
if (self.lookahead['type'] == Token.Identifier):
label = self.parseVariableIdentifier();
key = '$' + label.name;
if not (key in self.state['labelSet']):
self.throwError(Messages.UnknownLabel, label.name);
self.consumeSemicolon();
if (label == null and not (self.state['inIteration'] or self.state['inSwitch'])):
self.throwError(Messages.IllegalBreak)
return node.finishBreakStatement(label);
# 12.9 The return statement
def parseReturnStatement(self, node):
argument = null;
self.expectKeyword('return');
if (not self.state['inFunctionBody']):
self.tolerateError(Messages.IllegalReturn);
# 'return' followed by a space and an identifier is very common.
if (ord(self.source[self.lastIndex]) == 0x20):
if (isIdentifierStart(self.source[self.lastIndex + 1])):
argument = self.parseExpression();
self.consumeSemicolon();
return node.finishReturnStatement(argument)
if (self.hasLineTerminator):
            # ASI: a newline right after 'return' terminates the statement here (any expression on the next line belongs to the next statement)
return node.finishReturnStatement(null)
if (not self.match(';')):
if (not self.match('}') and self.lookahead['type'] != Token.EOF):
argument = self.parseExpression();
self.consumeSemicolon();
return node.finishReturnStatement(argument);
# 12.10 The with statement
def parseWithStatement(self, node):
if (self.strict):
self.tolerateError(Messages.StrictModeWith)
self.expectKeyword('with');
self.expect('(');
obj = self.parseExpression();
self.expect(')');
body = self.parseStatement();
return node.finishWithStatement(obj, body);
    # 12.11 The switch statement
def parseSwitchCase(self):
consequent = []
node = Node();
if (self.matchKeyword('default')):
self.lex();
test = null;
else:
self.expectKeyword('case');
test = self.parseExpression();
self.expect(':');
while (self.startIndex < self.length):
if (self.match('}') or self.matchKeyword('default') or self.matchKeyword('case')):
break
statement = self.parseStatementListItem()
consequent.append(statement)
return node.finishSwitchCase(test, consequent)
def parseSwitchStatement(self, node):
self.expectKeyword('switch');
self.expect('(');
discriminant = self.parseExpression();
self.expect(')');
self.expect('{');
cases = [];
if (self.match('}')):
self.lex();
return node.finishSwitchStatement(discriminant, cases);
oldInSwitch = self.state['inSwitch'];
self.state['inSwitch'] = true;
defaultFound = false;
while (self.startIndex < self.length):
if (self.match('}')):
break;
clause = self.parseSwitchCase();
if (clause.test == null):
if (defaultFound):
self.throwError(Messages.MultipleDefaultsInSwitch);
defaultFound = true;
cases.append(clause);
self.state['inSwitch'] = oldInSwitch;
self.expect('}');
return node.finishSwitchStatement(discriminant, cases);
# 12.13 The throw statement
def parseThrowStatement(self, node):
self.expectKeyword('throw');
if (self.hasLineTerminator):
self.throwError(Messages.NewlineAfterThrow);
argument = self.parseExpression();
self.consumeSemicolon();
return node.finishThrowStatement(argument);
# 12.14 The try statement
def parseCatchClause(self):
node = Node();
self.expectKeyword('catch');
self.expect('(');
if (self.match(')')):
self.throwUnexpectedToken(self.lookahead);
param = self.parsePattern();
# 12.14.1
if (self.strict and isRestrictedWord(param.name)):
self.tolerateError(Messages.StrictCatchVariable);
self.expect(')');
body = self.parseBlock();
return node.finishCatchClause(param, body);
def parseTryStatement(self, node):
handler = null
finalizer = null;
self.expectKeyword('try');
block = self.parseBlock();
if (self.matchKeyword('catch')):
handler = self.parseCatchClause()
if (self.matchKeyword('finally')):
self.lex();
finalizer = self.parseBlock();
if (not handler and not finalizer):
self.throwError(Messages.NoCatchOrFinally)
return node.finishTryStatement(block, handler, finalizer)
# 12.15 The debugger statement
def parseDebuggerStatement(self, node):
self.expectKeyword('debugger');
self.consumeSemicolon();
return node.finishDebuggerStatement();
# 12 Statements
def parseStatement(self):
typ = self.lookahead['type']
if (typ == Token.EOF):
self.throwUnexpectedToken(self.lookahead)
if (typ == Token.Punctuator and self.lookahead['value'] == '{'):
return self.parseBlock()
self.isAssignmentTarget = self.isBindingElement = true;
node = Node();
val = self.lookahead['value']
if (typ == Token.Punctuator):
if val == ';':
return self.parseEmptyStatement(node);
elif val == '(':
return self.parseExpressionStatement(node);
elif (typ == Token.Keyword):
if val == 'break':
return self.parseBreakStatement(node);
elif val == 'continue':
return self.parseContinueStatement(node);
elif val == 'debugger':
return self.parseDebuggerStatement(node);
elif val == 'do':
return self.parseDoWhileStatement(node);
elif val == 'for':
return self.parseForStatement(node);
elif val == 'function':
return self.parseFunctionDeclaration(node);
elif val == 'if':
return self.parseIfStatement(node);
elif val == 'return':
return self.parseReturnStatement(node);
elif val == 'switch':
return self.parseSwitchStatement(node);
elif val == 'throw':
return self.parseThrowStatement(node);
elif val == 'try':
return self.parseTryStatement(node);
elif val == 'var':
return self.parseVariableStatement(node);
elif val == 'while':
return self.parseWhileStatement(node);
elif val == 'with':
return self.parseWithStatement(node);
expr = self.parseExpression();
# 12.12 Labelled Statements
if ((expr.type == Syntax.Identifier) and self.match(':')):
self.lex();
key = '$' + expr.name
if key in self.state['labelSet']:
self.throwError(Messages.Redeclaration, 'Label', expr.name);
self.state['labelSet'][key] = true
labeledBody = self.parseStatement()
del self.state['labelSet'][key]
return node.finishLabeledStatement(expr, labeledBody)
self.consumeSemicolon();
return node.finishExpressionStatement(expr)
# 13 Function Definition
def parseFunctionSourceElements(self):
body = []
node = Node()
firstRestricted = None
self.expect('{')
while (self.startIndex < self.length):
if (self.lookahead['type'] != Token.StringLiteral):
break
token = self.lookahead;
statement = self.parseStatementListItem()
body.append(statement)
if (statement.expression.type != Syntax.Literal):
                # this is not a directive
break
directive = self.source[token['start']+1 : token['end']-1]
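            # e.g. for the raw token "use strict" (quotes included) the slice above yields: use strict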
if (directive == 'use strict'):
self.strict = true;
if (firstRestricted):
self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral);
else:
if (not firstRestricted and token.get('octal')):
firstRestricted = token;
oldLabelSet = self.state['labelSet']
oldInIteration = self.state['inIteration']
oldInSwitch = self.state['inSwitch']
oldInFunctionBody = self.state['inFunctionBody']
oldParenthesisCount = self.state['parenthesizedCount']
self.state['labelSet'] = {}
self.state['inIteration'] = false
self.state['inSwitch'] = false
self.state['inFunctionBody'] = true
self.state['parenthesizedCount'] = 0
while (self.startIndex < self.length):
if (self.match('}')):
break
body.append(self.parseStatementListItem())
self.expect('}')
self.state['labelSet'] = oldLabelSet;
self.state['inIteration'] = oldInIteration;
self.state['inSwitch'] = oldInSwitch;
self.state['inFunctionBody'] = oldInFunctionBody;
self.state['parenthesizedCount'] = oldParenthesisCount;
return node.finishBlockStatement(body)
def validateParam(self, options, param, name):
key = '$' + name
if (self.strict):
if (isRestrictedWord(name)):
options['stricted'] = param;
options['message'] = Messages.StrictParamName
if key in options['paramSet']:
options['stricted'] = param;
options['message'] = Messages.StrictParamDupe;
        elif (not options.get('firstRestricted')):
if (isRestrictedWord(name)):
options['firstRestricted'] = param;
options['message'] = Messages.StrictParamName;
elif (isStrictModeReservedWord(name)):
options['firstRestricted'] = param;
options['message'] = Messages.StrictReservedWord;
elif key in options['paramSet']:
options['firstRestricted']= param
options['message'] = Messages.StrictParamDupe;
options['paramSet'][key] = true
def parseParam(self, options):
token = self.lookahead
de = None
if (token['value'] == '...'):
param = self.parseRestElement();
self.validateParam(options, param.argument, param.argument.name);
options['params'].append(param);
options['defaults'].append(null);
return false
param = self.parsePatternWithDefault();
self.validateParam(options, token, token['value']);
if (param.type == Syntax.AssignmentPattern):
de = param.right;
param = param.left;
options['defaultCount'] += 1
options['params'].append(param);
options['defaults'].append(de)
return not self.match(')')
    def parseParams(self, firstRestricted=None):
options = {
'params': [],
'defaultCount': 0,
'defaults': [],
'firstRestricted': firstRestricted}
self.expect('(');
if (not self.match(')')):
options['paramSet'] = {};
while (self.startIndex < self.length):
if (not self.parseParam(options)):
break
self.expect(',');
self.expect(')');
if (options['defaultCount'] == 0):
options['defaults'] = [];
return {
'params': options['params'],
'defaults': options['defaults'],
'stricted': options.get('stricted'),
'firstRestricted': options.get('firstRestricted'),
'message': options.get('message')}
def parseFunctionDeclaration(self, node, identifierIsOptional=None):
d = null
params = []
defaults = []
message = None
firstRestricted = None
self.expectKeyword('function');
if (identifierIsOptional or not self.match('(')):
token = self.lookahead;
d = self.parseVariableIdentifier();
if (self.strict):
if (isRestrictedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
else:
if (isRestrictedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictFunctionName;
elif (isStrictModeReservedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictReservedWord;
tmp = self.parseParams(firstRestricted);
params = tmp['params']
defaults = tmp['defaults']
stricted = tmp['stricted']
firstRestricted = tmp['firstRestricted']
if (tmp.get('message')):
message = tmp['message'];
previousStrict = self.strict;
body = self.parseFunctionSourceElements();
if (self.strict and firstRestricted):
self.throwUnexpectedToken(firstRestricted, message);
if (self.strict and stricted):
self.tolerateUnexpectedToken(stricted, message);
self.strict = previousStrict;
return node.finishFunctionDeclaration(d, params, defaults, body);
def parseFunctionExpression(self):
id = null
params = []
defaults = []
node = Node();
firstRestricted = None
message = None
self.expectKeyword('function');
if (not self.match('(')):
token = self.lookahead;
id = self.parseVariableIdentifier();
if (self.strict):
if (isRestrictedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
else:
if (isRestrictedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictFunctionName;
elif (isStrictModeReservedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictReservedWord;
tmp = self.parseParams(firstRestricted);
params = tmp['params']
defaults = tmp['defaults']
stricted = tmp['stricted']
firstRestricted = tmp['firstRestricted']
if (tmp.get('message')):
message = tmp['message']
previousStrict = self.strict;
body = self.parseFunctionSourceElements();
if (self.strict and firstRestricted):
self.throwUnexpectedToken(firstRestricted, message);
if (self.strict and stricted):
self.tolerateUnexpectedToken(stricted, message);
self.strict = previousStrict;
return node.finishFunctionExpression(id, params, defaults, body);
# todo Translate parse class functions!
def parseClassExpression(self):
raise NotImplementedError()
def parseClassDeclaration(self):
raise NotImplementedError()
# 14 Program
def parseScriptBody(self):
body = []
firstRestricted = None
while (self.startIndex < self.length):
token = self.lookahead;
if (token['type'] != Token.StringLiteral):
break
statement = self.parseStatementListItem();
body.append(statement);
if (statement.expression.type != Syntax.Literal):
                # this is not a directive
break
directive = self.source[token['start'] + 1: token['end'] - 1]
if (directive == 'use strict'):
self.strict = true;
if (firstRestricted):
self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral)
else:
if (not firstRestricted and token.get('octal')):
firstRestricted = token;
while (self.startIndex < self.length):
statement = self.parseStatementListItem();
# istanbul ignore if
if (statement is None):
break
body.append(statement);
return body;
def parseProgram(self):
self.peek()
node = Node()
body = self.parseScriptBody()
return node.finishProgram(body)
# DONE!!!
def parse(self, code, options={}):
if options:
raise NotImplementedError('Options not implemented! You can only use default settings.')
self.clean()
self.source = unicode(code) + ' \n ; //END' # I have to add it in order not to check for EOF every time
self.index = 0
self.lineNumber = 1 if len(self.source) > 0 else 0
self.lineStart = 0
self.startIndex = self.index
self.startLineNumber = self.lineNumber;
self.startLineStart = self.lineStart;
self.length = len(self.source)
self.lookahead = null;
self.state = {
'allowIn': true,
'labelSet': {},
'inFunctionBody': false,
'inIteration': false,
'inSwitch': false,
'lastCommentStart': -1,
'curlyStack': [],
'parenthesizedCount': None}
self.sourceType = 'script';
self.strict = false;
program = self.parseProgram();
return node_to_dict(program)
if __name__=='__main__':
import time
test_path = None
if test_path:
f = open(test_path, 'rb')
x = f.read()
f.close()
else:
x = 'var $ = "Hello!"'
p = PyJsParser()
t = time.time()
res = p.parse(x)
dt = time.time() - t + 0.000000001  # tiny offset guards against division by zero below
if test_path:
print len(res)
else:
pprint(res)
print
print 'Parsed everything in', round(dt, 5), 'seconds.'
print "That's %d characters per second" % int(len(x)/dt)
|
hexpl0it/plugin.video.genesi-ita
|
resources/lib/libraries/js2py/translators/pyjsparser.py
|
Python
|
gpl-3.0
| 103,483
|
[
"VisIt"
] |
8eeae85d384f9162a15f20d01c98b2cfb1d2ad09d7a5014fc0793fde537041cf
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import tempfile
import numpy as np
from pymatgen import Structure
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.inputs import (BasicAbinitInput, BasicMultiDataset, calc_shiftk,
num_valence_electrons, ShiftMode, gs_input, ebands_input,
ion_ioncell_relax_input)
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
def abiref_file(filename):
"""Return absolute path to filename in ~pymatgen/test_files/abinit"""
return os.path.join(_test_dir, filename)
def abiref_files(*filenames):
"""Return list of absolute paths to filenames in ~pymatgen/test_files/abinit"""
return [os.path.join(_test_dir, f) for f in filenames]
class AbinitInputTestCase(PymatgenTest):
"""Unit tests for BasicAbinitInput."""
def test_api(self):
"""Testing BasicAbinitInput API."""
# Build simple input with structure and pseudos
unit_cell = {
"acell": 3 * [10.217],
'rprim': [[.0, .5, .5],
[.5, .0, .5],
[.5, .5, .0]],
'ntypat': 1,
'znucl': [14],
'natom': 2,
'typat': [1, 1],
'xred': [[.0, .0, .0],
[.25, .25, .25]]
}
inp = BasicAbinitInput(structure=unit_cell, pseudos=abiref_file("14si.pspnc"))
shiftk = [[0.5, 0.5, 0.5], [0.5, 0., 0.], [0., 0.5, 0.], [0., 0., 0.5]]
self.assertArrayEqual(calc_shiftk(inp.structure), shiftk)
assert num_valence_electrons(inp.structure, inp.pseudos) == 8
repr(inp), str(inp)
assert len(inp) == 0 and not inp
assert inp.get("foo", "bar") == "bar" and inp.pop("foo", "bar") == "bar"
assert inp.comment is None
inp.set_comment("This is a comment")
assert inp.comment == "This is a comment"
assert inp.isnc and not inp.ispaw
inp["ecut"] = 1
assert inp.get("ecut") == 1 and len(inp) == 1 and "ecut" in inp.keys() and "foo" not in inp
# Test to_string
assert inp.to_string(with_structure=True, with_pseudos=True)
assert inp.to_string(with_structure=False, with_pseudos=False)
inp.set_vars(ecut=5, toldfe=1e-6)
assert inp["ecut"] == 5
inp.set_vars_ifnotin(ecut=-10)
assert inp["ecut"] == 5
_, tmpname = tempfile.mkstemp(text=True)
inp.write(filepath=tmpname)
# Cannot change structure variables directly.
with self.assertRaises(inp.Error):
inp.set_vars(unit_cell)
with self.assertRaises(TypeError):
inp.add_abiobjects({})
with self.assertRaises(KeyError):
inp.remove_vars("foo", strict=True)
assert not inp.remove_vars("foo", strict=False)
# Test deepcopy and remove_vars.
inp["bdgw"] = [1, 2]
inp_copy = inp.deepcopy()
inp_copy["bdgw"][1] = 3
assert inp["bdgw"] == [1, 2]
assert inp.remove_vars("bdgw") and "bdgw" not in inp
removed = inp.pop_tolerances()
assert len(removed) == 1 and removed["toldfe"] == 1e-6
# Test set_spin_mode
old_vars = inp.set_spin_mode("polarized")
assert "nsppol" in inp and inp["nspden"] == 2 and inp["nspinor"] == 1
inp.set_vars(old_vars)
# Test set_structure
new_structure = inp.structure.copy()
new_structure.perturb(distance=0.1)
inp.set_structure(new_structure)
assert inp.structure == new_structure
# Compatible with Pickle and MSONable?
self.serialize_with_pickle(inp, test_eq=False)
def test_input_errors(self):
"""Testing typical BasicAbinitInput Error"""
si_structure = Structure.from_file(abiref_file("si.cif"))
# Ambiguous list of pseudos.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si_structure, pseudos=abiref_files("14si.pspnc", "14si.4.hgh"))
# Pseudos do not match structure.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si_structure, pseudos=abiref_file("H-wdr.oncvpsp"))
si1_negative_volume = dict(
ntypat=1,
natom=1,
typat=[1],
znucl=14,
acell=3*[7.60],
rprim=[[0.0, 0.5, 0.5],
[-0.5, -0.0, -0.5],
[0.5, 0.5, 0.0]],
xred=[[0.0, 0.0, 0.0]],
)
# Negative triple product.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si1_negative_volume, pseudos=abiref_files("14si.pspnc"))
def test_helper_functions(self):
"""Testing BasicAbinitInput helper functions."""
inp = BasicAbinitInput(structure=abiref_file("si.cif"), pseudos="14si.pspnc", pseudo_dir=_test_dir)
inp.set_kmesh(ngkpt=(1, 2, 3), shiftk=(1, 2, 3, 4, 5, 6))
assert inp["kptopt"] == 1 and inp["nshiftk"] == 2
inp.set_gamma_sampling()
assert inp["kptopt"] == 1 and inp["nshiftk"] == 1
assert np.all(inp["shiftk"] == 0)
inp.set_kpath(ndivsm=3, kptbounds=None)
assert inp["ndivsm"] == 3 and inp["iscf"] == -2 and len(inp["kptbounds"]) == 12
class TestMultiDataset(PymatgenTest):
"""Unit tests for BasicMultiDataset."""
def test_api(self):
"""Testing BasicMultiDataset API."""
structure = Structure.from_file(abiref_file("si.cif"))
pseudo = abiref_file("14si.pspnc")
pseudo_dir = os.path.dirname(pseudo)
multi = BasicMultiDataset(structure=structure, pseudos=pseudo)
with self.assertRaises(ValueError):
BasicMultiDataset(structure=structure, pseudos=pseudo, ndtset=-1)
multi = BasicMultiDataset(structure=structure, pseudos=pseudo, pseudo_dir=pseudo_dir)
assert len(multi) == 1 and multi.ndtset == 1
assert multi.isnc
for i, inp in enumerate(multi):
assert list(inp.keys()) == list(multi[i].keys())
multi.addnew_from(0)
assert multi.ndtset == 2 and multi[0] is not multi[1]
assert multi[0].structure == multi[1].structure
assert multi[0].structure is not multi[1].structure
multi.set_vars(ecut=2)
assert all(inp["ecut"] == 2 for inp in multi)
self.assertEqual(multi.get("ecut"), [2, 2])
multi[1].set_vars(ecut=1)
assert multi[0]["ecut"] == 2 and multi[1]["ecut"] == 1
self.assertEqual(multi.get("ecut"), [2, 1])
self.assertEqual(multi.get("foo", "default"), ["default", "default"])
multi[1].set_vars(paral_kgb=1)
assert "paral_kgb" not in multi[0]
self.assertEqual(multi.get("paral_kgb"), [None, 1])
pert_structure = structure.copy()
pert_structure.perturb(distance=0.1)
assert structure != pert_structure
assert multi.set_structure(structure) == multi.ndtset * [structure]
assert all(s == structure for s in multi.structure)
assert multi.has_same_structures
multi[1].set_structure(pert_structure)
assert multi[0].structure != multi[1].structure and multi[1].structure == pert_structure
assert not multi.has_same_structures
split = multi.split_datasets()
assert len(split) == 2 and all(split[i] == multi[i] for i in range(multi.ndtset))
repr(multi)
str(multi)
assert multi.to_string(with_pseudos=False)
tmpdir = tempfile.mkdtemp()
filepath = os.path.join(tmpdir, "run.abi")
inp.write(filepath=filepath)
multi.write(filepath=filepath)
new_multi = BasicMultiDataset.from_inputs([inp for inp in multi])
assert new_multi.ndtset == multi.ndtset
assert new_multi.structure == multi.structure
for old_inp, new_inp in zip(multi, new_multi):
assert old_inp is not new_inp
self.assertDictEqual(old_inp.as_dict(), new_inp.as_dict())
ref_input = multi[0]
new_multi = BasicMultiDataset.replicate_input(input=ref_input, ndtset=4)
assert new_multi.ndtset == 4
for inp in new_multi:
assert ref_input is not inp
self.assertDictEqual(ref_input.as_dict(), inp.as_dict())
# Compatible with Pickle and MSONable?
self.serialize_with_pickle(multi, test_eq=False)
class ShiftModeTest(PymatgenTest):
def test_shiftmode(self):
"""Testing shiftmode"""
gamma = ShiftMode.GammaCentered
assert ShiftMode.from_object("G") == gamma
assert ShiftMode.from_object(gamma) == gamma
with self.assertRaises(TypeError):
ShiftMode.from_object({})
class FactoryTest(PymatgenTest):
def setUp(self):
# Si ebands
self.si_structure = Structure.from_file(abiref_file("si.cif"))
self.si_pseudo = abiref_file("14si.pspnc")
def test_gs_input(self):
"""Testing gs_input factory."""
inp = gs_input(self.si_structure, self.si_pseudo, kppa=10, ecut=10, spin_mode="polarized")
str(inp)
assert inp["nsppol"] == 2
assert inp["nband"] == 14
self.assertArrayEqual(inp["ngkpt"], [2, 2, 2])
def test_ebands_input(self):
"""Testing ebands_input factory."""
multi = ebands_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
str(multi)
scf_inp, nscf_inp = multi.split_datasets()
# Test dos_kppa and other options.
multi_dos = ebands_input(self.si_structure, self.si_pseudo, nscf_nband=10, kppa=10, ecut=2,
spin_mode="unpolarized", smearing=None, charge=2.0, dos_kppa=50)
assert len(multi_dos) == 3
assert all(i["charge"] == 2 for i in multi_dos)
self.assertEqual(multi_dos.get("nsppol"), [1, 1, 1])
self.assertEqual(multi_dos.get("iscf"), [None, -2, -2])
multi_dos = ebands_input(self.si_structure, self.si_pseudo, nscf_nband=10, kppa=10, ecut=2,
spin_mode="unpolarized", smearing=None, charge=2.0, dos_kppa=[50, 100])
assert len(multi_dos) == 4
self.assertEqual(multi_dos.get("iscf"), [None, -2, -2, -2])
str(multi_dos)
def test_ion_ioncell_relax_input(self):
"""Testing ion_ioncell_relax_input factory."""
multi = ion_ioncell_relax_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
str(multi)
ion_inp, ioncell_inp = multi.split_datasets()
assert ion_inp["chksymbreak"] == 0
assert ion_inp["ionmov"] == 3 and ion_inp["optcell"] == 0
assert ioncell_inp["ionmov"] == 3 and ioncell_inp["optcell"] == 2
|
gVallverdu/pymatgen
|
pymatgen/io/abinit/tests/test_inputs.py
|
Python
|
mit
| 10,885
|
[
"ABINIT",
"pymatgen"
] |
e7a140b6306369548da374d5eee81e43798f3673ea9fe5c5f9aefac84d12f84f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# sssmonitor - Global SSS monitor back end
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
from datetime import datetime, timedelta
import shared.returnvalues as returnvalues
from shared.functional import validate_input
from shared.init import initialize_main_variables
from shared.gridstat import GridStat
from shared.sandbox import load_sandbox_db
from shared.output import format_timedelta
# sandbox db has the format: {username: (password, [list_of_resources])}
PW, RESOURCES = 0, 1
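# Illustrative (hypothetical) entry showing how the indices above are used:
# userdb = {'jane': ('secret', ['sandbox.0', 'sandbox.1'])}
# userdb['jane'][PW] -> 'secret'
# userdb['jane'][RESOURCES] -> ['sandbox.0', 'sandbox.1']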
def signature():
"""Signature of the main function"""
defaults = {'show_all': [''], 'sort': [''], 'group_by': ['']}
return ['sandboxinfos', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False,
op_menu=client_id)
output_objects.append({'object_type': 'header', 'text'
: '%s Screen Saver Sandbox Monitor' % \
configuration.short_title
})
defaults = signature()[1]
(validate_status, accepted) = validate_input(user_arguments_dict,
defaults, output_objects, allow_rejects=False)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
show_all = accepted['show_all'][-1].lower()
sort = accepted['sort'][-1]
group_by = accepted['group_by'][-1].lower()
if not configuration.site_enable_sandboxes:
output_objects.append({'object_type': 'text', 'text':
'''Sandbox resources are disabled on this site.
Please contact the Grid admins %s if you think they should be enabled.
''' % configuration.admin_email})
return (output_objects, returnvalues.OK)
# Load the user file
try:
userdb = load_sandbox_db(configuration)
except Exception, exc:
output_objects.append({'object_type': 'error_text', 'text'
: 'Could not load any sandbox information'})
return (output_objects, returnvalues.SYSTEM_ERROR)
# Load statistics objects
grid_stat = GridStat(configuration, logger)
grid_stat.update()
sandboxinfos = []
# loop through all users
total_jobs = 0
for username in userdb:
resources_jobs = {}
jobs_per_resource = 0
jobs_per_user = 0
resources_walltime = {}
walltime_per_resource = timedelta(0)
walltime_per_user = timedelta(0)
# loop through all resources of each user
for resource in userdb[username][RESOURCES]:
# now find number of jobs successfully executed by resource
jobs_per_resource = \
grid_stat.get_value(grid_stat.RESOURCE_TOTAL, resource,
'FINISHED')
jobs_per_user += jobs_per_resource
n = {resource: jobs_per_resource}
resources_jobs.update(n)
walltime_per_resource = \
grid_stat.get_value(grid_stat.RESOURCE_TOTAL, resource,
'USED_WALLTIME')
if walltime_per_resource != 0:
if not walltime_per_user:
walltime_per_user = walltime_per_resource
else:
walltime_per_user += walltime_per_resource
else:
walltime_per_resource = timedelta(0)
n = {resource: walltime_per_resource}
resources_walltime.update(n)
if group_by == 'users' and (jobs_per_user > 0 or show_all
== 'true'):
sandboxinfo = {'object_type': 'sandboxinfo'}
sandboxinfo['username'] = username
sandboxinfo['resource'] = len(userdb[username][RESOURCES])
sandboxinfo['jobs'] = jobs_per_user
sandboxinfo['walltime'] = format_timedelta(walltime_per_user)
sandboxinfo['walltime_sort'] = walltime_per_user
sandboxinfos.append(sandboxinfo)
elif jobs_per_user > 0 or show_all == 'true':
for res in resources_jobs.keys():
if resources_jobs[res] > 0 or show_all == 'true':
sandboxinfo = {'object_type': 'sandboxinfo'}
sandboxinfo['username'] = username
sandboxinfo['resource'] = res
sandboxinfo['jobs'] = resources_jobs[res]
sandboxinfo['walltime'] = format_timedelta(resources_walltime[res])
sandboxinfo['walltime_sort'] = resources_walltime[res]
sandboxinfos.append(sandboxinfo)
total_jobs += jobs_per_user
if 'username' == sort:
# sort by owner: case insensitive
sandboxinfos.sort(cmp=lambda a, b: cmp(a['username'].lower(),
b['username'].lower()))
elif 'resource' == sort:
# sort by numerical resource ID
if group_by == 'users':
sandboxinfos.sort(cmp=lambda a, b: cmp(int(b['resource']),
int(a['resource'])))
else:
sandboxinfos.sort(cmp=lambda a, b: cmp(int(a['resource'
].lower().replace('sandbox.', '')),
int(b['resource'
].lower().replace('sandbox.', ''))))
elif 'jobs' == sort:
# sort by most jobs done
sandboxinfos.sort(reverse=True)
elif 'walltime' == sort:
# sort by most walltime
sandboxinfos.sort(cmp=lambda a, b: cmp(a['walltime_sort'].days
* 86400 + a['walltime_sort'].seconds, b['walltime_sort'
].days * 86400 + b['walltime_sort'].seconds),
reverse=True)
else:
# do not sort
pass
# Sort
output_objects.append({'object_type': 'verbatim', 'text'
: 'Sort by: '})
link_list = []
for name in ('username', 'resource', 'jobs', 'walltime'):
link_list.append({'object_type': 'link', 'destination'
: '?sort=%s;group_by=%s' % (name, group_by),
'text': '%s' % name.capitalize()})
output_objects.append({'object_type': 'multilinkline', 'links'
: link_list})
# Group
output_objects.append({'object_type': 'html_form', 'text': '<br />'})
output_objects.append({'object_type': 'verbatim', 'text': 'Show: '})
link_list = []
for name in ('resources', 'users'):
link_list.append({'object_type': 'link', 'destination'
: '?sort=%s;group_by=%s' % (sort, name), 'text'
: '%s' % name.capitalize()})
output_objects.append({'object_type': 'multilinkline', 'links'
: link_list})
# Time stamp
now = datetime.now()
output_objects.append({'object_type': 'text', 'text'
: 'Updated on %s' % now})
output_objects.append({'object_type': 'html_form', 'text': '<br />'})
# Actual stats
output_objects.append({'object_type': 'sandboxinfos', 'sandboxinfos'
: sandboxinfos})
output_objects.append({'object_type': 'text', 'text'
: 'Total jobs run by sandboxes: %s'
% total_jobs})
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/sssmonitor.py
|
Python
|
gpl-2.0
| 8,347
|
[
"Brian"
] |
f16d233d4c11de859a421617ffb109ff086f342a71ac91a3564b961643f2ef05
|
import ast
import codecs
import os
from subprocess import check_call
from distutils.command.sdist import sdist
from pip.download import PipSession
from pip.req import parse_requirements
from setuptools import find_packages, setup
name = "django-gitstorage"
package_name = "gitstorage"
long_desc_file = "README.rst"
classifiers = [
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"License :: OSI Approved :: GPL License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Django Modules",
]
# Don't touch below
def get_requirements(source):
install_reqs = parse_requirements(source, session=PipSession())
return set([str(ir.req) for ir in install_reqs])
class VersionFinder(ast.NodeVisitor):
def __init__(self):
self.data = {}
def visit_Assign(self, node):
if node.targets[0].id in (
'__version__',
'__author__',
'__contact__',
'__homepage__',
'__license__',
):
self.data[node.targets[0].id[2:-2]] = node.value.s
def read(*path_parts):
filename = os.path.join(os.path.dirname(__file__), *path_parts)
with open(filename) as fp:
return fp.read()
def find_info(*path_parts):
finder = VersionFinder()
node = ast.parse(read(*path_parts))
finder.visit(node)
info = finder.data
info['docstring'] = ast.get_docstring(node)
return info
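# For illustration (hypothetical values): an __init__.py containing
# __version__ = "0.1" and __author__ = "Jane Doe" would make find_info()
# return {'version': '0.1', 'author': 'Jane Doe',
# 'docstring': <module docstring or None>}.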
package_info = find_info(package_name, '__init__.py')
setup(
name=name,
version=package_info['version'],
packages=find_packages(),
include_package_data=True,
description=package_info['docstring'],
long_description=read(long_desc_file),
url=package_info['homepage'],
author=package_info['author'],
author_email=package_info['contact'],
install_requires=get_requirements('requirements.txt'),
license=package_info['license'],
classifiers=classifiers,
)
|
bors-ltd/django-gitstorage
|
setup.py
|
Python
|
gpl-3.0
| 2,122
|
[
"VisIt"
] |
889f76609ed9cc28bcae6aaaf0885a183395a10afef8f105e382cf2bd560b30f
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from nose.tools import * # noqa PEP8 asserts
from osf_tests import factories
from tests.base import OsfTestCase
from website.util import api_url_for
from website.views import find_bookmark_collection
class TestSearchViews(OsfTestCase):
def setUp(self):
super(TestSearchViews, self).setUp()
import website.search.search as search
search.delete_all()
robbie = factories.UserFactory(fullname='Robbie Williams')
self.project = factories.ProjectFactory(creator=robbie)
self.contrib = factories.UserFactory(fullname='Brian May')
for i in range(0, 12):
factories.UserFactory(fullname='Freddie Mercury{}'.format(i))
self.user_one = factories.AuthUserFactory()
self.user_two = factories.AuthUserFactory()
self.project_private_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=False)
self.project_private_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=False)
self.project_public_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=True)
self.project_public_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=True)
def tearDown(self):
super(TestSearchViews, self).tearDown()
import website.search.search as search
search.delete_all()
def test_search_views(self):
#Test search contributor
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': self.contrib.fullname})
assert_equal(res.status_code, 200)
result = res.json['users']
assert_equal(len(result), 1)
brian = result[0]
assert_equal(brian['fullname'], self.contrib.fullname)
assert_in('gravatar_url', brian)
assert_equal(brian['registered'], self.contrib.is_registered)
assert_equal(brian['active'], self.contrib.is_active)
#Test search pagination
res = self.app.get(url, {'query': 'fr'})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(pages, 3)
assert_equal(page, 0)
#Test default page 1
res = self.app.get(url, {'query': 'fr', 'page': 1})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 1)
#Test default page 2
res = self.app.get(url, {'query': 'fr', 'page': 2})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 4)
assert_equal(page, 2)
#Test smaller pages
res = self.app.get(url, {'query': 'fr', 'size': 5})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 0)
assert_equal(pages, 3)
#Test smaller pages page 2
res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 4)
assert_equal(page, 2)
assert_equal(pages, 3)
#Test search projects
url = '/search/'
res = self.app.get(url, {'q': self.project.title})
assert_equal(res.status_code, 200)
#Test search node
res = self.app.post_json(
api_url_for('search_node'),
{'query': self.project.title},
auth=factories.AuthUserFactory().auth
)
assert_equal(res.status_code, 200)
#Test search node includePublic true
res = self.app.post_json(
api_url_for('search_node'),
{'query': 'a', 'includePublic': True},
auth=self.user_one.auth
)
node_ids = [node['id'] for node in res.json['nodes']]
assert_in(self.project_private_user_one._id, node_ids)
assert_in(self.project_public_user_one._id, node_ids)
assert_in(self.project_public_user_two._id, node_ids)
assert_not_in(self.project_private_user_two._id, node_ids)
#Test search node includePublic false
res = self.app.post_json(
api_url_for('search_node'),
{'query': 'a', 'includePublic': False},
auth=self.user_one.auth
)
node_ids = [node['id'] for node in res.json['nodes']]
assert_in(self.project_private_user_one._id, node_ids)
assert_in(self.project_public_user_one._id, node_ids)
assert_not_in(self.project_public_user_two._id, node_ids)
assert_not_in(self.project_private_user_two._id, node_ids)
#Test search user
url = '/api/v1/search/user/'
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_false(res.json['results'])
user_one = factories.AuthUserFactory(fullname='Joe Umwali')
user_two = factories.AuthUserFactory(fullname='Joan Uwase')
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_false(res.json['results'][0]['social'])
user_one.social = {
'github': user_one.given_name,
'twitter': user_one.given_name,
'ssrn': user_one.given_name
}
user_one.save()
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_not_in('Joan', res.body)
assert_true(res.json['results'][0]['social'])
assert_equal(res.json['results'][0]['names']['fullname'], user_one.fullname)
assert_equal(res.json['results'][0]['social']['github'], 'http://github.com/{}'.format(user_one.given_name))
assert_equal(res.json['results'][0]['social']['twitter'], 'http://twitter.com/{}'.format(user_one.given_name))
assert_equal(res.json['results'][0]['social']['ssrn'], 'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id={}'.format(user_one.given_name))
user_two.social = {
'profileWebsites': ['http://me.com/{}'.format(user_two.given_name)],
'orcid': user_two.given_name,
'linkedIn': user_two.given_name,
'scholar': user_two.given_name,
'impactStory': user_two.given_name,
'baiduScholar': user_two.given_name
}
user_two.save()
user_three = factories.AuthUserFactory(fullname='Janet Umwali')
user_three.social = {
'github': user_three.given_name,
'ssrn': user_three.given_name
}
user_three.save()
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 2)
assert_true(res.json['results'][0]['social'])
assert_true(res.json['results'][1]['social'])
assert_not_equal(res.json['results'][0]['social']['ssrn'], res.json['results'][1]['social']['ssrn'])
assert_not_equal(res.json['results'][0]['social']['github'], res.json['results'][1]['social']['github'])
res = self.app.get(url, {'q': 'Uwase'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_true(res.json['results'][0]['social'])
assert_not_in('ssrn', res.json['results'][0]['social'])
assert_equal(res.json['results'][0]['social']['profileWebsites'][0], 'http://me.com/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['impactStory'], 'https://impactstory.org/u/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['orcid'], 'http://orcid.org/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['baiduScholar'], 'http://xueshu.baidu.com/scholarID/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['linkedIn'], 'https://www.linkedin.com/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['scholar'], 'http://scholar.google.com/citations?user={}'.format(user_two.given_name))
class TestODMTitleSearch(OsfTestCase):
""" Docs from original method:
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
def setUp(self):
super(TestODMTitleSearch, self).setUp()
self.user = factories.AuthUserFactory()
self.user_two = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user, title="foo")
self.project_two = factories.ProjectFactory(creator=self.user_two, title="bar")
self.public_project = factories.ProjectFactory(creator=self.user_two, is_public=True, title="baz")
self.registration_project = factories.RegistrationFactory(creator=self.user, title="qux")
self.folder = factories.CollectionFactory(creator=self.user, title="quux", category='project')
self.dashboard = find_bookmark_collection(self.user)
self.dashboard.category = 'project'
self.dashboard.save()
self.url = api_url_for('search_projects_by_title')
def test_search_projects_by_title(self):
res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 2)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
|
aaxelb/osf.io
|
osf_tests/test_search_views.py
|
Python
|
apache-2.0
| 15,088
|
[
"Brian"
] |
e5397e2b39f404778c7a001f0506341cd14c5d7b47852d20925de86bc62c3a70
|
# -*- coding: utf-8 -*-
#
# define_hill_tononi.py
#
# This file is part of the NEST Instrumentation App.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST Instrumentation App is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST Instrumentation App is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST Instrumentation App. If not, see <http://www.gnu.org/licenses/>.
"""
Definition of partial Hill-Tononi (2005) Model.
This module provides layer and projections declarations suitable for
use with the NEST Topology Module.
The file defines a Hill-Tononi model variant limited to the primary pathway.
"""
from copy import deepcopy
import numpy as np
import sobol_lib as sl
params = {
'Np': 40, # Number of rows and columns in primary nodes
'visSize': 8.0, # Extent of the layer
'ret_rate': 45.0, # Rate in the retina nodes
'ret_amplitude': 45.0, # Amplitude in the retina nodes
'temporal_frequency': 2.0, # Frequency of the retina nodes (Hz)
'lambda_dg': 2.0, # wavelength of drifting grating
'phi_dg': 0.0 # normal direction of grating (degrees)
}
def seed():
return np.random.randint(1000, 10000)
def modified_copy(orig, diff):
"""
Returns a deep copy of dictionary with changes applied.
@param orig original dictionary, will be deep-copied
@param diff copy will be updated with this dict
"""
tmp = deepcopy(orig)
tmp.update(diff)
return tmp
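# Minimal usage sketch (hypothetical values):
# modified_copy({'rows': 40, 'columns': 40}, {'columns': 20})
# -> {'rows': 40, 'columns': 20}; the original dict is left untouched.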
def make_layers():
"""Build list of layers and models for HT Model."""
nrnmod = 'ht_neuron'
# Default parameter values in ht_neuron are for excitatory
# cortical cells. For inhibitory and thalamic cells, we
# have modified parameters from [1], Tables 2, 3.
# To model absence of various intrinsic currents, we set their
# peak conductance to zero. By default, all intrinsic currents
# are active.
#
# g_KL is set to 1.0, the value for the awake state.
# No I_T, I_h in cortical excitatory cells
ctxExPars = {'g_peak_T': 0.0,
'g_peak_h': 0.0}
# But L56 has I_h
ctxExL56Pars = {'g_peak_T': 0.0,
'g_peak_h': 1.0}
# 'spike_duration': 1.0
# No I_T, I_h in cortical inhibitory cells
ctxInPars = {'tau_m': 8.0,
'theta_eq': -53.0,
'tau_theta': 1.0,
'tau_spike': 0.5,
'g_peak_T': 0.0,
'g_peak_h': 0.0}
# Thalamic neurons have no I_KNa
thalPars = {'tau_m': 8.0,
'theta_eq': -53.0,
'tau_theta': 0.75,
'tau_spike': 0.75,
'E_rev_GABA_A': -80.0,
'g_peak_KNa': 0.0}
# Reticular neurons have no I_KNa, I_h
# We assume that the "thalamic" line of Table 2 applies to
# reticular neurons as well.
reticPars = {'tau_m': 8.0,
'theta_eq': -53.0,
'tau_theta': 0.75,
'tau_spike': 0.75,
'g_peak_KNa': 0.0,
'g_peak_h': 0.0}
models = [(nrnmod, 'Relay', thalPars),
(nrnmod, 'Inter', thalPars),
(nrnmod, 'RpNeuron', reticPars)]
# Build lists of cortical models using list comprehension.
models += [(nrnmod, layer + 'pyr', ctxExPars) for layer in ('L23', 'L4')]
models += [(nrnmod, layer + 'pyr', ctxExL56Pars) for layer in ('L56',)]
models += [(nrnmod, layer + 'in', ctxInPars)
for layer in ('L23', 'L4', 'L56')]
# Add synapse models, which differ only in receptor type.
# We first obtain the mapping of receptor names to recptor indices from the
# ht_neuron, then add the synapse model information to the models list.
# Hard coded to be independent of NEST
ht_rc = {u'AMPA': 1, u'GABA_A': 3, u'GABA_B': 4, u'NMDA': 2}
syn_models = [('static_synapse', syn, {'receptor_type': ht_rc[syn]})
for syn in ('AMPA', 'NMDA', 'GABA_A', 'GABA_B')]
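# The comprehension above yields entries such as
# ('static_synapse', 'AMPA', {'receptor_type': 1}),
# i.e. one synapse model per receptor, differing only in receptor_type.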
# Generate all the positions first to avoid overlapping.
#quasi_rand_pos = sl.i4_sobol_generate(3, P.NE + P.NI, seed())
total_number = params['Np'] * params['Np']
quasi_rand_pos = sl.i4_sobol_generate(3, 21*total_number, seed())
print("Number of points: {}".format(len(quasi_rand_pos[0])))
quasi_rand_Vp_h_23_ex = [quasi_rand_pos[0][:2*total_number],
quasi_rand_pos[1][:2*total_number],
quasi_rand_pos[2][:2*total_number]]
quasi_rand_Vp_h_23_in = [quasi_rand_pos[0][2*total_number:3*total_number],
quasi_rand_pos[1][2*total_number:3*total_number],
quasi_rand_pos[2][2*total_number:3*total_number]]
quasi_rand_Vp_h_4_ex = [quasi_rand_pos[0][3*total_number:5*total_number],
quasi_rand_pos[1][3*total_number:5*total_number],
quasi_rand_pos[2][3*total_number:5*total_number]]
quasi_rand_Vp_h_4_in = [quasi_rand_pos[0][5*total_number:6*total_number],
quasi_rand_pos[1][5*total_number:6*total_number],
quasi_rand_pos[2][5*total_number:6*total_number]]
quasi_rand_Vp_h_56_ex = [quasi_rand_pos[0][6*total_number:8*total_number],
quasi_rand_pos[1][6*total_number:8*total_number],
quasi_rand_pos[2][6*total_number:8*total_number]]
quasi_rand_Vp_h_56_in = [quasi_rand_pos[0][8*total_number:9*total_number],
quasi_rand_pos[1][8*total_number:9*total_number],
quasi_rand_pos[2][8*total_number:9*total_number]]
quasi_rand_Vp_v_23_ex = [quasi_rand_pos[0][9*total_number:11*total_number],
quasi_rand_pos[1][9*total_number:11*total_number],
quasi_rand_pos[2][9*total_number:11*total_number]]
quasi_rand_Vp_v_23_in = [quasi_rand_pos[0][11*total_number:12*total_number],
quasi_rand_pos[1][11*total_number:12*total_number],
quasi_rand_pos[2][11*total_number:12*total_number]]
quasi_rand_Vp_v_4_ex = [quasi_rand_pos[0][12*total_number:14*total_number],
quasi_rand_pos[1][12*total_number:14*total_number],
quasi_rand_pos[2][12*total_number:14*total_number]]
quasi_rand_Vp_v_4_in = [quasi_rand_pos[0][14*total_number:15*total_number],
quasi_rand_pos[1][14*total_number:15*total_number],
quasi_rand_pos[2][14*total_number:15*total_number]]
quasi_rand_Vp_v_56_ex = [quasi_rand_pos[0][15*total_number:17*total_number],
quasi_rand_pos[1][15*total_number:17*total_number],
quasi_rand_pos[2][15*total_number:17*total_number]]
quasi_rand_Vp_v_56_in = [quasi_rand_pos[0][17*total_number:18*total_number],
quasi_rand_pos[1][17*total_number:18*total_number],
quasi_rand_pos[2][17*total_number:18*total_number]]
quasi_rand_Tp_relay = [quasi_rand_pos[0][18*total_number:19*total_number],
quasi_rand_pos[1][18*total_number:19*total_number],
quasi_rand_pos[2][18*total_number:19*total_number]]
quasi_rand_Tp_inter = [quasi_rand_pos[0][19*total_number:20*total_number],
quasi_rand_pos[1][19*total_number:20*total_number],
quasi_rand_pos[2][19*total_number:20*total_number]]
quasi_rand_Rp = [quasi_rand_pos[0][20*total_number:21*total_number],
quasi_rand_pos[1][20*total_number:21*total_number],
quasi_rand_pos[2][20*total_number:21*total_number]]
x_extent = 1.0
y_extent = 0.5
z_extent = 1.0
dxVp_h_ex = x_extent / float( 1*total_number )
Vp_h_ex_x_start_pos = - ( ( x_extent - dxVp_h_ex ) / 2.0 ) + 0.6
Vp_h_ex_x_end_pos = ( x_extent - dxVp_h_ex ) / 2.0 + 0.6
dyVp_h_23_ex = y_extent / float( 1*total_number )
Vp_h_23_ex_y_start_pos = - ( ( y_extent - dyVp_h_23_ex ) / 2.0 ) + 1.1
Vp_h_23_ex_y_end_pos = ( y_extent - dyVp_h_23_ex ) / 2.0 + 1.1
dyVp_h_4_ex = y_extent / float( 1*total_number )
Vp_h_4_ex_y_start_pos = - ( ( y_extent - dyVp_h_4_ex ) / 2.0 ) + 0.6
Vp_h_4_ex_y_end_pos = ( y_extent - dyVp_h_4_ex ) / 2.0 + 0.6
dyVp_h_56_ex = y_extent / float( 1*total_number )
Vp_h_56_ex_y_start_pos = - ( ( y_extent - dyVp_h_56_ex ) / 2.0 ) + 0.1
Vp_h_56_ex_y_end_pos = ( y_extent - dyVp_h_56_ex ) / 2.0 + 0.1
dzVp_h_ex = z_extent / float( 1*total_number )
Vp_h_ex_z_start_pos = - ( ( z_extent - dzVp_h_ex ) / 2.0 )
Vp_h_ex_z_end_pos = ( z_extent - dzVp_h_ex ) / 2.0
Vp_h_23_ex_pos = [Vp_h_ex_x_start_pos + quasi_rand_Vp_h_23_ex[0] * (Vp_h_ex_x_end_pos - Vp_h_ex_x_start_pos),
Vp_h_23_ex_y_start_pos + quasi_rand_Vp_h_23_ex[1] * (Vp_h_23_ex_y_end_pos - Vp_h_23_ex_y_start_pos),
Vp_h_ex_z_start_pos + quasi_rand_Vp_h_23_ex[2] * (Vp_h_ex_z_end_pos - Vp_h_ex_z_start_pos)]
Vp_h_4_ex_pos = [Vp_h_ex_x_start_pos + quasi_rand_Vp_h_4_ex[0] * (Vp_h_ex_x_end_pos - Vp_h_ex_x_start_pos),
Vp_h_4_ex_y_start_pos + quasi_rand_Vp_h_4_ex[1] * (Vp_h_4_ex_y_end_pos - Vp_h_4_ex_y_start_pos),
Vp_h_ex_z_start_pos + quasi_rand_Vp_h_4_ex[2] * (Vp_h_ex_z_end_pos - Vp_h_ex_z_start_pos)]
Vp_h_56_ex_pos = [Vp_h_ex_x_start_pos + quasi_rand_Vp_h_56_ex[0] * (Vp_h_ex_x_end_pos - Vp_h_ex_x_start_pos),
Vp_h_56_ex_y_start_pos + quasi_rand_Vp_h_56_ex[1] * (Vp_h_56_ex_y_end_pos - Vp_h_56_ex_y_start_pos),
Vp_h_ex_z_start_pos + quasi_rand_Vp_h_56_ex[2] * (Vp_h_ex_z_end_pos - Vp_h_ex_z_start_pos)]
Vp_h_23_ex_positions = [[Vp_h_23_ex_pos[0][i],
Vp_h_23_ex_pos[1][i],
Vp_h_23_ex_pos[2][i]] for i in range(2*total_number)]
Vp_h_4_ex_positions = [[Vp_h_4_ex_pos[0][i],
Vp_h_4_ex_pos[1][i],
Vp_h_4_ex_pos[2][i]] for i in range(2*total_number)]
Vp_h_56_ex_positions = [[Vp_h_56_ex_pos[0][i],
Vp_h_56_ex_pos[1][i],
Vp_h_56_ex_pos[2][i]] for i in range(2*total_number)]
dxVp_h_in = x_extent / float( 1*total_number )
Vp_h_in_x_start_pos = - ( ( x_extent - dxVp_h_in ) / 2.0 ) + 0.6
Vp_h_in_x_end_pos = ( x_extent - dxVp_h_in ) / 2.0 + 0.6
dyVp_h_23_in = y_extent / float( 1*total_number )
Vp_h_23_in_y_start_pos = - ( ( y_extent - dyVp_h_23_in ) / 2.0 ) + 1.1
Vp_h_23_in_y_end_pos = ( y_extent - dyVp_h_23_in ) / 2.0 + 1.1
dyVp_h_4_in = y_extent / float( 1*total_number )
Vp_h_4_in_y_start_pos = - ( ( y_extent - dyVp_h_4_in ) / 2.0 ) + 0.6
Vp_h_4_in_y_end_pos = ( y_extent - dyVp_h_4_in ) / 2.0 + 0.6
dyVp_h_56_in = y_extent / float( 1*total_number )
Vp_h_56_in_y_start_pos = - ( ( y_extent - dyVp_h_56_in ) / 2.0 ) + 0.1
Vp_h_56_in_y_end_pos = ( y_extent - dyVp_h_56_in ) / 2.0 + 0.1
dzVp_h_in = z_extent / float( 1*total_number )
Vp_h_in_z_start_pos = - ( ( z_extent - dzVp_h_in ) / 2.0 )
Vp_h_in_z_end_pos = ( z_extent - dzVp_h_in ) / 2.0
Vp_h_23_in_pos = [Vp_h_in_x_start_pos + quasi_rand_Vp_h_23_in[0] * (Vp_h_in_x_end_pos - Vp_h_in_x_start_pos),
Vp_h_23_in_y_start_pos + quasi_rand_Vp_h_23_in[1] * (Vp_h_23_in_y_end_pos - Vp_h_23_in_y_start_pos),
Vp_h_in_z_start_pos + quasi_rand_Vp_h_23_in[2] * (Vp_h_in_z_end_pos - Vp_h_in_z_start_pos)]
Vp_h_4_in_pos = [Vp_h_in_x_start_pos + quasi_rand_Vp_h_4_in[0] * (Vp_h_in_x_end_pos - Vp_h_in_x_start_pos),
Vp_h_4_in_y_start_pos + quasi_rand_Vp_h_4_in[1] * (Vp_h_4_in_y_end_pos - Vp_h_4_in_y_start_pos),
Vp_h_in_z_start_pos + quasi_rand_Vp_h_4_in[2] * (Vp_h_in_z_end_pos - Vp_h_in_z_start_pos)]
Vp_h_56_in_pos = [Vp_h_in_x_start_pos + quasi_rand_Vp_h_56_in[0] * (Vp_h_in_x_end_pos - Vp_h_in_x_start_pos),
Vp_h_56_in_y_start_pos + quasi_rand_Vp_h_56_in[1] * (Vp_h_56_in_y_end_pos - Vp_h_56_in_y_start_pos),
Vp_h_in_z_start_pos + quasi_rand_Vp_h_56_in[2] * (Vp_h_in_z_end_pos - Vp_h_in_z_start_pos)]
Vp_h_23_in_positions = [[Vp_h_23_in_pos[0][i],
Vp_h_23_in_pos[1][i],
Vp_h_23_in_pos[2][i]] for i in range(1*total_number)]
Vp_h_4_in_positions = [[Vp_h_4_in_pos[0][i],
Vp_h_4_in_pos[1][i],
Vp_h_4_in_pos[2][i]] for i in range(1*total_number)]
Vp_h_56_in_positions = [[Vp_h_56_in_pos[0][i],
Vp_h_56_in_pos[1][i],
Vp_h_56_in_pos[2][i]] for i in range(1*total_number)]
dxVp_v_ex = x_extent / float( 1*total_number )
Vp_v_ex_x_start_pos = - ( ( x_extent - dxVp_v_ex ) / 2.0 ) - 0.6
Vp_v_ex_x_end_pos = ( x_extent - dxVp_v_ex ) / 2.0 - 0.6
dyVp_v_23_ex = y_extent / float( 1*total_number )
Vp_v_23_ex_y_start_pos = - ( ( y_extent - dyVp_v_23_ex ) / 2.0 ) + 1.1
Vp_v_23_ex_y_end_pos = ( y_extent - dyVp_v_23_ex ) / 2.0 + 1.1
dyVp_v_4_ex = y_extent / float( 1*total_number )
Vp_v_4_ex_y_start_pos = - ( ( y_extent - dyVp_v_4_ex ) / 2.0 ) + 0.6
Vp_v_4_ex_y_end_pos = ( y_extent - dyVp_v_4_ex ) / 2.0 + 0.6
dyVp_v_56_ex = y_extent / float( 1*total_number )
Vp_v_56_ex_y_start_pos = - ( ( y_extent - dyVp_v_56_ex ) / 2.0 ) + 0.1
Vp_v_56_ex_y_end_pos = ( y_extent - dyVp_v_56_ex ) / 2.0 + 0.1
dzVp_v_ex = z_extent / float( 1*total_number )
Vp_v_ex_z_start_pos = - ( ( z_extent - dzVp_v_ex ) / 2.0 )
Vp_v_ex_z_end_pos = ( z_extent - dzVp_v_ex ) / 2.0
Vp_v_23_ex_pos = [Vp_v_ex_x_start_pos + quasi_rand_Vp_v_23_ex[0] * (Vp_v_ex_x_end_pos - Vp_v_ex_x_start_pos),
Vp_v_23_ex_y_start_pos + quasi_rand_Vp_v_23_ex[1] * (Vp_v_23_ex_y_end_pos - Vp_v_23_ex_y_start_pos),
Vp_v_ex_z_start_pos + quasi_rand_Vp_v_23_ex[2] * (Vp_v_ex_z_end_pos - Vp_v_ex_z_start_pos)]
Vp_v_4_ex_pos = [Vp_v_ex_x_start_pos + quasi_rand_Vp_v_4_ex[0] * (Vp_v_ex_x_end_pos - Vp_v_ex_x_start_pos),
Vp_v_4_ex_y_start_pos + quasi_rand_Vp_v_4_ex[1] * (Vp_v_4_ex_y_end_pos - Vp_v_4_ex_y_start_pos),
Vp_v_ex_z_start_pos + quasi_rand_Vp_v_4_ex[2] * (Vp_v_ex_z_end_pos - Vp_v_ex_z_start_pos)]
Vp_v_56_ex_pos = [Vp_v_ex_x_start_pos + quasi_rand_Vp_v_56_ex[0] * (Vp_v_ex_x_end_pos - Vp_v_ex_x_start_pos),
Vp_v_56_ex_y_start_pos + quasi_rand_Vp_v_56_ex[1] * (Vp_v_56_ex_y_end_pos - Vp_v_56_ex_y_start_pos),
Vp_v_ex_z_start_pos + quasi_rand_Vp_v_56_ex[2] * (Vp_v_ex_z_end_pos - Vp_v_ex_z_start_pos)]
Vp_v_23_ex_positions = [[Vp_v_23_ex_pos[0][i],
Vp_v_23_ex_pos[1][i],
Vp_v_23_ex_pos[2][i]] for i in range(2*total_number)]
Vp_v_4_ex_positions = [[Vp_v_4_ex_pos[0][i],
Vp_v_4_ex_pos[1][i],
Vp_v_4_ex_pos[2][i]] for i in range(2*total_number)]
Vp_v_56_ex_positions = [[Vp_v_56_ex_pos[0][i],
Vp_v_56_ex_pos[1][i],
Vp_v_56_ex_pos[2][i]] for i in range(2*total_number)]
dxVp_v_in = x_extent / float( 1*total_number )
Vp_v_in_x_start_pos = - ( ( x_extent - dxVp_v_in ) / 2.0 ) - 0.6
Vp_v_in_x_end_pos = ( x_extent - dxVp_v_in ) / 2.0 - 0.6
dyVp_v_23_in = y_extent / float( 1*total_number )
Vp_v_23_in_y_start_pos = - ( ( y_extent - dyVp_v_23_in ) / 2.0 ) + 1.1
Vp_v_23_in_y_end_pos = ( y_extent - dyVp_v_23_in ) / 2.0 + 1.1
dyVp_v_4_in = y_extent / float( 1*total_number )
Vp_v_4_in_y_start_pos = - ( ( y_extent - dyVp_v_4_in ) / 2.0 ) + 0.6
Vp_v_4_in_y_end_pos = ( y_extent - dyVp_v_4_in ) / 2.0 + 0.6
dyVp_v_56_in = y_extent / float( 1*total_number )
Vp_v_56_in_y_start_pos = - ( ( y_extent - dyVp_v_56_in ) / 2.0 ) + 0.1
Vp_v_56_in_y_end_pos = ( y_extent - dyVp_v_56_in ) / 2.0 + 0.1
dzVp_v_in = z_extent / float( 1*total_number )
Vp_v_in_z_start_pos = - ( ( z_extent - dzVp_v_in ) / 2.0 )
Vp_v_in_z_end_pos = ( z_extent - dzVp_v_in ) / 2.0
Vp_v_23_in_pos = [Vp_v_in_x_start_pos + quasi_rand_Vp_v_23_in[0] * (Vp_v_in_x_end_pos - Vp_v_in_x_start_pos),
Vp_v_23_in_y_start_pos + quasi_rand_Vp_v_23_in[1] * (Vp_v_23_in_y_end_pos - Vp_v_23_in_y_start_pos),
Vp_v_in_z_start_pos + quasi_rand_Vp_v_23_in[2] * (Vp_v_in_z_end_pos - Vp_v_in_z_start_pos)]
Vp_v_4_in_pos = [Vp_v_in_x_start_pos + quasi_rand_Vp_v_4_in[0] * (Vp_v_in_x_end_pos - Vp_v_in_x_start_pos),
Vp_v_4_in_y_start_pos + quasi_rand_Vp_v_4_in[1] * (Vp_v_4_in_y_end_pos - Vp_v_4_in_y_start_pos),
Vp_v_in_z_start_pos + quasi_rand_Vp_v_4_in[2] * (Vp_v_in_z_end_pos - Vp_v_in_z_start_pos)]
Vp_v_56_in_pos = [Vp_v_in_x_start_pos + quasi_rand_Vp_v_56_in[0] * (Vp_v_in_x_end_pos - Vp_v_in_x_start_pos),
Vp_v_56_in_y_start_pos + quasi_rand_Vp_v_56_in[1] * (Vp_v_56_in_y_end_pos - Vp_v_56_in_y_start_pos),
Vp_v_in_z_start_pos + quasi_rand_Vp_v_56_in[2] * (Vp_v_in_z_end_pos - Vp_v_in_z_start_pos)]
Vp_v_23_in_positions = [[Vp_v_23_in_pos[0][i],
Vp_v_23_in_pos[1][i],
Vp_v_23_in_pos[2][i]] for i in range(1*total_number)]
Vp_v_4_in_positions = [[Vp_v_4_in_pos[0][i],
Vp_v_4_in_pos[1][i],
Vp_v_4_in_pos[2][i]] for i in range(1*total_number)]
Vp_v_56_in_positions = [[Vp_v_56_in_pos[0][i],
Vp_v_56_in_pos[1][i],
Vp_v_56_in_pos[2][i]] for i in range(1*total_number)]
dxTp_relay = x_extent / float( total_number )
Tp_relay_x_start_pos = - ( ( x_extent - dxTp_relay ) / 2.0 )
Tp_relay_x_end_pos = ( x_extent - dxTp_relay ) / 2.0
dyTp_relay = y_extent / float( total_number )
Tp_relay_y_start_pos = - ( ( y_extent - dyTp_relay ) / 2.0 ) - 0.7
Tp_relay_y_end_pos = ( y_extent - dyTp_relay ) / 2.0 - 0.7
dzTp_relay = z_extent / float( total_number )
Tp_relay_z_start_pos = - ( ( z_extent - dzTp_relay ) / 2.0 )
Tp_relay_z_end_pos = ( z_extent - dzTp_relay ) / 2.0
Tp_relay_pos = [Tp_relay_x_start_pos + quasi_rand_Tp_relay[0] * (Tp_relay_x_end_pos - Tp_relay_x_start_pos),
Tp_relay_y_start_pos + quasi_rand_Tp_relay[1] * (Tp_relay_y_end_pos - Tp_relay_y_start_pos),
Tp_relay_z_start_pos + quasi_rand_Tp_relay[2] * (Tp_relay_z_end_pos - Tp_relay_z_start_pos)]
Tp_relay_positions = [[Tp_relay_pos[0][i],
Tp_relay_pos[1][i],
Tp_relay_pos[2][i]] for i in range(total_number)]
dxTp_inter = x_extent / float( total_number )
Tp_inter_x_start_pos = - ( ( x_extent - dxTp_inter ) / 2.0 )
Tp_inter_x_end_pos = ( x_extent - dxTp_inter ) / 2.0
dyTp_inter = y_extent / float( total_number )
Tp_inter_y_start_pos = - ( ( y_extent - dyTp_inter ) / 2.0 ) - 0.7
Tp_inter_y_end_pos = ( y_extent - dyTp_inter ) / 2.0 - 0.7
dzTp_inter = z_extent / float( total_number )
Tp_inter_z_start_pos = - ( ( z_extent - dzTp_inter ) / 2.0 )
Tp_inter_z_end_pos = ( z_extent - dzTp_inter ) / 2.0
Tp_inter_pos = [Tp_inter_x_start_pos + quasi_rand_Tp_inter[0] * (Tp_inter_x_end_pos - Tp_inter_x_start_pos),
Tp_inter_y_start_pos + quasi_rand_Tp_inter[1] * (Tp_inter_y_end_pos - Tp_inter_y_start_pos),
Tp_inter_z_start_pos + quasi_rand_Tp_inter[2] * (Tp_inter_z_end_pos - Tp_inter_z_start_pos)]
Tp_inter_positions = [[Tp_inter_pos[0][i],
Tp_inter_pos[1][i],
Tp_inter_pos[2][i]] for i in range(total_number)]
dxRp = x_extent / float( total_number )
Rp_x_start_pos = - ( ( x_extent - dxRp ) / 2.0 )
Rp_x_end_pos = ( x_extent - dxRp ) / 2.0
dyRp = y_extent / float( total_number )
Rp_y_start_pos = - ( ( y_extent - dyRp ) / 2.0 ) - 1.2
Rp_y_end_pos = ( y_extent - dyRp ) / 2.0 - 1.2
dzRp = z_extent / float( total_number )
Rp_z_start_pos = - ( ( z_extent - dzRp ) / 2.0 )
Rp_z_end_pos = ( z_extent - dzRp ) / 2.0
Rp_pos = [Rp_x_start_pos + quasi_rand_Rp[0] * (Rp_x_end_pos - Rp_x_start_pos),
Rp_y_start_pos + quasi_rand_Rp[1] * (Rp_y_end_pos - Rp_y_start_pos),
Rp_z_start_pos + quasi_rand_Rp[2] * (Rp_z_end_pos - Rp_z_start_pos)]
Rp_positions = [[Rp_pos[0][i],
Rp_pos[1][i],
Rp_pos[2][i]] for i in range(total_number)]
# now layers, primary and secondary pathways
# layerPropsP = {'rows': params['Np'],
# 'columns': params['Np'],
# 'extent': [params['visSize'], params['visSize']],
# # 'center': [3, -1], # For testing purposes
# 'edge_wrap': True}
layerPropsPVp_h_23_ex = {'positions': Vp_h_23_ex_positions,
'neuronType': 'excitatory',
'extent': [x_extent, y_extent, z_extent],
'center': [0.6, 1.1, 0.0],
'edge_wrap': True}
layerPropsPVp_h_23_in = {'positions': Vp_h_23_in_positions,
'neuronType': 'inhibitory',
'extent': [x_extent, y_extent, z_extent],
'center': [0.6, 1.1, 0.0],
'edge_wrap': True}
layerPropsPVp_h_4_ex = {'positions': Vp_h_4_ex_positions,
'neuronType': 'excitatory',
'extent': [x_extent, y_extent, z_extent],
'center': [0.6, 0.6, 0.0],
'edge_wrap': True}
layerPropsPVp_h_4_in = {'positions': Vp_h_4_in_positions,
'neuronType': 'inhibitory',
'extent': [x_extent, y_extent, z_extent],
'center': [0.6, 0.6, 0.0],
'edge_wrap': True}
layerPropsPVp_h_56_ex = {'positions': Vp_h_56_ex_positions,
'neuronType': 'excitatory',
'extent': [x_extent, y_extent, z_extent],
'center': [0.6, 0.1, 0.0],
'edge_wrap': True}
layerPropsPVp_h_56_in = {'positions': Vp_h_56_in_positions,
'neuronType': 'inhibitory',
'extent': [x_extent, y_extent, z_extent],
'center': [0.6, 0.1, 0.0],
'edge_wrap': True}
layerPropsPVp_v_23_ex = {'positions': Vp_v_23_ex_positions,
'neuronType': 'excitatory',
'extent': [x_extent, y_extent, z_extent],
'center': [-0.6, 1.1, 0.0],
'edge_wrap': True}
layerPropsPVp_v_23_in = {'positions': Vp_v_23_in_positions,
'neuronType': 'inhibitory',
'extent': [x_extent, y_extent, z_extent],
'center': [-0.6, 1.1, 0.0],
'edge_wrap': True}
layerPropsPVp_v_4_ex = {'positions': Vp_v_4_ex_positions,
'neuronType': 'excitatory',
'extent': [x_extent, y_extent, z_extent],
'center': [-0.6, 0.6, 0.0],
'edge_wrap': True}
layerPropsPVp_v_4_in = {'positions': Vp_v_4_in_positions,
'neuronType': 'inhibitory',
'extent': [x_extent, y_extent, z_extent],
'center': [-0.6, 0.6, 0.0],
'edge_wrap': True}
layerPropsPVp_v_56_ex = {'positions': Vp_v_56_ex_positions,
'neuronType': 'excitatory',
'extent': [x_extent, y_extent, z_extent],
'center': [-0.6, 0.1, 0.0],
'edge_wrap': True}
layerPropsPVp_v_56_in = {'positions': Vp_v_56_in_positions,
'neuronType': 'inhibitory',
'extent': [x_extent, y_extent, z_extent],
'center': [-0.6, 0.1, 0.0],
'edge_wrap': True}
layerPropsPTp_relay = {'positions': Tp_relay_positions,
'extent': [x_extent, y_extent, z_extent],
'center': [0., -0.7, 0.],
'edge_wrap': True}
layerPropsPTp_inter = {'positions': Tp_inter_positions,
'extent': [x_extent, y_extent, z_extent],
'center': [0., -0.7, 0.],
'edge_wrap': True}
layerPropsPRp = {'positions': Rp_positions,
'extent': [x_extent, y_extent, z_extent],
'center': [0., -1.2, 0.],
'edge_wrap': True}
layers = [('Tp_relay', modified_copy(layerPropsPTp_relay, {'elements': 'Inter'})),
('Tp_inter', modified_copy(layerPropsPTp_inter, {'elements': 'Inter'})),
('Rp', modified_copy(layerPropsPRp, {'elements': 'RpNeuron'})),
('Vp_h_23_ex', modified_copy(layerPropsPVp_h_23_ex, {'elements':
'L23pyr'})),
('Vp_h_23_in', modified_copy(layerPropsPVp_h_23_in, {'elements':
'L23in'})),
('Vp_h_4_ex', modified_copy(layerPropsPVp_h_4_ex, {'elements':
'L4pyr'})),
('Vp_h_4_in', modified_copy(layerPropsPVp_h_4_in, {'elements':
'L4in'})),
('Vp_h_56_ex', modified_copy(layerPropsPVp_h_56_ex, {'elements':
'L56pyr'})),
('Vp_h_56_in', modified_copy(layerPropsPVp_h_56_in, {'elements':
'L56in'})),
('Vp_v_23_ex', modified_copy(layerPropsPVp_v_23_ex, {'elements':
'L23pyr'})),
('Vp_v_23_in', modified_copy(layerPropsPVp_v_23_in, {'elements':
'L23in'})),
('Vp_v_4_ex', modified_copy(layerPropsPVp_v_4_ex, {'elements':
'L4pyr'})),
('Vp_v_4_in', modified_copy(layerPropsPVp_v_4_in, {'elements':
'L4in'})),
('Vp_v_56_ex', modified_copy(layerPropsPVp_v_56_ex, {'elements':
'L56pyr'})),
('Vp_v_56_in', modified_copy(layerPropsPVp_v_56_in, {'elements':
'L56in'}))]
return layers, models, syn_models
def make_connections():
"""
Return list of dictionaries specifying connectivity.
NOTE: Connectivity is modified from Hill-Tononi for simplicity.
"""
# scaling parameters from grid elements to visual angle
dpcP = params['visSize'] / (params['Np'] - 1)
# dpcP = 8.0 / (params['Np'] - 1)
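# With the defaults above (visSize = 8.0, Np = 40) this gives
# dpcP = 8.0 / 39 ~ 0.205 degrees per grid element, so a mask radius of
# 12.0 * dpcP corresponds to roughly 2.5 degrees of visual angle.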
# ---------- PRIMARY PATHWAY ------------------------------------
ccConnections = []
ccxConnections = []
ctConnections = []
horIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"spherical": {"radius": 12.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpcP}},
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23pyr"}, 'synapse_model': 'NMDA'},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L4pyr" },
"mask" : {"spherical": {"radius": 7.0 * dpcP}}},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L4in" },
"mask" : {"spherical": {"radius": 7.0 * dpcP}}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56pyr" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56in" }}]:
ndict = horIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
verIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"spherical": {"radius": 2.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 1.0, "sigma": 7.5 * dpcP}},
"weights": 2.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L56pyr"}, "weights": 1.0},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L56pyr"}, "weights": 1.0, 'synapse_model': 'NMDA'},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L56in" }, "weights": 1.0},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L23in" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4in" }}]:
ndict = verIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
intraInhBase = {"connection_type": "divergent",
"synapse_model": "GABA_A",
"mask": {"spherical": {"radius": 7.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpcP}},
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23in"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23in"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L4in" }, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L4in" }, "targets": {"model": "L4in" }},
{"sources": {"model": "L56in"}, "targets": {"model": "L56pyr"}},
{"sources": {"model": "L56in"}, "targets": {"model": "L56in" }}]:
ndict = intraInhBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
ccxConnections.append(ndict)
intraInhBaseB = {"connection_type": "divergent",
"synapse_model": "GABA_B",
"mask": {"spherical": {"radius": 1.0 * dpcP}},
"kernel": 0.3,
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23in"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L4in" }, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L56in"}, "targets": {"model": "L56pyr" }}]:
ndict = intraInhBaseB.copy()
ndict.update(conn)
ccConnections.append(ndict)
ccxConnections.append(ndict)
corThalBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"spherical": {"radius": 5.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.5, "sigma": 7.5 * dpcP}},
"weights": 1.0,
"delays": {"uniform": {"min": 7.5, "max": 8.5}}}
for conn in [{"sources": {"model": "L56pyr"}, "targets": {"model": "Relay" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "Inter" }}]:
ndict = corThalBase.copy()
ndict.update(conn)
ctConnections.append(ndict)
corRet = corThalBase.copy()
corRet.update({"sources": {"model": "L56pyr"}, "targets": {"model": "RpNeuron"}, "weights": 2.5})
# build complete list of connections, build populations names
allconns = []
#! Cortico-cortical, same orientation
# [allconns.append(['Vp_h','Vp_h',c]) for c in ccConnections]
# [allconns.append(['Vp_v','Vp_v',c]) for c in ccConnections]
[allconns.append(['Vp_h_23_ex','Vp_h_23_ex',c]) for c in ccConnections] ####
#[allconns.append(['Vp_h_23_ex','Vp_h_4_ex',c]) for c in ccConnections]
[allconns.append(['Vp_h_23_ex','Vp_h_56_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_4_ex','Vp_h_23_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_4_ex','Vp_h_4_ex',c]) for c in ccConnections] ####
#[allconns.append(['Vp_h_4_ex','Vp_h_56_ex',c]) for c in ccConnections]
[allconns.append(['Vp_h_56_ex','Vp_h_56_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_56_ex','Vp_h_23_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_56_ex','Vp_h_4_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_23_in','Vp_h_23_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_h_23_in','Vp_h_4_in',c]) for c in ccConnections]
#[allconns.append(['Vp_h_23_in','Vp_h_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_h_4_in','Vp_h_4_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_h_4_in','Vp_h_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_h_56_in','Vp_h_56_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_23_in','Vp_h_23_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_4_in','Vp_h_4_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_56_in','Vp_h_56_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_23_ex','Vp_h_23_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_h_23_ex','Vp_h_4_in',c]) for c in ccConnections]
[allconns.append(['Vp_h_23_ex','Vp_h_56_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_4_ex','Vp_h_23_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_4_ex','Vp_h_4_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_h_4_ex','Vp_h_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_h_56_ex','Vp_h_56_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_56_ex','Vp_h_23_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_h_56_ex','Vp_h_4_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_23_ex','Vp_v_23_ex',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_23_ex','Vp_v_4_ex',c]) for c in ccConnections]
[allconns.append(['Vp_v_23_ex','Vp_v_56_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_4_ex','Vp_v_4_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_4_ex','Vp_v_23_ex',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_4_ex','Vp_v_56_ex',c]) for c in ccConnections]
[allconns.append(['Vp_v_56_ex','Vp_v_56_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_56_ex','Vp_v_23_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_56_ex','Vp_v_4_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_23_in','Vp_v_23_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_23_in','Vp_v_4_in',c]) for c in ccConnections]
#[allconns.append(['Vp_v_23_in','Vp_v_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_v_4_in','Vp_v_4_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_4_in','Vp_v_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_v_56_in','Vp_v_56_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_23_in','Vp_v_23_ex',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_23_in','Vp_v_4_in',c]) for c in ccConnections]
#[allconns.append(['Vp_v_23_in','Vp_v_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_v_4_in','Vp_v_4_ex',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_4_in','Vp_v_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_v_56_in','Vp_v_56_ex',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_23_ex','Vp_v_23_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_23_ex','Vp_v_4_in',c]) for c in ccConnections]
[allconns.append(['Vp_v_23_ex','Vp_v_56_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_4_ex','Vp_v_4_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_4_ex','Vp_v_23_in',c]) for c in ccConnections] ####
#[allconns.append(['Vp_v_4_ex','Vp_v_56_in',c]) for c in ccConnections]
[allconns.append(['Vp_v_56_ex','Vp_v_56_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_56_ex','Vp_v_23_in',c]) for c in ccConnections] ####
[allconns.append(['Vp_v_56_ex','Vp_v_4_in',c]) for c in ccConnections] ####
#! Cortico-cortical, cross-orientation
# [allconns.append(['Vp_h','Vp_v',c]) for c in ccxConnections]
# [allconns.append(['Vp_v','Vp_h',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_ex','Vp_v_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_ex','Vp_v_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_ex','Vp_v_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_ex','Vp_v_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_ex','Vp_v_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_ex','Vp_v_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_ex','Vp_v_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_ex','Vp_v_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_ex','Vp_v_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_ex','Vp_v_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_ex','Vp_v_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_ex','Vp_v_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_ex','Vp_v_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_ex','Vp_v_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_ex','Vp_v_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_ex','Vp_v_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_ex','Vp_v_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_ex','Vp_v_56_in',c]) for c in ccxConnections]
[allconns.append(['Vp_h_23_in','Vp_v_23_ex',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_h_23_in','Vp_v_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_in','Vp_v_56_ex',c]) for c in ccxConnections]
[allconns.append(['Vp_h_23_in','Vp_v_23_in',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_h_23_in','Vp_v_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_23_in','Vp_v_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_in','Vp_v_23_ex',c]) for c in ccxConnections]
[allconns.append(['Vp_h_4_in','Vp_v_4_ex',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_h_4_in','Vp_v_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_4_in','Vp_v_23_in',c]) for c in ccxConnections]
[allconns.append(['Vp_h_4_in','Vp_v_4_in',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_h_4_in','Vp_v_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_in','Vp_v_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_in','Vp_v_4_ex',c]) for c in ccxConnections]
[allconns.append(['Vp_h_56_in','Vp_v_56_ex',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_h_56_in','Vp_v_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_h_56_in','Vp_v_4_in',c]) for c in ccxConnections]
[allconns.append(['Vp_h_56_in','Vp_v_56_in',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_v_23_ex','Vp_h_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_23_ex','Vp_h_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_23_ex','Vp_h_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_23_ex','Vp_h_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_23_ex','Vp_h_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_23_ex','Vp_h_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_ex','Vp_h_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_ex','Vp_h_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_ex','Vp_h_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_ex','Vp_h_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_ex','Vp_h_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_ex','Vp_h_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_ex','Vp_h_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_ex','Vp_h_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_ex','Vp_h_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_ex','Vp_h_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_ex','Vp_h_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_ex','Vp_h_56_in',c]) for c in ccxConnections]
[allconns.append(['Vp_v_23_in','Vp_h_23_ex',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_v_23_in','Vp_h_4_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_23_in','Vp_h_56_ex',c]) for c in ccxConnections]
[allconns.append(['Vp_v_23_in','Vp_h_23_in',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_v_23_in','Vp_h_4_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_23_in','Vp_h_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_in','Vp_h_23_ex',c]) for c in ccxConnections]
[allconns.append(['Vp_v_4_in','Vp_h_4_ex',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_v_4_in','Vp_h_56_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_4_in','Vp_h_23_in',c]) for c in ccxConnections]
[allconns.append(['Vp_v_4_in','Vp_h_4_in',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_v_4_in','Vp_h_56_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_in','Vp_h_23_ex',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_in','Vp_h_4_ex',c]) for c in ccxConnections]
[allconns.append(['Vp_v_56_in','Vp_h_56_ex',c]) for c in ccxConnections] ####
#[allconns.append(['Vp_v_56_in','Vp_h_23_in',c]) for c in ccxConnections]
#[allconns.append(['Vp_v_56_in','Vp_h_4_in',c]) for c in ccxConnections]
[allconns.append(['Vp_v_56_in','Vp_h_56_in',c]) for c in ccxConnections] ####
#! Cortico-thalamic connections
# [allconns.append(['Vp_h','Tp',c]) for c in ctConnections]
# [allconns.append(['Vp_v','Tp',c]) for c in ctConnections]
#[allconns.append(['Vp_h_23_ex','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_h_4_ex','Tp_relay',c]) for c in ctConnections]
[allconns.append(['Vp_h_56_ex','Tp_relay',c]) for c in ctConnections] ####
#[allconns.append(['Vp_h_23_in','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_h_4_in','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_h_56_in','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_h_23_ex','Tp_inter',c]) for c in ctConnections]
#[allconns.append(['Vp_h_4_ex','Tp_inter',c]) for c in ctConnections]
[allconns.append(['Vp_h_56_ex','Tp_inter',c]) for c in ctConnections] ####
#[allconns.append(['Vp_h_23_in','Tp_inter',c]) for c in ctConnections]
#[allconns.append(['Vp_h_4_in','Tp_inter',c]) for c in ctConnections]
#[allconns.append(['Vp_h_56_in','Tp_inter',c]) for c in ctConnections]
#[allconns.append(['Vp_v_23_ex','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_v_4_ex','Tp_relay',c]) for c in ctConnections]
[allconns.append(['Vp_v_56_ex','Tp_relay',c]) for c in ctConnections] ####
#[allconns.append(['Vp_v_23_in','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_v_4_in','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_v_56_in','Tp_relay',c]) for c in ctConnections]
#[allconns.append(['Vp_v_23_ex','Tp_inter',c]) for c in ctConnections]
#[allconns.append(['Vp_v_4_ex','Tp_inter',c]) for c in ctConnections]
[allconns.append(['Vp_v_56_ex','Tp_inter',c]) for c in ctConnections] ####
#[allconns.append(['Vp_v_23_in','Tp_inter',c]) for c in ctConnections]
#[allconns.append(['Vp_v_4_in','Tp_inter',c]) for c in ctConnections]
#[allconns.append(['Vp_v_56_in','Tp_inter',c]) for c in ctConnections]
# [allconns.append(['Vp_h','Rp',c]) for c in [corRet]]
# [allconns.append(['Vp_v','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_h_23_ex','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_h_4_ex','Rp',c]) for c in [corRet]]
[allconns.append(['Vp_h_56_ex','Rp',c]) for c in [corRet]] ####
#[allconns.append(['Vp_h_23_in','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_h_4_in','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_h_56_in','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_v_23_ex','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_v_4_ex','Rp',c]) for c in [corRet]]
[allconns.append(['Vp_v_56_ex','Rp',c]) for c in [corRet]] ####
#[allconns.append(['Vp_v_23_in','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_v_4_in','Rp',c]) for c in [corRet]]
#[allconns.append(['Vp_v_56_in','Rp',c]) for c in [corRet]]
#! Thalamo-cortical connections
thalCorRect = {"connection_type": "convergent",
"sources": {"model": "Relay"},
"synapse_model": "AMPA",
"weights": 5.0,
"delays": {"uniform": {"min": 2.75, "max": 3.25}}}
#! Horizontally tuned
thalCorRect.update({"mask": {"box": {"lower_left" : [-0.5*dpcP, -0.25*dpcP, -0.5*dpcP],
"upper_right": [ 0.5*dpcP, 0.25*dpcP, 0.5*dpcP]}}})
#{"lower_left" : [-4.05*dpcP, -1.05*dpcP, -4.05*dpcP],
# "upper_right": [ 4.05*dpcP, 1.05*dpcP, 4.05*dpcP]}}})
for conn in [{"targets": {"model": "L4pyr" }, "kernel": 0.5},
{"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
thalCorRect.update(conn)
#allconns.append(['Tp','Vp_h', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_h_23_ex', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_h_4_ex', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_h_56_ex', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_h_23_in', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_h_4_in', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_h_56_in', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_h_23_ex', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_h_4_ex', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_h_56_ex', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_h_23_in', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_h_4_in', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_h_56_in', thalCorRect.copy()])
#! Vertically tuned
thalCorRect.update({"mask": {"box": {"lower_left" : [-0.25*dpcP, -0.5*dpcP, -0.25*dpcP],
"upper_right": [ 0.25*dpcP, 0.5*dpcP, 0.25*dpcP]}}})
#{"lower_left" : [-1.05*dpcP, -4.05*dpcP, -1.05*dpcP],
#"upper_right": [ 1.05*dpcP, 4.05*dpcP, 1.05*dpcP]}}})
for conn in [{"targets": {"model": "L4pyr" }, "kernel": 0.5},
{"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
thalCorRect.update(conn)
#allconns.append(['Tp','Vp_v', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_v_23_ex', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_v_4_ex', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_v_56_ex', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_v_23_in', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_v_4_in', thalCorRect.copy()])
allconns.append(['Tp_relay','Vp_v_56_in', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_v_23_ex', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_v_4_ex', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_v_56_ex', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_v_23_in', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_v_4_in', thalCorRect.copy()])
allconns.append(['Tp_inter','Vp_v_56_in', thalCorRect.copy()])
#! Diffuse connections
thalCorDiff = {"connection_type": "divergent",
"sources": {"model": "Relay"},
"synapse_model": "AMPA",
"weights": 5.0,
"mask": {"spherical": {"radius": 5.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 0.1, "sigma": 7.5 * dpcP}},
"delays": {"uniform": {"min": 2.75, "max": 3.25}}}
for conn in [{"targets": {"model": "L4in" }},
{"targets": {"model": "L56in"}}]:
thalCorDiff.update(conn)
#allconns.append(['Tp','Vp_h', thalCorDiff.copy()])
#allconns.append(['Tp','Vp_v', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_h_23_ex', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_h_4_ex', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_h_56_ex', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_h_23_in', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_h_4_in', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_h_56_in', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_h_23_ex', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_h_4_ex', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_h_56_ex', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_h_23_in', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_h_4_in', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_h_56_in', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_v_23_ex', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_v_4_ex', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_v_56_ex', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_v_23_in', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_v_4_in', thalCorDiff.copy()])
allconns.append(['Tp_relay','Vp_v_56_in', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_v_23_ex', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_v_4_ex', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_v_56_ex', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_v_23_in', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_v_4_in', thalCorDiff.copy()])
allconns.append(['Tp_inter','Vp_v_56_in', thalCorDiff.copy()])
#! Thalamic connections
thalBase = {"connection_type": "divergent",
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for src, tgt, conn in [('Tp_relay', 'Rp', {"sources": {"model": "Relay"},
"synapse_model": "AMPA",
"mask": {"spherical": {"radius": 2.0 * dpcP}},
"kernel": {"gaussian": {"p_center": 1.0, "sigma": 7.5 * dpcP}},
"weights": 2.0}),
('Tp_inter', 'Tp_relay', {"sources": {"model": "Inter"},
"targets": {"model": "Relay"}, "synapse_model": "GABA_A",
"mask": {"spherical": {"radius": 2.0 * dpcP}},
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpcP}}}),
('Tp_inter', 'Tp_inter', {"sources": {"model": "Inter"},
"targets": {"model": "Inter"}, "synapse_model": "GABA_A",
"mask": {"spherical": {"radius": 2.0 * dpcP}},
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp_relay', {"targets": {"model": "Relay"},
"mask": {"spherical": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_A",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.15, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp_relay', {"targets": {"model": "Relay"},
"mask": {"spherical": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_B",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp_inter', {"targets": {"model": "Inter"},
"mask": {"spherical": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_A",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.15, "sigma": 7.5 * dpcP}}}),
('Rp', 'Tp_inter', {"targets": {"model": "Inter"},
"mask": {"spherical": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_B",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpcP}}}),
('Rp', 'Rp', {"mask": {"spherical": {"radius": 12.0 * dpcP}}, "synapse_model": "GABA_B",
"weights": 1.0,
"kernel": {"gaussian": {"p_center": 0.5, "sigma": 7.5 * dpcP}}})]:
thal = thalBase.copy()
thal.update(conn)
allconns.append([src,tgt,thal])
# Now fix Gaussians
for conn in allconns:
#print(conn)
cdict = conn[2]
kern = cdict["kernel"]
if isinstance(kern, dict) and "gaussian" in kern:
assert(cdict["connection_type"] == "divergent")
# find correct spatial-to-grid factor, depends on target (no Gaussian convergent conns.)
lam = dpcS if conn[1][:2] in ('Ts', 'Rs', 'Vs') else dpcP
# get mask size, assume here all are spherical, radius is r * lam
assert("spherical" in cdict["mask"])
r = cdict["mask"]["spherical"]["radius"] / lam
# get current sigma, which is w * lam
sig = kern["gaussian"]["sigma"]
# compute new sigma
nsig = (2*r+1)*lam/(2*np.pi)*np.sqrt(0.5*sig/lam)
# set new sigma
kern["gaussian"]["sigma"] = nsig
# print '%10.2f -> %10.2f (lam = %10.2f)' % (sig, nsig, lam)
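# Worked example (values from intraInhBase above, target in a Vp layer so
# lam = dpcP): mask radius 7.0 * dpcP and sigma 7.5 * dpcP give r = 7.0 and
# sig/lam = 7.5, hence nsig = 15 * lam / (2*pi) * sqrt(0.5 * 7.5) ~ 4.62 * lam.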
# Now fix masks
for conn in allconns:
cdict = conn[2]
mask = cdict["mask"]
if "spherical" in mask:
# find correct spatial-to-grid factor
if cdict["connection_type"] == "divergent":
lam = dpcS if conn[1][:2] in ('Ts', 'Rs', 'Vs') else dpcP
else: # convergent, look at source
lam = dpcS if conn[0][:2] in ('Ts', 'Rs', 'Vs') else dpcP
# radius in grid units
r = mask["spherical"]["radius"] / lam
# corner dislocation from center for edge length 2r+1, in spatial units
d = 0.5 * (2*r+1) * lam
# new mask
cdict["mask"]={'box': {'lower_left': [-d, -d, -d], 'upper_right': [d, d, d]}}
return allconns
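# A minimal usage sketch, assuming the layers built elsewhere by the App are
# collected in a dict keyed by the population names used above (e.g.
# layers['Vp_h_23_ex']); the names `layers` and `apply_connections` are
# illustrative only. Each allconns entry is [source name, target name,
# projection dict] in the format expected by the NEST 2.x Topology module.
def apply_connections(layers, allconns):
    import nest.topology as tp
    for src_name, tgt_name, projection in allconns:
        # projection carries connection_type, mask, kernel, weights, delays,
        # synapse_model and optional sources/targets restrictions.
        tp.ConnectLayers(layers[src_name], layers[tgt_name], projection)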
def presim_setup(nest_layers, **kwargs):
"""
Function called by the App before the simulation starts.
May perform some setup.
"""
pass
| compneuronmbu/NESTConnectionApp | static/examples/define_hill_tononi_3D.py | Python | gpl-2.0 | 57,744 | ["Gaussian"] | a56d9400bebbb12012670d59ee2f15328c26c83a3d7fce44db5d25801311feab |
# Program to spin a wheel and choose a language
# Created By Jake Huxell For Advent Of Code 2015
"""
Some notes about the program.
TODO:
1. Upkeep and bug fixes as I go along
"""
# Get pygame libraries
import pygame, sys, random
from pygame.locals import *
# Generate random seed for the program - Use current time
random.seed()
# Initialise pygame modules - required before the font and display objects below can be created
pygame.init()
# Global variables so user can easily change wheel properties
LANGUAGES = ("Python", "C++", "C", "Java", "JavaScript", "ASM", "RPG Maker",
"Prolog", "Lua")
# Set up the colours that make up the sections of the wheel
BLACK = (0, 0, 0)
MAROON_RED = (128, 0, 0)
RED = (255, 0, 0)
LIME_GREEN = (0, 128, 0)
GREEN = (0, 255, 0)
NAVY_BLUE = (0, 0, 128)
BLUE = (0, 0, 255)
PURPLE = (128, 0, 128)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
WHITE = (255, 255, 255)
CREAM = (255, 255, 153)
# Slap them all in an array so the wheel can choose one at random
COLOURS = (MAROON_RED, RED, LIME_GREEN, GREEN, NAVY_BLUE, BLUE, PURPLE, YELLOW)
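# Note that COLOURS holds 8 entries while LANGUAGES holds 9, so any index
# derived from a language offset is wrapped with % len(COLOURS).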
# Variables to hold size of the window
WINDOW_HEIGHT = 800
WINDOW_WIDTH = 600
# Pi - Used for drawing segments of the wheel
PI = 3.141592
# Clock to set the game fps
clock = pygame.time.Clock()
# Font objects (integer sizes - pygame.font.Font expects an int size)
titleFont = pygame.font.Font(None, (20+(WINDOW_WIDTH//20)))
wheelFont = pygame.font.Font(None, 350//len(LANGUAGES))
whamFont = pygame.font.Font(None, 500//len(LANGUAGES))
buttonFont = pygame.font.Font(None, (WINDOW_WIDTH//24))
titleWham = titleFont.render("Poetry In Motion!", True, (10, 10, 10))
titleGrind = titleFont.render("PHYSICS ARE SHAMBLED", True, (10, 10, 10))
buttonSplash = buttonFont.render("Spin Here!", True, WHITE)
buttonWham = buttonFont.render("Click!", True, YELLOW)
# Function to draw a wheel on the screen
# Takes the distance the wheel has spun as variable
def drawWheel(currentDisplay, wheelCentre, currentOffset):
# Draw annoying flashing wheel outline!
for segment in range (0, len(LANGUAGES)):
pygame.draw.arc(currentDisplay, COLOURS[random.randint(0, len(COLOURS) - 1)],
(wheelCentre[0], wheelCentre[1], WINDOW_WIDTH/1.5, WINDOW_HEIGHT/1.5),
(((2*PI)/len(LANGUAGES)*segment)),
(((2*PI)/len(LANGUAGES)*(segment+1))), 10)
# Draw exciting wheel interior!
for segment in range (0, len(LANGUAGES)):
colour = (currentOffset+segment - 2)%len(COLOURS)
pygame.draw.arc(currentDisplay, COLOURS[colour],
((wheelCentre[0] + 10), (wheelCentre[1] + 10),
(WINDOW_WIDTH/1.5 - 20), (WINDOW_HEIGHT/1.5 - 20)),
((2*PI)/len(LANGUAGES)*segment),
((2*PI)/len(LANGUAGES)*(segment+1)), 50)
# Draw centre ellipse and line that goes straight up to indicate the winner
pygame.draw.ellipse(currentDisplay, ORANGE, ((wheelCentre[0]+40),
(wheelCentre[1]+40), (WINDOW_WIDTH/1.5-80),
(WINDOW_HEIGHT/1.5-80)))
POINT = WINDOW_HEIGHT/2 - 100
# Draw line that points to the winner
pygame.draw.line(currentDisplay, BLACK,
(WINDOW_WIDTH/2 - 30, WINDOW_HEIGHT/2),
(WINDOW_WIDTH/2 - 30, WINDOW_HEIGHT/2 - WINDOW_HEIGHT/3)
, 10)
# Draw arrow on line
pygame.draw.polygon(currentDisplay, BLACK,
((WINDOW_WIDTH/2 - 25, WINDOW_HEIGHT/2 - WINDOW_HEIGHT/3),
(WINDOW_WIDTH/2 - 35, WINDOW_HEIGHT/2 - WINDOW_HEIGHT/3),
(WINDOW_WIDTH/2 - 30, WINDOW_HEIGHT/2 - WINDOW_HEIGHT/3 - 20)))
# Draw Bottom of wheel
pygame.draw.polygon(currentDisplay, BLACK,
((wheelCentre[0] + WINDOW_WIDTH/1.5,
wheelCentre[1] + WINDOW_HEIGHT/1.5 + WINDOW_HEIGHT/8),
(wheelCentre[0] + WINDOW_WIDTH/3,
wheelCentre[1] + WINDOW_HEIGHT/1.5),
(wheelCentre[0],
wheelCentre[1] + WINDOW_HEIGHT/1.5 + WINDOW_HEIGHT/8)))
return # End of drawWheel function
# Function to draw the text
# Which one is flashing depends on the segment at the top of the wheel.
def drawText(currentDisplay, textNormal, textWham, highlighted):
division = WINDOW_HEIGHT / len(LANGUAGES)
for selection in range(0, len(LANGUAGES)):
if selection == highlighted:
currentDisplay.blit(textWham[selection], (0, division*selection))
else:
currentDisplay.blit(textNormal[selection], (0, division *selection))
return
# Function to draw the button
# Will flash if someone hovers over it
def drawButton(currentDisplay):
mousePos = pygame.mouse.get_pos()
if mousePos[0] < 10*WINDOW_WIDTH/12 or mousePos[1] < 11*WINDOW_HEIGHT/12:
pygame.draw.rect(currentDisplay, BLACK, (10*WINDOW_WIDTH/12,
11*WINDOW_HEIGHT/12, WINDOW_WIDTH/6, WINDOW_HEIGHT/12))
currentDisplay.blit(buttonSplash, (10*WINDOW_WIDTH/12, 11*WINDOW_HEIGHT/12))
else:
pygame.draw.rect(currentDisplay, PURPLE, (10*WINDOW_WIDTH/12,
11*WINDOW_HEIGHT/12, WINDOW_WIDTH/6, WINDOW_HEIGHT/12))
currentDisplay.blit(buttonWham, (10*WINDOW_WIDTH/12, 11*WINDOW_HEIGHT/12))
return
# Function to check if the button has been pressed or not
def isPressed(currentDisplay):
clicks = pygame.mouse.get_pressed()
mousePos = pygame.mouse.get_pos()
if clicks[0] and (mousePos[0] >= 10*WINDOW_WIDTH/12 and
mousePos[1] >= 11*WINDOW_HEIGHT/12):
return True
else:
return False
# Main program that creates the window and sets the logic up
def main():
# Start up the window
pygame.mixer.music.load("assets/Waiting.wav")
pygame.mixer.music.play(-1, 0.0)
DISPLAY = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Spinner Of Syntax")
titleSplash = titleFont.render("Get Ready To Spin!", True, (10, 10, 10))
# Create arrays to hold the text for the wheel
languagesUnselected = []
languagesWham = []
wheelStopped = True
wheelBackSpinning = False
offset = random.randint(0, len(LANGUAGES) - 1)  # randint is inclusive, so cap at the last index
clockSpeed = 1
newSong = False
upperBound = random.randrange(350, 400)
# Establish top-left corner of the wheel
wheelCentre = (WINDOW_WIDTH/8, WINDOW_HEIGHT/8)
for language in range(0, len(LANGUAGES)):
languagesUnselected.append(wheelFont.render(LANGUAGES[language], True,
(10, 10, 10)))
languagesWham.append(whamFont.render(LANGUAGES[language], True,
(255, 15, 30)))
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
# Stationary wheel logic
if wheelStopped:
if newSong:
pygame.mixer.music.load("assets/Waiting.wav")
pygame.mixer.music.play(-1, 0.0)
newSong = False
DISPLAY.fill((CREAM))
DISPLAY.blit(titleSplash, (3*WINDOW_WIDTH/12, 0))
drawWheel(DISPLAY, wheelCentre, offset)
drawText(DISPLAY, languagesUnselected, languagesWham, offset)
drawButton(DISPLAY)
wheelStopped = not isPressed(DISPLAY)
newSong = not wheelStopped
# Wheel spinning logic
else:
if not wheelBackSpinning:
if newSong:
pygame.mixer.music.load("assets/Spinning.wav")
pygame.mixer.music.play(-1, 0.0)
newSong = False
offset += 1
if offset >= len(LANGUAGES):
offset = 0
DISPLAY.fill((CREAM))
DISPLAY.blit(titleWham, (3*WINDOW_WIDTH/12, 0))
drawWheel(DISPLAY, wheelCentre, offset)
drawText(DISPLAY, languagesUnselected, languagesWham, offset)
pygame.time.wait(clockSpeed)
clockSpeed += random.randint(0, 4)
if clockSpeed >= upperBound:
wheelBackSpinning = True
newSong = True
else:
if newSong:
pygame.mixer.music.load("assets/GrindingHalt.wav")
pygame.mixer.music.play(-1, 0.0)
newSong = False
offset -= 1
if offset < 0:
offset = len(LANGUAGES) - 1
DISPLAY.fill((CREAM))
DISPLAY.blit(titleGrind, (3*WINDOW_WIDTH/12, 0))
drawWheel(DISPLAY, wheelCentre, offset)
drawText(DISPLAY, languagesUnselected, languagesWham, offset)
pygame.time.wait(clockSpeed)
clockSpeed += random.randrange(7, 13)
if clockSpeed >= 520:
wheelBackSpinning = False
wheelStopped = True
pygame.mixer.music.load("assets/WinnerRevealed.wav")
pygame.mixer.music.play(-1, 0.0)
titleSplash = titleFont.render(LANGUAGES[offset] + " Wins!", True,
COLOURS[offset % len(COLOURS)])  # COLOURS has fewer entries than LANGUAGES, so wrap the index
clockSpeed = 1
pygame.display.flip()
clock.tick(30)
main()
| Huxellberger/SpinnerOfSyntax | Wheel.py | Python | gpl-2.0 | 8,691 | ["exciting"] | 0f4435def4bc7d4042a39a48960f9d29dacf054bc244a70557c87ccc519a95c3 |
|
from __future__ import division
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.naive_bayes import MultinomialNB, ComplementNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# Some additional randomised test data
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
# Partial fit a second time with an incoherent X
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(var_smoothing=0.1), X, y, cv=10)
assert_greater(scores.mean(), 0.89)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
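# These follow the book's estimate P(t|c) = (N_ct + 1) / (N_c + 2): for the
# China class (3 documents), Beijing appears in 1 of them, giving
# (1 + 1) / (3 + 2) = 0.4, and Chinese appears in all 3, giving
# (3 + 1) / (3 + 2) = 0.8; for Japan (1 document), Chinese gives 2/3.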
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_cnb():
# Tests ComplementNB when alpha=1.0 for the toy example in Manning,
# Raghavan, and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo.
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1).
Y = np.array([0, 0, 0, 1])
# Verify inputs are nonnegative.
clf = ComplementNB(alpha=1.0)
assert_raises(ValueError, clf.fit, -X, Y)
clf.fit(X, Y)
# Check that counts are correct.
feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]])
assert_array_equal(clf.feature_count_, feature_count)
class_count = np.array([3, 1])
assert_array_equal(clf.class_count_, class_count)
feature_all = np.array([1, 4, 1, 1, 1, 1])
assert_array_equal(clf.feature_all_, feature_all)
# Check that weights are correct. See steps 4-6 in Table 4 of
# Rennie et al. (2003).
theta = np.array([
[
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6)
],
[
(1 + 1) / (6 + 6),
(3 + 1) / (6 + 6),
(0 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(0 + 1) / (6 + 6)
]])
weights = np.zeros(theta.shape)
for i in range(2):
weights[i] = np.log(theta[i])
weights[i] /= weights[i].sum()
assert_array_almost_equal(clf.feature_log_prob_, weights)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
def test_alpha():
# Setting alpha=0 should not output nan results when p(x_i|y_j)=0 is a case
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test sparse X
X = scipy.sparse.csr_matrix(X)
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test for alpha < 0
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
'alpha should be > 0.')
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.fit, X, y)
assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.partial_fit,
X, y, classes=[0, 1])
assert_raise_message(ValueError, expected_msg, m_nb.partial_fit,
X, y, classes=[0, 1])
| clemkoa/scikit-learn | sklearn/tests/test_naive_bayes.py | Python | bsd-3-clause | 23,990 | ["Gaussian"] | 16996561dbba647420e16b225f9bbd18843fa010b46d16f60e3e63fd20da4c08 |
|
# Copyright (c) 1998-2000 John Aycock
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Copy of John Aycock's SPARK parser included with Biopython (DEPRECATED).
To clarify, when we say deprecated we just mean to warn you that Biopython will
in a future release no longer include this module. This does not affect the
status of John Aycock's SPARK parser which can be installed separately from:
http://pages.cpsc.ucalgary.ca/~aycock/spark/
Biopython included this copy of SPARK purely for parsing GenBank/EMBL feature
location strings using Bio.GenBank.LocationParser, and that code has now been
replaced with something simpler and faster using regular expressions.
"""
# Don't issue a deprecation warning here, but via Bio.Parsers instead
# This avoids the user seeing multiple deprecation warnings.
__version__ = 'SPARK-0.6.1'
import re
import sys
def _namelist(instance):
namelist, namedict, classlist = [], {}, [instance.__class__]
for c in classlist:
for b in c.__bases__:
classlist.append(b)
for name in dir(c):
if name not in namedict:
namelist.append(name)
namedict[name] = 1
return namelist
class GenericScanner(object):
def __init__(self):
pattern = self.reflect()
self.re = re.compile(pattern, re.VERBOSE)
self.index2func = {}
for name, number in self.re.groupindex.items():
self.index2func[number-1] = getattr(self, 't_' + name)
def makeRE(self, name):
doc = getattr(self, name).__doc__
rv = '(?P<%s>%s)' % (name[2:], doc)
return rv
def reflect(self):
rv = []
for name in _namelist(self):
if name[:2] == 't_' and name != 't_default':
rv.append(self.makeRE(name))
rv.append(self.makeRE('t_default'))
return '|'.join(rv)
def error(self, s, pos):
print >>sys.stderr, "Lexical error at position %s" % pos
raise SystemExit
def tokenize(self, s):
pos = 0
n = len(s)
while pos < n:
m = self.re.match(s, pos)
if m is None:
self.error(s, pos)
groups = m.groups()
for i in range(len(groups)):
if groups[i] and i in self.index2func:
self.index2func[i](groups[i])
pos = m.end()
def t_default(self, s):
r'( . | \n )+'
pass
class GenericParser(object):
def __init__(self, start):
self.rules = {}
self.rule2func = {}
self.rule2name = {}
self.collectRules()
self.startRule = self.augment(start)
self.ruleschanged = 1
_START = 'START'
_EOF = 'EOF'
#
# A hook for GenericASTBuilder and GenericASTMatcher.
#
def preprocess(self, rule, func): return rule, func
def addRule(self, doc, func):
rules = doc.split()
index = []
for i in range(len(rules)):
if rules[i] == '::=':
index.append(i-1)
index.append(len(rules))
for i in range(len(index)-1):
lhs = rules[index[i]]
rhs = rules[index[i]+2:index[i+1]]
rule = (lhs, tuple(rhs))
rule, fn = self.preprocess(rule, func)
if lhs in self.rules:
self.rules[lhs].append(rule)
else:
self.rules[lhs] = [ rule ]
self.rule2func[rule] = fn
self.rule2name[rule] = func.__name__[2:]
self.ruleschanged = 1
def collectRules(self):
for name in _namelist(self):
if name[:2] == 'p_':
func = getattr(self, name)
doc = func.__doc__
self.addRule(doc, func)
def augment(self, start):
#
# Tempting though it is, this isn't made into a call
# to self.addRule() because the start rule shouldn't
# be subject to preprocessing.
#
startRule = (self._START, ( start, self._EOF ))
self.rule2func[startRule] = lambda args: args[0]
self.rules[self._START] = [ startRule ]
self.rule2name[startRule] = ''
return startRule
def makeFIRST(self):
union = {}
self.first = {}
for rulelist in self.rules.values():
for lhs, rhs in rulelist:
if lhs not in self.first:
self.first[lhs] = {}
if len(rhs) == 0:
self.first[lhs][None] = 1
continue
sym = rhs[0]
if sym not in self.rules:
self.first[lhs][sym] = 1
else:
union[(sym, lhs)] = 1
changes = 1
while changes:
changes = 0
for src, dest in union.keys():
destlen = len(self.first[dest])
self.first[dest].update(self.first[src])
if len(self.first[dest]) != destlen:
changes = 1
#
# An Earley parser, as per J. Earley, "An Efficient Context-Free
# Parsing Algorithm", CACM 13(2), pp. 94-102. Also J. C. Earley,
# "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
# Carnegie-Mellon University, August 1968, p. 27.
#
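# Throughout parse() and buildState() below, an Earley item is the tuple
# (rule, pos, parent): `rule` is an (lhs, rhs) pair, `pos` is the position
# of the dot within the RHS, and `parent` is the index of the state set in
# which the item was predicted; the initial chart entry in parse(),
# states[0] = [(self.startRule, 0, 0)], places the dot before the first
# symbol of the augmented start rule.
#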
def typestring(self, token):
return None
def error(self, token):
print >>sys.stderr, "Syntax error at or near `%s' token" % token
raise SystemExit
def parse(self, tokens):
tree = {}
tokens.append(self._EOF)
states = { 0: [ (self.startRule, 0, 0) ] }
if self.ruleschanged:
self.makeFIRST()
for i in xrange(len(tokens)):
states[i+1] = []
if states[i] == []:
break
self.buildState(tokens[i], states, i, tree)
#_dump(tokens, states)
if i < len(tokens)-1 or states[i+1] != [(self.startRule, 2, 0)]:
del tokens[-1]
self.error(tokens[i-1])
rv = self.buildTree(tokens, tree, ((self.startRule, 2, 0), i+1))
del tokens[-1]
return rv
def buildState(self, token, states, i, tree):
needsCompletion = {}
state = states[i]
predicted = {}
for item in state:
rule, pos, parent = item
lhs, rhs = rule
#
# A -> a . (completer)
#
if pos == len(rhs):
if len(rhs) == 0:
needsCompletion[lhs] = (item, i)
for pitem in states[parent]:
if pitem is item:
break
prule, ppos, pparent = pitem
plhs, prhs = prule
if prhs[ppos:ppos+1] == (lhs,):
new = (prule,
ppos+1,
pparent)
if new not in state:
state.append(new)
tree[(new, i)] = [(item, i)]
else:
tree[(new, i)].append((item, i))
continue
nextSym = rhs[pos]
#
# A -> a . B (predictor)
#
if nextSym in self.rules:
#
# Work on completer step some more; for rules
# with empty RHS, the "parent state" is the
# current state we're adding Earley items to,
# so the Earley items the completer step needs
# may not all be present when it runs.
#
if nextSym in needsCompletion:
new = (rule, pos+1, parent)
olditem_i = needsCompletion[nextSym]
if new not in state:
state.append(new)
tree[(new, i)] = [olditem_i]
else:
tree[(new, i)].append(olditem_i)
#
# Has this been predicted already?
#
if nextSym in predicted:
continue
predicted[nextSym] = 1
ttype = token is not self._EOF and \
self.typestring(token) or \
None
if ttype is not None:
#
# Even smarter predictor, when the
# token's type is known. The code is
# grungy, but runs pretty fast. Three
# cases are looked for: rules with
# empty RHS; first symbol on RHS is a
# terminal; first symbol on RHS is a
# nonterminal (and isn't nullable).
#
for prule in self.rules[nextSym]:
new = (prule, 0, i)
prhs = prule[1]
if len(prhs) == 0:
state.append(new)
continue
prhs0 = prhs[0]
if prhs0 not in self.rules:
if prhs0 != ttype:
continue
else:
state.append(new)
continue
first = self.first[prhs0]
if None not in first and \
ttype not in first:
continue
state.append(new)
continue
for prule in self.rules[nextSym]:
#
# Smarter predictor, as per Grune &
# Jacobs' _Parsing Techniques_. Not
# as good as FIRST sets though.
#
prhs = prule[1]
if len(prhs) > 0 and \
prhs[0] not in self.rules and \
token != prhs[0]:
continue
state.append((prule, 0, i))
#
# A -> a . c (scanner)
#
elif token == nextSym:
#assert new not in states[i+1]
states[i+1].append((rule, pos+1, parent))
def buildTree(self, tokens, tree, root):
stack = []
self.buildTree_r(stack, tokens, -1, tree, root)
return stack[0]
def buildTree_r(self, stack, tokens, tokpos, tree, root):
(rule, pos, parent), state = root
while pos > 0:
want = ((rule, pos, parent), state)
if want not in tree:
#
# Since pos > 0, it didn't come from closure,
# and if it isn't in tree[], then there must
# be a terminal symbol to the left of the dot.
# (It must be from a "scanner" step.)
#
pos = pos - 1
state = state - 1
stack.insert(0, tokens[tokpos])
tokpos = tokpos - 1
else:
#
# There's a NT to the left of the dot.
# Follow the tree pointer recursively (>1
# tree pointers from it indicates ambiguity).
# Since the item must have come about from a
# "completer" step, the state where the item
# came from must be the parent state of the
# item the tree pointer points to.
#
children = tree[want]
if len(children) > 1:
child = self.ambiguity(children)
else:
child = children[0]
tokpos = self.buildTree_r(stack,
tokens, tokpos,
tree, child)
pos = pos - 1
(crule, cpos, cparent), cstate = child
state = cparent
lhs, rhs = rule
result = self.rule2func[rule](stack[:len(rhs)])
stack[:len(rhs)] = [result]
return tokpos
def ambiguity(self, children):
#
# XXX - problem here and in collectRules() if the same
# rule appears in >1 method. But in that case the
# user probably gets what they deserve :-) Also
# undefined results if rules causing the ambiguity
# appear in the same method.
#
sortlist = []
name2index = {}
for i in range(len(children)):
((rule, pos, parent), index) = children[i]
lhs, rhs = rule
name = self.rule2name[rule]
sortlist.append((len(rhs), name))
name2index[name] = i
sortlist.sort()
list = map(lambda (a,b): b, sortlist)
return children[name2index[self.resolve(list)]]
def resolve(self, list):
#
# Resolve ambiguity in favor of the shortest RHS.
# Since we walk the tree from the top down, this
# should effectively resolve in favor of a "shift".
#
return list[0]
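#
# Usage sketch for GenericParser above (not part of the original module).
# Grammar rules live in the docstrings of p_* methods ("lhs ::= rhs ...",
# SPARK's documented convention) and the method bodies act as semantic
# actions.  The grammar and token stream here are illustrative assumptions.
#
class _CountingParser(GenericParser):
    def __init__(self):
        GenericParser.__init__(self, 'items')
    def p_items_many(self, args):
        ' items ::= items a '
        return args[0] + 1
    def p_items_one(self, args):
        ' items ::= a '
        return 1
#
# _CountingParser().parse(['a', 'a', 'a']) is expected to evaluate to 3.
#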
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
def __init__(self, AST, start):
GenericParser.__init__(self, start)
self.AST = AST
def preprocess(self, rule, func):
rebind = lambda lhs, self=self: \
lambda args, lhs=lhs, self=self: \
self.buildASTNode(args, lhs)
lhs, rhs = rule
return rule, rebind(lhs)
def buildASTNode(self, args, lhs):
children = []
for arg in args:
if isinstance(arg, self.AST):
children.append(arg)
else:
children.append(self.terminal(arg))
return self.nonterminal(lhs, children)
def terminal(self, token): return token
def nonterminal(self, type, args):
rv = self.AST(type)
rv[:len(args)] = args
return rv
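#
# Usage sketch for GenericASTBuilder above (not part of the original module).
# The AST argument must be a class whose instances are constructed from a
# node type and filled by slice assignment; a list subclass is the simplest
# choice.  The grammar below is an illustrative assumption.
#
class _SketchNode(list):
    def __init__(self, type):
        list.__init__(self)
        self.type = type
class _SketchBuilder(GenericASTBuilder):
    def __init__(self):
        GenericASTBuilder.__init__(self, _SketchNode, 'items')
    def p_rules(self, args):
        '''
            items ::= items a
            items ::= a
        '''
#
# _SketchBuilder().parse(['a', 'a']) is expected to return a _SketchNode tree
# whose leaves are the raw 'a' tokens (terminal() passes tokens through).
#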
#
# GenericASTTraversal is a Visitor pattern according to Design Patterns. For
# each node it attempts to invoke the method n_<node type>, falling
# back onto the default() method if the n_* can't be found. The preorder
# traversal also looks for an exit hook named n_<node type>_exit (no default
# routine is called if it's not found). To prematurely halt traversal
# of a subtree, call the prune() method -- this only makes sense for a
# preorder traversal. Node type is determined via the typestring() method.
#
# Must derive from Exception so that prune() can raise it and preorder()
# can catch it.
class GenericASTTraversalPruningException(Exception):
pass
class GenericASTTraversal(object):
def __init__(self, ast):
self.ast = ast
def typestring(self, node):
return node.type
def prune(self):
raise GenericASTTraversalPruningException
def preorder(self, node=None):
if node is None:
node = self.ast
try:
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
except GenericASTTraversalPruningException:
return
for kid in node:
self.preorder(kid)
name = name + '_exit'
if hasattr(self, name):
func = getattr(self, name)
func(node)
def postorder(self, node=None):
if node is None:
node = self.ast
for kid in node:
self.postorder(kid)
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
def default(self, node):
pass
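#
# Usage sketch for GenericASTTraversal above (not part of the original
# module).  Subclasses add n_<node type> visitors; nodes without a matching
# visitor fall through to default().  This assumes the tree's nodes expose
# the attribute consulted by typestring() (node.type by default); the node
# type name 'items' is an illustrative assumption.
#
# class _SketchVisitor(GenericASTTraversal):
#     def __init__(self, ast):
#         GenericASTTraversal.__init__(self, ast)
#         self.count = 0
#         self.preorder()
#     def n_items(self, node):     # visited for every node of type 'items'
#         self.count += 1
#     def default(self, node):     # all other node types, including leaves
#         pass
#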
#
# GenericASTMatcher. AST nodes must have "__getitem__" and "__cmp__"
# implemented.
#
# XXX - makes assumptions about how GenericParser walks the parse tree.
#
class GenericASTMatcher(GenericParser):
def __init__(self, start, ast):
GenericParser.__init__(self, start)
self.ast = ast
def preprocess(self, rule, func):
rebind = lambda func, self=self: \
lambda args, func=func, self=self: \
self.foundMatch(args, func)
lhs, rhs = rule
rhslist = list(rhs)
rhslist.reverse()
return (lhs, tuple(rhslist)), rebind(func)
def foundMatch(self, args, func):
func(args[-1])
return args[-1]
def match_r(self, node):
self.input.insert(0, node)
children = 0
for child in node:
if children == 0:
self.input.insert(0, '(')
children = children + 1
self.match_r(child)
if children > 0:
self.input.insert(0, ')')
def match(self, ast=None):
if ast is None:
ast = self.ast
self.input = []
self.match_r(ast)
self.parse(self.input)
def resolve(self, list):
#
# Resolve ambiguity in favor of the longest RHS.
#
return list[-1]
def _dump(tokens, states):
for i in range(len(states)):
print 'state', i
for (lhs, rhs), pos, parent in states[i]:
print '\t', lhs, '::=',
print ' '.join(rhs[:pos]),
print '.',
print ' '.join(rhs[pos:]),
print ',', parent
if i < len(tokens):
print
print 'token', str(tokens[i])
print
|
asherkhb/coge
|
bin/last_wrapper/Bio/Parsers/spark.py
|
Python
|
bsd-2-clause
| 18,936
|
[
"Biopython"
] |
77ccd79b0e441708b969e6c9515c4f412b63b6d965451d20f688ba6f0b7d6ba5
|
#!/usr/bin/env python
from octopus.shelltool.ResultProcessor import BaseResultProcessor,NodeResultPropertyCleaner
class NodeResult(NodeResultPropertyCleaner):
def getElementType(self):
return 'node'
def getId(self):
return self.result['id']
def getProperties(self):
props = self.properties()
props['id'] = self.getId()
return props
class EdgeResult(BaseResultProcessor):
def getElementType(self):
return 'edge'
def getDest(self):
return self.result['inV']
def getSrc(self):
return self.result['outV']
def getId(self):
return self.result['id']
def getLabel(self):
return self.result['label']
def getProperties(self):
props = self.properties()
props['id'] = self.getId()
props['label'] = self.getLabel()
return props
|
octopus-platform/joern
|
python/joern-tools/joern/shelltool/PlotResult.py
|
Python
|
lgpl-3.0
| 868
|
[
"Octopus"
] |
2b543fc3bbccba51e0d5d49c1de8b5f87b62e4ab315c55a1940e245494b26d66
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create tensor ellipsoids
# Create the RenderWindow, Renderer and interactive renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
#
# Create tensor ellipsoids
#
# generate tensors
ptLoad = vtk.vtkPointLoad()
ptLoad.SetLoadValue(100.0)
ptLoad.SetSampleDimensions(6,6,6)
ptLoad.ComputeEffectiveStressOn()
ptLoad.SetModelBounds(-10,10,-10,10,-10,10)
# extract plane of data
plane = vtk.vtkImageDataGeometryFilter()
plane.SetInputConnection(ptLoad.GetOutputPort())
plane.SetExtent(2,2,0,99,0,99)
# Generate ellipsoids
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(8)
sphere.SetPhiResolution(8)
ellipsoids = vtk.vtkTensorGlyph()
ellipsoids.SetInputConnection(ptLoad.GetOutputPort())
ellipsoids.SetSourceConnection(sphere.GetOutputPort())
ellipsoids.SetScaleFactor(10)
ellipsoids.ClampScalingOn()
ellipNormals = vtk.vtkPolyDataNormals()
ellipNormals.SetInputConnection(ellipsoids.GetOutputPort())
# Map contour
lut = vtk.vtkLogLookupTable()
lut.SetHueRange(.6667,0.0)
ellipMapper = vtk.vtkPolyDataMapper()
ellipMapper.SetInputConnection(ellipNormals.GetOutputPort())
ellipMapper.SetLookupTable(lut)
plane.Update()
#force update for scalar range
ellipMapper.SetScalarRange(plane.GetOutput().GetScalarRange())
ellipActor = vtk.vtkActor()
ellipActor.SetMapper(ellipMapper)
#
# Create outline around data
#
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(ptLoad.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
#
# Create cone indicating application of load
#
coneSrc = vtk.vtkConeSource()
coneSrc.SetRadius(.5)
coneSrc.SetHeight(2)
coneMap = vtk.vtkPolyDataMapper()
coneMap.SetInputConnection(coneSrc.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMap)
coneActor.SetPosition(0,0,11)
coneActor.RotateY(90)
coneActor.GetProperty().SetColor(1,0,0)
camera = vtk.vtkCamera()
camera.SetFocalPoint(0.113766,-1.13665,-1.01919)
camera.SetPosition(-29.4886,-63.1488,26.5807)
camera.SetViewAngle(24.4617)
camera.SetViewUp(0.17138,0.331163,0.927879)
camera.SetClippingRange(1,100)
ren1.AddActor(ellipActor)
ren1.AddActor(outlineActor)
ren1.AddActor(coneActor)
ren1.SetBackground(1.0,1.0,1.0)
ren1.SetActiveCamera(camera)
renWin.SetSize(400,400)
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Modeling/Testing/Python/TenEllip.py
|
Python
|
bsd-3-clause
| 2,810
|
[
"VTK"
] |
6c7043574b2c3238e3ab68422f1b89b0757e8635f01a70fd2f4b16092546defc
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('yaps2/version.py') as f:
exec(f.read())
setup(
name='yaps2',
version=__version__,
description='Yet Another Pipeline Setup',
long_description=readme,
author='Indraniel Das',
author_email='idas@wustl.edu',
license=license,
url='https://github.com/indraniel/yaps2',
dependency_links=[
'https://github.com/indraniel/COSMOS2/tarball/enable-lsf-rebase#egg=cosmos-wfm-2.0.10.indraniel5',
'https://github.com/brentp/cyvcf2/tarball/cb8104c196a7f98f7128f39becd99ef829ecf14d#egg=cyvcf2-0.7.7',
],
install_requires=[
'cosmos-wfm==2.0.10.indraniel5',
'click==6.7',
'clint==0.5.1',
'matplotlib==1.5.3',
'scipy==0.18.1',
'pandas==0.18.1',
'seaborn==0.7.1',
'Cython==0.25.2',
'cyvcf2==0.7.7',
'bx-python==0.7.3',
'pysam==0.9.1.4',
],
entry_points='''
[console_scripts]
yaps2=yaps2.cli:cli
''',
packages=find_packages(exclude=('tests', 'docs')),
include_package_data=True,
)
|
indraniel/yaps2
|
setup.py
|
Python
|
bsd-2-clause
| 1,231
|
[
"pysam"
] |
01bc5c55e2e77d41e92ced3ecce15465cfd8e011ac446290a6eabee8e1bc63dd
|
#!/usr/bin/env python
# Perform Path Similarity Analysis on a submatrix
"""Perform Path Similarity Analysis (PSA) using MDAnalysis.analysis.psa.
Provide all trajectories and topology files in a JSON input file (two
lists, one with topology files, the other with trajectory files) or on
the command line. There *must* be one topology file TOPOL for each
trajectory file TRAJ. The `--nsplit N` argument is required: it
indicates where to split the list of trajectories T so that the
distance matrix is computed between the trajectories in `T[:N]` and `T[N:]`.
"""
import sys
import time
import argparse
from collections import OrderedDict
import itertools
import json
import numpy as np
import MDAnalysis as mda
from MDAnalysis.analysis import psa
def psa_partial(universesA, universesB, metric="discrete_frechet", selection="name CA"):
"""Calculate path similarity metric between lists of universes.
Arguments
---------
universesA, universesB : lists
lists of MDAnalysis.Universe instances
metric : string, optional
label of the metric or distance function, can be one of "discrete_frechet",
"hausdorff", "weighted_average_hausdorff", "average_hausdorff",
"hausdorff_neighbors". Note that only "discrete_frechet" and "hausdorff"
are true metrics.
selection : string, optional
MDAnalysis selection string that, when applied to *all* universes generates
the subset of atoms that should be compared.
Returns
-------
numpy.array(len(universesA), len(universesB)) : distance matrix
The matrix of all the mutual distances D[i, j] with 0 <= i <
len(A) and 0 <= j < len(B)
Note
----
Each universe should be transferred to memory
(`Universe.transfer_to_memory()`) in order to speed up extraction
of coordinates. However, for very big trajectories, memory
problems might occur and then this code is not optimal because it does
not cache extracted coordinates.
"""
_metric = psa.get_path_metric_func(metric)
# submatrix of d[i, j] with i from A and j from B
D = np.zeros((len(universesA), len(universesB)))
# not optimized: could transpose to keep larger axis outside,
# cache results (compare u_i and u_j), and generate 0 for u_i == u_j
for i, u_i in enumerate(universesA):
g1 = u_i.select_atoms(selection)
P = u_i.trajectory.timeseries(asel=g1, format="fac")
for j, u_j in enumerate(universesB):
g2 = u_j.select_atoms(selection)
Q = u_j.trajectory.timeseries(asel=g2, format="fac")
# compute distance between paths
D[i, j] = _metric(P, Q)
return D
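# Illustrative call (the topology/trajectory paths are placeholders, not
# files from this repository); the __main__ block below drives the function
# in the same way:
#
#   uA = [mda.Universe("topA.pdb", "trajA.dcd")]
#   uB = [mda.Universe("topB.pdb", "trajB.dcd")]
#   for u in uA + uB:
#       u.transfer_to_memory()
#   D = psa_partial(uA, uB, metric="hausdorff", selection="name CA")
#   # D.shape == (len(uA), len(uB))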
class StopWatch(OrderedDict):
fmt = "{0:30s} {1:8.3f} s"
def tic(self, label):
if label in self:
raise ValueError("label {} already exists".format(label))
self[label] = time.time()
def show(self):
print("----------[ TIMING ]--------------------")
labels = self.keys()
start = labels[0]
for stop in labels[1:]:
dt = self[stop] - self[start]
print(self.fmt.format(stop, dt))
start = stop
print("----------------------------------------")
print(self.fmt.format("total time",
self[labels[-1]] - self[labels[0]]))
print("----------------------------------------")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-n", "--nsplit", type=int, required=True, default=None,
metavar="N",
help="split list of trajectories T so that the distance "
"submatrix D[i, j] will be calculated over the cartesian "
"product T[:N] x T[N:].")
parser.add_argument("-f", "--inputfile",
help="JSON file with lists of topologies and "
"trajectories (or use --trajectories/--topologies)")
parser.add_argument("--topologies", required=False, nargs="+",
metavar="TOPOLOGY",
help="List of topology files"),
parser.add_argument("--trajectories", required=False, nargs="+",
metavar="TRAJ",
help="List of trajectory files")
parser.add_argument("-o", "--outfile", default="distances.npy",
help="Distance matrix in numpy format")
args = parser.parse_args()
if args.inputfile:
print("Loading paths from JSON file {}".format(args.inputfile))
with open(args.inputfile) as inp:
topologies, trajectories = json.load(inp)
else:
print("Using paths from command line")
topologies = args.topologies
trajectories = args.trajectories
if len(topologies) != len(trajectories):
raise ValueError("Need exactly one topology file for each trajectory")
print("Processing {} trajectories.".format(len(trajectories)))
print("Splitting trajectories in two blocks of length {0} and {1}".format(
args.nsplit, len(trajectories) - args.nsplit))
timer = StopWatch()
timer.tic('init')
# load trajectories
universes = [mda.Universe(topology, trajectory) for topology, trajectory in
itertools.izip(topologies, trajectories)]
timer.tic("load Universes")
# speed up by transferring to memory
# transfer to memory for instantaneous time series extraction
# (typically not a problem for these trajectories and submatrix sizes; if memory becomes an
# issue then do not transfer to memory and store intermediate trajectories on disk)
for u in universes:
u.transfer_to_memory()
timer.tic("universe.transfer_to_memory()")
# run distance calculation and produce submatrix
uA = universes[:args.nsplit]
uB = universes[args.nsplit:]
print("Calculating D (shape {0} x {1}) with {2} entries".format(
len(uA), len(uB), len(uA) * len(uB)))
D = psa_partial(uA, uB, metric="discrete_frechet", selection="name CA")
timer.tic("PSA distance matrix")
np.save(args.outfile, D)
timer.tic("saving output")
timer.show()
|
Becksteinlab/SPIDAL-MDAnalysis-Midas-tutorial
|
rp/mdanalysis_psa_partial.py
|
Python
|
bsd-3-clause
| 6,248
|
[
"MDAnalysis"
] |
0dba90c4120feb591400f1a0faa55ace335d54a2817a86d18b49e828db59ad09
|
import numpy as np
import natsort
import pandas as pd
import sys, os, re
import time
import netCDF4 as nc
from scipy import integrate
from etl.spa_ncdf import spa_netCDF4
__author__ = 'Rhys Whitley'
__email__ = 'rhys.whitley@gmail.com'
__created__ = "2015-10-15"
__modified__ = time.strftime("%c")
__version__ = '1.0'
__status__ = 'prototype'
class SPAoutput_ETL(object):
"""
A set of functions that perform pre-processing routines on the raw SPA v1
output CSV files. These files are imported into pandas dataframes at a
30min or 1hr time-step. Each output file is saved as a dataframe and then
pickled for later use.
dirpath = directory where spa outputs are located
savepath = directory where pickle and CSVs will be saved
    The files are processed as follows:
1. hourly.csv
2. l1-10.csv
3. soilwater.csv
4. upfrac.csv
"""
def __init__(self):
super(SPAoutput_ETL, self).__init__()
self.start_date = "2001-01-01"
self.time_freq = "30min"
self.soil_depth = None
self.glay = 5 # index in canopy layers where grasses begin
def reduce_canopy(self, data_list):
"""
Generalise the 10 canopy layer files into a dictionary of tree and
grass dataframes
"""
# split layer files into trees and grasses and
# concatenate these together
trees_raw = pd.concat(data_list[:self.glay], axis=0)
grass_raw = pd.concat(data_list[self.glay:], axis=0)
# columns to be summed
not_col = lambda x, y: x.columns - x.columns[y]
sums_col = [1, 2, 6, 8, 9, 13]
mean_col = not_col(trees_raw, sums_col)
# lambda for quick convert and group
convgrp = lambda x: x.convert_objects(convert_numeric=True)\
.groupby(level=0)
# do separate groupbys for sums and means so we can exploit
# the faster cython operability
# trees
trees_sums = convgrp(trees_raw.ix[:, sums_col]).sum()
trees_mean = convgrp(trees_raw.ix[:, mean_col]).mean()
trees_grp = pd.concat([trees_sums, trees_mean], axis=1)
# grasses
grass_sums = convgrp(grass_raw.ix[:, sums_col]).sum()
grass_mean = convgrp(grass_raw.ix[:, mean_col]).mean()
grass_grp = pd.concat([grass_sums, grass_mean], axis=1)
return {'trees' : trees_grp, 'grass' : grass_grp}
def flatten_dictlist(self, dlist):
"""Flattens the tree/grass dictionary into a single dataframe"""
# concatenate the two dataframes
tg_df = pd.concat(dlist, axis=1)
# set new column names
tg_df.columns = ["{0}_{1}".format(h1, h2) for (h1, h2) in tg_df.columns]
# return to user
return tg_df
def import_spa_canopy(self, file_name, clayer):
"""
Imports SPA canopy layer outputs into a pandas dataframe
* adds indexes on time and canopy layer
"""
# read in file
data = pd.read_csv(file_name, sep=r',\s+', engine='python', na_values=["Infinity"])
# remove header whitespace
data.columns = [dc.replace(' ', '') for dc in data.columns]
# add layer label (need to reduce on this)
data['clayer'] = clayer
# re-do the datetime column as it is out by 30 minutes
data['DATE'] = pd.date_range(start=self.start_date, periods=len(data), \
freq=self.time_freq)
# set dataframe index on this column
data.drop('Time', axis=1, inplace=True)
# set new indices
data_dated = data.set_index(['DATE', 'clayer'])
# return to user
return data_dated.bfill()
def import_spa_output(self, file_name):
"""
Imports hourly SPA land-surface flux outputs with the index on the time
column
"""
# read in file
data = pd.read_csv(file_name, sep=r',', engine='python', na_values=["Infinity"])
# remove header whitespace
data.columns = [dc.replace(' ', '') for dc in data.columns]
# re-do the datetime column as it is out by 30 minutes
data['DATE'] = pd.date_range(start=self.start_date, periods=len(data), \
freq=self.time_freq)
# set dataframe index on this column
data_dated = data.set_index(['DATE'])
# return to user
return data_dated.bfill()
def import_soil_profile(self, file_name):
# import the soil profile input file
soil_prof = pd.read_csv(file_name)
# take the first row which has the soil layer thicknesses
soil_layer = soil_prof.ix[0, 1:-1].values.astype('float')
# calculate the cumulative thickness, equivalent to soil depth at each layer
cumul_depth = np.cumsum(soil_layer)
# return to user
return cumul_depth
def load_gasex_raw(self, filepaths):
"""
Special wrapper function to perform a mass import of the SPA canopy
layer outputs from a simulation folders. The canopy layer files are
individually imported and then aggregated into tree and grass
components using the reduce_canopy and flatten functions. These are
then added with the land-surface outputs to simulation ncdf files.
"""
# Import the canopy layer output files from each model experiment
canopy_data = [self.import_spa_canopy(fname, i) \
for (i, fname) in enumerate(filepaths)]
# Reduce and flatten the canopy layers into tree and grass outputs
treegrass_data = self.reduce_canopy(canopy_data)
treegrass_flat = self.flatten_dictlist(treegrass_data)
# Return to user
return treegrass_flat
def load_hourly_raw(self, filepaths):
"""
Wrapper function to perform a mass import of SPA outputs from a
simulation files using Pandas. Returns a dictionary that can be used
later to process outputs into ncdf file.
"""
# import the land-surface output files from each model experiment
land_surf = {fname.split(".")[0].split("/")[-1]: \
self.import_spa_output(fname) \
for fname in filepaths}
return land_surf
def get_spa_filepaths(self, dir_path):
"""
Given the top directory, find all subfolders that contain
outputs from the SPA 1 model.
regex_remove: controls the files that are ignored in the scan
regex_take: controls the subfolders that are search for SPA outputs
Returns file_list, which contains the full paths for each output
for each SPA simulation folder.
"""
# Files to be ignored in the search
files_remove = ['canopy','DS_Store','ci','drivers','energy','iceprop', \
'parcheck','power','waterfluxes','test','solar','daily']
# Turn list into a regex argument
regex_remove = r'^((?!{0}).)*$'.format("|".join(files_remove))
# Subfolders to be searched
regex_take = r'(\binputs\b)|(\boutputs\b)$'
# Number of subfolders
n_subfold = len(regex_take.split("|"))
# Walk down through the directory path looking for SPA files
raw_list = [[os.path.join(dp, f) for f in fn \
if re.search(regex_remove, f)] \
for (dp, dn, fn) in os.walk(dir_path, followlinks=False) \
if re.search(regex_take, dp)]
# Exit program if no files found
if len(raw_list) == 0:
# Echo user
sys.stderr.write('No output files found. Check your FilePath. \n')
sys.exit(-1)
else:
# Attach the two subfolders in each simulation together
file_list = [natsort.natsorted(raw_list[i] + raw_list[i+1]) \
for i in np.arange(0, len(raw_list), n_subfold)]
# Pass back to user
return natsort.natsorted(file_list)
def integrated_mean_soil(self, soil_matrix, soil_depths):
soil_int = lambda y, x: integrate.trapz(y=y, x=x)/max(x)
int_soil_prof = soil_matrix.apply(soil_int, axis=1, args=(soil_depths,))
return int_soil_prof
def process_outputs(self, fpack):
"""
        Given a list of files from a simulation folder, load them using pandas
        and process them into netCDF4 files.
"""
fpath_list, nc_fout = fpack
# Static soil profile path
profpaths = [fp for fp in fpath_list if \
re.search(r'_soils', os.path.basename(fp))]
# Paths for soil profile water fluxes
soilpaths = [fp for fp in fpath_list \
if re.search(r'soil(?!s)|upfrac', os.path.basename(fp))]
# Canopy layer outputs
canopaths = [fp for fp in fpath_list \
if re.search(r'l.*[0-9]+.csv$', os.path.basename(fp))]
# Paths for land-surface fluxes
landpaths = [fp for fp in fpath_list \
if re.search(r'^((?!l\d+|soil|upfrac|temp|phen).)*$', os.path.basename(fp))]
# Canopy layer outputs
canopy10 = self.load_gasex_raw(canopaths)
# Land-surface and meteorology fluxes
landsurf = self.load_hourly_raw(landpaths)
# Below-ground water fluxes
watrprof = self.load_hourly_raw(soilpaths)
# soil layer thicknesses
soil_depths = self.import_soil_profile(profpaths[0])
# Determine the integrated average soil state properties
intProf_swc = self.integrated_mean_soil(watrprof['soilwater'].ix[:, 1:-1], soil_depths)
print("> writing netCDF file: {0}".format(nc_fout))
#import ipdb; ipdb.set_trace()
# Open a NCDF4 file for SPA simulation outputs
nc_file = nc.Dataset(nc_fout, 'w', format='NETCDF4')
# Assign attributes
ncdf_attr = spa_netCDF4()
ncdf_attr.assign_variables(nc_file)
ncdf_attr.assign_units(nc_file)
ncdf_attr.assign_longNames(nc_file)
# Assign values to variables
tseries = pd.timedelta_range(0, periods=len(landsurf['hourly']), freq="1800s") \
.astype('timedelta64[s]')
# Get time from netcdf driver file
nc_file.variables['time'][:] = tseries.values
nc_file.variables['soildepth'][:] = soil_depths
# [Land-surface fluxes]
nc_file.variables['GPP'][:] = landsurf['hourly']['gpp'].values
nc_file.variables['Qle'][:] = landsurf['hourly']['lemod'].values
nc_file.variables['TVeg'][:] = landsurf['hourly']['transle'].values
nc_file.variables['Esoil'][:] = landsurf['hourly']['soille'].values
nc_file.variables['Ecanop'][:] = landsurf['hourly']['wetle'].values
nc_file.variables['AutoResp'][:] = landsurf['hourly']['resp'].values
# [Vegetation fluxes]
nc_file.variables['Atree'][:] = canopy10['trees_Ag'].values
nc_file.variables['Agrass'][:] = canopy10['grass_Ag'].values
nc_file.variables['Rtree'][:] = canopy10['trees_Rd'].values
nc_file.variables['Rgrass'][:] = canopy10['grass_Rd'].values
nc_file.variables['Etree'][:] = canopy10['trees_Et'].values
nc_file.variables['Egrass'][:] = canopy10['grass_Et'].values
nc_file.variables['Gtree'][:] = canopy10['trees_gs'].values
nc_file.variables['Ggrass'][:] = canopy10['grass_gs'].values
nc_file.variables['LAItree'][:] = canopy10['trees_LAI'].values
nc_file.variables['LAIgrass'][:] = canopy10['grass_LAI'].values
# [Soil Profile]
nc_file.variables['SWC20'][:] = watrprof['soilwater']['w1'].values
nc_file.variables['SWC80'][:] = watrprof['soilwater']['w4'].values
nc_file.variables['IntSWC'][:] = intProf_swc.values
nc_file.variables['IntSWP'][:] = watrprof['soilwater']['w_swp'].values
nc_file.variables['SoilMoist'][:] = watrprof['soilwater'].ix[:, 1:-1].values
# Close file
nc_file.close()
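# Usage sketch (illustrative only; the input directory and output file names
# are placeholders, and the actual driver script lives elsewhere in this
# repository):
#
#   etl = SPAoutput_ETL()
#   sim_files = etl.get_spa_filepaths("/path/to/spa/simulations")
#   for i, file_list in enumerate(sim_files):
#       etl.process_outputs((file_list, "spa_sim_{0}.nc".format(i)))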
|
rhyswhitley/savanna_iav
|
src/data_preproc/etl/SPAoutput_ETL.py
|
Python
|
cc0-1.0
| 12,029
|
[
"NetCDF"
] |
2463e9835fc7dfda7b5ac9fcf0b8ff0a296325b65459c64e87c61361b06b0ebf
|
"""Traits-based GUI for head-MRI coregistration"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import traceback
import warnings
import numpy as np
from scipy.spatial.distance import cdist
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, warning, OK, YES, NO, CANCEL,
information, FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo,
Directory, Enum, Float, HasTraits,
HasPrivateTraits, Instance, Int, on_trait_change,
Property, Str)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid,
EnumEditor, Handler, Label, TextEditor)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except Exception:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = Handler = object
cached_property = on_trait_change = MayaviScene = MlabSceneModel =\
Bool = Button = DelegatesTo = Directory = Enum = Float = Instance =\
Int = Property = Str = View = Item = Group = HGroup = VGroup = VGrid =\
EnumEditor = Label = TextEditor = Action = UndoButton = CancelButton =\
NoButtons = SceneEditor = trait_wraith
from ..bem import make_bem_solution, write_bem_solution
from ..coreg import bem_fname, trans_fname
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
translation, scaling, rotation_angles, Transform)
from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
_find_fiducials_files, _point_cloud_error)
from ..utils import get_subjects_dir, logger
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import (set_mne_root, trans_wildcard, InstSource,
SubjectSelectorPanel)
from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
_testing_mode)
laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modelled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(InstSource, ())
# parameters
grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
"head outwards to compensate for hair on the digitizer "
"head shape")
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(1, label="Right (X)")
scale_y = Float(1, label="Anterior (Y)")
scale_z = Float(1, label="Superior (Z)")
rot_x = Float(0, label="Right (X)")
rot_y = Float(0, label="Anterior (Y)")
rot_z = Float(0, label="Superior (Z)")
trans_x = Float(0, label="Right (X)")
trans_y = Float(0, label="Anterior (Y)")
trans_z = Float(0, label="Superior (Z)")
prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
"after scaling the MRI")
# secondary to parameters
scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
'scale_z'])
has_fid_data = Property(Bool, depends_on=['mri_origin', 'hsp.nasion'],
desc="Required fiducials data is present.")
has_pts_data = Property(Bool, depends_on=['mri.points', 'hsp.points'])
# MRI dependent
mri_origin = Property(depends_on=['mri.nasion', 'scale'],
desc="Coordinates of the scaled MRI's nasion.")
# target transforms
mri_scale_trans = Property(depends_on=['scale'])
head_mri_trans = Property(depends_on=['hsp.nasion', 'rot_x', 'rot_y',
'rot_z', 'trans_x', 'trans_y',
'trans_z', 'mri_origin'],
desc="Transformaiton of the head shape to "
"match the scaled MRI.")
# info
subject_has_bem = DelegatesTo('mri')
lock_fiducials = DelegatesTo('mri')
can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
'subject_has_bem'])
can_save = Property(Bool, depends_on=['head_mri_trans'])
raw_subject = Property(depends_on='hsp.inst_fname', desc="Subject guess "
"based on the raw file name.")
# transformed geometry
processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
transformed_mri_points = Property(depends_on=['processed_mri_points',
'mri_scale_trans'])
transformed_hsp_points = Property(depends_on=['hsp.points',
'head_mri_trans'])
transformed_mri_lpa = Property(depends_on=['mri.lpa', 'mri_scale_trans'])
transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
transformed_mri_nasion = Property(depends_on=['mri.nasion',
'mri_scale_trans'])
transformed_hsp_nasion = Property(depends_on=['hsp.nasion',
'head_mri_trans'])
transformed_mri_rpa = Property(depends_on=['mri.rpa', 'mri_scale_trans'])
transformed_hsp_rpa = Property(depends_on=['hsp.rpa', 'head_mri_trans'])
# fit properties
lpa_distance = Property(depends_on=['transformed_mri_lpa',
'transformed_hsp_lpa'])
nasion_distance = Property(depends_on=['transformed_mri_nasion',
'transformed_hsp_nasion'])
rpa_distance = Property(depends_on=['transformed_mri_rpa',
'transformed_hsp_rpa'])
point_distance = Property(depends_on=['transformed_mri_points',
'transformed_hsp_points'])
# fit property info strings
fid_eval_str = Property(depends_on=['lpa_distance', 'nasion_distance',
'rpa_distance'])
points_eval_str = Property(depends_on='point_distance')
@cached_property
def _get_can_prepare_bem_model(self):
return self.subject_has_bem and self.n_scale_params > 0
@cached_property
def _get_can_save(self):
return np.any(self.head_mri_trans != np.eye(4))
@cached_property
def _get_has_pts_data(self):
has = (np.any(self.mri.points) and np.any(self.hsp.points))
return has
@cached_property
def _get_has_fid_data(self):
has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
return has
@cached_property
def _get_scale(self):
if self.n_scale_params == 0:
return np.array(1)
elif self.n_scale_params == 1:
return np.array(self.scale_x)
else:
return np.array([self.scale_x, self.scale_y, self.scale_z])
@cached_property
def _get_mri_scale_trans(self):
if np.isscalar(self.scale) or self.scale.ndim == 0:
if self.scale == 1:
return np.eye(4)
else:
s = self.scale
return scaling(s, s, s)
else:
return scaling(*self.scale)
@cached_property
def _get_mri_origin(self):
if np.isscalar(self.scale) and self.scale == 1:
return self.mri.nasion
else:
return self.mri.nasion * self.scale
@cached_property
def _get_head_mri_trans(self):
if not self.has_fid_data:
return np.eye(4)
# move hsp so that its nasion becomes the origin
x, y, z = -self.hsp.nasion[0]
trans = translation(x, y, z)
# rotate hsp by rotation parameters
rot = rotation(self.rot_x, self.rot_y, self.rot_z)
trans = np.dot(rot, trans)
# move hsp by translation parameters
transl = translation(self.trans_x, self.trans_y, self.trans_z)
trans = np.dot(transl, trans)
# move the hsp origin(/nasion) to the MRI's nasion
x, y, z = self.mri_origin[0]
tgt_mri_trans = translation(x, y, z)
trans = np.dot(tgt_mri_trans, trans)
return trans
@cached_property
def _get_processed_mri_points(self):
if self.grow_hair:
if len(self.mri.norms):
if self.n_scale_params == 0:
scaled_hair_dist = self.grow_hair / 1000
else:
scaled_hair_dist = self.grow_hair / self.scale / 1000
points = self.mri.points.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += self.mri.norms[hair] * scaled_hair_dist
return points
else:
error(None, "Norms missing form bem, can't grow hair")
self.grow_hair = 0
return self.mri.points
@cached_property
def _get_transformed_mri_points(self):
points = apply_trans(self.mri_scale_trans, self.processed_mri_points)
return points
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_scale_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_scale_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_scale_trans, self.mri.rpa)
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.head_mri_trans, self.hsp.points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.head_mri_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.head_mri_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.head_mri_trans, self.hsp.rpa)
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_point_distance(self):
if (len(self.transformed_hsp_points) == 0 or
len(self.transformed_mri_points) == 0):
return
dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
'euclidean')
dists = np.min(dists, 1)
return dists
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
txt = ("Fiducials Error: LPA %.1f mm, NAS %.1f mm, RPA %.1f mm" % d)
return txt
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
av_dist = np.mean(self.point_distance)
return "Average Points Error: %.1f mm" % (av_dist * 1000)
def _get_raw_subject(self):
# subject name guessed based on the inst file name
if '_' in self.hsp.inst_fname:
subject, _ = self.hsp.inst_fname.split('_', 1)
if not subject:
subject = None
else:
subject = None
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance=0, reset=False):
"""Exclude head shape points that are far away from the MRI head
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
this distance. Previously excluded points are still excluded unless
reset=True is specified. A value of distance <= 0 excludes nothing.
reset : bool
Reset the filter before calculating new omission (default is
False).
"""
distance = float(distance)
if reset:
logger.info("Coregistration: Reset excluded head shape points")
with warnings.catch_warnings(record=True): # Traits None comp
self.hsp.points_filter = None
if distance <= 0:
return
# find the new filter
hsp_pts = self.transformed_hsp_points
mri_pts = self.transformed_mri_points
point_distance = _point_cloud_error(hsp_pts, mri_pts)
new_sub_filter = point_distance <= distance
n_excluded = np.sum(new_sub_filter == False) # noqa
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# combine the new filter with the previous filter
old_filter = self.hsp.points_filter
if old_filter is None:
new_filter = new_sub_filter
else:
new_filter = np.ones(len(self.hsp.raw_points), np.bool8)
new_filter[old_filter] = new_sub_filter
# set the filter
with warnings.catch_warnings(record=True): # comp to None in Traits
self.hsp.points_filter = new_filter
def fit_auricular_points(self):
"Find rotation to fit LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
translate=False, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = rot
def fit_fiducials(self):
"Find rotation and translation to fit all 3 fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z)
est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:]
def fit_hsp_points(self):
"Find rotation to fit head shapes"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
tgt_pts *= self.scale
tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
x0=x0)
self.rot_x, self.rot_y, self.rot_z = rot
def fit_scale_auricular_points(self):
"Find rotation and MRI scaling based on LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
scale=1, x0=x0, out='params')
self.scale_x = 1. / x[3]
self.rot_x, self.rot_y, self.rot_z = x[:3]
def fit_scale_fiducials(self):
"Find translation, rotation and scaling based on the three fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z, 1. / self.scale_x,)
est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
scale=1, x0=x0, out='params')
self.scale_x = 1. / est[6]
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:6]
def fit_scale_hsp_points(self):
"Find MRI scaling and rotation to match head shape points"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
if self.n_scale_params == 1:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=1, x0=x0)
self.scale_x = 1. / est[3]
else:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
1. / self.scale_y, 1. / self.scale_z)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=3, x0=x0)
self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]
self.rot_x, self.rot_y, self.rot_z = est[:3]
def get_scaling_job(self, subject_to, skip_fiducials, do_bem_sol):
"Find all arguments needed for the scaling worker"
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_names = []
if do_bem_sol:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name='(.+-bem)')
bem_dir, pattern = os.path.split(pattern)
for filename in os.listdir(bem_dir):
match = re.match(pattern, filename)
if match:
bem_names.append(match.group(1))
return (subjects_dir, subject_from, subject_to, self.scale,
skip_fiducials, bem_names)
def load_trans(self, fname):
"""Load the head-mri transform from a fif file
Parameters
----------
fname : str
File path.
"""
info = read_trans(fname)
head_mri_trans = info['trans']
self.set_trans(head_mri_trans)
def reset(self):
"""Reset all the parameters affecting the coregistration"""
        self.reset_traits(('grow_hair', 'n_scale_params', 'scale_x',
'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
'trans_x', 'trans_y', 'trans_z'))
def set_trans(self, head_mri_trans):
"""Set rotation and translation parameters from a transformation matrix
Parameters
----------
head_mri_trans : array, shape (4, 4)
Transformation matrix from head to MRI space.
"""
x, y, z = -self.mri_origin[0]
mri_tgt_trans = translation(x, y, z)
head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)
x, y, z = self.hsp.nasion[0]
src_hsp_trans = translation(x, y, z)
src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)
rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
x, y, z = src_tgt_trans[:3, 3]
self.rot_x = rot_x
self.rot_y = rot_y
self.rot_z = rot_z
self.trans_x = x
self.trans_y = y
self.trans_z = z
def save_trans(self, fname):
"""Save the head-mri transform as a fif file
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
write_trans(fname, Transform('head', 'mri', self.head_mri_trans))
class CoregFrameHandler(Handler):
"""Handler that checks for unfinished processes before closing its window
"""
def close(self, info, is_ok):
if info.object.queue.unfinished_tasks:
information(None, "Can not close the window while saving is still "
"in progress. Please wait until all MRIs are "
"processed.", "Saving Still in Progress")
return False
else:
return True
class CoregPanel(HasPrivateTraits):
model = Instance(CoregModel)
# parameters
reset_params = Button(label='Reset')
grow_hair = DelegatesTo('model')
n_scale_params = DelegatesTo('model')
scale_step = Float(0.01)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(0.01)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(0.001)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_fid_data = DelegatesTo('model')
has_pts_data = DelegatesTo('model')
# fitting with scaling
fits_hsp_points = Button(label='Fit Head Shape')
fits_fid = Button(label='Fit Fiducials')
fits_ap = Button(label='Fit LPA/RPA')
# fitting without scaling
fit_hsp_points = Button(label='Fit Head Shape')
fit_fid = Button(label='Fit Fiducials')
fit_ap = Button(label='Fit LPA/RPA')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_prepare_bem_model = DelegatesTo('model')
can_save = DelegatesTo('model')
prepare_bem_model = DelegatesTo('model')
save = Button(label="Save As...")
load_trans = Button
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
view = View(VGroup(Item('grow_hair', show_label=True),
Item('n_scale_params', label='MRI Scaling',
style='custom', show_label=True,
editor=EnumEditor(values={0: '1:No Scaling',
1: '2:1 Param',
3: '3:3 Params'},
cols=3)),
VGrid(Item('scale_x', editor=laggy_float_editor,
show_label=True, tooltip="Scale along "
"right-left axis",
enabled_when='n_scale_params > 0'),
Item('scale_x_dec',
enabled_when='n_scale_params > 0'),
Item('scale_x_inc',
enabled_when='n_scale_params > 0'),
Item('scale_step', tooltip="Scaling step",
enabled_when='n_scale_params > 0'),
Item('scale_y', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_y_dec',
enabled_when='n_scale_params > 1'),
Item('scale_y_inc',
enabled_when='n_scale_params > 1'),
Label('(Step)'),
Item('scale_z', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_z_dec',
enabled_when='n_scale_params > 1'),
Item('scale_z_inc',
enabled_when='n_scale_params > 1'),
show_labels=False, columns=4),
HGroup(Item('fits_hsp_points',
enabled_when='n_scale_params',
tooltip="Rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance from each digitizer point to the "
"closest MRI point"),
Item('fits_ap',
enabled_when='n_scale_params == 1',
tooltip="While leaving the nasion in "
"place, rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance of the two auricular points"),
Item('fits_fid',
enabled_when='n_scale_params == 1',
tooltip="Move and rotate the digitizer "
"head shape, and scale the MRI so as to "
"minimize the distance of the three "
"fiducials."),
show_labels=False),
'_',
Label("Translation:"),
VGrid(Item('trans_x', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"right-left axis"),
'trans_x_dec', 'trans_x_inc',
Item('trans_step', tooltip="Movement step"),
Item('trans_y', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_y_dec', 'trans_y_inc',
Label('(Step)'),
Item('trans_z', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_z_dec', 'trans_z_inc',
show_labels=False, columns=4),
Label("Rotation:"),
VGrid(Item('rot_x', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"right-left axis"),
'rot_x_dec', 'rot_x_inc',
Item('rot_step', tooltip="Rotation step"),
Item('rot_y', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_y_dec', 'rot_y_inc',
Label('(Step)'),
Item('rot_z', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_z_dec', 'rot_z_inc',
show_labels=False, columns=4),
# buttons
HGroup(Item('fit_hsp_points',
enabled_when='has_pts_data',
tooltip="Rotate the head shape (around the "
"nasion) so as to minimize the distance "
"from each head shape point to its closest "
"MRI point"),
Item('fit_ap', enabled_when='has_fid_data',
tooltip="Try to match the LPA and the RPA, "
"leaving the Nasion in place"),
Item('fit_fid', enabled_when='has_fid_data',
tooltip="Move and rotate the head shape so "
"as to minimize the distance between the "
"MRI and head shape fiducials"),
Item('load_trans', enabled_when='has_fid_data'),
show_labels=False),
'_',
Item('fid_eval_str', style='readonly'),
Item('points_eval_str', style='readonly'),
'_',
HGroup(Item('prepare_bem_model'),
Label("Run mne_prepare_bem_model"),
show_labels=False,
enabled_when='can_prepare_bem_model'),
HGroup(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if "
"scaling is enabled) the scaled MRI"),
Item('reset_params', tooltip="Reset all "
"coregistration parameters"),
show_labels=False),
Item('queue_feedback', style='readonly'),
Item('queue_current', style='readonly'),
Item('queue_len_str', style='readonly'),
show_labels=False),
kind='panel', buttons=[UndoButton])
def __init__(self, *args, **kwargs):
super(CoregPanel, self).__init__(*args, **kwargs)
# Setup scaling worker
def worker():
while True:
(subjects_dir, subject_from, subject_to, scale, skip_fiducials,
bem_names) = self.queue.get()
self.queue_len -= 1
# Scale MRI files
self.queue_current = 'Scaling %s...' % subject_to
try:
scale_mri(subject_from, subject_to, scale, True,
subjects_dir, skip_fiducials)
except:
logger.error('Error scaling %s:\n' % subject_to +
traceback.format_exc())
self.queue_feedback = ('Error scaling %s (see Terminal)' %
subject_to)
bem_names = () # skip bem solutions
else:
self.queue_feedback = 'Done scaling %s.' % subject_to
# Precompute BEM solutions
for bem_name in bem_names:
self.queue_current = ('Computing %s solution...' %
bem_name)
try:
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to,
name=bem_name)
bemsol = make_bem_solution(bem_file)
write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol)
except:
logger.error('Error computing %s solution:\n' %
bem_name + traceback.format_exc())
self.queue_feedback = ('Error computing %s solution '
'(see Terminal)' % bem_name)
else:
self.queue_feedback = ('Done computing %s solution.' %
bem_name)
# Finalize
self.queue_current = ''
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_src_pts(self):
return self.hsp_pts - self.hsp_fid[0]
@cached_property
def _get_src_fid(self):
return self.hsp_fid - self.hsp_fid[0]
@cached_property
def _get_tgt_origin(self):
return self.mri_fid[0] * self.scale
@cached_property
def _get_tgt_pts(self):
pts = self.mri_pts * self.scale
pts -= self.tgt_origin
return pts
@cached_property
def _get_tgt_fid(self):
fid = self.mri_fid * self.scale
fid -= self.tgt_origin
return fid
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _fit_ap_fired(self):
GUI.set_busy()
self.model.fit_auricular_points()
GUI.set_busy(False)
def _fit_fid_fired(self):
GUI.set_busy()
self.model.fit_fiducials()
GUI.set_busy(False)
def _fit_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_hsp_points()
GUI.set_busy(False)
def _fits_ap_fired(self):
GUI.set_busy()
self.model.fit_scale_auricular_points()
GUI.set_busy(False)
def _fits_fid_fired(self):
GUI.set_busy()
self.model.fit_scale_fiducials()
GUI.set_busy(False)
def _fits_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_scale_hsp_points()
GUI.set_busy(False)
def _n_scale_params_changed(self, new):
if not new:
return
# Make sure that MNE_ROOT environment variable is set
if not set_mne_root(True):
err = ("MNE_ROOT environment variable could not be set. "
"You will be able to scale MRIs, but the "
"mne_prepare_bem_model tool will fail. Please install "
"MNE.")
warning(None, err, "MNE_ROOT Not Set")
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _load_trans_fired(self):
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
subject = self.model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
self.model.load_trans(trans_file)
def _save_fired(self):
subjects_dir = self.model.mri.subjects_dir
subject_from = self.model.mri.subject
# check that fiducials are saved
skip_fiducials = False
if self.n_scale_params and not _find_fiducials_files(subject_from,
subjects_dir):
msg = ("No fiducials file has been found for {src}. If fiducials "
"are not saved, they will not be available in the scaled "
"MRI. Should the current fiducials be saved now? "
"Select Yes to save the fiducials at "
"{src}/bem/{src}-fiducials.fif. "
"Select No to proceed scaling the MRI without fiducials.".
format(src=subject_from))
title = "Save Fiducials for %s?" % subject_from
rc = confirm(None, msg, title, cancel=True, default=CANCEL)
if rc == CANCEL:
return
elif rc == YES:
self.model.mri.save(self.model.mri.default_fid_fname)
elif rc == NO:
skip_fiducials = True
else:
raise RuntimeError("rc=%s" % repr(rc))
# find target subject
if self.n_scale_params:
subject_to = self.model.raw_subject or subject_from
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal')
if not ui.result: # i.e., user pressed cancel
return
subject_to = mridlg.subject_to
else:
subject_to = subject_from
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file += '.fif'
if os.path.exists(trans_file):
answer = confirm(None, "The file %r already exists. Should it "
"be replaced?", "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
self.model.save_trans(trans_file)
except Exception as e:
error(None, "Error saving -trans.fif file: %s (See terminal for "
"details)" % str(e), "Error Saving Trans File")
raise
# save the scaled MRI
if self.n_scale_params:
do_bem_sol = self.can_prepare_bem_model and self.prepare_bem_model
job = self.model.get_scaling_job(subject_to, skip_fiducials,
do_bem_sol)
self.queue.put(job)
self.queue_len += 1
def _scale_x_dec_fired(self):
self.scale_x -= self.scale_step
def _scale_x_inc_fired(self):
self.scale_x += self.scale_step
def _scale_x_changed(self, old, new):
if self.n_scale_params == 1:
self.scale_y = new
self.scale_z = new
def _scale_y_dec_fired(self):
step = 1. / self.scale_step
self.scale_y *= step
def _scale_y_inc_fired(self):
self.scale_y *= self.scale_step
def _scale_z_dec_fired(self):
step = 1. / self.scale_step
self.scale_z *= step
def _scale_z_inc_fired(self):
self.scale_z *= self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
# Dialog to determine target subject name for a scaled MRI
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
width=500,
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_from:
# weird trait state that occurs even when subject_from is set
return
elif not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, scene_width=-1):
"""Create a view for the CoregFrame
Parameters
----------
tabbed : bool
Combine the data source panel and the coregistration panel into a
single panel with tabs.
split : bool
Split the main panels with a movable splitter (good for QT4 but
unnecessary for wx backend).
scene_width : int
Specify a minimum width for the 3d scene (in pixels).
    Returns
-------
view : traits View
View object for the CoregFrame.
"""
view_options = VGroup(Item('headview', style='custom'), 'view_options',
show_border=True, show_labels=False, label='View')
scene = VGroup(Item('scene', show_label=False,
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', width=500),
view_options)
data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),
label="MRI Subject", show_border=True,
show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2,
values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup('hsp_always_visible',
Label("Always Show Head Shape Points"),
show_labels=False),
Item('fid_panel', style='custom'),
label="MRI Fiducials", show_border=True,
show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup(Item('distance', show_label=True),
'omit_points', 'reset_omit_points',
show_labels=False),
Item('omitted_info', style='readonly',
show_label=False),
label='Head Shape Source (Raw/Epochs/Evoked)',
show_border=True, show_labels=False),
show_labels=False, label="Data Source")
coreg_panel = VGroup(Item('coreg_panel', style='custom'),
label="Coregistration", show_border=True,
show_labels=False,
enabled_when="fid_panel.locked")
if split:
main_layout = 'split'
else:
main_layout = 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons)
return view
class ViewOptionsPanel(HasTraits):
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
view = View(VGroup(Item('mri_obj', style='custom', # show_border=True,
label="MRI Head Surface"),
Item('hsp_obj', style='custom', # show_border=True,
label="Head Shape Points")),
title="View Options")
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration
"""
model = Instance(CoregModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
coreg_panel = Instance(CoregPanel)
raw_src = DelegatesTo('model', 'hsp')
# Omit Points
distance = Float(5., label="Distance [mm]", desc="Maximal distance for "
"head shape points from MRI in mm")
omit_points = Button(label='Omit Points', desc="Omit head shape points "
"for the purpose of the automatic coregistration "
"procedure.")
reset_omit_points = Button(label='Reset Omission', desc="Reset the "
"omission of head shape points to include all.")
omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
hsp_always_visible = Bool(False, label="Always Show Head Shape")
# visualization
hsp_obj = Instance(PointObject)
mri_obj = Instance(SurfaceObject)
lpa_obj = Instance(PointObject)
nasion_obj = Instance(PointObject)
rpa_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])
view_options = Button(label="View Options")
picker = Instance(object)
view_options_panel = Instance(ViewOptionsPanel)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
panel = FiducialsPanel(model=self.model.mri, headview=self.headview)
return panel
def _coreg_panel_default(self):
panel = CoregPanel(model=self.model)
return panel
def _headview_default(self):
return HeadViewController(scene=self.scene, system='RAS')
def __init__(self, raw=None, subject=None, subjects_dir=None):
super(CoregFrame, self).__init__()
subjects_dir = get_subjects_dir(subjects_dir)
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if subject is not None:
self.model.mri.subject = subject
if raw is not None:
self.model.hsp.file = raw
@on_trait_change('scene.activated')
def _init_plot(self):
self.scene.disable_render = True
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
color = defaults['mri_color']
self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
color=color, tri=self.model.mri.tris,
scene=self.scene, name="MRI Scalp")
# on_trait_change was unreliable, so link it another way:
self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
mutual=False)
self.fid_panel.hsp_obj = self.mri_obj
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
point_scale=point_scale, name='LPA')
self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)
self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
point_scale=point_scale, name='Nasion')
self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
mutual=False)
self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)
self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
point_scale=point_scale, name='RPA')
self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)
# Digitizer Head Shape
color = defaults['hsp_point_color']
point_scale = defaults['hsp_points_scale']
p = PointObject(view='cloud', scene=self.scene, color=color,
point_scale=point_scale, resolution=5, name='HSP')
self.hsp_obj = p
self.model.hsp.sync_trait('points', p, mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
# Digitizer Fiducials
point_scale = defaults['hsp_fid_scale']
opacity = defaults['hsp_fid_opacity']
p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
point_scale=point_scale, name='HSP-LPA')
self.hsp_lpa_obj = p
self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
point_scale=point_scale, name='HSP-Nasion')
self.hsp_nasion_obj = p
self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
point_scale=point_scale, name='HSP-RPA')
self.hsp_rpa_obj = p
self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
if not _testing_mode():
self.picker = on_pick(self.fid_panel._on_pick, type='cell')
self.headview.left = True
self.scene.disable_render = False
self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
hsp_obj=self.hsp_obj)
@cached_property
def _get_hsp_visible(self):
return self.hsp_always_visible or self.lock_fiducials
@cached_property
def _get_omitted_info(self):
if self.model.hsp.n_omitted == 0:
return "No points omitted"
elif self.model.hsp.n_omitted == 1:
return "1 point omitted"
else:
return "%i points omitted" % self.model.hsp.n_omitted
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(0, True)
@on_trait_change('model.mri.tris')
def _on_mri_src_change(self):
if self.mri_obj is None:
return
if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
self.mri_obj.clear()
return
self.mri_obj.points = self.model.mri.points
self.mri_obj.tri = self.model.mri.tris
self.mri_obj.plot()
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model.mri.fid_file')
def _on_fid_file_loaded(self):
if self.model.mri.fid_file:
self.fid_panel.locked = True
else:
self.fid_panel.locked = False
def _view_options_fired(self):
self.view_options_panel.edit_traits()
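# A minimal usage sketch (illustrative only; the file path, subject name and
# subjects_dir below are placeholders, not shipped data).
def _example_coreg_frame():
    """Open the coregistration GUI for a made-up raw file and subject."""
    frame = CoregFrame(raw='/path/to/raw.fif', subject='sample',
                       subjects_dir='/path/to/subjects_dir')
    frame.configure_traits()
    return frame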
|
alexandrebarachant/mne-python
|
mne/gui/_coreg_gui.py
|
Python
|
bsd-3-clause
| 55,647
|
[
"Mayavi"
] |
a87ff8a2975524734f96810edd1c91b16879b66e534f67d70269283af8f2a612
|
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute of Neurobiology, Munich, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
from PythonQt import QtGui, Qt, QtCore
from PythonQt.QtGui import QTableWidget, QTableWidgetItem
try:
import KnossosModule
except ImportError:
import knossos as KnossosModule
import sys
import requests
import re
import json
sys.dont_write_bytecode = True
import time
from Queue import Queue
from threading import Thread
import numpy as np
class SyConnGateInteraction(object):
"""
Query the SyConn backend server.
"""
def __init__(self, server, synthresh=0.5, axodend_only=True):
self.server = server
self.session = requests.Session()
self.ssv_from_sv_cache = dict()
self.ct_from_cache = dict()
self.ctcertain_from_cache = dict()
self.svs_from_ssv = dict()
self.synthresh = synthresh
self.axodend_only = axodend_only
self.get_download_queue = Queue()
self.init_get_download_queue_worker()
self.get_download_done = Queue()
self.get_download_results_store = dict()
def get_ssv_mesh(self, ssv_id):
"""
Returns a mesh for a given ssv_id.
Parameters
----------
        ssv_id : int
        Returns
        -------
        tuple
            Indices, vertices and normals of the mesh as numpy arrays.
        """
r1 = self.session.get(self.server + '/ssv_ind/{0}'.format(ssv_id))
r2 = self.session.get(self.server + '/ssv_vert/{0}'.format(ssv_id))
r3 = self.session.get(self.server + '/ssv_norm/{0}'.format(ssv_id))
ind = np.array(json.loads(r1.content)['ind'], dtype=np.uint32)
vert = np.array(json.loads(r2.content)['vert'], dtype=np.float32)
norm = np.array(json.loads(r3.content)['norm'], dtype=np.float32)
if len(norm) == 0:
norm = []
return ind, vert, norm
def get_ssv_skel(self, ssv_id):
"""
Returns a skeleton for a given ssv_id.
Parameters
----------
ssv_id : int
Returns
-------
dict
Keys: "nodes", "edges", "diameters"
"""
r = self.session.get(self.server + '/ssv_skeleton/{0}'.format(ssv_id))
skel = json.loads(r.content)
skel["nodes"] = np.array(skel["nodes"], dtype=np.uint32).reshape(-1, 3)
skel_nodes = np.array(skel["nodes"])
skel["nodes"][:, 0] = skel_nodes[:, 1]
skel["nodes"][:, 1] = skel_nodes[:, 0]
skel["edges"] = np.array(skel["edges"], dtype=np.uint32).reshape(-1, 2)
for k in skel:
if k in ['nodes', 'edges']:
continue
skel[k] = np.array(skel[k], dtype=np.float32)
return skel if len(skel) > 0 else None
def init_get_download_queue_worker(self):
"""
Initialize mesh queue daemon workers.
:return:
"""
for i in range(20):
worker = Thread(target=self.get_download_queue_worker)
worker.setDaemon(True)
worker.start()
return
def wait_for_all_downloads(self):
while not self.get_download_done.empty():
time.sleep(0.05)
def get_download_queue_worker(self):
while True:
# this is blocking and therefore fine
get_request = self.get_download_queue.get()
r = self.session.get(self.server + get_request)
self.get_download_results_store[get_request] = r
self.get_download_queue.task_done() # not sure whether this is needed
_ = self.get_download_done.get() # signal download done by removal
return
def add_ssv_obj_mesh_to_down_queue(self, ssv_id, obj_type):
# if this queue is empty, all downloads will be done,
# a poor man's sync mechanism
for i in range(3):
self.get_download_done.put('working')
self.get_download_queue.put('/ssv_obj_ind/{0}/{1}'.format(ssv_id, obj_type))
self.get_download_queue.put('/ssv_obj_vert/{0}/{1}'.format(ssv_id, obj_type))
self.get_download_queue.put('/ssv_obj_norm/{0}/{1}'.format(ssv_id, obj_type))
def get_ssv_obj_mesh_from_results_store(self, ssv_id, obj_type):
ind_hash = '/ssv_obj_ind/{0}/{1}'.format(ssv_id, obj_type)
vert_hash = '/ssv_obj_vert/{0}/{1}'.format(ssv_id, obj_type)
norm_hash = '/ssv_obj_norm/{0}/{1}'.format(ssv_id, obj_type)
ind = np.array(json.loads(self.get_download_results_store[ind_hash].content)['ind'], dtype=np.uint32)
vert = np.array(json.loads(self.get_download_results_store[vert_hash].content)['vert'], dtype=np.float32)
norm = np.array(json.loads(self.get_download_results_store[norm_hash].content)['norm'], dtype=np.float32)
# clean up - could also be extended into some more permanent results cache
self.get_download_results_store.pop(ind_hash, None)
self.get_download_results_store.pop(vert_hash, None)
self.get_download_results_store.pop(norm_hash, None)
return ind, vert, -norm # invert normals
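    # Sketch of the intended call sequence for the queue/worker machinery
    # above (ssv_id and obj_type are caller-chosen; 'mi' is just an example).
    def _example_threaded_mesh_fetch(self, ssv_id, obj_type='mi'):
        """Queue the three mesh requests, block until the workers finish,
        then collect the arrays from the results store."""
        self.add_ssv_obj_mesh_to_down_queue(ssv_id, obj_type)
        self.wait_for_all_downloads()
        return self.get_ssv_obj_mesh_from_results_store(ssv_id, obj_type)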
def get_ssv_obj_mesh(self, ssv_id, obj_type):
"""
Returns a mesh for a given ssv_id and a specified obj_type.
obj_type can be sj, vc, mi ATM.
Parameters
----------
        ssv_id : int
        obj_type : str
        Returns
        -------
        tuple
            Indices, vertices and inverted normals as numpy arrays.
        """
r1 = self.session.get(self.server + '/ssv_obj_ind/{0}/{1}'.format(ssv_id, obj_type))
r2 = self.session.get(self.server + '/ssv_obj_vert/{0}/{1}'.format(ssv_id, obj_type))
r3 = self.session.get(self.server + '/ssv_obj_norm/{0}/{1}'.format(ssv_id, obj_type))
ind = np.array(json.loads(r1.content)['ind'], dtype=np.uint32)
vert = np.array(json.loads(r2.content)['vert'], dtype=np.float32)
norm = np.array(json.loads(r3.content)['norm'], dtype=np.float32)
return ind, vert, -norm # invert normals
def get_list_of_all_ssv_ids(self):
"""
Returns a list of all ssvs in the dataset
Returns
-------
"""
r = self.session.get(self.server + '/ssv_list')
return json.loads(r.content)
def get_svs_of_ssv(self, ssv_id):
"""
Returns a list of all svs of a given ssv.
Parameters
----------
ssv_id
Returns
-------
"""
if ssv_id not in self.svs_from_ssv:
r = self.session.get(self.server + '/svs_of_ssv/{0}'.format(ssv_id))
self.svs_from_ssv[ssv_id] = json.loads(r.content)
return self.svs_from_ssv[ssv_id]
def get_ssv_of_sv(self, sv_id):
"""
Gets the ssv for a given sv.
Parameters
----------
sv_id
Returns
-------
"""
if sv_id not in self.ssv_from_sv_cache:
start = time.time()
r = self.session.get(self.server + '/ssv_of_sv/{0}'.format(sv_id))
self.ssv_from_sv_cache[sv_id] = json.loads(r.content)
print('Get ssv of sv {} without cache took {}'.format(sv_id, time.time() - start))
return self.ssv_from_sv_cache[sv_id]
def get_celltype_of_ssv(self, ssv_id):
"""
Get SSV cell type if available.
Parameters
----------
ssv_id : int
Returns
-------
str
"""
# if not ssv_id in self.ct_from_cache:
r = self.session.get(self.server + '/ct_of_ssv/{0}'.format(ssv_id))
dc = json.loads(r.content)
self.ct_from_cache[ssv_id] = dc["ct"]
if 'certainty' in dc:
certainty = '{:.3f}'.format(dc["certainty"])
else:
certainty = 'nan'
self.ctcertain_from_cache[ssv_id] = certainty
print("Celltype: {}".format(self.ct_from_cache[ssv_id]))
return self.ct_from_cache[ssv_id], certainty
    def get_all_syn_meta_data(self):
        """
        Load meta data of all synapses from the server, filtered by the
        synapse probability threshold and the axo-dendritic-only setting.
        Returns
        -------
        dict
        """
params = {'synthresh': self.synthresh, 'axodend_only': self.axodend_only}
r = self.session.get('{}/all_syn_meta/{}'.format(self.server, json.dumps(params)))
return json.loads(r.content)
def push_so_attr(self, so_id, so_type, attr_key, attr_value):
"""
        Will invoke `so.save_attributes([attr_key], [attr_value])` of
`so = SegmentationDataset(obj_type=so_type).get_segmentation_object(so_id)`
on the server.
Parameters
----------
so_id :
so_type :
attr_key :
attr_value :
Returns
-------
str | bytes
Server response
"""
r = self.session.get(self.server + '/push_so_attr/{}/{}/{}/{}'.format(
so_id, so_type, attr_key, attr_value))
return r.content
def pull_so_attr(self, so_id, so_type, attr_key):
"""
Will invoke `so.save_attributes([attr_key], [attr_value)` of
`so = SegmentationDataset(obj_type=so_type).get_segmentation_object(so_id)`
on the server.
Parameters
----------
so_id :
so_type :
attr_key :
Returns
-------
str | bytes
Server response
"""
r = self.session.get(self.server + '/pull_so_attr/{}/{}/{}'.format(
so_id, so_type, attr_key))
return r.content
class InputDialog(QtGui.QDialog):
"""
https://stackoverflow.com/questions/7046882/launch-a-pyqt-window-from-a-main-pyqt-window-and-get-the-user-input
inputter = InputDialog(mainWindowUI, title="comments", label="comments", text="")
inputter.exec_()
comment = inputter.text.text()
print comment
"""
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.aborted = False
# --Layout Stuff---------------------------#
mainLayout = QtGui.QVBoxLayout()
layout = QtGui.QHBoxLayout()
self.label = QtGui.QLabel()
self.label.setText("port")
layout.addWidget(self.label)
self.text = QtGui.QLineEdit("10001")
layout.addWidget(self.text)
self.label_dataset = QtGui.QLabel()
self.label_dataset.setText("Dataset")
layout.addWidget(self.label_dataset)
self.text_dataset = QtGui.QLineEdit("j0251")
layout.addWidget(self.text_dataset)
self.ip = QtGui.QLabel()
self.ip.setText("host")
layout.addWidget(self.ip)
self.text_ip = QtGui.QLineEdit("localhost")
layout.addWidget(self.text_ip)
mainLayout.addLayout(layout)
layout = QtGui.QHBoxLayout()
self.synapse_tresh = QtGui.QLabel()
self.synapse_tresh.setText("Syn. prob. thresh.")
layout.addWidget(self.synapse_tresh)
self.text_synthresh = QtGui.QLineEdit("0.5")
layout.addWidget(self.text_synthresh)
self.axodend_button = QtGui.QPushButton("Axo-dendr. syn. only")
self.axodend_button.setCheckable(True)
self.axodend_button.toggle()
layout.addWidget(self.axodend_button)
mainLayout.addLayout(layout)
# --The Button------------------------------#
layout = QtGui.QHBoxLayout()
button = QtGui.QPushButton("connect") # string or icon
self.connect(button, QtCore.SIGNAL("clicked()"), self.close)
layout.addWidget(button)
button = QtGui.QPushButton("abort") # string or icon
self.connect(button, QtCore.SIGNAL("clicked()"), self.abort_button_clicked)
layout.addWidget(button)
mainLayout.addLayout(layout)
self.setLayout(mainLayout)
self.resize(450, 300)
self.setWindowTitle("SyConnGate Settings")
def abort_button_clicked(self):
print('Closing SyConnGate.')
self.aborted = True
self.close()
class main_class(QtGui.QDialog):
"""
KNOSSOS plugin class for the SyConn KNOSSOS viewer.
"""
def __init__(self, parent=KnossosModule.knossos_global_mainwindow):
#Qt.QApplication.processEvents()
super(main_class, self).__init__(parent, Qt.Qt.WA_DeleteOnClose)
try:
exec(KnossosModule.scripting.getInstanceInContainerStr(__name__)
+ " = self")
except KeyError:
# Allow running from __main__ context
pass
# get port
while True:
inputter = InputDialog(parent)
inputter.exec_()
if inputter.aborted:
return
port = int(inputter.text.text.decode())
host = inputter.text_ip.text.decode()
dataset = inputter.text_dataset.text.decode()
self._synthresh = float(inputter.text_synthresh.text.decode())
self._axodend_only = inputter.axodend_button.isChecked()
self.syconn_gate = None
self.host = host
self.port = port
self.ssv_selected1 = 0
self.syn_selected1 = None
self.obj_tree_ids = set()
self.obj_id_offs = 2000000000
self.all_syns = None
self.dataset_ident = dataset
try:
self.init_syconn()
self.build_gui()
#self.timer = QtCore.QTimer()
#self.timer.timeout.connect(self.exploration_mode_callback_check)
#self.timer.start(1000)
#self.timer2 = QtCore.QTimer()
#self.timer2.timeout.connect(self.release_gil_hack)
#self.timer2.start(50)
break
except requests.exceptions.ConnectionError as e:
print("Failed to establish connection to SyConn Server.", str(e))
pass
def release_gil_hack(self):
time.sleep(0.01)
return
def init_syconn(self):
# move to config file
syconn_gate_server = 'http://{}:{}'.format(self.host, self.port)
self.syconn_gate = SyConnGateInteraction(syconn_gate_server,
self._synthresh,
self._axodend_only)
def populate_ssv_list(self):
all_ssv_ids = self.syconn_gate.get_list_of_all_ssv_ids()['ssvs']
for ssv_id in all_ssv_ids:
item = QtGui.QStandardItem(str(int(ssv_id)))
self.ssv_item_model.appendRow(item)
self.ssv_selector.setModel(self.ssv_item_model)
return
def populate_syn_list(self):
        self.all_syns = self.syconn_gate.get_all_syn_meta_data()
for syn in zip(self.all_syns['ssv_partner_0'], self.all_syns['ssv_partner_1']):
item = QtGui.QStandardItem(str(syn))
self.syn_item_model.appendRow(item)
self.syn_selector.setModel(self.syn_item_model)
return
def on_ssv_selector_changed(self, index):
self.ssv_selected1 = int(self.ssv_selector.model().itemData(index)[0])
#current, previous
#print('selected: ' + str(self.ssv_selector.model().itemData(index)[0]))
#ssv = self.get_ssv(self.ssv_selected1)
#self.ssv_to_knossos(ssv)
return
def on_syn_selector_changed(self, index, signal_block=True):
"""
`all_syns` contains the following keys:
cd_dict['syn_size'] =\
csd.load_numpy_data('mesh_area') / 2 # as used in syn_analysis.py -> export_matrix
cd_dict['synaptivity_proba'] = \
csd.load_numpy_data('syn_prob')
cd_dict['coord_x'] = \
csd.load_numpy_data('rep_coord')[:, 0].astype(np.int)
cd_dict['coord_y'] = \
csd.load_numpy_data('rep_coord')[:, 1].astype(np.int)
cd_dict['coord_z'] = \
csd.load_numpy_data('rep_coord')[:, 2].astype(np.int)
cd_dict['ssv_partner_0'] = \
csd.load_numpy_data('neuron_partners')[:, 0].astype(np.int)
cd_dict['ssv_partner_1'] = \
csd.load_numpy_data('neuron_partners')[:, 1].astype(np.int)
cd_dict['neuron_partner_ax_0'] = \
csd.load_numpy_data('partner_axoness')[:, 0].astype(np.int)
cd_dict['neuron_partner_ax_1'] = \
csd.load_numpy_data('partner_axoness')[:, 1].astype(np.int)
cd_dict['neuron_partner_ct_0'] = \
csd.load_numpy_data('partner_celltypes')[:, 0].astype(np.int)
cd_dict['neuron_partner_ct_1'] = \
csd.load_numpy_data('partner_celltypes')[:, 1].astype(np.int)
cd_dict['neuron_partner_sp_0'] = \
csd.load_numpy_data('partner_spiness')[:, 0].astype(np.int)
cd_dict['neuron_partner_sp_1'] = \
csd.load_numpy_data('partner_spiness')[:, 1].astype(np.int)
Parameters
----------
index :
signal_block :
Returns
-------
"""
# disable knossos signal emission first - O(n^2) otherwise
if signal_block:
signalsBlocked = KnossosModule.knossos_global_skeletonizer.blockSignals(
True)
inp_str = self.syn_selector.model().itemData(index)[0]
ssv1 = int(re.findall(r'\((\d+),', inp_str)[0])
ssv2 = int(re.findall(r', (\d+)\)', inp_str)[0])
ix = index.row()
tree_id = hash((ssv1, ssv2))
syn_id = self.all_syns['ids'][ix]
self._currently_active_syn = syn_id
# TODO: pull_so_attr and writing its results to `synapsetype_label_text` should run as a thread
syn_gt_syntype = ""
# syn_gt_syntype = self.syconn_gate.pull_so_attr(so_id=syn_id, so_type='syn_ssv',
# attr_key='gt_syntype')
if len(syn_gt_syntype) == 0:
self.synapsetype_label_text.clear()
else:
self.synapsetype_label_text.setText(syn_gt_syntype)
c = [self.all_syns['coord_x'][ix], self.all_syns['coord_y'][ix],
self.all_syns['coord_z'][ix]]
k_tree = KnossosModule.skeleton.find_tree_by_id(tree_id)
if k_tree is None:
k_tree = KnossosModule.skeleton.add_tree(tree_id)
# add synapse location
kn = KnossosModule.skeleton.add_node([c[0] + 1, c[1] + 1, c[2] + 1], k_tree, {})
KnossosModule.skeleton.jump_to_node(kn)
# syn properties
syn_size = self.all_syns["syn_size"][ix]
syn_size = np.abs(syn_size)
# coordinate
self.synapse_field1.setItem(0, 1, QTableWidgetItem(str(c)))
# synapse type
self.synapse_field1.setItem(1, 1, QTableWidgetItem(str(self.all_syns['syn_sign'][ix])))
# synaptic probability
self.synapse_field1.setItem(2, 1, QTableWidgetItem(str(self.all_syns["synaptivity_proba"][ix])))
# synaptic size (area in um^2)
self.synapse_field1.setItem(3, 1, QTableWidgetItem(str(syn_size)))
# object ID
self.synapse_field1.setItem(4, 1, QTableWidgetItem(str(syn_id)))
# pre- and post synaptic properties
# IDs
self.synapse_field2.setItem(1, 1, QTableWidgetItem(str(self.all_syns["ssv_partner_0"][ix])))
self.synapse_field2.setItem(1, 2, QTableWidgetItem(str(self.all_syns["ssv_partner_1"][ix])))
# cell type
gt_type_ct = 'ctgt_v2' if 'j0251' not in self.dataset_ident else 'ctgt_j0251_v2'
self.synapse_field2.setItem(2, 1, QTableWidgetItem(int2str_label_converter(self.all_syns["neuron_partner_ct_0"][ix], gt_type_ct)))
self.synapse_field2.setItem(2, 2, QTableWidgetItem(int2str_label_converter(self.all_syns["neuron_partner_ct_1"][ix], gt_type_ct)))
# cell compartments
self.synapse_field2.setItem(3, 1, QTableWidgetItem(int2str_label_converter(self.all_syns["neuron_partner_ax_0"][ix], "axgt")))
self.synapse_field2.setItem(3, 2, QTableWidgetItem(int2str_label_converter(self.all_syns["neuron_partner_ax_1"][ix], "axgt")))
# cell compartments
self.synapse_field2.setItem(4, 1, QTableWidgetItem(int2str_label_converter(self.all_syns["neuron_partner_sp_0"][ix], "spgt")))
self.synapse_field2.setItem(4, 2, QTableWidgetItem(int2str_label_converter(self.all_syns["neuron_partner_sp_1"][ix], "spgt")))
# enable signals again
if signal_block:
KnossosModule.knossos_global_skeletonizer.blockSignals(
signalsBlocked)
KnossosModule.knossos_global_skeletonizer.resetData()
return
def build_gui(self):
self.setWindowFlags(Qt.Qt.Window)
layout = QtGui.QGridLayout()
layout.setSpacing(10)
# Window layout
#layout = QtGui.QVBoxLayout()
self.setLayout(layout)
self.show_button_neurite = QtGui.QPushButton('Show neurite')
self.show_button_selected_neurite = QtGui.QPushButton('Add selected neurite(s)')
self.show_button_synapse = QtGui.QPushButton('Show synapse')
self.clear_knossos_view_button = QtGui.QPushButton('Clear view')
self.ssv_selector = QtGui.QListView()
self.ssv_selector.setUniformItemSizes(True) # better performance
self.ssv_item_model = QtGui.QStandardItemModel(self.ssv_selector)
self.syn_selector = QtGui.QListView()
self.syn_selector.setUniformItemSizes(True) # better performance
self.syn_item_model = QtGui.QStandardItemModel(self.syn_selector)
self.direct_ssv_id_input = QtGui.QLineEdit()
self.direct_ssv_id_input.setValidator(QtGui.QIntValidator())
self.direct_syn_id_input = QtGui.QLineEdit()
self.direct_syn_id_input.setValidator(QtGui.QIntValidator())
# celltype
self.celltype_field = QtGui.QLabel("CellType: ", self)
# synapse
self.synapse_field1 = QTableWidget()
self.synapse_field1.setRowCount(5)
self.synapse_field1.setColumnCount(2)
self.synapse_field1.setItem(0, 0, QTableWidgetItem("coordinate"))
self.synapse_field1.setItem(0, 1, QTableWidgetItem(""))
self.synapse_field1.setItem(1, 0, QTableWidgetItem("synaptic type"))
self.synapse_field1.setItem(1, 1, QTableWidgetItem(""))
self.synapse_field1.setItem(2, 0, QTableWidgetItem("syn. proba."))
self.synapse_field1.setItem(2, 1, QTableWidgetItem(""))
self.synapse_field1.setItem(3, 0, QTableWidgetItem("size [um^2]"))
self.synapse_field1.setItem(3, 1, QTableWidgetItem(""))
self.synapse_field1.setItem(4, 0, QTableWidgetItem("Object ID"))
self.synapse_field1.setItem(4, 1, QTableWidgetItem(""))
# self.synapse_field1.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers) # qt5
header = self.synapse_field1.horizontalHeader()
header.setSectionResizeMode(0, QtGui.QHeaderView.Stretch)
header.setSectionResizeMode(1, QtGui.QHeaderView.ResizeToContents)
self.synapse_field1.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.synapse_field2 = QTableWidget()
self.synapse_field2.setRowCount(5)
self.synapse_field2.setColumnCount(3)
# TODO: sort by pre and post in 'on_syn_selector_changed' and replace neuron1 and neuron2 by pre and post
self.synapse_field2.setItem(0, 1, QTableWidgetItem("neuron 1"))
self.synapse_field2.setItem(0, 2, QTableWidgetItem("neuron 2"))
self.synapse_field2.setItem(1, 0, QTableWidgetItem("SSV ID"))
self.synapse_field2.setItem(2, 0, QTableWidgetItem("cell type"))
self.synapse_field2.setItem(3, 0, QTableWidgetItem("cell comp."))
self.synapse_field2.setItem(4, 0, QTableWidgetItem("spiness"))
# self.synapse_field2.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers) # qt5
self.synapse_field2.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
header = self.synapse_field2.horizontalHeader()
header.setSectionResizeMode(0, QtGui.QHeaderView.Stretch)
header.setSectionResizeMode(1, QtGui.QHeaderView.ResizeToContents)
header.setSectionResizeMode(2, QtGui.QHeaderView.ResizeToContents)
self.send_synapsetype_label_button = QtGui.QPushButton('Send')
self.synapsetype_label = QtGui.QLabel()
self.synapsetype_label.setText("Synapse type label [-1: inhib.; 0: non-syn.; 1: "
"excit.]:")
self.synapsetype_label_text = QtGui.QLineEdit()
self.send_button_response_label = QtGui.QLabel()
self.send_button_response_label.setText(None)
#self.exploration_mode_chk_box = QtGui.QCheckBox('Exploration mode')
#self.exploration_mode_chk_box.setChecked(True)
#self.ssv_selection_model =
# QtGui.QItemSelectionModel(self.ssv_select_model)
#self.selectionModel =
# self.ssv_selector.selectionModel(self.ssv_selector)
#self.ssv_selector.setSelectionModel(self.ssv_selection_model)
#print('selection model: ' + str(self.ssv_selector.selectionModel()))
self.ssv_selector.clicked.connect(self.on_ssv_selector_changed)
self.syn_selector.clicked.connect(self.on_syn_selector_changed)
self.populate_ssv_list()
self.populate_syn_list()
print('Connected to SyConnGate.')
layout.addWidget(self.direct_ssv_id_input, 1, 0, 1, 1)
layout.addWidget(self.direct_syn_id_input, 1, 1, 1, 1)
layout.addWidget(self.ssv_selector, 2, 0, 1, 1)
layout.addWidget(self.syn_selector, 2, 1, 1, 1)
layout.addWidget(self.show_button_neurite, 3, 0, 1, 1)
layout.addWidget(self.show_button_synapse, 3, 1, 1, 1)
layout.addWidget(self.clear_knossos_view_button, 4, 0, 1, 1)
layout.addWidget(self.show_button_selected_neurite, 5, 0, 1, 1)
layout.addWidget(self.celltype_field, 1, 2, 1, 2)
layout.addWidget(self.synapse_field1, 2, 2, 1, 1)
layout.addWidget(self.synapse_field2, 3, 2, 1, 1)
layout.addWidget(self.synapsetype_label, 4, 1, 1, 1)
layout.addWidget(self.synapsetype_label_text, 4, 2, 1, 2)
layout.addWidget(self.send_button_response_label, 5, 1, 1, 1)
layout.addWidget(self.send_synapsetype_label_button, 5, 2, 1, 1)
#self.ssv_select_model.itemChanged.connect(self.on_ssv_selector_changed)
#self.selectionModel.selectionChanged.connect(self.on_ssv_selector_changed)
self.show_button_neurite.clicked.connect(self.show_button_neurite_clicked)
self.show_button_selected_neurite.clicked.connect(self.show_button_selected_neurite_clicked)
self.show_button_synapse.clicked.connect(self.show_button_synapse_clicked)
self.clear_knossos_view_button.clicked.connect(self.clear_knossos_view_button_clicked)
self.send_synapsetype_label_button.clicked.connect(self.send_synapsetype_label_button_clicked)
#self.exploration_mode_chk_box.stateChanged.connect(self.exploration_mode_changed)
# self.setGeometry(300, 300, 450, 300)
self.setWindowTitle('SyConn Viewer v2 ({}:{})'.format(self.host, self.port))
self.show()
#self.merge_button = QtGui.QPushButton('Merge')
#self.split_button = QtGui.QPushButton('Split')
# QList < quint64 > subobjectIdsOfObject(const
# quint64
# objId);
# QList < quint64 > objects();
# QList < quint64 > selectedObjects();
#self.bad_button = QtGui.QPushButton('Bad SV')
#self.graph_split_button = QtGui.QPushButton('Graph split')
#self.add_selected_sv_button = QtGui.QPushButton('Add selected SV')
#self.mode_combo = QtGui.QComboBox()
#self.stop_button = QtGui.QPushButton('Stop')
#self.undo_button = QtGui.QPushButton('Undo')
#self.redo_button = QtGui.QPushButton('Redo')
#self.skip_task_line_edit = QtGui.QLineEdit('Skip reason')
#self.server_line_edit = QtGui.QLineEdit()
#self.password_line_edit = QtGui.QLineEdit()
# set echo mode to QLineEdit::PasswordEchoOnEdit
#self.gui_auto_agglo_line_edit = QtGui.QLineEdit()
#self.gui_auto_agglo_line_edit.setText('0')
#def exploration_mode_changed(self):
# if self.exploration_mode_chk_box.isChecked():
# pass
# enable selection polling timer
# else:
# pass
# disable selection polling timer
def exploration_mode_callback_check(self):
#if self.exploration_mode_chk_box.isChecked():
#print('expl')
sel_seg_objs = KnossosModule.segmentation.selected_objects()
if len(sel_seg_objs) == 0:
return
sel_sv_ids = []
for sel_seg_obj in sel_seg_objs:
sel_sv_ids.append(KnossosModule.segmentation.subobject_ids_of_object(sel_seg_obj)[0])
trees = KnossosModule.skeleton.trees()
ids_in_k = set([tree.tree_id() for tree in trees if
tree.tree_id() < self.obj_id_offs])
# get selected ssv ids
# this should be done for all ids at once to improve speed
ssv_ids_selected = [self.syconn_gate.get_ssv_of_sv(sv_id)['ssv'] for sv_id in sel_sv_ids] #if not sv_id in ids_in_k
# ssv_id is 0 for a supervoxel that is unconnected, add support for single svs
ssv_ids_selected = [ssv_id for ssv_id in ssv_ids_selected if ssv_id != 0]
#print('ssv_ids_selected {0}'.format(ssv_ids_selected))
#print('self.obj_tree_ids {0}'.format(self.obj_tree_ids))
#print('ids_in_k 1 {0}'.format(ids_in_k))
#print('ids_in_k 2 {0}'.format(ids_in_k))
# compare with the selected segmentation objects
ids_selected = set(ssv_ids_selected)
# add missing ones to knossos, delete if not needed anymore
ids_to_add = ids_selected - ids_in_k
ids_to_del = ids_in_k - ids_selected
# remove segmentation objects that are not needed anymore
all_objects = KnossosModule.segmentation.objects()
objs_to_del = set(all_objects) - set(ids_selected)
[KnossosModule.segmentation.remove_object(obj) for obj in objs_to_del]
#print('ids to del {0} ids to add {1}'.format(ids_to_del, ids_to_add))
#print('ids_selected {0}'.format(ids_selected))
self.ids_selected = ids_selected
[self.remove_ssv_from_knossos(ssv_id) for ssv_id in ids_to_del]
[self.ssv_to_knossos(ssv_id) for ssv_id in ids_to_add]
[self.ssv_skel_to_knossos_tree(ssv_id) for ssv_id in ids_to_add]
[self.update_celltype(ssv_id) for ssv_id in ids_to_add]
#if len(ids_in_k) != 1 or len(ids_to_del) > 0:
# [KnossosModule.skeleton.delete_tree(sv_id) for sv_id in
# self.obj_tree_ids]
# self.obj_tree_ids = set()
return
def remove_ssv_from_knossos(self, ssv_id):
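        # NOTE: the bare `return` below short-circuits this method, so the
        # tree/mesh removal code that follows is currently unreachable.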
return
KnossosModule.skeleton.delete_tree(ssv_id)
# check whether there are object meshes that need to be deleted as well
trees = KnossosModule.skeleton.trees()
obj_mesh_ids = set([tree.tree_id() for tree in trees if
tree.tree_id() > self.obj_id_offs])
for i in range(1, 4):
obj_id_to_test = ssv_id + self.obj_id_offs + i
if obj_id_to_test in obj_mesh_ids:
KnossosModule.skeleton.delete_tree(obj_id_to_test)
def show_button_selected_neurite_clicked(self):
self.exploration_mode_callback_check()
def show_button_neurite_clicked(self):
try:
ssvs = [x.strip() for x in self.direct_ssv_id_input.text.split(',')]
ssvs = map(int, ssvs)
except:
ssvs = []
for ssv in ssvs:
self.ssv_to_knossos(ssv)
self.ssv_skel_to_knossos_tree(ssv)
self.update_celltype(ssv)
self.ssv_selected1 = ssv
return
def show_button_synapse_clicked(self):
try:
self.syn_selected1 = int(self.direct_syn_id_input.text)
except:
pass
# TODO
if self.syn_selected1:
# TODO: could be optimized: currently we need to get the index,
# and in on_syn_selector_changed the synapse ID is retrieved again
syn_ix = self.syn_item_model.index(self.all_syns['ids'].index(self.syn_selected1), 0)
self.on_syn_selector_changed(syn_ix)
return
def clear_knossos_view_button_clicked(self):
# delete all existing objects in mergelist
all_objects = KnossosModule.segmentation.objects()
[KnossosModule.segmentation.remove_object(obj) for obj in all_objects]
# iterate over all trees in knossos and delete
trees = KnossosModule.skeleton.trees()
ids_in_k = set([tree.tree_id() for tree in trees])
[KnossosModule.skeleton.delete_tree(sv_id) for sv_id in ids_in_k]
return
def send_synapsetype_label_button_clicked(self):
syntype_label = self.synapsetype_label_text.text.decode()
if not syntype_label in ["-1", "0", "1"]:
self.send_button_response_label.setText("INVALID LABEL '{}'".format(syntype_label))
else:
# TODO: parse syn_ssv ID from currently clicked synapse
curr_syn_id = self._currently_active_syn
r = ""
# r = self.syconn_gate.push_so_attr(so_id=str(curr_syn_id), so_type='syn_ssv',
# attr_key='gt_syntype_viewer',
# attr_value=syntype_label)
if len(r) == 0:
r = "push successful."
self.send_button_response_label.setText(r)
return
def update_celltype(self, ssv_id):
ct, certainty = self.syconn_gate.get_celltype_of_ssv(ssv_id)
self.celltype_field.setText("CellType: {} ({})".format(ct, certainty))
def ssv_to_knossos(self, ssv_id):
start_tot = time.time()
#self.clear_knossos_view_button_clicked()
# to mergelist
start = time.time()
sv_ids = self.syconn_gate.get_svs_of_ssv(ssv_id)['svs']
print('Get svs of ssv took {}'.format(time.time()-start))
sv_ids = map(int, sv_ids)
KnossosModule.segmentation.create_object(ssv_id, sv_ids[0], (1,1,1))
#KnossosModule.segmentation.select_object(ssv.id)
# query object should be red
#KnossosModule.segmentation.changeColor(ssv_id, QtGui.QColor(255, 0, 0, 255))
# one could cache this, not necessary to rebuild at every step
for sv_id in sv_ids:
try:
KnossosModule.segmentation.add_subobject(ssv_id, sv_id)
except:
pass
KnossosModule.segmentation.select_object(ssv_id)
KnossosModule.segmentation.set_render_only_selected_objs(True)
# create a 'fake' knossos tree for each obj mesh category;
# this is very hacky since it can generate nasty ID collisions.
mi_id = self.obj_id_offs + ssv_id + 1
syn_id = self.obj_id_offs + ssv_id + 2
sym_id = self.obj_id_offs + ssv_id + 3
asym_id = self.obj_id_offs + ssv_id + 4
vc_id = self.obj_id_offs + ssv_id + 5
neuron_id = self.obj_id_offs + ssv_id + 6
params = [(self, ssv_id, neuron_id, 'sv', (128, 128, 128, 128)),
(self, ssv_id, mi_id, 'mi', (0, 153, 255, 255)),
(self, ssv_id, vc_id, 'vc', (int(0.175 * 255), int(0.585 * 255), int(0.301 * 255), 255)),
(self, ssv_id, syn_id, 'syn_ssv', (240, 50, 50, 255)),]
# (self, ssv_id, sym_id, 'syn_ssv_sym', (50, 50, 240, 255)),
# (self, ssv_id, asym_id, 'syn_ssv_asym', (240, 50, 50, 255))]
start = time.time()
# add all meshes to download queue
for par in params:
mesh_loader_threaded(*par)
# wait for downloads
self.syconn_gate.wait_for_all_downloads()
print('Mesh download took {}'.format(time.time() - start))
start = time.time()
# add all to knossos
for par in params:
mesh_to_K(*par)
print('Mesh to K took {}'.format(time.time() - start))
return
def ssv_skel_to_knossos_tree(self, ssv_id, signal_block=True):
# disable knossos signal emission first - O(n^2) otherwise
start = time.time()
if signal_block:
signalsBlocked = KnossosModule.knossos_global_skeletonizer.blockSignals(
True)
try:
k_tree = KnossosModule.skeleton.find_tree_by_id(ssv_id)
if k_tree is None:
k_tree = KnossosModule.skeleton.add_tree(ssv_id)
skel = self.syconn_gate.get_ssv_skel(ssv_id)
#skel = None
if skel is None:
print("Loaded skeleton is None.")
return
# add nodes
nx_knossos_id_map = dict()
for ii, n_coord in enumerate(skel["nodes"]):
# newsk_node.from_scratch(newsk_anno, nx_coord[1]+1, nx_coord[0]+1, nx_coord[2]+1, ID=nx_node)
                n_properties = {}
                for k in skel:
                    if k in ["nodes", "edges", "diameters"]:
                        continue
                    n_properties[k] = float(skel[k][ii])
                k_node = KnossosModule.skeleton.add_node(
                    [n_coord[1] + 1, n_coord[0] + 1, n_coord[2] + 1], k_tree,
                    n_properties)
KnossosModule.skeleton.set_radius(k_node.node_id(),
skel["diameters"][ii] / 2)
nx_knossos_id_map[ii] = k_node.node_id()
# add edges
for nx_src, nx_tgt in skel["edges"]:
KnossosModule.skeleton.add_segment(nx_knossos_id_map[nx_src],
nx_knossos_id_map[nx_tgt])
finally:
# enable signals again
if signal_block:
KnossosModule.knossos_global_skeletonizer.blockSignals(
signalsBlocked)
KnossosModule.knossos_global_skeletonizer.resetData()
print('Skel down and to K took {}'.format(time.time()-start))
return
def mesh_loader(gate_obj, ssv_id, tree_id, obj_type, color):
start = time.time()
mesh = gate_obj.syconn_gate.get_ssv_obj_mesh(ssv_id, obj_type)
print("Download time:", time.time() - start)
start = time.time()
if len(mesh[0]) > 0:
KnossosModule.skeleton.add_tree_mesh(tree_id, mesh[1], mesh[2],
mesh[0],
[], 4, False)
KnossosModule.skeleton.set_tree_color(tree_id,
QtGui.QColor(*color))
print("Loading {}-mesh time (pure KNOSSOS): {:.2f} s".format(
obj_type, time.time() - start))
def mesh_loader_threaded(gate_obj, ssv_id, tree_id, obj_type, color):
gate_obj.syconn_gate.add_ssv_obj_mesh_to_down_queue(ssv_id, obj_type)
def mesh_to_K(gate_obj, ssv_id, tree_id, obj_type, color):
mesh = gate_obj.syconn_gate.get_ssv_obj_mesh_from_results_store(ssv_id, obj_type)
if len(mesh[0]) > 0:
KnossosModule.skeleton.add_tree_mesh(tree_id, mesh[1], mesh[2],
mesh[0],
[], 4, False)
KnossosModule.skeleton.set_tree_color(tree_id,
QtGui.QColor(*color))
def int2str_label_converter(label, gt_type):
"""
Converts integer label into semantic string.
Parameters
----------
label : int
gt_type : str
e.g. spgt for spines, axgt for cell compartments or ctgt for cell type
Returns
-------
str
"""
if type(label) is list:
if len(label) != 1:
raise ValueError('Multiple labels given.')
label = label[0]
if gt_type == "axgt":
if label == 1:
return "axon"
elif label == 0:
return "dendrite"
elif label == 2:
return "soma"
else:
return "N/A"
elif gt_type == "spgt":
if label == 1:
return "head"
elif label == 0:
return "neck"
elif label == 2:
return "shaft"
elif label == 3:
return "other"
else:
return "N/A"
elif gt_type == 'ctgt':
if label == 1:
return "MSN"
elif label == 0:
return "EA"
elif label == 2:
return "GP"
elif label == 3:
return "INT"
else:
return "N/A"
elif gt_type == 'ctgt_v2':
l_dc_inv = dict(STN=0, modulatory=1, MSN=2, LMAN=3, HVC=4, GP=5, INT=6)
l_dc = {v: k for k, v in l_dc_inv.items()}
try:
return l_dc[label]
except KeyError:
print('Unknown label "{}"'.format(label))
return "N/A"
elif gt_type == 'ctgt_v2_old':
l_dc_inv = dict(STN=0, DA=1, MSN=2, LMAN=3, HVC=4, GP=5, FS=6, TAN=7)
l_dc_inv["?"] = 8
l_dc = {v: k for k, v in l_dc_inv.items()}
# Do not distinguish between FS and INT/?
l_dc[8] = "INT"
l_dc[6] = "INT"
        try:
            return l_dc[label]
        except KeyError:
            print('Unknown label "{}"'.format(label))
            return "N/A"
elif gt_type == 'ctgt_j0251':
str2int_label = dict(STN=0, DA=1, MSN=2, LMAN=3, HVC=4, TAN=5, GPe=6, GPi=7,
FS=8, LTS=9)
int2str_label = {v: k for k, v in str2int_label.items()}
return int2str_label[label]
elif gt_type == 'ctgt_j0251_v2':
str2int_label = dict(STN=0, DA=1, MSN=2, LMAN=3, HVC=4, TAN=5, GPe=6, GPi=7,
FS=8, LTS=9, NGF=10)
int2str_label = {v: k for k, v in str2int_label.items()}
return int2str_label[label]
else:
raise ValueError("Given ground truth type is not valid.")
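# A few illustrative conversions (labels picked arbitrarily for demonstration;
# not used by the viewer itself).
def _label_converter_examples():
    assert int2str_label_converter(1, 'axgt') == 'axon'
    assert int2str_label_converter(2, 'spgt') == 'shaft'
    assert int2str_label_converter(5, 'ctgt_j0251_v2') == 'TAN'
    return True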
if __name__ == '__main__':
A = main_class()
|
StructuralNeurobiologyLab/SyConn
|
syconn/analysis/syconn_knossos_viewer.py
|
Python
|
gpl-2.0
| 41,864
|
[
"NEURON"
] |
b72553805a4c9fce4e4b4a4215b1e4eb1eb9a171310f0561beda8fbfa8ded346
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Grégory Starck, g.starck@gmail.com
# Olivier Hanesse, olivier.hanesse@gmail.com
# Jean Gabes, naparuba@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
import json
import hashlib
import base64
import socket
from alignak.log import logger
# For old users python-crypto was not mandatory, don't break their setup
try:
from Crypto.Cipher import AES
except ImportError:
    logger.error('Cannot find python lib crypto: export to kernel.alignak.io is not available')
AES = None
from alignak.http_client import HTTPClient, HTTPException
BLOCK_SIZE = 16
def pad(data):
pad = BLOCK_SIZE - len(data) % BLOCK_SIZE
return data + pad * chr(pad)
def unpad(padded):
pad = ord(padded[-1])
return padded[:-pad]
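# Quick illustration (not used elsewhere): pad() appends chr(pad_len) bytes so
# the length becomes a multiple of BLOCK_SIZE, and unpad() strips them again.
def _pad_roundtrip_example(data='abc'):
    padded = pad(data)                     # 'abc' + 13 * chr(13) -> 16 chars
    assert len(padded) % BLOCK_SIZE == 0
    return unpad(padded)                   # -> 'abc'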
class Stats(object):
def __init__(self):
self.name = ''
self.type = ''
self.app = None
self.stats = {}
# There are two modes that are not exclusive
# first the kernel mode
self.api_key = ''
self.secret = ''
self.http_proxy = ''
self.con = HTTPClient(uri='http://kernel.alignak.io')
# then the statsd one
self.statsd_sock = None
self.statsd_addr = None
def launch_reaper_thread(self):
self.reaper_thread = threading.Thread(None, target=self.reaper, name='stats-reaper')
self.reaper_thread.daemon = True
self.reaper_thread.start()
def register(self, app, name, _type, api_key='', secret='', http_proxy='',
statsd_host='localhost', statsd_port=8125, statsd_prefix='alignak',
statsd_enabled=False):
self.app = app
self.name = name
self.type = _type
# kernel.io part
self.api_key = api_key
self.secret = secret
self.http_proxy = http_proxy
# local statsd part
self.statsd_host = statsd_host
self.statsd_port = statsd_port
self.statsd_prefix = statsd_prefix
self.statsd_enabled = statsd_enabled
if self.statsd_enabled:
logger.debug('Loading statsd communication with %s:%s.%s',
self.statsd_host, self.statsd_port, self.statsd_prefix)
self.load_statsd()
# Also load the proxy if need
self.con.set_proxy(self.http_proxy)
# Let be crystal clear about why I don't use the statsd lib in python: it's crappy.
# how guys did you fuck this up to this point? django by default for the conf?? really?...
# So raw socket are far better here
def load_statsd(self):
try:
self.statsd_addr = (socket.gethostbyname(self.statsd_host), self.statsd_port)
self.statsd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except (socket.error, socket.gaierror), exp:
logger.error('Cannot create statsd socket: %s' % exp)
return
# Will increment a stat key, if None, start at 0
def incr(self, k, v):
_min, _max, nb, _sum = self.stats.get(k, (None, None, 0, 0))
nb += 1
_sum += v
if _min is None or v < _min:
_min = v
if _max is None or v > _max:
_max = v
self.stats[k] = (_min, _max, nb, _sum)
# Manage local statd part
if self.statsd_sock and self.name:
# beware, we are sending ms here, v is in s
packet = '%s.%s.%s:%d|ms' % (self.statsd_prefix, self.name, k, v * 1000)
try:
self.statsd_sock.sendto(packet, self.statsd_addr)
except (socket.error, socket.gaierror), exp:
pass # cannot send? ok not a huge problem here and cannot
# log because it will be far too verbose :p
def _encrypt(self, data):
m = hashlib.md5()
m.update(self.secret)
key = m.hexdigest()
m = hashlib.md5()
m.update(self.secret + key)
iv = m.hexdigest()
data = pad(data)
aes = AES.new(key, AES.MODE_CBC, iv[:16])
encrypted = aes.encrypt(data)
return base64.urlsafe_b64encode(encrypted)
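    # Sketch of the matching decryption, for reference only (the reaper itself
    # only encrypts); it assumes python-crypto is importable and mirrors the
    # secret-derived key/iv scheme used in _encrypt above.
    def _decrypt_example(self, data):
        m = hashlib.md5()
        m.update(self.secret)
        key = m.hexdigest()
        m = hashlib.md5()
        m.update(self.secret + key)
        iv = m.hexdigest()
        aes = AES.new(key, AES.MODE_CBC, iv[:16])
        return unpad(aes.decrypt(base64.urlsafe_b64decode(data)))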
def reaper(self):
while True:
now = int(time.time())
stats = self.stats
self.stats = {}
if len(stats) != 0:
s = ', '.join(['%s:%s' % (k, v) for (k, v) in stats.iteritems()])
                # If we are not in an initialized daemon we skip: without a
                # real name it is hard to find the data afterwards
if not self.name or not self.api_key or not self.secret:
time.sleep(60)
continue
metrics = []
for (k, e) in stats.iteritems():
nk = '%s.%s.%s' % (self.type, self.name, k)
_min, _max, nb, _sum = e
_avg = float(_sum) / nb
                # nb can't be 0 here and _min/_max can't be None either
s = '%s.avg %f %d' % (nk, _avg, now)
metrics.append(s)
s = '%s.min %f %d' % (nk, _min, now)
metrics.append(s)
s = '%s.max %f %d' % (nk, _max, now)
metrics.append(s)
s = '%s.count %f %d' % (nk, nb, now)
metrics.append(s)
# logger.debug('REAPER metrics to send %s (%d)' % (metrics, len(str(metrics))) )
# get the inner data for the daemon
struct = self.app.get_stats_struct()
struct['metrics'].extend(metrics)
# logger.debug('REAPER whole struct %s' % struct)
j = json.dumps(struct)
if AES is not None and self.secret != '':
logger.debug('Stats PUT to kernel.alignak.io/api/v1/put/ with %s %s' % (
self.api_key, self.secret))
                # the message is padded to a multiple of 16 bytes inside _encrypt()
encrypted_text = self._encrypt(j)
try:
r = self.con.put('/api/v1/put/?api_key=%s' % (self.api_key), encrypted_text)
except HTTPException, exp:
logger.error('Stats REAPER cannot put to the metric server %s' % exp)
time.sleep(60)
statsmgr = Stats()
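def _example_register(app):
    # Illustrative only: how a daemon might wire up the global stats manager.
    # `app` is assumed to expose get_stats_struct(); name/type/host are made up.
    statsmgr.register(app, 'scheduler-1', 'scheduler',
                      statsd_host='localhost', statsd_port=8125,
                      statsd_prefix='alignak', statsd_enabled=False)
    statsmgr.launch_reaper_thread()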
|
ddurieux/alignak
|
alignak/stats.py
|
Python
|
agpl-3.0
| 7,812
|
[
"CRYSTAL"
] |
afe1309db71ed51adaa03a98919dbedb9d925f30067ca0c1cca45222c4e8ca21
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
================
desiutil.sklearn
================
Useful functions from the sklearn python package.
"""
class GaussianMixtureModel(object):
"""Read and sample from a pre-defined Gaussian mixture model.
Parameters
----------
weights : :class:`numpy.ndarray`
A 1D array of weights. The length of the array is the number of
components
means : :class:`numpy.ndarray`
A 2D array of means. The number of rows is the number of components.
The number of columns is the number of dimensions.
covars : :class:`numpy.ndarray`
A 3D array of covariances. The first dimension is the number of
components. Each component has a 2D array with size given by the
number of dimensions.
covtype : :class:`str`, optional
Type of covariance. Defaults to 'full'.
"""
def __init__(self, weights, means, covars, covtype='full'):
self.weights = weights
self.means = means
self.covars = covars
self.covtype = covtype
self.n_components, self.n_dimensions = self.means.shape
@staticmethod
def save(model, filename):
"""Save a model to a file.
Parameters
----------
model : :class:`desiutil.sklearn.GaussianMixtureModel`
The model to be saved.
filename : :class:`str`
The name of the file to save to.
"""
from astropy.io import fits
hdus = fits.HDUList()
hdr = fits.Header()
try:
hdr['covtype'] = model.covariance_type
hdus.append(fits.ImageHDU(model.weights_, name='weights', header=hdr))
hdus.append(fits.ImageHDU(model.means_, name='means'))
hdus.append(fits.ImageHDU(model.covariances_, name='covars'))
except AttributeError:
hdr['covtype'] = model.covtype
hdus.append(fits.ImageHDU(model.weights, name='weights', header=hdr))
hdus.append(fits.ImageHDU(model.means, name='means'))
hdus.append(fits.ImageHDU(model.covars, name='covars'))
hdus.writeto(filename, overwrite=True)
@staticmethod
def load(filename):
"""Load a model from a file.
Parameters
----------
filename : :class:`str`
The name of the file to load from.
Returns
-------
:class:`desiutil.sklearn.GaussianMixtureModel`
The model that was in `filename`.
"""
from astropy.io import fits
hdus = fits.open(filename, memmap=False)
hdr = hdus[0].header
covtype = hdr['covtype']
model = GaussianMixtureModel(hdus['weights'].data, hdus['means'].data,
hdus['covars'].data, covtype)
hdus.close()
return model
def sample(self, n_samples=1, random_state=None):
"""Sample from a model.
Parameters
----------
n_samples : :class:`int`, optional
Number of samples to return, default 1.
random_state : :class:`numpy.random.RandomState`, optional
A random state object.
Returns
-------
:class:`numpy.ndarray`
An array containing the samples.
Raises
------
ValueError
If the covariance type is unknown.
"""
import numpy as np
if self.covtype != 'full':
raise ValueError(('Covariance type "{0}" is not yet ' +
'implemented.').format(self.covtype))
# Code adapted from sklearn's GMM.sample()
if random_state is None:
random_state = np.random.RandomState()
weight_cdf = np.cumsum(self.weights)
X = np.empty((n_samples, self.n_dimensions))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
X[comp_in_X] = random_state.multivariate_normal(
self.means[comp], self.covars[comp], num_comp_in_X)
return X
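# Illustrative only: build a small two-component model with made-up parameters
# and draw a handful of samples from it.
def _example_gmm_sampling():
    import numpy as np
    weights = np.array([0.6, 0.4])
    means = np.array([[0., 0.], [5., 5.]])
    covars = np.array([np.eye(2), 0.5 * np.eye(2)])
    model = GaussianMixtureModel(weights, means, covars)
    return model.sample(n_samples=10,
                        random_state=np.random.RandomState(0))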
|
desihub/desiutil
|
py/desiutil/sklearn.py
|
Python
|
bsd-3-clause
| 4,488
|
[
"Gaussian"
] |
175d2cd9f396a9ae673ccd267c13eac54edf9af0ab53e8aa6227b2f6ca569562
|
#!/usr/bin/env python
#
# Code related to ESET's Linux/Moose research
# For feedback or questions contact us at: github@eset.com
# https://github.com/eset/malware-research/
#
# This code is provided to the community under the two-clause BSD license as
# follows:
#
# Copyright (C) 2015 ESET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Olivier Bilodeau <bilodeau@eset.com>
#
# Place a comment in IDA describing the syscall for MIPS with o32 ABI
#
# example syscall:
# move $a0, $s0
# li $a1, 9
# li $v0, 4037
# syscall 0 # kill
# 4037 is the syscall number which is kill
#
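# Illustrative helper (not used by the script itself): resolve a raw syscall
# number such as 4037 to its name via the table defined below, i.e. "__NR_kill".
def _describe_syscall(num):
    return syscall_table.get(num, "unknown syscall %d" % num)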
DEBUG = False
# syscalls taken from:
# http://git.linux-mips.org/cgit/ralf/linux.git/plain/arch/mips/include/uapi/asm/unistd.h
# and processed with syscalls-to-dict.sh and then inlined here
__NR_Linux = 4000
syscall_table = dict()
syscall_table[(__NR_Linux+0)] = "__NR_syscall"
syscall_table[(__NR_Linux+1)] = "__NR_exit"
syscall_table[(__NR_Linux+2)] = "__NR_fork"
syscall_table[(__NR_Linux+3)] = "__NR_read"
syscall_table[(__NR_Linux+4)] = "__NR_write"
syscall_table[(__NR_Linux+5)] = "__NR_open"
syscall_table[(__NR_Linux+6)] = "__NR_close"
syscall_table[(__NR_Linux+7)] = "__NR_waitpid"
syscall_table[(__NR_Linux+8)] = "__NR_creat"
syscall_table[(__NR_Linux+9)] = "__NR_link"
syscall_table[(__NR_Linux+10)] = "__NR_unlink"
syscall_table[(__NR_Linux+11)] = "__NR_execve"
syscall_table[(__NR_Linux+12)] = "__NR_chdir"
syscall_table[(__NR_Linux+13)] = "__NR_time"
syscall_table[(__NR_Linux+14)] = "__NR_mknod"
syscall_table[(__NR_Linux+15)] = "__NR_chmod"
syscall_table[(__NR_Linux+16)] = "__NR_lchown"
syscall_table[(__NR_Linux+17)] = "__NR_break"
syscall_table[(__NR_Linux+18)] = "__NR_unused18"
syscall_table[(__NR_Linux+19)] = "__NR_lseek"
syscall_table[(__NR_Linux+20)] = "__NR_getpid"
syscall_table[(__NR_Linux+21)] = "__NR_mount"
syscall_table[(__NR_Linux+22)] = "__NR_umount"
syscall_table[(__NR_Linux+23)] = "__NR_setuid"
syscall_table[(__NR_Linux+24)] = "__NR_getuid"
syscall_table[(__NR_Linux+25)] = "__NR_stime"
syscall_table[(__NR_Linux+26)] = "__NR_ptrace"
syscall_table[(__NR_Linux+27)] = "__NR_alarm"
syscall_table[(__NR_Linux+28)] = "__NR_unused28"
syscall_table[(__NR_Linux+29)] = "__NR_pause"
syscall_table[(__NR_Linux+30)] = "__NR_utime"
syscall_table[(__NR_Linux+31)] = "__NR_stty"
syscall_table[(__NR_Linux+32)] = "__NR_gtty"
syscall_table[(__NR_Linux+33)] = "__NR_access"
syscall_table[(__NR_Linux+34)] = "__NR_nice"
syscall_table[(__NR_Linux+35)] = "__NR_ftime"
syscall_table[(__NR_Linux+36)] = "__NR_sync"
syscall_table[(__NR_Linux+37)] = "__NR_kill"
syscall_table[(__NR_Linux+38)] = "__NR_rename"
syscall_table[(__NR_Linux+39)] = "__NR_mkdir"
syscall_table[(__NR_Linux+40)] = "__NR_rmdir"
syscall_table[(__NR_Linux+41)] = "__NR_dup"
syscall_table[(__NR_Linux+42)] = "__NR_pipe"
syscall_table[(__NR_Linux+43)] = "__NR_times"
syscall_table[(__NR_Linux+44)] = "__NR_prof"
syscall_table[(__NR_Linux+45)] = "__NR_brk"
syscall_table[(__NR_Linux+46)] = "__NR_setgid"
syscall_table[(__NR_Linux+47)] = "__NR_getgid"
syscall_table[(__NR_Linux+48)] = "__NR_signal"
syscall_table[(__NR_Linux+49)] = "__NR_geteuid"
syscall_table[(__NR_Linux+50)] = "__NR_getegid"
syscall_table[(__NR_Linux+51)] = "__NR_acct"
syscall_table[(__NR_Linux+52)] = "__NR_umount2"
syscall_table[(__NR_Linux+53)] = "__NR_lock"
syscall_table[(__NR_Linux+54)] = "__NR_ioctl"
syscall_table[(__NR_Linux+55)] = "__NR_fcntl"
syscall_table[(__NR_Linux+56)] = "__NR_mpx"
syscall_table[(__NR_Linux+57)] = "__NR_setpgid"
syscall_table[(__NR_Linux+58)] = "__NR_ulimit"
syscall_table[(__NR_Linux+59)] = "__NR_unused59"
syscall_table[(__NR_Linux+60)] = "__NR_umask"
syscall_table[(__NR_Linux+61)] = "__NR_chroot"
syscall_table[(__NR_Linux+62)] = "__NR_ustat"
syscall_table[(__NR_Linux+63)] = "__NR_dup2"
syscall_table[(__NR_Linux+64)] = "__NR_getppid"
syscall_table[(__NR_Linux+65)] = "__NR_getpgrp"
syscall_table[(__NR_Linux+66)] = "__NR_setsid"
syscall_table[(__NR_Linux+67)] = "__NR_sigaction"
syscall_table[(__NR_Linux+68)] = "__NR_sgetmask"
syscall_table[(__NR_Linux+69)] = "__NR_ssetmask"
syscall_table[(__NR_Linux+70)] = "__NR_setreuid"
syscall_table[(__NR_Linux+71)] = "__NR_setregid"
syscall_table[(__NR_Linux+72)] = "__NR_sigsuspend"
syscall_table[(__NR_Linux+73)] = "__NR_sigpending"
syscall_table[(__NR_Linux+74)] = "__NR_sethostname"
syscall_table[(__NR_Linux+75)] = "__NR_setrlimit"
syscall_table[(__NR_Linux+76)] = "__NR_getrlimit"
syscall_table[(__NR_Linux+77)] = "__NR_getrusage"
syscall_table[(__NR_Linux+78)] = "__NR_gettimeofday"
syscall_table[(__NR_Linux+79)] = "__NR_settimeofday"
syscall_table[(__NR_Linux+80)] = "__NR_getgroups"
syscall_table[(__NR_Linux+81)] = "__NR_setgroups"
syscall_table[(__NR_Linux+82)] = "__NR_reserved82"
syscall_table[(__NR_Linux+83)] = "__NR_symlink"
syscall_table[(__NR_Linux+84)] = "__NR_unused84"
syscall_table[(__NR_Linux+85)] = "__NR_readlink"
syscall_table[(__NR_Linux+86)] = "__NR_uselib"
syscall_table[(__NR_Linux+87)] = "__NR_swapon"
syscall_table[(__NR_Linux+88)] = "__NR_reboot"
syscall_table[(__NR_Linux+89)] = "__NR_readdir"
syscall_table[(__NR_Linux+90)] = "__NR_mmap"
syscall_table[(__NR_Linux+91)] = "__NR_munmap"
syscall_table[(__NR_Linux+92)] = "__NR_truncate"
syscall_table[(__NR_Linux+93)] = "__NR_ftruncate"
syscall_table[(__NR_Linux+94)] = "__NR_fchmod"
syscall_table[(__NR_Linux+95)] = "__NR_fchown"
syscall_table[(__NR_Linux+96)] = "__NR_getpriority"
syscall_table[(__NR_Linux+97)] = "__NR_setpriority"
syscall_table[(__NR_Linux+98)] = "__NR_profil"
syscall_table[(__NR_Linux+99)] = "__NR_statfs"
syscall_table[(__NR_Linux+100)] = "__NR_fstatfs"
syscall_table[(__NR_Linux+101)] = "__NR_ioperm"
syscall_table[(__NR_Linux+102)] = "__NR_socketcall"
syscall_table[(__NR_Linux+103)] = "__NR_syslog"
syscall_table[(__NR_Linux+104)] = "__NR_setitimer"
syscall_table[(__NR_Linux+105)] = "__NR_getitimer"
syscall_table[(__NR_Linux+106)] = "__NR_stat"
syscall_table[(__NR_Linux+107)] = "__NR_lstat"
syscall_table[(__NR_Linux+108)] = "__NR_fstat"
syscall_table[(__NR_Linux+109)] = "__NR_unused109"
syscall_table[(__NR_Linux+110)] = "__NR_iopl"
syscall_table[(__NR_Linux+111)] = "__NR_vhangup"
syscall_table[(__NR_Linux+112)] = "__NR_idle"
syscall_table[(__NR_Linux+113)] = "__NR_vm86"
syscall_table[(__NR_Linux+114)] = "__NR_wait4"
syscall_table[(__NR_Linux+115)] = "__NR_swapoff"
syscall_table[(__NR_Linux+116)] = "__NR_sysinfo"
syscall_table[(__NR_Linux+117)] = "__NR_ipc"
syscall_table[(__NR_Linux+118)] = "__NR_fsync"
syscall_table[(__NR_Linux+119)] = "__NR_sigreturn"
syscall_table[(__NR_Linux+120)] = "__NR_clone"
syscall_table[(__NR_Linux+121)] = "__NR_setdomainname"
syscall_table[(__NR_Linux+122)] = "__NR_uname"
syscall_table[(__NR_Linux+123)] = "__NR_modify_ldt"
syscall_table[(__NR_Linux+124)] = "__NR_adjtimex"
syscall_table[(__NR_Linux+125)] = "__NR_mprotect"
syscall_table[(__NR_Linux+126)] = "__NR_sigprocmask"
syscall_table[(__NR_Linux+127)] = "__NR_create_module"
syscall_table[(__NR_Linux+128)] = "__NR_init_module"
syscall_table[(__NR_Linux+129)] = "__NR_delete_module"
syscall_table[(__NR_Linux+130)] = "__NR_get_kernel_syms"
syscall_table[(__NR_Linux+131)] = "__NR_quotactl"
syscall_table[(__NR_Linux+132)] = "__NR_getpgid"
syscall_table[(__NR_Linux+133)] = "__NR_fchdir"
syscall_table[(__NR_Linux+134)] = "__NR_bdflush"
syscall_table[(__NR_Linux+135)] = "__NR_sysfs"
syscall_table[(__NR_Linux+136)] = "__NR_personality"
syscall_table[(__NR_Linux+137)] = "__NR_afs_syscall"
syscall_table[(__NR_Linux+138)] = "__NR_setfsuid"
syscall_table[(__NR_Linux+139)] = "__NR_setfsgid"
syscall_table[(__NR_Linux+140)] = "__NR__llseek"
syscall_table[(__NR_Linux+141)] = "__NR_getdents"
syscall_table[(__NR_Linux+142)] = "__NR__newselect"
syscall_table[(__NR_Linux+143)] = "__NR_flock"
syscall_table[(__NR_Linux+144)] = "__NR_msync"
syscall_table[(__NR_Linux+145)] = "__NR_readv"
syscall_table[(__NR_Linux+146)] = "__NR_writev"
syscall_table[(__NR_Linux+147)] = "__NR_cacheflush"
syscall_table[(__NR_Linux+148)] = "__NR_cachectl"
syscall_table[(__NR_Linux+149)] = "__NR_sysmips"
syscall_table[(__NR_Linux+150)] = "__NR_unused150"
syscall_table[(__NR_Linux+151)] = "__NR_getsid"
syscall_table[(__NR_Linux+152)] = "__NR_fdatasync"
syscall_table[(__NR_Linux+153)] = "__NR__sysctl"
syscall_table[(__NR_Linux+154)] = "__NR_mlock"
syscall_table[(__NR_Linux+155)] = "__NR_munlock"
syscall_table[(__NR_Linux+156)] = "__NR_mlockall"
syscall_table[(__NR_Linux+157)] = "__NR_munlockall"
syscall_table[(__NR_Linux+158)] = "__NR_sched_setparam"
syscall_table[(__NR_Linux+159)] = "__NR_sched_getparam"
syscall_table[(__NR_Linux+160)] = "__NR_sched_setscheduler"
syscall_table[(__NR_Linux+161)] = "__NR_sched_getscheduler"
syscall_table[(__NR_Linux+162)] = "__NR_sched_yield"
syscall_table[(__NR_Linux+163)] = "__NR_sched_get_priority_max"
syscall_table[(__NR_Linux+164)] = "__NR_sched_get_priority_min"
syscall_table[(__NR_Linux+165)] = "__NR_sched_rr_get_interval"
syscall_table[(__NR_Linux+166)] = "__NR_nanosleep"
syscall_table[(__NR_Linux+167)] = "__NR_mremap"
syscall_table[(__NR_Linux+168)] = "__NR_accept"
syscall_table[(__NR_Linux+169)] = "__NR_bind"
syscall_table[(__NR_Linux+170)] = "__NR_connect"
syscall_table[(__NR_Linux+171)] = "__NR_getpeername"
syscall_table[(__NR_Linux+172)] = "__NR_getsockname"
syscall_table[(__NR_Linux+173)] = "__NR_getsockopt"
syscall_table[(__NR_Linux+174)] = "__NR_listen"
syscall_table[(__NR_Linux+175)] = "__NR_recv"
syscall_table[(__NR_Linux+176)] = "__NR_recvfrom"
syscall_table[(__NR_Linux+177)] = "__NR_recvmsg"
syscall_table[(__NR_Linux+178)] = "__NR_send"
syscall_table[(__NR_Linux+179)] = "__NR_sendmsg"
syscall_table[(__NR_Linux+180)] = "__NR_sendto"
syscall_table[(__NR_Linux+181)] = "__NR_setsockopt"
syscall_table[(__NR_Linux+182)] = "__NR_shutdown"
syscall_table[(__NR_Linux+183)] = "__NR_socket"
syscall_table[(__NR_Linux+184)] = "__NR_socketpair"
syscall_table[(__NR_Linux+185)] = "__NR_setresuid"
syscall_table[(__NR_Linux+186)] = "__NR_getresuid"
syscall_table[(__NR_Linux+187)] = "__NR_query_module"
syscall_table[(__NR_Linux+188)] = "__NR_poll"
syscall_table[(__NR_Linux+189)] = "__NR_nfsservctl"
syscall_table[(__NR_Linux+190)] = "__NR_setresgid"
syscall_table[(__NR_Linux+191)] = "__NR_getresgid"
syscall_table[(__NR_Linux+192)] = "__NR_prctl"
syscall_table[(__NR_Linux+193)] = "__NR_rt_sigreturn"
syscall_table[(__NR_Linux+194)] = "__NR_rt_sigaction"
syscall_table[(__NR_Linux+195)] = "__NR_rt_sigprocmask"
syscall_table[(__NR_Linux+196)] = "__NR_rt_sigpending"
syscall_table[(__NR_Linux+197)] = "__NR_rt_sigtimedwait"
syscall_table[(__NR_Linux+198)] = "__NR_rt_sigqueueinfo"
syscall_table[(__NR_Linux+199)] = "__NR_rt_sigsuspend"
syscall_table[(__NR_Linux+200)] = "__NR_pread64"
syscall_table[(__NR_Linux+201)] = "__NR_pwrite64"
syscall_table[(__NR_Linux+202)] = "__NR_chown"
syscall_table[(__NR_Linux+203)] = "__NR_getcwd"
syscall_table[(__NR_Linux+204)] = "__NR_capget"
syscall_table[(__NR_Linux+205)] = "__NR_capset"
syscall_table[(__NR_Linux+206)] = "__NR_sigaltstack"
syscall_table[(__NR_Linux+207)] = "__NR_sendfile"
syscall_table[(__NR_Linux+208)] = "__NR_getpmsg"
syscall_table[(__NR_Linux+209)] = "__NR_putpmsg"
syscall_table[(__NR_Linux+210)] = "__NR_mmap2"
syscall_table[(__NR_Linux+211)] = "__NR_truncate64"
syscall_table[(__NR_Linux+212)] = "__NR_ftruncate64"
syscall_table[(__NR_Linux+213)] = "__NR_stat64"
syscall_table[(__NR_Linux+214)] = "__NR_lstat64"
syscall_table[(__NR_Linux+215)] = "__NR_fstat64"
syscall_table[(__NR_Linux+216)] = "__NR_pivot_root"
syscall_table[(__NR_Linux+217)] = "__NR_mincore"
syscall_table[(__NR_Linux+218)] = "__NR_madvise"
syscall_table[(__NR_Linux+219)] = "__NR_getdents64"
syscall_table[(__NR_Linux+220)] = "__NR_fcntl64"
syscall_table[(__NR_Linux+221)] = "__NR_reserved221"
syscall_table[(__NR_Linux+222)] = "__NR_gettid"
syscall_table[(__NR_Linux+223)] = "__NR_readahead"
syscall_table[(__NR_Linux+224)] = "__NR_setxattr"
syscall_table[(__NR_Linux+225)] = "__NR_lsetxattr"
syscall_table[(__NR_Linux+226)] = "__NR_fsetxattr"
syscall_table[(__NR_Linux+227)] = "__NR_getxattr"
syscall_table[(__NR_Linux+228)] = "__NR_lgetxattr"
syscall_table[(__NR_Linux+229)] = "__NR_fgetxattr"
syscall_table[(__NR_Linux+230)] = "__NR_listxattr"
syscall_table[(__NR_Linux+231)] = "__NR_llistxattr"
syscall_table[(__NR_Linux+232)] = "__NR_flistxattr"
syscall_table[(__NR_Linux+233)] = "__NR_removexattr"
syscall_table[(__NR_Linux+234)] = "__NR_lremovexattr"
syscall_table[(__NR_Linux+235)] = "__NR_fremovexattr"
syscall_table[(__NR_Linux+236)] = "__NR_tkill"
syscall_table[(__NR_Linux+237)] = "__NR_sendfile64"
syscall_table[(__NR_Linux+238)] = "__NR_futex"
syscall_table[(__NR_Linux+239)] = "__NR_sched_setaffinity"
syscall_table[(__NR_Linux+240)] = "__NR_sched_getaffinity"
syscall_table[(__NR_Linux+241)] = "__NR_io_setup"
syscall_table[(__NR_Linux+242)] = "__NR_io_destroy"
syscall_table[(__NR_Linux+243)] = "__NR_io_getevents"
syscall_table[(__NR_Linux+244)] = "__NR_io_submit"
syscall_table[(__NR_Linux+245)] = "__NR_io_cancel"
syscall_table[(__NR_Linux+246)] = "__NR_exit_group"
syscall_table[(__NR_Linux+247)] = "__NR_lookup_dcookie"
syscall_table[(__NR_Linux+248)] = "__NR_epoll_create"
syscall_table[(__NR_Linux+249)] = "__NR_epoll_ctl"
syscall_table[(__NR_Linux+250)] = "__NR_epoll_wait"
syscall_table[(__NR_Linux+251)] = "__NR_remap_file_pages"
syscall_table[(__NR_Linux+252)] = "__NR_set_tid_address"
syscall_table[(__NR_Linux+253)] = "__NR_restart_syscall"
syscall_table[(__NR_Linux+254)] = "__NR_fadvise64"
syscall_table[(__NR_Linux+255)] = "__NR_statfs64"
syscall_table[(__NR_Linux+256)] = "__NR_fstatfs64"
syscall_table[(__NR_Linux+257)] = "__NR_timer_create"
syscall_table[(__NR_Linux+258)] = "__NR_timer_settime"
syscall_table[(__NR_Linux+259)] = "__NR_timer_gettime"
syscall_table[(__NR_Linux+260)] = "__NR_timer_getoverrun"
syscall_table[(__NR_Linux+261)] = "__NR_timer_delete"
syscall_table[(__NR_Linux+262)] = "__NR_clock_settime"
syscall_table[(__NR_Linux+263)] = "__NR_clock_gettime"
syscall_table[(__NR_Linux+264)] = "__NR_clock_getres"
syscall_table[(__NR_Linux+265)] = "__NR_clock_nanosleep"
syscall_table[(__NR_Linux+266)] = "__NR_tgkill"
syscall_table[(__NR_Linux+267)] = "__NR_utimes"
syscall_table[(__NR_Linux+268)] = "__NR_mbind"
syscall_table[(__NR_Linux+269)] = "__NR_get_mempolicy"
syscall_table[(__NR_Linux+270)] = "__NR_set_mempolicy"
syscall_table[(__NR_Linux+271)] = "__NR_mq_open"
syscall_table[(__NR_Linux+272)] = "__NR_mq_unlink"
syscall_table[(__NR_Linux+273)] = "__NR_mq_timedsend"
syscall_table[(__NR_Linux+274)] = "__NR_mq_timedreceive"
syscall_table[(__NR_Linux+275)] = "__NR_mq_notify"
syscall_table[(__NR_Linux+276)] = "__NR_mq_getsetattr"
syscall_table[(__NR_Linux+277)] = "__NR_vserver"
syscall_table[(__NR_Linux+278)] = "__NR_waitid"
syscall_table[(__NR_Linux+280)] = "__NR_add_key"
syscall_table[(__NR_Linux+281)] = "__NR_request_key"
syscall_table[(__NR_Linux+282)] = "__NR_keyctl"
syscall_table[(__NR_Linux+283)] = "__NR_set_thread_area"
syscall_table[(__NR_Linux+284)] = "__NR_inotify_init"
syscall_table[(__NR_Linux+285)] = "__NR_inotify_add_watch"
syscall_table[(__NR_Linux+286)] = "__NR_inotify_rm_watch"
syscall_table[(__NR_Linux+287)] = "__NR_migrate_pages"
syscall_table[(__NR_Linux+288)] = "__NR_openat"
syscall_table[(__NR_Linux+289)] = "__NR_mkdirat"
syscall_table[(__NR_Linux+290)] = "__NR_mknodat"
syscall_table[(__NR_Linux+291)] = "__NR_fchownat"
syscall_table[(__NR_Linux+292)] = "__NR_futimesat"
syscall_table[(__NR_Linux+293)] = "__NR_fstatat64"
syscall_table[(__NR_Linux+294)] = "__NR_unlinkat"
syscall_table[(__NR_Linux+295)] = "__NR_renameat"
syscall_table[(__NR_Linux+296)] = "__NR_linkat"
syscall_table[(__NR_Linux+297)] = "__NR_symlinkat"
syscall_table[(__NR_Linux+298)] = "__NR_readlinkat"
syscall_table[(__NR_Linux+299)] = "__NR_fchmodat"
syscall_table[(__NR_Linux+300)] = "__NR_faccessat"
syscall_table[(__NR_Linux+301)] = "__NR_pselect6"
syscall_table[(__NR_Linux+302)] = "__NR_ppoll"
syscall_table[(__NR_Linux+303)] = "__NR_unshare"
syscall_table[(__NR_Linux+304)] = "__NR_splice"
syscall_table[(__NR_Linux+305)] = "__NR_sync_file_range"
syscall_table[(__NR_Linux+306)] = "__NR_tee"
syscall_table[(__NR_Linux+307)] = "__NR_vmsplice"
syscall_table[(__NR_Linux+308)] = "__NR_move_pages"
syscall_table[(__NR_Linux+309)] = "__NR_set_robust_list"
syscall_table[(__NR_Linux+310)] = "__NR_get_robust_list"
syscall_table[(__NR_Linux+311)] = "__NR_kexec_load"
syscall_table[(__NR_Linux+312)] = "__NR_getcpu"
syscall_table[(__NR_Linux+313)] = "__NR_epoll_pwait"
syscall_table[(__NR_Linux+314)] = "__NR_ioprio_set"
syscall_table[(__NR_Linux+315)] = "__NR_ioprio_get"
syscall_table[(__NR_Linux+316)] = "__NR_utimensat"
syscall_table[(__NR_Linux+317)] = "__NR_signalfd"
syscall_table[(__NR_Linux+318)] = "__NR_timerfd"
syscall_table[(__NR_Linux+319)] = "__NR_eventfd"
syscall_table[(__NR_Linux+320)] = "__NR_fallocate"
syscall_table[(__NR_Linux+321)] = "__NR_timerfd_create"
syscall_table[(__NR_Linux+322)] = "__NR_timerfd_gettime"
syscall_table[(__NR_Linux+323)] = "__NR_timerfd_settime"
syscall_table[(__NR_Linux+324)] = "__NR_signalfd4"
syscall_table[(__NR_Linux+325)] = "__NR_eventfd2"
syscall_table[(__NR_Linux+326)] = "__NR_epoll_create1"
syscall_table[(__NR_Linux+327)] = "__NR_dup3"
syscall_table[(__NR_Linux+328)] = "__NR_pipe2"
syscall_table[(__NR_Linux+329)] = "__NR_inotify_init1"
syscall_table[(__NR_Linux+330)] = "__NR_preadv"
syscall_table[(__NR_Linux+331)] = "__NR_pwritev"
syscall_table[(__NR_Linux+332)] = "__NR_rt_tgsigqueueinfo"
syscall_table[(__NR_Linux+333)] = "__NR_perf_event_open"
syscall_table[(__NR_Linux+334)] = "__NR_accept4"
syscall_table[(__NR_Linux+335)] = "__NR_recvmmsg"
syscall_table[(__NR_Linux+336)] = "__NR_fanotify_init"
syscall_table[(__NR_Linux+337)] = "__NR_fanotify_mark"
syscall_table[(__NR_Linux+338)] = "__NR_prlimit64"
syscall_table[(__NR_Linux+339)] = "__NR_name_to_handle_at"
syscall_table[(__NR_Linux+340)] = "__NR_open_by_handle_at"
syscall_table[(__NR_Linux+341)] = "__NR_clock_adjtime"
syscall_table[(__NR_Linux+342)] = "__NR_syncfs"
syscall_table[(__NR_Linux+343)] = "__NR_sendmmsg"
syscall_table[(__NR_Linux+344)] = "__NR_setns"
syscall_table[(__NR_Linux+345)] = "__NR_process_vm_readv"
syscall_table[(__NR_Linux+346)] = "__NR_process_vm_writev"
syscall_table[(__NR_Linux+347)] = "__NR_kcmp"
syscall_table[(__NR_Linux+348)] = "__NR_finit_module"
syscall_table[(__NR_Linux+349)] = "__NR_sched_setattr"
syscall_table[(__NR_Linux+350)] = "__NR_sched_getattr"
syscall_table[(__NR_Linux+351)] = "__NR_renameat2"
syscall_table[(__NR_Linux+352)] = "__NR_seccomp"
syscall_table[(__NR_Linux+353)] = "__NR_getrandom"
syscall_table[(__NR_Linux+354)] = "__NR_memfd_create"
syscall_table[(__NR_Linux+355)] = "__NR_bpf"
def debug(text):
if DEBUG:
print text
ea = ScreenEA()
seg_ea = SegStart(ea)
for addr in Heads(seg_ea, SegEnd(seg_ea)):
if isCode(GetFlags(addr)):
mne = GetMnem(addr)
if mne == "syscall":
prev_head = PrevHead(addr, seg_ea)
if GetOpnd(prev_head, 0) == "$v0":
# second operand is our syscall number
syscall_id = GetOperandValue(prev_head, 1)
if syscall_id == -1:
raise Exception("Invalid operand value")
syscall = syscall_table[syscall_id]
debug("syscall: {}/{} at {}".format(syscall_id, syscall,
hex(addr)))
# identify syscall in comments
MakeRptCmt(addr, syscall)
else:
print("previous instruction is not about $v0, " \
"investigate: {}").format(hex(addr))
|
eset/malware-research
|
moose/ida/mips_identify_syscalls.py
|
Python
|
bsd-2-clause
| 20,841
|
[
"MOOSE"
] |
fd9fb3e8d55985dc2a0670767cc65061748f1d724f08d6ca2db1635a56dcc177
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016-2018 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Standard imports.
import os
import fnmatch
# Maya imports.
import pymel.core as pm
import maya.cmds as mc
import maya.mel as mel
import maya.OpenMaya as om
# appleseedMaya imports.
from AETemplates import appleseedAETemplateCallback
from hypershadeCallbacks import *
from logger import logger
from menu import createMenu, deleteMenu
from renderer import createRenderMelProcedures
from renderGlobals import (
createRenderTabsMelProcedures,
renderSettingsBuiltCallback,
addRenderGlobalsScriptJobs,
removeRenderGlobalsScriptJobs)
from translator import createTranslatorMelProcedures
thisDir = os.path.normpath(os.path.dirname(__file__))
asXGenCallbacks = [
("RenderAPIRendererTabUIInit", "appleseedMaya.xgenseedui.xgseedUI"),
("RenderAPIRendererTabUIRefresh", "appleseedMaya.xgenseedui.xgseedRefresh"),
("PostDescriptionCreate", "appleseedMaya.xgenseedui.xgseedOnCreateDescription"),
("ArchiveExport", "appleseedMaya.xgenseed.xgseedArchiveExport"),
("ArchiveExportInfo", "appleseedMaya.xgenseed.xgseedArchiveExportInfo"),
("ArchiveExportInit", "appleseedMaya.xgenseed.xgseedArchiveExportInit")
]
def register():
logger.info("Registering appleseed renderer.")
# Register render.
pm.renderer("appleseed", rendererUIName="appleseed")
createRenderMelProcedures()
# Final Render procedures.
pm.renderer(
"appleseed",
edit=True,
renderProcedure="appleseedRenderProcedure",
commandRenderProcedure="appleseedBatchRenderProcedure",
batchRenderProcedure="appleseedBatchRenderProcedure",
cancelBatchRenderProcedure="appleseedCancelBatchRenderProcedure",
renderRegionProcedure="mayaRenderRegion"
)
# Ipr Render procedures.
pm.renderer(
"appleseed",
edit=True,
iprRenderProcedure="appleseedIprRenderProcedure",
isRunningIprProcedure="appleseedIsRunningIprRenderProcedure",
startIprRenderProcedure="appleseedStartIprRenderProcedure",
stopIprRenderProcedure="appleseedStopIprRenderProcedure",
refreshIprRenderProcedure="appleseedRefreshIprRenderProcedure",
pauseIprRenderProcedure="appleseedPauseIprRenderProcedure",
changeIprRegionProcedure="appleseedChangeIprRegionProcedure",
)
# Globals
createRenderTabsMelProcedures()
renderSettingsBuiltCallback('appleseed')
pm.renderer("appleseed", edit=True, addGlobalsNode="defaultRenderGlobals")
pm.renderer("appleseed", edit=True, addGlobalsNode="defaultResolution")
pm.renderer(
"appleseed", edit=True, addGlobalsNode="appleseedRenderGlobals")
pm.callbacks(
addCallback=renderSettingsBuiltCallback,
hook="renderSettingsBuilt",
owner="appleseed")
addRenderGlobalsScriptJobs()
# AE templates.
pm.callbacks(
addCallback=appleseedAETemplateCallback,
hook="AETemplateCustomContent",
owner="appleseed")
# Manually load templates in AETemplates folder.
templatesDir = os.path.join(thisDir, "AETemplates")
logger.debug("Registering AETemplates in %s" % templatesDir)
for file in os.listdir(templatesDir):
if fnmatch.fnmatch(file, '*Template.py'):
templateModule = file.replace(".py", "")
logger.debug("Registering AE template %s" % templateModule)
mel.eval('python("import appleseedMaya.AETemplates.%s")' % templateModule)
# Hypershade callbacks
asHypershadeCallbacks = [
("hyperShadePanelBuildCreateMenu", hyperShadePanelBuildCreateMenuCallback),
("hyperShadePanelBuildCreateSubMenu", hyperShadePanelBuildCreateSubMenuCallback),
("hyperShadePanelPluginChange", hyperShadePanelPluginChangeCallback),
("createRenderNodeSelectNodeCategories", createRenderNodeSelectNodeCategoriesCallback),
("createRenderNodePluginChange", createRenderNodePluginChangeCallback),
("renderNodeClassification", renderNodeClassificationCallback),
("createRenderNodeCommand", createRenderNodeCallback),
("nodeCanBeUsedAsMaterial", nodeCanBeUsedAsMaterialCallback),
("buildRenderNodeTreeListerContent", buildRenderNodeTreeListerContentCallback)
]
for h, c in asHypershadeCallbacks:
logger.debug("Adding {0} callback.".format(h))
pm.callbacks(addCallback=c, hook=h, owner="appleseed")
# appleseed translator.
createTranslatorMelProcedures()
# Logos.
pm.renderer(
"appleseed",
edit=True,
logoImageName="appleseed.png"
)
mel.eval('''
global proc appleseedLogoCallback()
{
evalDeferred("showHelp -absolute \\\"https://appleseedhq.net\\\"");
}
'''
)
pm.renderer(
"appleseed",
edit=True,
logoCallbackProcedure="appleseedLogoCallback"
)
# Menu
if om.MGlobal.mayaState() == om.MGlobal.kInteractive:
createMenu()
# XGen
try:
import xgenm as xg
for h, c in asXGenCallbacks:
xg.registerCallback(h, c)
logger.info("appleseedMaya: initialized xgenseed")
except Exception as e:
logger.info(
"appleseedMaya: could not initialize xgenseed. error = %s" % e)
def unregister():
logger.info("Unregistering appleseed renderer.")
# XGen
try:
import xgenm as xg
for h, c in asXGenCallbacks:
xg.deregisterCallback(h, c)
logger.info("appleseedMaya: uninitialized xgenseed")
except Exception as e:
logger.info(
"appleseedMaya: could not uninitialize xgenseed. error = %s" % e)
if om.MGlobal.mayaState() == om.MGlobal.kInteractive:
deleteMenu()
pm.callbacks(clearCallbacks=True, owner="appleseed")
removeRenderGlobalsScriptJobs()
if pm.renderer("appleseed", q=True, ex=True):
pm.renderer("appleseed", unregisterRenderer=True)
|
appleseedhq/appleseed-maya
|
scripts/appleseedMaya/register.py
|
Python
|
mit
| 7,230
|
[
"VisIt"
] |
c981a435dbfbaa43cfc3126544fe5c4170d5da468d19bf89e1623c75964e86c8
|
#!/opt/local/bin/ipython
#"""
#Usage:
#./subim_gaussfit.py @file_list.txt x_cen y_cen [outfile.txt]
#%run subim_gaussfit.py @file_list.txt x_cen y_cen [outfile.txt] in an interactive session
# or %run subim_gaussfit.py @file_list.txt @coord_list.txt [outfile.txt]
#"""
import sys
import pickle  # used by printfits() below to save the raw fit results
#sys.path.append('/Users/adam/classes/probstat') #the gaussfitter.py file is in this directory
try:
import pyfits
except ImportError:
print "subim_gaussfit requires pyfits"
from numpy import *
from scipy import *
from pylab import *
import pylab
for k,v in pylab.__dict__.iteritems():
if hasattr(v,'__module__'):
if v.__module__ is None:
locals()[k].__module__ = 'pylab'
from gaussfitter import *
# read the input files
if len(sys.argv) > 2:
if sys.argv[1][0] == "@":
filename = sys.argv[1].strip("@")
filelist = open(filename).readlines()
else:
filelist = [sys.argv[1]]
if sys.argv[2][0] == "@":
coord_filename = sys.argv[2].strip("@")
coord_file = open(coord_filename)
x_cen = []
y_cen = []
for myline in coord_file.readlines():
x_cen.append(myline.split()[0])
y_cen.append(myline.split()[1])
elif sys.argv[2][0] != "@":
x_cen,y_cen = sys.argv[2:4]
if sys.argv[2][0] == "@" and len(sys.argv) > 3:
outfile = sys.argv[3]
elif sys.argv[2][0] != "@" and len(sys.argv) > 4:
outfile = sys.argv[4]
else:
raise ValueError("Wrong number of input parameters. Input should be of form: " +\
"\n./subim_gaussfit.py file_list.txt x_cen y_cen ")
# none of these are necessary, they should probably be removed
dates=[]
width=[]
amp=[]
back=[]
ImArr=[]
ModArr=[]
flux=[]
# fit multiple stars function (assumes filelist, x_cen, y_cen are all lists)
# return is a list of lists of lists:
# outermost list the list of stars
# each star has a list of each time point
# each time point has a list of parameters
def fitstars(filelist,x_cen,y_cen):
outdata = []
if type(x_cen) == type([]):
for i in xrange(len(x_cen)):
xc,yc = float(x_cen[i]),float(y_cen[i])
star_data=[]
if type(filelist)==type([]):
                for line in filelist:
                    tokens = line.rstrip('\n').split()
                    filename = tokens[0]
                    if len(tokens) > 1:
                        # optional second column: error image for this frame
                        errfilename = tokens[1]
                        star_data.append(fitstar(filename, xc, yc, errname=errfilename))
                    else:
                        star_data.append(fitstar(filename, xc, yc))
            else:
                star_data.append(fitstar(filelist, xc, yc))
outdata.append(star_data)
else:
star_data=[]
x_cen,y_cen = float(x_cen),float(y_cen)
if type(filelist)==type([]):
            for line in filelist:
                tokens = line.rstrip('\n').split()
                filename = tokens[0]
                if len(tokens) > 1:
                    # optional second column: error image for this frame
                    errfilename = tokens[1]
                    star_data.append(fitstar(filename, x_cen, y_cen, errname=errfilename))
                else:
                    star_data.append(fitstar(filename, x_cen, y_cen))
        else:
            star_data.append(fitstar(filelist, x_cen, y_cen))
outdata.append(star_data)
return outdata
# prints gaussian fit parameters, julian date, and measured flux to file
def printfits(outfilename,filelist,xcen,ycen):
file = open(outfilename,'w')
data = fitstars(filelist,xcen,ycen)
    pickle.dump(data, open('.'.join(outfilename.split(".")[:-1] + ['pickle.txt']), 'w'))
for i in xrange(len(data)):
star = data[i]
print >>file, "# Star %d" % i
print >>file, "# %15s%15s%15s%15s%15s%15s%15s%15s%15s%15s%15s%15s " % ('JD','flux','err','height','amplitude','xcen','ycen','xwidth','ywidth','rotation','modelresid','reducedchi2')
for myline in star:
print >>file, "%17.5f%15.5f%15.5f%15.5f%15.5f%15.5f%15.5f%15.5f%15.5f%15.5f%15.5f%15.5f " % tuple(myline)
print >>file,"\n"
def fitstar(filename, x_cen, y_cen, errname=None):
    subim_size = 10
    dx = subim_size/2
    x_cen, y_cen = int(x_cen), int(y_cen)  # integer pixel indices for slicing
file = pyfits.open(filename)
data = file[0].data
head = file[0].header
# date = head['DATE-OBS']
JD = head['MJD-OBS']
# angle = int(head['OBJANGLE'])
# subim = data[x_cen-dx:x_cen+dx,y_cen-dx:y_cen+dx] #star 1
subim = data[y_cen-dx:y_cen+dx,x_cen-dx:x_cen+dx] #star 1
b,a,x0,y0,wx,wy,rota = moments(subim,0,1,1)
    if errname is None:
noise = sqrt(abs(subim))
radial_weight = ( ((indices(subim.shape)[0]-x0)**2) + ( (indices(subim.shape)[1]-y0)**2 ) ) ** .5
errim = noise*radial_weight
else:
errfile = pyfits.open(errname)
errdata = errfile[0].data
errim = errdata[y_cen-dx:y_cen+dx,x_cen-dx:x_cen+dx]
parms , cov , infodict , errmsg = gaussfit(subim,err=errim,return_all=1)
if parms[1] > 2.*subim.max() or parms[1] < subim.max()/2. or parms[0] < subim.mean()/4.:
ptemp = gaussfit(subim)
parms , cov , infodict , errmsg = gaussfit(subim,err=errim,params=ptemp,return_all=1)
b,a,x0,y0,wx,wy,rota = parms
model = twodgaussian(parms,0,1,1)(*indices(subim.shape))
chi2 = (((subim - model)/errim)**2).sum()
# print "date: %s angle: %d height: %.1f amplitude: %.1f x0: %.1f y0: %.1f wx: %.1f wy: %.1f rota: %.1f chi2: %.1f" % (date,angle,b,a,x0,y0,wx,wy,rota,sum_residuals2)
wx,wy = abs(wx),abs(wy)
maskarr = ( asarray (((indices(subim.shape)[0]-x0)**2)<(wx*2) , dtype='int' ) * asarray ( ( (indices(subim.shape)[1]-y0)**2 ) < (wy*2) ,dtype='int'))
flux = ((maskarr*subim).sum())
my_error = ((maskarr*errim).sum())
model_resid = abs(subim-model).sum()
returnval = tuple([JD]+[flux]+[my_error]+parms.tolist()+[model_resid]+[chi2])
# return JD,flux,parms,sum_residuals2
return returnval
# printfits(outfile,filelist,x_cen,y_cen)
#
# ParmArr = fitstars(filelist,x_cen,y_cen)
# figure(1); clf()
# title('FWHM vs Julian Date')
# plot(dates,width,'x')
# figure(2); clf();
# plot(dates,amp,'o')
# plot(dates,back,'d')
# figure(4); clf();
# title('Flux vs. Amplitude')
# plot(flux,amp,'x')
|
ufoym/agpy
|
agpy/subim_gaussfit.py
|
Python
|
mit
| 6,196
|
[
"Gaussian"
] |
8596fa86212d39fb4fe03420eb806b1715eac5c1b5127b5124843d2ae3e1cedb
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
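# --- Illustrative aside (not part of the original scikit-learn example). ---
# The logistic curve plotted above can equivalently be obtained from the
# fitted classifier itself, since predict_proba applies the same sigmoid to
# clf.coef_ * x + clf.intercept_.
proba = clf.predict_proba(X_test[:, np.newaxis])[:, 1]
assert np.allclose(proba, loss)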
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/examples/linear_model/plot_logistic.py
|
Python
|
mit
| 1,426
|
[
"Gaussian"
] |
83e589dcada1db32ac80411bb18360f12568f62ff558b2d24adf5848d9df5ddf
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Reports/Text Reports/Database Summary Report.
"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import posixpath
from gramps.gen.ggettext import gettext as _
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.lib import Person
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
from gramps.gen.utils.file import media_path_full
from gramps.gen.datehandler import get_date
#------------------------------------------------------------------------
#
# SummaryReport
#
#------------------------------------------------------------------------
class SummaryReport(Report):
"""
This report produces a summary of the objects in the database.
"""
def __init__(self, database, options, user):
"""
Create the SummaryReport object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
"""
Report.__init__(self, database, options, user)
self.__db = database
def write_report(self):
"""
Overridden function to generate the report.
"""
self.doc.start_paragraph("SR-Title")
title = _("Database Summary Report")
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
self.summarize_people()
self.summarize_families()
self.summarize_media()
def summarize_people(self):
"""
Write a summary of all the people in the database.
"""
with_media = 0
incomp_names = 0
disconnected = 0
missing_bday = 0
males = 0
females = 0
unknowns = 0
namelist = []
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Individuals"))
self.doc.end_paragraph()
num_people = 0
for person in self.__db.iter_people():
num_people += 1
# Count people with media.
length = len(person.get_media_list())
if length > 0:
with_media += 1
# Count people with incomplete names.
for name in [person.get_primary_name()] + person.get_alternate_names():
if name.get_first_name().strip() == "":
incomp_names += 1
else:
if name.get_surname_list():
for surname in name.get_surname_list():
if surname.get_surname().strip() == "":
incomp_names += 1
else:
incomp_names += 1
# Count people without families.
if (not person.get_main_parents_family_handle() and
not len(person.get_family_handle_list())):
disconnected += 1
# Count missing birthdays.
birth_ref = person.get_birth_ref()
if birth_ref:
birth = self.__db.get_event_from_handle(birth_ref.ref)
if not get_date(birth):
missing_bday += 1
else:
missing_bday += 1
# Count genders.
if person.get_gender() == Person.FEMALE:
females += 1
elif person.get_gender() == Person.MALE:
males += 1
else:
unknowns += 1
# Count unique surnames
if name.get_surname() not in namelist:
namelist.append(name.get_surname())
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of individuals: %d") % num_people)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Males: %d") % males)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Females: %d") % females)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals with unknown gender: %d") % unknowns)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Incomplete names: %d") %
incomp_names)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals missing birth dates: %d") %
missing_bday)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Disconnected individuals: %d") % disconnected)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Unique surnames: %d") % len(namelist))
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals with media objects: %d") %
with_media)
self.doc.end_paragraph()
def summarize_families(self):
"""
Write a summary of all the families in the database.
"""
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Family Information"))
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of families: %d") % self.__db.get_number_of_families())
self.doc.end_paragraph()
def summarize_media(self):
"""
Write a summary of all the media in the database.
"""
total_media = 0
size_in_bytes = 0
notfound = []
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Media Objects"))
self.doc.end_paragraph()
total_media = len(self.__db.get_media_object_handles())
mbytes = "0"
for media_id in self.__db.get_media_object_handles():
media = self.__db.get_object_from_handle(media_id)
try:
size_in_bytes += posixpath.getsize(
media_path_full(self.__db, media.get_path()))
length = len(str(size_in_bytes))
if size_in_bytes <= 999999:
mbytes = _("less than 1")
else:
mbytes = str(size_in_bytes)[:(length-6)]
except:
notfound.append(media.get_path())
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of unique media objects: %d") %
total_media)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Total size of media objects: %s MB") % mbytes)
self.doc.end_paragraph()
if len(notfound) > 0:
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Missing Media Objects"))
self.doc.end_paragraph()
for media_path in notfound:
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(media_path)
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# SummaryOptions
#
#------------------------------------------------------------------------
class SummaryOptions(MenuReportOptions):
"""
SummaryOptions provides the options for the SummaryReport.
"""
def __init__(self, name, dbase):
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the marker report.
"""
pass
def make_default_style(self, default_style):
"""Make the default output style for the Summary Report."""
font = FontStyle()
font.set_size(16)
font.set_type_face(FONT_SANS_SERIF)
font.set_bold(1)
para = ParagraphStyle()
para.set_header_level(1)
para.set_bottom_border(1)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_font(font)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style("SR-Title", para)
font = FontStyle()
font.set_size(12)
font.set_bold(True)
para = ParagraphStyle()
para.set_font(font)
para.set_top_margin(0)
para.set_description(_('The basic style used for sub-headings.'))
default_style.add_paragraph_style("SR-Heading", para)
font = FontStyle()
font.set_size(12)
para = ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_font(font)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style("SR-Normal", para)
|
arunkgupta/gramps
|
gramps/plugins/textreport/summary.py
|
Python
|
gpl-2.0
| 10,795
|
[
"Brian"
] |
40cacfb55145488c3bd98e3be8e16a08628aeedfbb2e8531e6ccdbdb10e2fd2f
|
from io import BytesIO
from threading import Lock
import contextlib
import itertools
import os.path
import pickle
import shutil
import tempfile
import unittest
import sys
import numpy as np
import pandas as pd
import xarray as xr
from xarray import Dataset, open_dataset, open_mfdataset, backends, save_mfdataset
from xarray.backends.common import robust_getitem
from xarray.backends.netCDF4_ import _extract_nc4_encoding
from xarray.core.pycompat import iteritems, PY3
from . import (TestCase, requires_scipy, requires_netCDF4, requires_pydap,
requires_scipy_or_netCDF4, requires_dask, requires_h5netcdf,
requires_pynio, has_netCDF4, has_scipy)
from .test_dataset import create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
try:
import dask.array as da
except ImportError:
pass
def open_example_dataset(name, *args, **kwargs):
return open_dataset(os.path.join(os.path.dirname(__file__), 'data', name),
*args, **kwargs)
def create_masked_and_scaled_data():
x = np.array([np.nan, np.nan, 10, 10.1, 10.2])
encoding = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1), 'dtype': 'i2'}
return Dataset({'x': ('t', x, {}, encoding)})
def create_encoded_masked_and_scaled_data():
attributes = {'_FillValue': -1, 'add_offset': 10,
'scale_factor': np.float32(0.1)}
return Dataset({'x': ('t', [-1, -1, 0, 1, 2], attributes)})
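# --- Illustrative sketch (not part of the test suite). Under CF conventions
# the encoded integers above decode as value * scale_factor + add_offset,
# with _FillValue entries masked out; this mirrors the relation between the
# two helper functions above:
def _example_decode_masked_and_scaled():
    encoded = np.array([-1, -1, 0, 1, 2], dtype=float)
    decoded = np.where(encoded == -1, np.nan,
                       encoded * np.float32(0.1) + 10)
    # approximately [nan, nan, 10.0, 10.1, 10.2] (up to float32 rounding),
    # i.e. the values used in create_masked_and_scaled_data()
    return decoded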
def create_boolean_data():
attributes = {'units': '-'}
return Dataset({'x': ('t', [True, False, False, True], attributes)})
class TestCommon(TestCase):
def test_robust_getitem(self):
class UnreliableArrayFailure(Exception):
pass
class UnreliableArray(object):
def __init__(self, array, failures=1):
self.array = array
self.failures = failures
def __getitem__(self, key):
if self.failures > 0:
self.failures -= 1
raise UnreliableArrayFailure
return self.array[key]
array = UnreliableArray([0])
with self.assertRaises(UnreliableArrayFailure):
array[0]
self.assertEqual(array[0], 0)
actual = robust_getitem(array, 0, catch=UnreliableArrayFailure,
initial_delay=0)
self.assertEqual(actual, 0)
class Only32BitTypes(object):
pass
class DatasetIOTestCases(object):
def create_store(self):
raise NotImplementedError
def roundtrip(self, data, **kwargs):
raise NotImplementedError
def test_zero_dimensional_variable(self):
expected = create_test_data()
expected['float_var'] = ([], 1.0e9, {'units': 'units of awesome'})
expected['string_var'] = ([], np.array('foobar', dtype='S'))
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_write_store(self):
expected = create_test_data()
with self.create_store() as store:
expected.dump_to_store(store)
# we need to cf decode the store because it has time and
# non-dimension coordinates
actual = xr.decode_cf(store)
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_test_data(self):
expected = create_test_data()
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_load(self):
expected = create_test_data()
@contextlib.contextmanager
def assert_loads(vars=None):
if vars is None:
vars = expected
with self.roundtrip(expected) as actual:
for v in actual.variables.values():
self.assertFalse(v._in_memory)
yield actual
for k, v in actual.variables.items():
if k in vars:
self.assertTrue(v._in_memory)
self.assertDatasetAllClose(expected, actual)
with self.assertRaises(AssertionError):
# make sure the contextmanager works!
with assert_loads() as ds:
pass
with assert_loads() as ds:
ds.load()
with assert_loads(['var1', 'dim1', 'dim2']) as ds:
ds['var1'].load()
# verify we can read data even after closing the file
with self.roundtrip(expected) as ds:
actual = ds.load()
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_None_variable(self):
expected = Dataset({None: (('x', 'y'), [[0, 1], [2, 3]])})
with self.roundtrip(expected) as actual:
self.assertDatasetAllClose(expected, actual)
def test_roundtrip_object_dtype(self):
floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
letters = np.array(['ab', 'cdef', 'g'], dtype=object)
letters_nans = np.array(['ab', 'cdef', np.nan], dtype=object)
all_nans = np.array([np.nan, np.nan], dtype=object)
original = Dataset({'floats': ('a', floats),
'floats_nans': ('a', floats_nans),
'letters': ('b', letters),
'letters_nans': ('b', letters_nans),
'all_nans': ('c', all_nans),
'nan': ([], np.nan)})
expected = original.copy(deep=True)
if isinstance(self, Only32BitTypes):
# for netCDF3 tests, expect the results to come back as characters
expected['letters_nans'] = expected['letters_nans'].astype('S')
expected['letters'] = expected['letters'].astype('S')
with self.roundtrip(original) as actual:
try:
self.assertDatasetIdentical(expected, actual)
except AssertionError:
# Most stores use '' for nans in strings, but some don't
# first try the ideal case (where the store returns exactly)
# the original Dataset), then try a more realistic case.
# ScipyDataTest, NetCDF3ViaNetCDF4DataTest and NetCDF4DataTest
# all end up using this case.
expected['letters_nans'][-1] = ''
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_string_data(self):
expected = Dataset({'x': ('t', ['ab', 'cdef'])})
with self.roundtrip(expected) as actual:
if isinstance(self, Only32BitTypes):
expected['x'] = expected['x'].astype('S')
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_datetime_data(self):
times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
expected = Dataset({'t': ('t', times), 't0': times[0]})
kwds = {'encoding': {'t0': {'units': 'days since 1950-01-01'}}}
with self.roundtrip(expected, save_kwargs=kwds) as actual:
self.assertDatasetIdentical(expected, actual)
self.assertEquals(actual.t0.encoding['units'],
'days since 1950-01-01')
def test_roundtrip_timedelta_data(self):
time_deltas = pd.to_timedelta(['1h', '2h', 'NaT'])
expected = Dataset({'td': ('td', time_deltas), 'td0': time_deltas[0]})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_float64_data(self):
expected = Dataset({'x': ('y', np.array([1.0, 2.0, np.pi], dtype='float64'))})
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_example_1_netcdf(self):
expected = open_example_dataset('example_1.nc')
with self.roundtrip(expected) as actual:
# we allow the attributes to differ since that
# will depend on the encoding used. For example,
# without CF encoding 'actual' will end up with
# a dtype attribute.
self.assertDatasetEqual(expected, actual)
def test_roundtrip_coordinates(self):
original = Dataset({'foo': ('x', [0, 1])},
{'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(original, actual)
expected = original.drop('foo')
with self.roundtrip(expected) as actual:
self.assertDatasetIdentical(expected, actual)
expected = original.copy()
expected.attrs['coordinates'] = 'something random'
with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
with self.roundtrip(expected):
pass
expected = original.copy(deep=True)
expected['foo'].attrs['coordinates'] = 'something random'
with self.assertRaisesRegexp(ValueError, 'cannot serialize'):
with self.roundtrip(expected):
pass
def test_roundtrip_boolean_dtype(self):
original = create_boolean_data()
self.assertEqual(original['x'].dtype, 'bool')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(original, actual)
self.assertEqual(actual['x'].dtype, 'bool')
def test_orthogonal_indexing(self):
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
indexers = {'dim1': np.arange(3), 'dim2': np.arange(4),
'dim3': np.arange(5)}
expected = in_memory.isel(**indexers)
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
# do it twice, to make sure we're switched from orthogonal -> numpy
# when we cached the values
actual = on_disk.isel(**indexers)
self.assertDatasetAllClose(expected, actual)
def test_pickle(self):
on_disk = open_example_dataset('bears.nc')
unpickled = pickle.loads(pickle.dumps(on_disk))
self.assertDatasetIdentical(on_disk, unpickled)
class CFEncodedDataTest(DatasetIOTestCases):
def test_roundtrip_strings_with_fill_value(self):
values = np.array(['ab', 'cdef', np.nan], dtype=object)
encoding = {'_FillValue': np.string_('X'), 'dtype': np.dtype('S1')}
original = Dataset({'x': ('t', values, {}, encoding)})
expected = original.copy(deep=True)
expected['x'][:2] = values[:2].astype('S')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
original = Dataset({'x': ('t', values, {}, {'_FillValue': '\x00'})})
if not isinstance(self, Only32BitTypes):
# these stores can save unicode strings
expected = original.copy(deep=True)
if isinstance(self, BaseNetCDF4Test):
# netCDF4 can't keep track of an empty _FillValue for VLEN
# variables
expected['x'][-1] = ''
elif (isinstance(self, (NetCDF3ViaNetCDF4DataTest,
NetCDF4ClassicViaNetCDF4DataTest)) or
(has_netCDF4 and type(self) is GenericNetCDFDataTest)):
# netCDF4 can't keep track of an empty _FillValue for nc3, either:
# https://github.com/Unidata/netcdf4-python/issues/273
expected['x'][-1] = np.string_('')
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(expected, actual)
def test_roundtrip_mask_and_scale(self):
decoded = create_masked_and_scaled_data()
encoded = create_encoded_masked_and_scaled_data()
with self.roundtrip(decoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(decoded, open_kwargs=dict(decode_cf=False)) as actual:
# TODO: this assumes that all roundtrips will first
# encode. Is that something we want to test for?
self.assertDatasetAllClose(encoded, actual)
with self.roundtrip(encoded, open_kwargs=dict(decode_cf=False)) as actual:
self.assertDatasetAllClose(encoded, actual)
# make sure roundtrip encoding didn't change the
# original dataset.
self.assertDatasetIdentical(encoded,
create_encoded_masked_and_scaled_data())
with self.roundtrip(encoded) as actual:
self.assertDatasetAllClose(decoded, actual)
with self.roundtrip(encoded, open_kwargs=dict(decode_cf=False)) as actual:
self.assertDatasetAllClose(encoded, actual)
def test_coordinates_encoding(self):
def equals_latlon(obj):
return obj == 'lat lon' or obj == 'lon lat'
original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
{'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
with self.roundtrip(original) as actual:
self.assertDatasetIdentical(actual, original)
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates']))
self.assertNotIn('coordinates', ds.attrs)
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
modified = original.drop(['temp', 'precip'])
with self.roundtrip(modified) as actual:
self.assertDatasetIdentical(actual, modified)
with create_tmp_file() as tmp_file:
modified.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
self.assertTrue(equals_latlon(ds.attrs['coordinates']))
self.assertNotIn('coordinates', ds['lat'].attrs)
self.assertNotIn('coordinates', ds['lon'].attrs)
def test_roundtrip_endian(self):
ds = Dataset({'x': np.arange(3, 10, dtype='>i2'),
'y': np.arange(3, 20, dtype='<i4'),
'z': np.arange(3, 30, dtype='=i8'),
'w': ('x', np.arange(3, 10, dtype=np.float))})
with self.roundtrip(ds) as actual:
# technically these datasets are slightly different,
# one hold mixed endian data (ds) the other should be
# all big endian (actual). assertDatasetIdentical
# should still pass though.
self.assertDatasetIdentical(ds, actual)
if type(self) is NetCDF4DataTest:
ds['z'].encoding['endian'] = 'big'
with self.assertRaises(NotImplementedError):
with self.roundtrip(ds) as actual:
pass
def test_invalid_dataarray_names_raise(self):
te = (TypeError, 'string or None')
ve = (ValueError, 'string must be length 1 or')
data = np.random.random((2, 2))
da = xr.DataArray(data)
for name, e in zip([0, (4, 5), True, ''], [te, te, te, ve]):
ds = Dataset({name: da})
with self.assertRaisesRegexp(*e):
with self.roundtrip(ds) as actual:
pass
def test_encoding_kwarg(self):
ds = Dataset({'x': ('y', np.arange(10.0))})
kwargs = dict(encoding={'x': {'dtype': 'f4'}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertEqual(actual.x.encoding['dtype'], 'f4')
self.assertEqual(ds.x.encoding, {})
kwargs = dict(encoding={'x': {'foo': 'bar'}})
with self.assertRaisesRegexp(ValueError, 'unexpected encoding'):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
kwargs = dict(encoding={'x': 'foo'})
with self.assertRaisesRegexp(ValueError, 'must be castable'):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
kwargs = dict(encoding={'invalid': {}})
with self.assertRaises(KeyError):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
ds = Dataset({'t': pd.date_range('2000-01-01', periods=3)})
units = 'days since 1900-01-01'
kwargs = dict(encoding={'t': {'units': units}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertEqual(actual.t.encoding['units'], units)
self.assertDatasetIdentical(actual, ds)
_counter = itertools.count()
@contextlib.contextmanager
def create_tmp_file(suffix='.nc'):
temp_dir = tempfile.mkdtemp()
path = os.path.join(temp_dir, 'temp-%s%s' % (next(_counter), suffix))
try:
yield path
finally:
shutil.rmtree(temp_dir)
class BaseNetCDF4Test(CFEncodedDataTest):
def test_open_group(self):
# Create a netCDF file with a dataset stored within a group
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as rootgrp:
foogrp = rootgrp.createGroup('foo')
ds = foogrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo', '/foo', 'foo/', '/foo/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
# check that missing group raises appropriate exception
with self.assertRaises(IOError):
open_dataset(tmp_file, group='bar')
with self.assertRaisesRegexp(ValueError, 'must be a string'):
open_dataset(tmp_file, group=(1, 2, 3))
def test_open_subgroup(self):
# Create a netCDF file with a dataset stored within a group within a group
with create_tmp_file() as tmp_file:
rootgrp = nc4.Dataset(tmp_file, 'w')
foogrp = rootgrp.createGroup('foo')
bargrp = foogrp.createGroup('bar')
ds = bargrp
ds.createDimension('time', size=10)
x = np.arange(10)
ds.createVariable('x', np.int32, dimensions=('time',))
ds.variables['x'][:] = x
rootgrp.close()
expected = Dataset()
expected['x'] = ('time', x)
# check equivalent ways to specify group
for group in 'foo/bar', '/foo/bar', 'foo/bar/', '/foo/bar/':
with open_dataset(tmp_file, group=group) as actual:
self.assertVariableEqual(actual['x'], expected['x'])
def test_write_groups(self):
data1 = create_test_data()
data2 = data1 * 2
with create_tmp_file() as tmp_file:
data1.to_netcdf(tmp_file, group='data/1')
data2.to_netcdf(tmp_file, group='data/2', mode='a')
with open_dataset(tmp_file, group='data/1') as actual1:
self.assertDatasetIdentical(data1, actual1)
with open_dataset(tmp_file, group='data/2') as actual2:
self.assertDatasetIdentical(data2, actual2)
def test_roundtrip_character_array(self):
with create_tmp_file() as tmp_file:
values = np.array([['a', 'b', 'c'], ['d', 'e', 'f']], dtype='S')
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 2)
nc.createDimension('string3', 3)
v = nc.createVariable('x', np.dtype('S1'), ('x', 'string3'))
v[:] = values
values = np.array(['abc', 'def'], dtype='S')
expected = Dataset({'x': ('x', values)})
with open_dataset(tmp_file) as actual:
self.assertDatasetIdentical(expected, actual)
# regression test for #157
with self.roundtrip(actual) as roundtripped:
self.assertDatasetIdentical(expected, roundtripped)
def test_default_to_char_arrays(self):
data = Dataset({'x': np.array(['foo', 'zzzz'], dtype='S')})
with self.roundtrip(data) as actual:
self.assertDatasetIdentical(data, actual)
self.assertEqual(actual['x'].dtype, np.dtype('S4'))
def test_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
expected = Dataset()
time = pd.date_range('1999-01-05', periods=10)
encoding = {'units': units, 'dtype': np.dtype('int32')}
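            # the 4-tuple assigned below is (dims, data, attrs, encoding)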
expected['time'] = ('time', time, {}, encoding)
with open_dataset(tmp_file) as actual:
self.assertVariableEqual(actual['time'], expected['time'])
actual_encoding = dict((k, v) for k, v in
iteritems(actual['time'].encoding)
if k in expected['time'].encoding)
self.assertDictEqual(actual_encoding, expected['time'].encoding)
def test_dump_encodings(self):
# regression test for #709
ds = Dataset({'x': ('y', np.arange(10.0))})
kwargs = dict(encoding={'x': {'zlib': True}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
self.assertTrue(actual.x.encoding['zlib'])
def test_dump_and_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as ds:
ds.createDimension('time', size=10)
ds.createVariable('time', np.int32, dimensions=('time',))
units = 'days since 1999-01-01'
ds.variables['time'].setncattr('units', units)
ds.variables['time'][:] = np.arange(10) + 4
with open_dataset(tmp_file) as xarray_dataset:
with create_tmp_file() as tmp_file2:
xarray_dataset.to_netcdf(tmp_file2)
with nc4.Dataset(tmp_file2, 'r') as ds:
self.assertEqual(ds.variables['time'].getncattr('units'), units)
self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4)
def test_compression_encoding(self):
data = create_test_data()
data['var2'].encoding.update({'zlib': True,
'chunksizes': (5, 5),
'fletcher32': True,
'original_shape': data.var2.shape})
with self.roundtrip(data) as actual:
for k, v in iteritems(data['var2'].encoding):
self.assertEqual(v, actual['var2'].encoding[k])
# regression test for #156
expected = data.isel(dim1=0)
with self.roundtrip(expected) as actual:
self.assertDatasetEqual(expected, actual)
def test_mask_and_scale(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('t', 5)
nc.createVariable('x', 'int16', ('t',), fill_value=-1)
v = nc.variables['x']
v.set_auto_maskandscale(False)
v.add_offset = 10
v.scale_factor = 0.1
v[:] = np.array([-1, -1, 0, 1, 2])
# first make sure netCDF4 reads the masked and scaled data correctly
with nc4.Dataset(tmp_file, mode='r') as nc:
expected = np.ma.array([-1, -1, 10, 10.1, 10.2],
mask=[True, True, False, False, False])
actual = nc.variables['x'][:]
self.assertArrayEqual(expected, actual)
# now check xarray
with open_dataset(tmp_file) as ds:
expected = create_masked_and_scaled_data()
self.assertDatasetIdentical(expected, ds)
def test_0dimensional_variable(self):
# This fix verifies our work-around to this netCDF4-python bug:
# https://github.com/Unidata/netcdf4-python/pull/220
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode='w') as nc:
v = nc.createVariable('x', 'int16')
v[...] = 123
with open_dataset(tmp_file) as ds:
expected = Dataset({'x': ((), 123)})
self.assertDatasetIdentical(expected, ds)
def test_variable_len_strings(self):
with create_tmp_file() as tmp_file:
values = np.array(['foo', 'bar', 'baz'], dtype=object)
with nc4.Dataset(tmp_file, mode='w') as nc:
nc.createDimension('x', 3)
v = nc.createVariable('x', str, ('x',))
v[:] = values
expected = Dataset({'x': ('x', values)})
for kwargs in [{}, {'decode_cf': True}]:
with open_dataset(tmp_file, **kwargs) as actual:
self.assertDatasetIdentical(expected, actual)
@requires_netCDF4
class NetCDF4DataTest(BaseNetCDF4Test, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore(tmp_file, mode='w') as store:
yield store
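    # roundtrip: write ``data`` to a temporary netCDF file with the given
    # save_kwargs and re-open it with open_dataset for comparison.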
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, **save_kwargs)
with open_dataset(tmp_file, **open_kwargs) as ds:
yield ds
def test_variable_order(self):
# doesn't work with scipy or h5py :(
ds = Dataset()
ds['a'] = 1
ds['z'] = 2
ds['b'] = 3
ds.coords['c'] = 4
with self.roundtrip(ds) as actual:
self.assertEqual(list(ds), list(actual))
def test_unsorted_index_raises(self):
# should be fixed in netcdf4 v1.2.1
random_data = np.random.random(size=(4, 6))
dim0 = [0, 1, 2, 3]
dim1 = [0, 2, 1, 3, 5, 4] # We will sort this in a later step
da = xr.DataArray(data=random_data, dims=('dim0', 'dim1'),
coords={'dim0': dim0, 'dim1': dim1}, name='randovar')
ds = da.to_dataset()
with self.roundtrip(ds) as ondisk:
inds = np.argsort(dim1)
ds2 = ondisk.isel(dim1=inds)
try:
print(ds2.randovar.values) # should raise IndexError in netCDF4
except IndexError as err:
self.assertIn('first by calling .load', str(err))
@requires_netCDF4
@requires_dask
class NetCDF4ViaDaskDataTest(NetCDF4DataTest):
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with NetCDF4DataTest.roundtrip(
self, data, save_kwargs, open_kwargs) as ds:
yield ds.chunk()
def test_unsorted_index_raises(self):
        # Skip when using dask because dask rewrites indexers to getitem;
        # dask first pulls items by block.
pass
@requires_scipy
class ScipyInMemoryDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
fobj = BytesIO()
yield backends.ScipyDataStore(fobj, 'w')
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
serialized = data.to_netcdf(**save_kwargs)
with open_dataset(BytesIO(serialized), **open_kwargs) as ds:
yield ds
@requires_scipy
class ScipyOnDiskDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.ScipyDataStore(tmp_file, mode='w') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine='scipy', **save_kwargs)
with open_dataset(tmp_file, engine='scipy', **open_kwargs) as ds:
yield ds
def test_array_attrs(self):
ds = Dataset(attrs={'foo': [[1, 2], [3, 4]]})
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
with self.roundtrip(ds) as roundtripped:
pass
def test_roundtrip_example_1_netcdf_gz(self):
if sys.version_info[:2] < (2, 7):
with self.assertRaisesRegexp(ValueError,
'gzipped netCDF not supported'):
open_example_dataset('example_1.nc.gz')
else:
with open_example_dataset('example_1.nc.gz') as expected:
with open_example_dataset('example_1.nc') as actual:
self.assertDatasetIdentical(expected, actual)
def test_netcdf3_endianness(self):
# regression test for GH416
expected = open_example_dataset('bears.nc', engine='scipy')
for var in expected.values():
self.assertTrue(var.dtype.isnative)
@requires_netCDF4
class NetCDF3ViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore(tmp_file, mode='w',
format='NETCDF3_CLASSIC') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format='NETCDF3_CLASSIC',
engine='netcdf4', **save_kwargs)
with open_dataset(tmp_file, engine='netcdf4', **open_kwargs) as ds:
yield ds
@requires_netCDF4
class NetCDF4ClassicViaNetCDF4DataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore(tmp_file, mode='w',
format='NETCDF4_CLASSIC') as store:
yield store
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format='NETCDF4_CLASSIC',
engine='netcdf4', **save_kwargs)
with open_dataset(tmp_file, engine='netcdf4', **open_kwargs) as ds:
yield ds
@requires_scipy_or_netCDF4
class GenericNetCDFDataTest(CFEncodedDataTest, Only32BitTypes, TestCase):
# verify that we can read and write netCDF3 files as long as we have scipy
# or netCDF4-python installed
def test_write_store(self):
# there's no specific store to test here
pass
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format='netcdf3_64bit', **save_kwargs)
with open_dataset(tmp_file, **open_kwargs) as ds:
yield ds
def test_engine(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
data.to_netcdf('foo.nc', engine='foobar')
with self.assertRaisesRegexp(ValueError, 'invalid engine'):
data.to_netcdf(engine='netcdf4')
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with self.assertRaisesRegexp(ValueError, 'unrecognized engine'):
open_dataset(tmp_file, engine='foobar')
netcdf_bytes = data.to_netcdf()
with self.assertRaisesRegexp(ValueError, 'can only read'):
open_dataset(BytesIO(netcdf_bytes), engine='foobar')
def test_cross_engine_read_write_netcdf3(self):
data = create_test_data()
valid_engines = set()
if has_netCDF4:
valid_engines.add('netcdf4')
if has_scipy:
valid_engines.add('scipy')
for write_engine in valid_engines:
for format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format=format,
engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file,
engine=read_engine) as actual:
self.assertDatasetAllClose(data, actual)
@requires_h5netcdf
@requires_netCDF4
class H5NetCDFDataTest(BaseNetCDF4Test, TestCase):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
yield backends.H5NetCDFStore(tmp_file, 'w')
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine='h5netcdf', **save_kwargs)
with open_dataset(tmp_file, engine='h5netcdf', **open_kwargs) as ds:
yield ds
def test_orthogonal_indexing(self):
# doesn't work for h5py (without using dask as an intermediate layer)
pass
def test_complex(self):
expected = Dataset({'x': ('y', np.ones(5) + 1j * np.ones(5))})
with self.roundtrip(expected) as actual:
self.assertDatasetEqual(expected, actual)
def test_cross_engine_read_write_netcdf4(self):
# Drop dim3, because its labels include strings. These appear to be
# not properly read with python-netCDF4, which converts them into
# unicode instead of leaving them as bytes.
if PY3:
raise unittest.SkipTest('see https://github.com/pydata/xarray/issues/535')
data = create_test_data().drop('dim3')
data.attrs['foo'] = 'bar'
valid_engines = ['netcdf4', 'h5netcdf']
for write_engine in valid_engines:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file, engine=read_engine) as actual:
self.assertDatasetIdentical(data, actual)
def test_read_byte_attrs_as_unicode(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, 'w') as nc:
nc.foo = b'bar'
actual = open_dataset(tmp_file)
expected = Dataset(attrs={'foo': 'bar'})
self.assertDatasetIdentical(expected, actual)
@requires_dask
@requires_scipy
@requires_netCDF4
class DaskTest(TestCase):
def test_open_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2]) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks,
((5, 5),))
self.assertDatasetAllClose(original, actual)
with open_mfdataset([tmp1, tmp2], chunks={'x': 3}) as actual:
self.assertEqual(actual.foo.variable.data.chunks,
((3, 2, 3, 2),))
with self.assertRaisesRegexp(IOError, 'no files to open'):
open_mfdataset('foo-bar-baz-*.nc')
def test_preprocess_mfdataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
def preprocess(ds):
return ds.assign_coords(z=0)
expected = preprocess(original)
with open_mfdataset(tmp, preprocess=preprocess) as actual:
self.assertDatasetIdentical(expected, actual)
def test_lock(self):
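        # Reads through the default netCDF4 engine should be serialised with a
        # threading lock, whereas the scipy engine should not be; see the three
        # assertions below.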
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp, format='NETCDF3_CLASSIC')
with open_dataset(tmp, chunks=10) as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertIsInstance(task[-1], type(Lock()))
with open_mfdataset(tmp) as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertIsInstance(task[-1], type(Lock()))
with open_mfdataset(tmp, engine='scipy') as ds:
task = ds.foo.data.dask[ds.foo.data.name, 0]
self.assertNotIsInstance(task[-1], type(Lock()))
def test_save_mfdataset_roundtrip(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
datasets = [original.isel(x=slice(5)),
original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset([tmp1, tmp2]) as actual:
self.assertDatasetIdentical(actual, original)
def test_save_mfdataset_invalid(self):
ds = Dataset()
with self.assertRaisesRegexp(ValueError, 'cannot use mode'):
save_mfdataset([ds, ds], ['same', 'same'])
with self.assertRaisesRegexp(ValueError, 'same length'):
save_mfdataset([ds, ds], ['only one path'])
def test_open_and_do_math(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset(tmp) as ds:
actual = 1.0 * ds
self.assertDatasetAllClose(original, actual)
def test_open_dataset(self):
original = Dataset({'foo': ('x', np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp, chunks={'x': 5}) as actual:
self.assertIsInstance(actual.foo.variable.data, da.Array)
self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),))
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp, chunks=5) as actual:
self.assertDatasetIdentical(original, actual)
with open_dataset(tmp) as actual:
self.assertIsInstance(actual.foo.variable.data, np.ndarray)
self.assertDatasetIdentical(original, actual)
def test_dask_roundtrip(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
chunks = {'dim1': 4, 'dim2': 4, 'dim3': 4, 'time': 10}
with open_dataset(tmp, chunks=chunks) as dask_ds:
self.assertDatasetIdentical(data, dask_ds)
with create_tmp_file() as tmp2:
dask_ds.to_netcdf(tmp2)
with open_dataset(tmp2) as on_disk:
self.assertDatasetIdentical(data, on_disk)
def test_deterministic_names(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
with open_mfdataset(tmp) as ds:
original_names = dict((k, v.data.name)
for k, v in ds.data_vars.items())
with open_mfdataset(tmp) as ds:
repeat_names = dict((k, v.data.name)
for k, v in ds.data_vars.items())
for var_name, dask_name in original_names.items():
self.assertIn(var_name, dask_name)
self.assertIn(tmp, dask_name)
self.assertEqual(original_names, repeat_names)
@requires_scipy_or_netCDF4
@requires_pydap
class PydapTest(TestCase):
@contextlib.contextmanager
def create_datasets(self, **kwargs):
url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc'
actual = open_dataset(url, engine='pydap', **kwargs)
with open_example_dataset('bears.nc') as expected:
# don't check attributes since pydap doesn't serialize them
# correctly also skip the "bears" variable since the test DAP
# server incorrectly concatenates it.
actual = actual.drop('bears')
expected = expected.drop('bears')
yield actual, expected
def test_cmp_local_file(self):
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual, expected)
# global attributes should be global attributes on the dataset
self.assertNotIn('NC_GLOBAL', actual.attrs)
self.assertIn('history', actual.attrs)
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(l=2), expected.isel(l=2))
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(i=0, j=-1),
expected.isel(i=0, j=-1))
with self.create_datasets() as (actual, expected):
self.assertDatasetEqual(actual.isel(j=slice(1, 2)),
expected.isel(j=slice(1, 2)))
@requires_dask
def test_dask(self):
with self.create_datasets(chunks={'j': 2}) as (actual, expected):
self.assertDatasetEqual(actual, expected)
@requires_scipy
@requires_pynio
class TestPyNio(CFEncodedDataTest, Only32BitTypes, TestCase):
def test_write_store(self):
# pynio is read-only for now
pass
def test_orthogonal_indexing(self):
# pynio also does not support list-like indexing
pass
@contextlib.contextmanager
def roundtrip(self, data, save_kwargs={}, open_kwargs={}):
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine='scipy', **save_kwargs)
with open_dataset(tmp_file, engine='pynio', **open_kwargs) as ds:
yield ds
def test_weakrefs(self):
example = Dataset({'foo': ('x', np.arange(5.0))})
expected = example.rename({'foo': 'bar', 'x': 'y'})
with create_tmp_file() as tmp_file:
example.to_netcdf(tmp_file, engine='scipy')
on_disk = open_dataset(tmp_file, engine='pynio')
actual = on_disk.rename({'foo': 'bar', 'x': 'y'})
del on_disk # trigger garbage collection
self.assertDatasetIdentical(actual, expected)
class TestEncodingInvalid(TestCase):
def test_extract_nc4_encoding(self):
var = xr.Variable(('x',), [1, 2, 3], {}, {'foo': 'bar'})
with self.assertRaisesRegexp(ValueError, 'unexpected encoding'):
_extract_nc4_encoding(var, raise_on_invalid=True)
var = xr.Variable(('x',), [1, 2, 3], {}, {'chunking': (2, 1)})
encoding = _extract_nc4_encoding(var)
self.assertEqual({}, encoding)
def test_extract_h5nc_encoding(self):
# not supported with h5netcdf (yet)
var = xr.Variable(('x',), [1, 2, 3], {},
{'least_sigificant_digit': 2})
with self.assertRaisesRegexp(ValueError, 'unexpected encoding'):
_extract_nc4_encoding(var, raise_on_invalid=True)
|
NicWayand/xray
|
xarray/test/test_backends.py
|
Python
|
apache-2.0
| 44,435
|
[
"NetCDF"
] |
e66413f894bd279d9ceec0ac3a9a832a082c958552f9e42f85e0f5526b02f1b9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2010-2014
# Christian Kohlöffel
# Jean-Paul Schouwstra
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
# Import Qt modules
import os
import sys
from math import degrees, radians
import logging
logger = logging.getLogger()
from Core.Logger import LoggerClass
import time
from copy import copy, deepcopy
import subprocess, tempfile #webbrowser, gettext, tempfile
import argparse
from PyQt4 import QtGui, QtCore
# Import the compiled UI module
from dxf2gcode_pyQt4_ui.dxf2gcode_pyQt4_ui import Ui_MainWindow
from Core.Config import MyConfig
from Core.Point import Point
from Core.LayerContent import LayerContentClass
from Core.EntitieContent import EntitieContentClass
import Core.Globals as g
import Core.constants as c
from Core.Shape import ShapeClass
from PostPro.PostProcessor import MyPostProcessor
from PostPro.Breaks import Breaks
from DxfImport.Import import ReadDXF
from Gui.myCanvasClass import MyGraphicsScene
from Gui.TreeHandling import TreeHandler
from Gui.Dialog import myDialog
from Gui.AboutDialog import myAboutDialog
from PostPro.TspOptimisation import TSPoptimize
# Get folder of the main instance and write into globals
g.folder = os.path.dirname(os.path.abspath(sys.argv[0])).replace("\\", "/")
if os.path.islink(sys.argv[0]):
g.folder = os.path.dirname(os.readlink(sys.argv[0]))
# Create a class for our main window
class Main(QtGui.QMainWindow):
"""Main Class"""
def __init__(self, app):
"""
Initialization of the Main window. This is directly called after the
Logger has been initialized. The Function loads the GUI, creates the
used Classes and connects the actions to the GUI.
"""
QtGui.QMainWindow.__init__(self)
# This is always the same
self.app = app
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.createActions()
self.MyGraphicsView = self.ui.MyGraphicsView
self.myMessageBox = self.ui.myMessageBox
self.MyPostProcessor = MyPostProcessor()
self.TreeHandler = TreeHandler(self.ui)
self.shapes = []
self.LayerContents = []
self.EntitieContents = []
self.EntitiesRoot = []
self.filename = "" #loaded file name
QtCore.QObject.connect(self.TreeHandler,
QtCore.SIGNAL("exportOrderUpdated"),
self.updateExportRoute)
if g.config.vars.General['live_update_export_route']:
self.ui.actionLive_update_export_route.setChecked(True)
if g.config.vars.General['default_SplitEdges']:
self.ui.actionSplit_Edges.setChecked(True)
if g.config.vars.General['default_AutomaticCutterCompensation']:
self.ui.actionAutomatic_Cutter_Compensation.setChecked(True)
self.updateMachineType()
self.readSettings()
g.config.metric = 1 # default drawing units: millimeters
def tr(self, string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return unicode(QtGui.QApplication.translate("Main",
string_to_translate, None,
QtGui.QApplication.UnicodeUTF8))
def createActions(self):
"""
Create the actions of the main toolbar.
@purpose: Links the callbacks to the actions in the menu
"""
self.ui.actionLoad_File.triggered.connect(self.showDialog)
self.ui.actionReload_File.triggered.connect(self.reloadFile)
self.ui.actionExit.triggered.connect(self.close)
self.ui.actionOptimize_Shape.triggered.connect(self.optimize_TSP)
self.ui.actionExport_Shapes.triggered.connect(self.exportShapes)
self.ui.actionOptimize_and_Export_shapes.triggered.connect(self.optimizeAndExportShapes)
self.ui.actionShow_WP_Zero.triggered.connect(self.setShow_wp_zero)
self.ui.actionShow_path_directions.triggered.connect(self.setShow_path_directions)
self.ui.actionShow_disabled_paths.triggered.connect(self.setShow_disabled_paths)
self.ui.actionLive_update_export_route.toggled.connect(self.setUpdate_export_route)
self.ui.actionAutoscale.triggered.connect(self.autoscale)
self.ui.actionDelete_G0_paths.triggered.connect(self.deleteG0paths)
self.ui.actionTolerances.triggered.connect(self.setTolerances)
self.ui.actionRotate_all.triggered.connect(self.CallRotateAll)
self.ui.actionScale_all.triggered.connect(self.CallScaleAll)
self.ui.actionMove_WP_zero.triggered.connect(self.CallMoveWpZero)
self.ui.actionSplit_Edges.triggered.connect(self.reloadFile)
self.ui.actionAutomatic_Cutter_Compensation.triggered.connect(self.reloadFile)
self.ui.actionMilling.triggered.connect(self.setMachineTypeToMilling)
self.ui.actionDrag_Knife.triggered.connect(self.setMachineTypeToDragKnife)
self.ui.actionLathe.triggered.connect(self.setMachineTypeToLathe)
self.ui.actionAbout.triggered.connect(self.about)
def keyPressEvent(self, event):
"""
Rewritten KeyPressEvent to get other behavior while Shift is pressed.
        @purpose: Changes to ScrollHandDrag while Shift is pressed and
                  enables multi-selection while Control is pressed
@param event: Event Parameters passed to function
"""
if event.isAutoRepeat():
return
if (event.key() == QtCore.Qt.Key_Shift):
self.MyGraphicsView.setDragMode(QtGui.QGraphicsView.ScrollHandDrag)
elif (event.key() == QtCore.Qt.Key_Control):
self.MyGraphicsView.selmode = 1
def keyReleaseEvent (self, event):
"""
Rewritten KeyReleaseEvent to get other behavior while Shift is pressed.
        @purpose: Changes back to NoDrag when Shift is released and
                  disables multi-selection when Control is released
@param event: Event Parameters passed to function
"""
if (event.key() == QtCore.Qt.Key_Shift):
self.MyGraphicsView.setDragMode(QtGui.QGraphicsView.NoDrag)
#self.setDragMode(QtGui.QGraphicsView.RubberBandDrag )
elif (event.key() == QtCore.Qt.Key_Control):
self.MyGraphicsView.selmode = 0
def enableplotmenu(self, status = True):
"""
Enable the Toolbar buttons.
@param status: Set True to enable, False to disable
"""
self.ui.actionShow_WP_Zero.setEnabled(status)
self.ui.actionShow_path_directions.setEnabled(status)
self.ui.actionShow_disabled_paths.setEnabled(status)
self.ui.actionLive_update_export_route.setEnabled(status)
self.ui.actionAutoscale.setEnabled(status)
self.ui.actionScale_all.setEnabled(status)
self.ui.actionRotate_all.setEnabled(status)
self.ui.actionMove_WP_zero.setEnabled(status)
def showDialog(self):
"""
This function is called by the menu "File/Load File" of the main toolbar.
It creates the file selection dialog and calls the loadFile function to
load the selected file.
"""
self.filename = QtGui.QFileDialog.getOpenFileName(self,
self.tr("Open file"),
g.config.vars.Paths['import_dir'], self.tr(\
"All supported files (*.dxf *.ps *.pdf);;" \
"DXF files (*.dxf);;"\
"PS files (*.ps);;"\
"PDF files (*.pdf);;"\
"all files (*.*)"))
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
#If there is something to load then call the load function callback
if not(self.filename == ""):
logger.info(self.tr("File: %s selected") % self.filename)
self.setWindowTitle(self.tr("DXF2GCODE - [%s]") % self.filename)
#Initialize the scale, rotate and move coordinates
self.cont_scale = 1.0
self.cont_dx = 0.0
self.cont_dy = 0.0
self.rotate = 0.0
self.loadFile(self.filename)
QtGui.QApplication.restoreOverrideCursor()
def reloadFile(self):
"""
This function is called by the menu "File/Reload File" of the main toolbar.
It reloads the previously loaded file (if any)
"""
logger.info(self.tr("Reloading file: %s") % self.filename)
#If there is something to load then call the load function callback
if not(self.filename == ""):
self.loadFile(self.filename)
def optimize_TSP(self):
"""
Method is called to optimize the order of the shapes. This is performed
by solving the TSP Problem.
"""
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
logger.debug(self.tr('Optimize order of enabled shapes per layer'))
self.MyGraphicsScene.resetexproutes()
#Get the export order from the QTreeView
logger.debug(self.tr('Updating order according to TreeView'))
self.TreeHandler.updateExportOrder()
self.MyGraphicsScene.addexproutest()
for LayerContent in self.LayerContents:
#Initial values for the Lists to export.
self.shapes_to_write = []
self.shapes_fixed_order = []
shapes_st_en_points = []
#Check all shapes of Layer which shall be exported and create List
#for it.
logger.debug(self.tr("Nr. of Shapes %s; Nr. of Shapes in Route %s")
% (len(LayerContent.shapes),
len(LayerContent.exp_order)))
logger.debug(self.tr("Export Order for start: %s") % LayerContent.exp_order)
for shape_nr in range(len(LayerContent.exp_order)):
if not(self.shapes[LayerContent.exp_order[shape_nr]].send_to_TSP):
self.shapes_fixed_order.append(shape_nr)
self.shapes_to_write.append(shape_nr)
shapes_st_en_points.append(self.shapes[LayerContent.exp_order[shape_nr]].get_st_en_points())
#Perform Export only if the Number of shapes to export is bigger than 0
if len(self.shapes_to_write)>0:
#Errechnen der Iterationen
#Calculate the iterations
iter_ = min(g.config.vars.Route_Optimisation['max_iterations'],
len(self.shapes_to_write)*50)
#Adding the Start and End Points to the List.
x_st = g.config.vars.Plane_Coordinates['axis1_start_end']
y_st = g.config.vars.Plane_Coordinates['axis2_start_end']
start = Point(x = x_st, y = y_st)
ende = Point(x = x_st, y = y_st)
shapes_st_en_points.append([start, ende])
TSPs = []
TSPs.append(TSPoptimize(st_end_points = shapes_st_en_points,
order = self.shapes_fixed_order))
logger.info(self.tr("TSP start values initialised for Layer %s")
% LayerContent.LayerName)
logger.debug(self.tr("Shapes to write: %s")
% self.shapes_to_write)
logger.debug(self.tr("Fixed order: %s")
% self.shapes_fixed_order)
for it_nr in range(iter_):
#Only show each 50th step.
if (it_nr % 50) == 0:
TSPs[-1].calc_next_iteration()
new_exp_order = []
for nr in TSPs[-1].opt_route[1:len(TSPs[-1].opt_route)]:
new_exp_order.append(LayerContent.exp_order[nr])
logger.debug(self.tr("TSP done with result: %s") % TSPs[-1])
LayerContent.exp_order = new_exp_order
self.MyGraphicsScene.addexproute(LayerContent.exp_order,
LayerContent.LayerNr)
logger.debug(self.tr("New Export Order after TSP: %s")
% new_exp_order)
self.app.processEvents()
else:
LayerContent.exp_order = []
if LayerContent:
self.ui.actionDelete_G0_paths.setEnabled(True)
self.MyGraphicsScene.addexprouteen()
#Update order in the treeView, according to path calculation done by the TSP
self.TreeHandler.updateTreeViewOrder()
QtGui.QApplication.restoreOverrideCursor()
def deleteG0paths(self):
"""
Deletes the optimisation paths from the scene.
"""
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.MyGraphicsScene.delete_opt_path()
self.ui.actionDelete_G0_paths.setEnabled(False)
QtGui.QApplication.restoreOverrideCursor()
def exportShapes(self, status=False, saveas=None):
"""
        This function is called by the menu "Export/Export Shapes". It may open
        a Save Dialog if used without LinuxCNC integration. The dialog also
        allows choosing between the available postprocessor output formats,
        which are located in the postprocessor folder.
"""
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
logger.debug(self.tr('Export the enabled shapes'))
#Get the export order from the QTreeView
self.TreeHandler.updateExportOrder()
self.updateExportRoute()
logger.debug(self.tr("Sorted layers:"))
for i, layer in enumerate(self.LayerContents):
logger.debug("LayerContents[%i] = %s" % (i, layer))
if not(g.config.vars.General['write_to_stdout']):
#Get the name of the File to export
if saveas == None:
filename = self.showSaveDialog()
self.save_filename = str(filename[0].toUtf8()).decode("utf-8")
else:
filename = [None, None]
self.save_filename = saveas
#If Cancel was pressed
if not self.save_filename:
QtGui.QApplication.restoreOverrideCursor()
return
(beg, ende) = os.path.split(self.save_filename)
(fileBaseName, fileExtension) = os.path.splitext(ende)
pp_file_nr = 0
for i in range(len(self.MyPostProcessor.output_format)):
name = "%s " % (self.MyPostProcessor.output_text[i])
format_ = "(*%s)" % (self.MyPostProcessor.output_format[i])
MyFormats = name + format_
if filename[1] == MyFormats:
pp_file_nr = i
if fileExtension != self.MyPostProcessor.output_format[pp_file_nr]:
if not QtCore.QFile.exists(self.save_filename):
self.save_filename = self.save_filename + self.MyPostProcessor.output_format[pp_file_nr]
self.MyPostProcessor.getPostProVars(pp_file_nr)
else:
self.save_filename = None
self.MyPostProcessor.getPostProVars(0)
"""
Export will be performed according to LayerContents and their order
is given in this variable too.
"""
self.MyPostProcessor.exportShapes(self.load_filename,
self.save_filename,
self.LayerContents)
QtGui.QApplication.restoreOverrideCursor()
if g.config.vars.General['write_to_stdout']:
self.close()
def optimizeAndExportShapes(self):
"""
Optimize the tool path, then export the shapes
"""
self.optimize_TSP()
self.exportShapes()
def updateExportRoute(self):
"""
Update the drawing of the export route
"""
self.MyGraphicsScene.resetexproutes()
self.MyGraphicsScene.addexproutest()
for LayerContent in self.LayerContents:
if len(LayerContent.exp_order) > 0:
self.MyGraphicsScene.addexproute(LayerContent.exp_order, LayerContent.LayerNr)
if LayerContent:
self.ui.actionDelete_G0_paths.setEnabled(True)
self.MyGraphicsScene.addexprouteen()
def showSaveDialog(self):
"""
This function is called by the menu "Export/Export Shapes" of the main toolbar.
It creates the selection dialog for the exporter
@return: Returns the filename of the selected file.
"""
MyFormats = ""
for i in range(len(self.MyPostProcessor.output_format)):
name = "%s " % (self.MyPostProcessor.output_text[i])
format_ = "(*%s);;" % (self.MyPostProcessor.output_format[i])
MyFormats = MyFormats + name + format_
(beg, ende) = os.path.split(self.load_filename)
(fileBaseName, fileExtension) = os.path.splitext(ende)
default_name = os.path.join(g.config.vars.Paths['output_dir'], fileBaseName)
selected_filter = self.MyPostProcessor.output_format[0]
filename = QtGui.QFileDialog.getSaveFileNameAndFilter(self,
self.tr('Export to file'), default_name,
MyFormats, selected_filter)
logger.info(self.tr("File: %s selected") % filename[0])
return filename
def autoscale(self):
"""
This function is called by the menu "Autoscale" of the main. Forwards the
call to MyGraphicsview.autoscale()
"""
self.MyGraphicsView.autoscale()
def about(self):
"""
This function is called by the menu "Help/About" of the main toolbar and
creates the About Window
"""
message = self.tr("<html>"\
"<h2><center>You are using</center></h2>"\
"<body bgcolor="\
"<center><img src='images/dxf2gcode_logo.png' border='1' color='white'></center></body>"\
"<h2>Version:</h2>"\
"<body>%s: %s<br>"\
"Last change: %s<br>"\
"Changed by: %s<br></body>"\
"<h2>Where to get help:</h2>"\
"For more information and updates, "\
"please visit the Google Code Project: "\
"<a href='http://code.google.com/p/dxf2gcode/'>http://code.google.com/p/dxf2gcode/</a><br>"\
"For any questions on how to use dxf2gcode please use the<br>"\
"<a href='https://groups.google.com/forum/?fromgroups#!forum/dxf2gcode-users'>mailing list</a><br><br>"\
"To log bugs, or request features please use the <br>"\
"<a href='http://code.google.com/p/dxf2gcode/issues/list'>issue tracking system</a><br>"\
"<h2>License and copyright:</h2>"\
"<body>This program is written in Python and is published under the "\
"<a href='http://www.gnu.org/licenses/'>GNU GPLv3 license.</a><br>"\
"</body></html>") % (c.VERSION, c.REVISION, c.DATE, c.AUTHOR)
myAboutDialog(title = "About DXF2GCODE", message = message)
def setShow_wp_zero(self):
"""
This function is called by the menu "Show WP Zero" of the
main and forwards the call to MyGraphicsView.setShow_wp_zero()
"""
flag = self.ui.actionShow_WP_Zero.isChecked()
self.MyGraphicsView.setShow_wp_zero(flag)
def setShow_path_directions(self):
"""
This function is called by the menu "Show all path directions" of the
main and forwards the call to MyGraphicsView.setShow_path_direction()
"""
flag = self.ui.actionShow_path_directions.isChecked()
self.MyGraphicsView.setShow_path_direction(flag)
def setShow_disabled_paths(self):
"""
This function is called by the menu "Show disabled paths" of the
main and forwards the call to MyGraphicsView.setShow_disabled_paths()
"""
flag = self.ui.actionShow_disabled_paths.isChecked()
self.MyGraphicsView.setShow_disabled_paths(flag)
def setUpdate_export_route(self):
"""
This function is called by the menu "Live update tool path" of the
main and forwards the call to TreeHandler.setUpdateExportRoute()
"""
flag = self.ui.actionLive_update_export_route.isChecked()
if not flag:
#Remove any existing export route, since it won't be updated anymore
self.MyGraphicsScene.resetexproutes()
self.TreeHandler.setUpdateExportRoute(flag)
def setTolerances(self):
"""
This function is called when the Option=>Tolerances Menu is clicked.
"""
title = self.tr('Contour tolerances')
if g.config.metric == 0:
label = (self.tr("Tolerance for common points [in]:"), \
self.tr("Tolerance for curve fitting [in]:"))
else:
label = (self.tr("Tolerance for common points [mm]:"), \
self.tr("Tolerance for curve fitting [mm]:"))
value = (g.config.point_tolerance,
g.config.fitting_tolerance)
logger.debug(self.tr("set Tolerances"))
SetTolDialog = myDialog(title, label, value)
if SetTolDialog.result == None:
return
g.config.point_tolerance = float(SetTolDialog.result[0])
g.config.fitting_tolerance = float(SetTolDialog.result[1])
self.reloadFile()
#self.MyGraphicsView.update()
def CallScaleAll(self):
"""
This function is called when the Option=>Scale All Menu is clicked.
"""
title = self.tr('Scale Contour')
label = [self.tr("Scale Contour by factor:")]
value = [self.cont_scale]
ScaEntDialog = myDialog(title, label, value)
if ScaEntDialog.result == None:
return
self.cont_scale = float(ScaEntDialog.result[0])
self.EntitiesRoot.sca = self.cont_scale
self.reloadFile()
#self.MyGraphicsView.update()
def CallRotateAll(self):
"""
This function is called when the Option=>Rotate All Menu is clicked.
"""
title = self.tr('Rotate Contour')
label = [self.tr("Rotate Contour by deg:")]
value = [degrees(self.rotate)]
RotEntDialog = myDialog(title, label, value)
if RotEntDialog.result == None:
return
self.rotate = radians(float(RotEntDialog.result[0]))
self.EntitiesRoot.rot = self.rotate
self.reloadFile()
#self.MyGraphicsView.update()
def CallMoveWpZero(self):
"""
This function is called when the Option=>Move WP Zero Menu is clicked.
"""
title = self.tr('Workpiece zero offset')
label = ((self.tr("Offset %s axis by mm:") % g.config.vars.Axis_letters['ax1_letter']), \
(self.tr("Offset %s axis by mm:") % g.config.vars.Axis_letters['ax2_letter']))
value = (self.cont_dx, self.cont_dy)
MoveWpzDialog = myDialog(title, label, value, True)
if MoveWpzDialog.result == None:
return
if MoveWpzDialog.result == 'Auto':
minx = sys.float_info.max
maxy = - sys.float_info.max
for shape in self.shapes:
if not(shape.isDisabled()):
r = shape.boundingRect()
if r.left() < minx:
minx = r.left()
if r.bottom() > maxy:
maxy = r.bottom()
self.cont_dx = self.EntitiesRoot.p0.x - minx
self.cont_dy = self.EntitiesRoot.p0.y + maxy
else:
self.cont_dx = float(MoveWpzDialog.result[0])
self.cont_dy = float(MoveWpzDialog.result[1])
self.EntitiesRoot.p0.x = self.cont_dx
self.EntitiesRoot.p0.y = self.cont_dy
self.reloadFile()
#self.MyGraphicsView.update()
def setMachineTypeToMilling(self):
"""
This function is called by the menu when Machine Type -> Milling is clicked.
"""
g.config.machine_type = 'milling'
self.updateMachineType()
self.reloadFile()
def setMachineTypeToDragKnife(self):
"""
This function is called by the menu when Machine Type -> Drag Knife is clicked.
"""
g.config.machine_type = 'drag_knife'
self.updateMachineType()
self.reloadFile()
def setMachineTypeToLathe(self):
"""
This function is called by the menu when Machine Type -> Lathe is clicked.
"""
g.config.machine_type = 'lathe'
self.updateMachineType()
self.reloadFile()
def updateMachineType(self):
if g.config.machine_type == 'milling':
self.ui.actionAutomatic_Cutter_Compensation.setEnabled(True)
self.ui.actionMilling.setChecked(True)
self.ui.actionDrag_Knife.setChecked(False)
self.ui.actionLathe.setChecked(False)
self.ui.label_9.setText(self.tr("Z Infeed depth"))
elif g.config.machine_type == 'lathe':
self.ui.actionAutomatic_Cutter_Compensation.setEnabled(False)
self.ui.actionMilling.setChecked(False)
self.ui.actionDrag_Knife.setChecked(False)
self.ui.actionLathe.setChecked(True)
self.ui.label_9.setText(self.tr("No Z-Axis for lathe"))
elif g.config.machine_type == "drag_knife":
            # TODO: Update of Machine Type Lathe required. Z-Axis not available
            # But fields may be used for other purposes.
self.ui.actionAutomatic_Cutter_Compensation.setEnabled(False)
self.ui.actionMilling.setChecked(False)
self.ui.actionDrag_Knife.setChecked(True)
self.ui.actionLathe.setChecked(False)
self.ui.label_9.setText(self.tr("Z Drag depth"))
def loadFile(self, filename):
"""
Loads the file given by filename. Also calls the command to
make the plot.
@param filename: String containing filename which should be loaded
"""
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
filename = str(filename).decode("utf-8")
self.load_filename = filename
(name, ext) = os.path.splitext(filename)
if (ext.lower() == ".ps") or (ext.lower() == ".pdf"):
logger.info(self.tr("Sending Postscript/PDF to pstoedit"))
#Create temporary file which will be read by the program
filename = os.path.join(tempfile.gettempdir(), 'dxf2gcode_temp.dxf')
pstoedit_cmd = g.config.vars.Filters['pstoedit_cmd'] #"C:\Program Files (x86)\pstoedit\pstoedit.exe"
pstoedit_opt = g.config.vars.Filters['pstoedit_opt'] #['-f','dxf','-mm']
ps_filename = os.path.normcase(self.load_filename)
cmd = [(('%s') % pstoedit_cmd)] + pstoedit_opt + [(('%s') % ps_filename), (('%s') % filename)]
logger.debug(cmd)
retcode = subprocess.call(cmd)
#self.textbox.text.delete(7.0, END)
logger.info(self.tr('Loading file: %s') % filename)
#logger.info("<a href=file:%s>%s</a>" % (filename, filename))
values = ReadDXF(filename)
#Output the information in the text window
logger.info(self.tr('Loaded layers: %s') % len(values.layers))
logger.info(self.tr('Loaded blocks: %s') % len(values.blocks.Entities))
for i in range(len(values.blocks.Entities)):
layers = values.blocks.Entities[i].get_used_layers()
logger.info(self.tr('Block %i includes %i Geometries, reduced to %i Contours, used layers: %s')\
% (i, len(values.blocks.Entities[i].geo), len(values.blocks.Entities[i].cont), layers))
layers = values.entities.get_used_layers()
insert_nr = values.entities.get_insert_nr()
logger.info(self.tr('Loaded %i Entities geometries, reduced to %i Contours, used layers: %s, Number of inserts: %i') \
% (len(values.entities.geo), len(values.entities.cont), layers, insert_nr))
if g.config.metric == 0:
logger.info("Drawing units: inches")
self.ui.unitLabel_3.setText("[in]")
self.ui.unitLabel_4.setText("[in]")
self.ui.unitLabel_5.setText("[in]")
self.ui.unitLabel_6.setText("[in]")
self.ui.unitLabel_7.setText("[in]")
self.ui.unitLabel_8.setText("[IPM]")
self.ui.unitLabel_9.setText("[IPM]")
else:
logger.info("Drawing units: millimeters")
self.ui.unitLabel_3.setText("[mm]")
self.ui.unitLabel_4.setText("[mm]")
self.ui.unitLabel_5.setText("[mm]")
self.ui.unitLabel_6.setText("[mm]")
self.ui.unitLabel_7.setText("[mm]")
self.ui.unitLabel_8.setText("[mm/min]")
self.ui.unitLabel_9.setText("[mm/min]")
self.makeShapesAndPlot(values)
#After all is plotted enable the Menu entities
self.enableplotmenu()
self.ui.actionDelete_G0_paths.setEnabled(False)
QtGui.QApplication.restoreOverrideCursor()
def makeShapesAndPlot(self, values):
"""
Plots all data stored in the values parameter to the Canvas
@param values: Includes all values loaded from the dxf file
"""
#Generate the Shapes
self.makeShapes(values,
p0 = Point(x = self.cont_dx, y = self.cont_dy),
pb = Point(x = 0.0, y = 0.0),
sca = [self.cont_scale, self.cont_scale, self.cont_scale],
rot = self.rotate)
# Automatic cutter compensation
self.automaticCutterCompensation()
# Break insertion
Breaks(self.LayerContents).process()
#Populate the treeViews
self.TreeHandler.buildEntitiesTree(self.EntitiesRoot)
self.TreeHandler.buildLayerTree(self.LayerContents)
#Print the values
self.MyGraphicsView.clearScene()
self.MyGraphicsScene = MyGraphicsScene()
self.MyGraphicsScene.plotAll(self.shapes, self.EntitiesRoot)
self.MyGraphicsView.setScene(self.MyGraphicsScene)
self.setShow_wp_zero()
self.setShow_path_directions()
self.setShow_disabled_paths()
self.setUpdate_export_route()
self.MyGraphicsView.show()
self.MyGraphicsView.setFocus()
#Autoscale the Canvas
self.MyGraphicsView.autoscale()
def makeShapes(self, values, p0, pb, sca, rot):
"""
Instance is called by the Main Window after the defined file is loaded.
It generates all ploting functionality. The parameters are generally
used to scale or offset the base geometry (by Menu in GUI).
@param values: The loaded dxf values from the dxf_import.py file
@param p0: The Starting Point to plot (Default x=0 and y=0)
        @param pb: The Base Point to insert the geometry and base for rotation
(Default is also x=0 and y=0)
@param sca: The scale of the basis function (default =1)
@param rot: The rotation of the geometries around base (default =0)
"""
self.values = values
#Put back the contours
del(self.shapes[:])
del(self.LayerContents[:])
del(self.EntitiesRoot)
self.EntitiesRoot = EntitieContentClass(Nr = 0, Name = 'Entities',
parent = None, children = [],
p0 = p0, pb = pb,
sca = sca, rot = rot)
        #Start mit () bedeutet zuweisen der Entities -1 = Standard
        #Starting with the default ent_nr = -1 means the root Entities are assigned
self.makeEntitiesShapes(parent = self.EntitiesRoot)
self.LayerContents.sort()
def makeEntitiesShapes(self, parent = None, ent_nr = -1):
"""
Instance is called prior to plotting the shapes. It creates
all shape classes which are later plotted into the graphics.
@param parent: The parent of a shape is always an Entities. It may be root
or, if it is a Block, this is the Block.
        @param ent_nr: The values given in self.values are sorted so that
        index 0 is the root Entities and the blocks begin at index 1.
        This value gives the index into self.values to be used.
"""
if parent.Name == "Entities":
entities = self.values.entities
else:
ent_nr = self.values.Get_Block_Nr(parent.Name)
entities = self.values.blocks.Entities[ent_nr]
#Zuweisen der Geometrien in die Variable geos & Konturen in cont
#Assigning the geometries in the variables geos & contours in cont
ent_geos = entities.geo
#Loop for the number of contours
for cont in entities.cont:
#Abfrage falls es sich bei der Kontur um ein Insert eines Blocks handelt
#Query if it is in the contour of an insert of a block
if ent_geos[cont.order[0][0]].Typ == "Insert":
ent_geo = ent_geos[cont.order[0][0]]
                #Zuweisen des Basispunkts für den Block
#Assign the base point for the block
new_ent_nr = self.values.Get_Block_Nr(ent_geo.BlockName)
new_entities = self.values.blocks.Entities[new_ent_nr]
pb = new_entities.basep
#Skalierung usw. des Blocks zuweisen
#Scaling, etc. assign the block
p0 = ent_geos[cont.order[0][0]].Point
sca = ent_geos[cont.order[0][0]].Scale
rot = ent_geos[cont.order[0][0]].rot
                #Erstellen des neuen Entitie Contents für das Insert
#Creating the new Entitie Contents for the insert
NewEntitieContent = EntitieContentClass(Nr = 0,
Name = ent_geo.BlockName,
parent = parent, children = [],
p0 = p0,
pb = pb,
sca = sca,
rot = rot)
parent.addchild(NewEntitieContent)
self.makeEntitiesShapes(parent = NewEntitieContent,
ent_nr = ent_nr)
else:
#Loop for the number of geometries
self.shapes.append(ShapeClass(len(self.shapes),
cont.closed,
40,
0.0,
parent,
[]))
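                # ShapeClass is constructed here with the shape number, the
                # closed flag of the contour, two numeric defaults (40 and 0.0,
                # where 40 presumably means no cutter compensation, i.e. G40),
                # the parent entity and an initially empty geometry list; the
                # exact parameter names are defined in Core/Shape.py, which is
                # not part of this excerpt.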
for ent_geo_nr in range(len(cont.order)):
ent_geo = ent_geos[cont.order[ent_geo_nr][0]]
if cont.order[ent_geo_nr][1]:
ent_geo.geo.reverse()
for geo in ent_geo.geo:
geo = copy(geo)
geo.reverse()
self.appendshapes(geo)
ent_geo.geo.reverse()
else:
for geo in ent_geo.geo:
self.appendshapes(copy(geo))
#All shapes have to be CCW direction.
self.shapes[-1].AnalyseAndOptimize()
self.shapes[-1].FindNearestStPoint()
#Connect the shapeSelectionChanged and enableDisableShape signals to our treeView, so that selections of the shapes are reflected on the treeView
self.shapes[-1].setSelectionChangedCallback(self.TreeHandler.updateShapeSelection)
self.shapes[-1].setEnableDisableCallback(self.TreeHandler.updateShapeEnabling)
self.addtoLayerContents(self.shapes[-1], ent_geo.Layer_Nr)
parent.addchild(self.shapes[-1])
def appendshapes(self, geo):
"""
Documentation required
"""
if self.ui.actionSplit_Edges.isChecked() == True:
if geo.type == 'LineGeo':
diff = (geo.Pe - geo.Pa) / 2.0
geo_b = deepcopy(geo)
geo_a = deepcopy(geo)
geo_b.Pe -= diff
geo_a.Pa += diff
self.shapes[-1].geos.append(geo_b)
self.shapes[-1].geos.append(geo_a)
else:
self.shapes[-1].geos.append(geo)
else:
self.shapes[-1].geos.append(geo)
def addtoLayerContents(self, shape, lay_nr):
"""
Instance is called while the shapes are created. This gives the
structure which shape is laying on which layer. It also writes into the
shape the reference to the LayerContent Class.
        @param shape: The shape to be appended to the layer
@param lay_nr: The Nr. of the layer
"""
# Disable shape by default, if it lives on an ignored layer
#if shape.LayerContent.should_ignore():
# shape.setDisable(True, True)
#Check if the layer already exists and add shape if it is.
for LayCon in self.LayerContents:
if LayCon.LayerNr == lay_nr:
LayCon.shapes.append(shape)
shape.LayerContent = LayCon
shape.setDisabledIfOnDisabledLayer()
return
#If the Layer does not exist create a new one.
LayerName = self.values.layers[lay_nr].name
self.LayerContents.append(LayerContentClass(lay_nr, LayerName, [shape]))
shape.LayerContent = self.LayerContents[-1]
shape.setDisabledIfOnDisabledLayer()
def automaticCutterCompensation(self):
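        # Closed shapes that are not enclosed by another shape get
        # cut_cor = 41, enclosed shapes get cut_cor = 42 and are moved in front
        # of their container so that inner contours are processed first
        # (41/42 presumably correspond to G41/G42 cutter compensation).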
        if (self.ui.actionAutomatic_Cutter_Compensation.isEnabled() and
                self.ui.actionAutomatic_Cutter_Compensation.isChecked()):
            for layerContent in self.LayerContents:
                if layerContent.automaticCutterCompensationEnabled():
                    newShapes = []
for shape in layerContent.shapes:
shape.make_papath()
for shape in layerContent.shapes:
if shape.closed:
container = None
myBounds = shape.boundingRect()
for otherShape in layerContent.shapes :
if shape != otherShape and otherShape.boundingRect().contains(myBounds):
logger.debug(self.tr("Shape: %s is contained in shape %s") % (shape.nr, otherShape.nr))
container = otherShape
if container is None:
shape.cut_cor = 41
newShapes.append(shape)
else:
shape.cut_cor = 42
newShapes.insert(layerContent.shapes.index(container), shape)
else:
newShapes.append(shape)
layerContent.shapes = newShapes
logger.debug(self.tr("new order for layer %s:") % (layerContent.LayerName))
for shape in layerContent.shapes:
logger.debug(self.tr(">>Shape: %s") % (shape.nr))
def closeEvent(self, e):
logger.debug(self.tr("exiting"))
self.writeSettings()
e.accept()
def readSettings(self):
settings = QtCore.QSettings("dxf2gcode", "dxf2gcode")
settings.beginGroup("MainWindow");
self.resize(settings.value("size", QtCore.QSize(800, 600)).toSize());
self.move(settings.value("pos", QtCore.QPoint(200, 200)).toPoint());
settings.endGroup();
def writeSettings(self):
settings = QtCore.QSettings("dxf2gcode", "dxf2gcode")
settings.beginGroup("MainWindow");
settings.setValue("size", self.size());
settings.setValue("pos", self.pos());
settings.endGroup();
if __name__ == "__main__":
"""
The main function which is executed after program start.
"""
Log=LoggerClass(logger)
#Get local language and install if available.
g.config = MyConfig()
Log.set_console_handler_loglevel()
Log.add_file_logger()
app = QtGui.QApplication(sys.argv)
locale = QtCore.QLocale.system().name()
logger.debug("locale: %s" %locale)
translator = QtCore.QTranslator()
if translator.load("dxf2gcode_" + locale, "./i18n"):
app.installTranslator(translator)
window = Main(app)
g.window = window
    #Add the window's message box as the widget that the log messages
    #shall be sent to. This Class needs a function "def write(self, charstr)"
Log.add_window_logger(window.myMessageBox)
parser = argparse.ArgumentParser()
parser.add_argument("filename",nargs="?")
# parser.add_argument("-f", "--file", dest = "filename",
# help = "read data from FILENAME")
parser.add_argument("-e", "--export", dest = "export_filename",
help = "export data to FILENAME")
parser.add_argument("-q", "--quiet", action = "store_true",
dest = "quiet", help = "no GUI")
# parser.add_option("-v", "--verbose",
# action = "store_true", dest = "verbose")
options = parser.parse_args()
#(options, args) = parser.parse_args()
logger.debug("Started with following options \n%s" % (parser))
if not options.quiet:
window.show()
if not(options.filename is None):
window.filename = options.filename
#Initialize the scale, rotate and move coordinates
window.cont_scale = 1.0
window.cont_dx = 0.0
window.cont_dy = 0.0
window.rotate = 0.0
window.loadFile(options.filename)
if not(options.export_filename is None):
window.exportShapes(None, options.export_filename)
if not options.quiet:
# It's exec_ because exec is a reserved word in Python
sys.exit(app.exec_())
|
oryxr/dxf2gcode
|
dxf2gcode.py
|
Python
|
gpl-3.0
| 43,333
|
[
"VisIt"
] |
868cb21da503026ed8c9da934cbe2a2d458e16350e81405d2c36a39d11ed3879
|
"""Tests for ΔCC½ algorithms."""
from __future__ import annotations
from unittest import mock
from dxtbx.model import Crystal, Experiment, ExperimentList, Scan
from dials.algorithms.statistics.cc_half_algorithm import CCHalfFromDials
from dials.array_family import flex
from dials.command_line.compute_delta_cchalf import phil_scope
def generated_exp(n=1):
"""Generate an experiment list with two experiments."""
experiments = ExperimentList()
exp_dict = {
"__id__": "crystal",
"real_space_a": [1.0, 0.0, 0.0],
"real_space_b": [0.0, 1.0, 0.0],
"real_space_c": [0.0, 0.0, 2.0],
"space_group_hall_symbol": " C 2y",
}
for i in range(n):
experiments.append(
Experiment(
scan=Scan(image_range=[1, 25], oscillation=[0.0, 1.0]),
crystal=Crystal.from_dict(exp_dict),
identifier=str(i),
)
)
return experiments
def generated_refl():
"""Generate test data."""
refls = flex.reflection_table()
refls["intensity.scale.value"] = flex.double(range(50))
refls["intensity.scale.variance"] = flex.double(range(50))
refls["inverse_scale_factor"] = flex.double(50, 1.0)
refls["id"] = flex.int([0] * 25 + [1] * 25)
refls["xyzobs.px.value"] = flex.vec3_double(
[(0, 0, i + 0.5) for i in range(25)] * 2
)
vals = [0, 1, 2, 3, 48, 49]
    # set the first four images of the first sweep, and the last two of the
    # last sweep, as outliers
outliers = flex.bool(50, False)
for v in vals:
outliers[v] = True
refls.set_flags(outliers, refls.flags.outlier_in_scaling)
refls.experiment_identifiers()[0] = "0"
refls.experiment_identifiers()[1] = "1"
return refls
def test_setup_of_CCHalfFromDials():
"""Test the correct setup in image group mode.
Test for the case of outliers at the end of images, and image ranges not
equaling a multiple of the grouping."""
params = phil_scope.extract()
params.mode = "image_group"
expts = generated_exp(n=2)
refls = generated_refl()
# Expected behaviour is that the outliers will not be included in the
# image range, and that all groups will have at least 10 images in.
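    # Concretely: dataset "0" covers images 1-25 with its first four images
    # flagged as outliers, so images 5-25 are split into groups (5, 14) and
    # (15, 25); dataset "1" has its last two images flagged, so images 1-23
    # are split into (1, 10) and (11, 23), as asserted below.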
script = CCHalfFromDials(params, expts, refls)
assert script.group_to_datasetid_and_range == {
0: ("0", (5, 14)),
1: ("0", (15, 25)),
2: ("1", (1, 10)),
3: ("1", (11, 23)),
}
assert script.datasetid_to_groups == {"0": [0, 1], "1": [2, 3]}
def test_exclusion_in_CCHalfFromDials():
"""Test the exclusion of image groups."""
# Same input as above, but mock DeltaCCHalf algorithm to just test
# interpretation of results and setting of excluded regions. With the
# input, test that outlier edges are correctly removed.
params = phil_scope.extract()
params.mode = "image_group"
expts = generated_exp(n=2)
refls = generated_refl()
def mock_algorithm(*_):
"""Mock a result from DeltaCCHalf"""
algo = mock.Mock()
algo.run.return_value = None
algo.results_summary = {
"per_dataset_delta_cc_half_values": {
"delta_cc_half_values": [-5.0, -2.0, 4.0, -5.0],
"datasets": [0, 1, 2, 3],
},
"dataset_removal": {"cutoff_value": -1.0},
}
return algo
with mock.patch(
"dials.algorithms.statistics.cc_half_algorithm.DeltaCCHalf",
side_effect=mock_algorithm,
):
script = CCHalfFromDials(params, expts, refls)
script.run()
assert script.datasetid_to_groups == {"0": [], "1": [2]} # all but 3 removed
expts = script.experiments
assert list(expts.identifiers()) == ["1"]
assert expts[0].scan.get_valid_image_ranges(expts.identifiers()[0]) == [(1, 10)]
assert script.results_summary["dataset_removal"][
"experiment_ids_fully_removed"
] == [0]
assert script.results_summary["dataset_removal"][
"experiments_fully_removed"
] == ["0"]
|
dials/dials
|
tests/algorithms/statistics/test_delta_cchalf_algorithm.py
|
Python
|
bsd-3-clause
| 4,068
|
[
"CRYSTAL"
] |
b29d446f0a0f657a63c5102c23c167c8fdc5550221f0c0544cb9d99393f36dfc
|
'''
gaussian_filter.py - given a color RGB image, try different
Gaussian kernels for image blurring
Author: Esha Uboweja (euboweja)
'''
# Import Libraries
import cv2
import numpy as np
import sys
# Callback function for trackbar
def nothing(x):
pass
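# (cv2.createTrackbar requires an onChange callback even though the values are
# only polled in the loop below, hence this empty placeholder.)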
# Read image in color format
if len(sys.argv) > 1:
im_name = sys.argv[1]
else:
im_name = "../images/birds.jpg"
img = cv2.imread(im_name)
result = img
kernel_max = 20
# Create a blank image, and a window
segmented = np.zeros(img.shape, np.uint8)
cv2.namedWindow("Gaussian Blurred")
# Create trackbars for selecting the Gaussian kernel dimensions (rows, cols)
cv2.createTrackbar("kernel_rows", "Gaussian Blurred", 1, kernel_max, nothing)
cv2.createTrackbar("kernel_cols", "Gaussian Blurred", 1, kernel_max, nothing)
# Create switch for ON/OFF functionality
switch = "0 : OFF \n1 : ON"
cv2.createTrackbar(switch, "Gaussian Blurred", 0, 1, nothing)
# Wait for user input in GUI
while True:
cv2.imshow("Gaussian Blurred", result)
key = cv2.waitKey(20)
if key == 27:
break
# Get current trackbar positions => values
krow = cv2.getTrackbarPos("kernel_rows", "Gaussian Blurred")
if krow % 2 == 0:
krow += 1
kcol = cv2.getTrackbarPos("kernel_cols", "Gaussian Blurred")
if kcol % 2 == 0:
kcol += 1
s = cv2.getTrackbarPos(switch, "Gaussian Blurred")
if s:
# Apply Gaussian Blur to the image
result = cv2.GaussianBlur(img, (krow, kcol), 0)
# After key press, destroy all external windows
cv2.destroyAllWindows()
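# --- Hedged, non-interactive sketch (editor's addition, not part of the
# original script): cv2.GaussianBlur requires odd, positive kernel sizes,
# which is why the loop above bumps even trackbar values up by one. The
# default path below is just the same sample image assumed earlier.
def blur_once(path="../images/birds.jpg", krow=5, kcol=5):
    """Blur a single image with an explicit (odd) Gaussian kernel."""
    image = cv2.imread(path)
    if image is None:
        return None
    # Force odd kernel dimensions, mirroring the trackbar handling above.
    krow += (krow % 2 == 0)
    kcol += (kcol % 2 == 0)
    return cv2.GaussianBlur(image, (krow, kcol), 0)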
|
eknight7/cv_build18
|
filtering/gaussian_filter.py
|
Python
|
mit
| 1,547
|
[
"Gaussian"
] |
c8073b65c300f25a45ffd4e469e029e2319a9ef6f7d5dcbc81e2b9a784fac71e
|
# -*- coding: utf-8 -*-
# Begin CVS Header
# $Source: /Volumes/Home/Users/shoops/cvs/copasi_dev/copasi/bindings/python/unittests/Test_CVersion.py,v $
# $Revision: 1.8 $
# $Name: $
# $Author: shoops $
# $Date: 2010/07/16 18:55:59 $
# End CVS Header
# Copyright (C) 2010 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CVersion(unittest.TestCase):
def setUp(self):
self.cversion=COPASI.CVersion()
def test_getVersionMajor(self):
versionMajor=self.cversion.getVersionMajor()
self.assert_(type(versionMajor)==IntType, "Error: the major version of CVersion is not an Integer type.")
def test_getVersionMinor(self):
versionMinor=self.cversion.getVersionMinor()
self.assert_(type(versionMinor)==IntType)
def test_getVersionDevel(self):
versionDevel=self.cversion.getVersionDevel()
self.assert_(type(versionDevel)==IntType)
def test_getVersion(self):
version=self.cversion.getVersion()
self.assert_(type(version)==StringType)
def test_setVersion(self):
self.cversion.setVersion(5,12,57,"testtesttest")
self.assert_(self.cversion.getVersionMajor()==5)
self.assert_(self.cversion.getVersionMinor()==12)
self.assert_(self.cversion.getVersionDevel()==57)
def test_VERSION(self):
V=COPASI.CVersion.VERSION
self.assert_(V.__class__ == COPASI.CVersion)
def suite():
tests=[
'test_getVersionMajor'
,'test_getVersionMinor'
,'test_getVersionDevel'
,'test_getVersion'
,'test_setVersion'
]
return unittest.TestSuite(map(Test_CVersion,tests))
if(__name__ == '__main__'):
unittest.TextTestRunner(verbosity=2).run(suite())
|
jonasfoe/COPASI
|
copasi/bindings/python/unittests/Test_CVersion.py
|
Python
|
artistic-2.0
| 2,030
|
[
"COPASI"
] |
14cac34e75e25e75b3f5210f22cec63566b94eaff7ddea7ac81c23a8c8d44435
|
import os
import numpy as np
from ase import io, units
from ase.optimize import QuasiNewton
from ase.parallel import paropen, rank, world
from ase.md import VelocityVerlet
from ase.md import MDLogger
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
class MinimaHopping:
"""Implements the minima hopping method of global optimization outlined
by S. Goedecker, J. Chem. Phys. 120: 9911 (2004). Initialize with an
ASE atoms object. Optional parameters are fed through keywords.
To run multiple searches in parallel, specify the minima_traj keyword,
and have each run point to the same path.
"""
_default_settings = {
'T0': 1000., # K, initial MD 'temperature'
'beta1': 1.1, # temperature adjustment parameter
'beta2': 1.1, # temperature adjustment parameter
'beta3': 1. / 1.1, # temperature adjustment parameter
'Ediff0': 0.5, # eV, initial energy acceptance threshold
'alpha1': 0.98, # energy threshold adjustment parameter
'alpha2': 1. / 0.98, # energy threshold adjustment parameter
'mdmin': 2, # criteria to stop MD simulation (no. of minima)
'logfile': 'hop.log', # text log
'minima_threshold': 0.5, # A, threshold for identical configs
'timestep': 1.0, # fs, timestep for MD simulations
'optimizer': QuasiNewton, # local optimizer to use
'minima_traj': 'minima.traj', # storage file for minima list
'fmax': 0.05, # eV/A, max force for optimizations
}
def __init__(self, atoms, **kwargs):
"""Initialize with an ASE atoms object and keyword arguments."""
self._atoms = atoms
for key in kwargs:
if key not in self._default_settings:
raise RuntimeError('Unknown keyword: %s' % key)
for k, v in self._default_settings.items():
setattr(self, '_%s' % k, kwargs.pop(k, v))
self._passedminimum = PassedMinimum() # when a MD sim. has passed
# a local minimum
# Misc storage.
self._previous_optimum = None
self._previous_energy = None
self._temperature = self._T0
self._Ediff = self._Ediff0
def __call__(self, totalsteps=None, maxtemp=None):
"""Run the minima hopping algorithm. Can specify stopping criteria
with total steps allowed or maximum searching temperature allowed.
If neither is specified, runs indefinitely (or until stopped by
batching software)."""
self._startup()
while True:
if (totalsteps and self._counter >= totalsteps):
self._log('msg', 'Run terminated. Step #%i reached of '
'%i allowed. Increase totalsteps if resuming.'
% (self._counter, totalsteps))
return
if (maxtemp and self._temperature >= maxtemp):
self._log('msg', 'Run terminated. Temperature is %.2f K;'
' max temperature allowed %.2f K.'
% (self._temperature, maxtemp))
return
self._previous_optimum = self._atoms.copy()
self._previous_energy = self._atoms.get_potential_energy()
self._molecular_dynamics()
self._optimize()
self._counter += 1
self._check_results()
def _startup(self):
"""Initiates a run, and determines if running from previous data or
a fresh run."""
status = np.array(-1.)
exists = self._read_minima()
if rank == 0:
if not exists:
# Fresh run with new minima file.
status = np.array(0.)
elif not os.path.exists(self._logfile):
# Fresh run with existing or shared minima file.
status = np.array(1.)
else:
# Must be resuming from within a working directory.
status = np.array(2.)
world.barrier()
world.broadcast(status, 0)
if status == 2.:
self._resume()
else:
self._counter = 0
self._log('init')
self._log('msg', 'Performing initial optimization.')
if status == 1.:
self._log('msg', 'Using existing minima file with %i prior '
'minima: %s' % (len(self._minima),
self._minima_traj))
self._optimize()
self._check_results()
self._counter += 1
def _resume(self):
"""Attempt to resume a run, based on information in the log
file. Note it will almost always be interrupted in the middle of
either a qn or md run or when exceeding totalsteps, so it has so far
only been tested in those cases."""
f = paropen(self._logfile, 'r')
lines = f.read().splitlines()
f.close()
self._log('msg', 'Attempting to resume stopped run.')
self._log('msg', 'Using existing minima file with %i prior '
'minima: %s' % (len(self._minima), self._minima_traj))
mdcount, qncount = 0, 0
for line in lines:
if (line[:4] == 'par:') and ('Ediff' not in line):
self._temperature = eval(line.split()[1])
self._Ediff = eval(line.split()[2])
elif line[:18] == 'msg: Optimization:':
qncount = int(line[19:].split('qn')[1])
elif line[:24] == 'msg: Molecular dynamics:':
mdcount = int(line[25:].split('md')[1])
self._counter = max((mdcount, qncount))
if qncount == mdcount:
# Either stopped during local optimization or terminated due to
# max steps.
self._log('msg', 'Attempting to resume at qn%05i' % qncount)
if qncount > 0:
atoms = io.read('qn%05i.traj' % (qncount - 1), index=-1)
self._previous_optimum = atoms.copy()
self._previous_energy = atoms.get_potential_energy()
if os.path.getsize('qn%05i.traj' % qncount) > 0:
atoms = io.read('qn%05i.traj' % qncount, index=-1)
else:
atoms = io.read('md%05i.traj' % qncount, index=-3)
self._atoms.positions = atoms.get_positions()
fmax = np.sqrt((atoms.get_forces() ** 2).sum(axis=1).max())
if fmax < self._fmax:
# Stopped after a qn finished.
self._log('msg', 'qn%05i fmax already less than fmax=%.3f'
% (qncount, self._fmax))
self._counter += 1
return
self._optimize()
self._counter += 1
if qncount > 0:
self._check_results()
else:
self._record_minimum()
self._log('msg', 'Found a new minimum.')
self._log('msg', 'Accepted new minimum.')
self._log('par')
elif qncount < mdcount:
# Probably stopped during molecular dynamics.
self._log('msg', 'Attempting to resume at md%05i.' % mdcount)
atoms = io.read('qn%05i.traj' % qncount, index=-1)
self._previous_optimum = atoms.copy()
self._previous_energy = atoms.get_potential_energy()
self._molecular_dynamics(resume=mdcount)
self._optimize()
self._counter += 1
self._check_results()
def _check_results(self):
"""Adjusts parameters and positions based on outputs."""
# No prior minima found?
self._read_minima()
if len(self._minima) == 0:
self._log('msg', 'Found a new minimum.')
self._log('msg', 'Accepted new minimum.')
self._record_minimum()
self._log('par')
return
# Returned to starting position?
if self._previous_optimum:
compare = ComparePositions(translate=False)
dmax = compare(self._atoms, self._previous_optimum)
self._log('msg', 'Max distance to last minimum: %.3f A' % dmax)
if dmax < self._minima_threshold:
self._log('msg', 'Re-found last minimum.')
self._temperature *= self._beta1
self._log('par')
return
# In a previously found position?
unique, dmax_closest = self._unique_minimum_position()
self._log('msg', 'Max distance to closest minimum: %.3f A' %
dmax_closest)
if not unique:
self._temperature *= self._beta2
self._log('msg', 'Found previously found minimum.')
self._log('par')
if self._previous_optimum:
self._log('msg', 'Restoring last minimum.')
self._atoms.positions = self._previous_optimum.positions
return
# Must have found a unique minimum.
self._temperature *= self._beta3
self._log('msg', 'Found a new minimum.')
self._log('par')
if (self._atoms.get_potential_energy() <
self._previous_energy + self._Ediff):
self._log('msg', 'Accepted new minimum.')
self._Ediff *= self._alpha1
self._log('par')
self._record_minimum()
else:
self._log('msg', 'Rejected new minimum due to energy. '
'Restoring last minimum.')
self._atoms.positions = self._previous_optimum.positions
self._Ediff *= self._alpha2
self._log('par')
def _log(self, cat='msg', message=None):
"""Records the message as a line in the log file."""
if cat == 'init':
if rank == 0:
if os.path.exists(self._logfile):
raise RuntimeError('File exists: %s' % self._logfile)
f = paropen(self._logfile, 'w')
f.write('par: %12s %12s %12s\n' % ('T (K)', 'Ediff (eV)',
'mdmin'))
f.write('ene: %12s %12s %12s\n' % ('E_current', 'E_previous',
'Difference'))
f.close()
return
f = paropen(self._logfile, 'a')
if cat == 'msg':
line = 'msg: %s' % message
elif cat == 'par':
line = ('par: %12.4f %12.4f %12i' %
(self._temperature, self._Ediff, self._mdmin))
elif cat == 'ene':
current = self._atoms.get_potential_energy()
if self._previous_optimum:
previous = self._previous_energy
line = ('ene: %12.5f %12.5f %12.5f' %
(current, previous, current - previous))
else:
line = ('ene: %12.5f' % current)
f.write(line + '\n')
f.close()
def _optimize(self):
"""Perform an optimization."""
self._atoms.set_momenta(np.zeros(self._atoms.get_momenta().shape))
opt = self._optimizer(self._atoms,
trajectory='qn%05i.traj' % self._counter,
logfile='qn%05i.log' % self._counter)
self._log('msg', 'Optimization: qn%05i' % self._counter)
opt.run(fmax=self._fmax)
self._log('ene')
def _record_minimum(self):
"""Adds the current atoms configuration to the minima list."""
traj = io.Trajectory(self._minima_traj, 'a')
traj.write(self._atoms)
self._read_minima()
self._log('msg', 'Recorded minima #%i.' % (len(self._minima) - 1))
def _read_minima(self):
"""Reads in the list of minima from the minima file."""
exists = os.path.exists(self._minima_traj)
if exists:
empty = os.path.getsize(self._minima_traj) == 0
if os.path.exists(self._minima_traj):
if not empty:
traj = io.Trajectory(self._minima_traj, 'r')
self._minima = [atoms for atoms in traj]
else:
self._minima = []
return True
else:
self._minima = []
return False
def _molecular_dynamics(self, resume=None):
"""Performs a molecular dynamics simulation, until mdmin is
exceeded. If resuming, the file number (md%05i) is expected."""
self._log('msg', 'Molecular dynamics: md%05i' % self._counter)
mincount = 0
energies, oldpositions = [], []
thermalized = False
if resume:
self._log('msg', 'Resuming MD from md%05i.traj' % resume)
if os.path.getsize('md%05i.traj' % resume) == 0:
self._log('msg', 'md%05i.traj is empty. Resuming from '
'qn%05i.traj.' % (resume, resume - 1))
atoms = io.read('qn%05i.traj' % (resume - 1), index=-1)
else:
images = io.Trajectory('md%05i.traj' % resume, 'r')
for atoms in images:
energies.append(atoms.get_potential_energy())
oldpositions.append(atoms.positions.copy())
passedmin = self._passedminimum(energies)
if passedmin:
mincount += 1
self._atoms.set_momenta(atoms.get_momenta())
thermalized = True
self._atoms.positions = atoms.get_positions()
self._log('msg', 'Starting MD with %i existing energies.' %
len(energies))
if not thermalized:
MaxwellBoltzmannDistribution(self._atoms,
temp=self._temperature * units.kB,
force_temp=True)
traj = io.Trajectory('md%05i.traj' % self._counter, 'a',
self._atoms)
dyn = VelocityVerlet(self._atoms, dt=self._timestep * units.fs)
log = MDLogger(dyn, self._atoms, 'md%05i.log' % self._counter,
header=True, stress=False, peratom=False)
dyn.attach(log, interval=1)
dyn.attach(traj, interval=1)
while mincount < self._mdmin:
dyn.run(1)
energies.append(self._atoms.get_potential_energy())
passedmin = self._passedminimum(energies)
if passedmin:
mincount += 1
oldpositions.append(self._atoms.positions.copy())
# Reset atoms to minimum point.
self._atoms.positions = oldpositions[passedmin[0]]
def _unique_minimum_position(self):
"""Identifies if the current position of the atoms, which should be
a local minima, has been found before."""
unique = True
dmax_closest = 99999.
compare = ComparePositions(translate=True)
self._read_minima()
for minimum in self._minima:
dmax = compare(minimum, self._atoms)
if dmax < self._minima_threshold:
unique = False
if dmax < dmax_closest:
dmax_closest = dmax
return unique, dmax_closest
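# --- Hedged usage sketch (editor's addition): how a MinimaHopping search is
# typically driven. The copper cluster and the EMT calculator below are
# illustrative assumptions, not part of this module.
def _minima_hopping_example():
    from ase import Atoms
    from ase.calculators.emt import EMT
    atoms = Atoms('Cu4', positions=[(0., 0., 0.), (2.5, 0., 0.),
                                    (0., 2.5, 0.), (2.5, 2.5, 0.)])
    atoms.set_calculator(EMT())
    hop = MinimaHopping(atoms, T0=500., fmax=0.1)
    hop(totalsteps=3)  # three hop cycles, writing qn*/md* files and minima.traj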
class ComparePositions:
"""Class that compares the atomic positions between two ASE atoms
objects. Returns the maximum distance that any atom has moved, assuming
all atoms of the same element are indistinguishable. If translate is
set to True, allows for arbitrary translations within the unit cell,
as well as translations across any periodic boundary conditions. When
called, returns the maximum displacement of any one atom."""
def __init__(self, translate=True):
self._translate = translate
def __call__(self, atoms1, atoms2):
atoms1 = atoms1.copy()
atoms2 = atoms2.copy()
if not self._translate:
dmax = self._indistinguishable_compare(atoms1, atoms2)
else:
dmax = self._translated_compare(atoms1, atoms2)
return dmax
def _translated_compare(self, atoms1, atoms2):
"""Moves the atoms around and tries to pair up atoms, assuming any
atoms with the same symbol are indistinguishable, and honors
periodic boundary conditions (for example, so that an atom at
(0.1, 0., 0.) correctly is found to be close to an atom at
(7.9, 0., 0.) if the atoms are in an orthorhombic cell with
x-dimension of 8. Returns dmax, the maximum distance between any
two atoms in the optimal configuration."""
atoms1.set_constraint()
atoms2.set_constraint()
for index in range(3):
assert atoms1.pbc[index] == atoms2.pbc[index]
least = self._get_least_common(atoms1)
indices1 = [atom.index for atom in atoms1 if atom.symbol == least[0]]
indices2 = [atom.index for atom in atoms2 if atom.symbol == least[0]]
# Make comparison sets from atoms2, which contain repeated atoms in
# all pbc's and bring the atom listed in indices2 to (0,0,0)
comparisons = []
repeat = []
for bc in atoms2.pbc:
if bc:
repeat.append(3)
else:
repeat.append(1)
repeated = atoms2.repeat(repeat)
moved_cell = atoms2.cell * atoms2.pbc
for moved in moved_cell:
repeated.translate(-moved)
repeated.set_cell(atoms2.cell)
for index in indices2:
comparison = repeated.copy()
comparison.translate(-atoms2[index].position)
comparisons.append(comparison)
# Bring the atom listed in indices1 to (0,0,0) [not whole list]
standard = atoms1.copy()
standard.translate(-atoms1[indices1[0]].position)
# Compare the standard to the comparison sets.
dmaxes = []
for comparison in comparisons:
dmax = self._indistinguishable_compare(standard, comparison)
dmaxes.append(dmax)
return min(dmaxes)
def _get_least_common(self, atoms):
"""Returns the least common element in atoms. If more than one,
returns the first encountered."""
symbols = [atom.symbol for atom in atoms]
least = ['', np.inf]
for element in set(symbols):
count = symbols.count(element)
if count < least[1]:
least = [element, count]
return least
def _indistinguishable_compare(self, atoms1, atoms2):
"""Finds each atom in atoms1's nearest neighbor with the same
chemical symbol in atoms2. Return dmax, the farthest distance an
individual atom differs by."""
atoms2 = atoms2.copy() # allow deletion
atoms2.set_constraint()
dmax = 0.
for atom1 in atoms1:
closest = [np.nan, np.inf]
for index, atom2 in enumerate(atoms2):
if atom2.symbol == atom1.symbol:
d = np.linalg.norm(atom1.position - atom2.position)
if d < closest[1]:
closest = [index, d]
if closest[1] > dmax:
dmax = closest[1]
del atoms2[closest[0]]
return dmax
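# --- Hedged usage sketch (editor's addition): ComparePositions reports the
# largest displacement of any single atom between two configurations. The
# two-atom structures below are made up purely for illustration.
def _compare_positions_example():
    from ase import Atoms
    a = Atoms('Cu2', positions=[(0., 0., 0.), (2.5, 0., 0.)])
    b = Atoms('Cu2', positions=[(0., 0., 0.1), (2.5, 0., 0.)])
    compare = ComparePositions(translate=False)
    return compare(a, b)  # expected to be about 0.1 Angstrom here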
class PassedMinimum:
"""Simple routine to find if a minimum in the potential energy surface
has been passed. In its default settings, a minimum is found if the
sequence ends with two downward points followed by two upward points.
Initialize with n_down and n_up, integer values of the number of up and
down points. If it has successfully determined it passed a minimum, it
returns the value (energy) of that minimum and the number of positions
back it occurred, otherwise returns None."""
def __init__(self, n_down=2, n_up=2):
self._ndown = n_down
self._nup = n_up
def __call__(self, energies):
if len(energies) < (self._nup + self._ndown + 1):
return None
status = True
index = -1
for i_up in range(self._nup):
if energies[index] < energies[index - 1]:
status = False
index -= 1
for i_down in range(self._ndown):
if energies[index] > energies[index - 1]:
status = False
index -= 1
if status:
return (-self._nup - 1), energies[-self._nup - 1]
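# --- Hedged usage sketch (editor's addition): PassedMinimum flags a minimum
# once the energy sequence falls n_down times and then rises n_up times.
# The energies below are arbitrary example values.
def _passed_minimum_example():
    detector = PassedMinimum(n_down=2, n_up=2)
    energies = [-5.0, -5.2, -5.4, -5.1, -4.9]  # two drops, then two rises
    # Returns (positions back from the end, energy at that minimum), or None
    # if no minimum has been passed yet.
    return detector(energies)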
class MHPlot:
"""Makes a plot summarizing the output of the MH algorithm from the
specified rundirectory. If no rundirectory is supplied, uses the
current directory."""
def __init__(self, rundirectory=None, logname='hop.log'):
if not rundirectory:
rundirectory = os.getcwd()
self._rundirectory = rundirectory
self._logname = logname
self._read_log()
self._fig, self._ax = self._makecanvas()
self._plot_data()
def get_figure(self):
"""Returns the matplotlib figure object."""
return self._fig
def save_figure(self, filename):
"""Saves the file to the specified path, with any allowed
matplotlib extension (e.g., .pdf, .png, etc.)."""
self._fig.savefig(filename)
def _read_log(self):
"""Reads relevant parts of the log file."""
data = [] # format: [energy, status, temperature, ediff]
f = open(os.path.join(self._rundirectory, self._logname), 'r')
lines = f.read().splitlines()
f.close()
step_almost_over = False
step_over = False
for line in lines:
if line.startswith('msg: Molecular dynamics:'):
status = 'performing MD'
elif line.startswith('msg: Optimization:'):
status = 'performing QN'
elif line.startswith('ene:'):
status = 'local optimum reached'
energy = floatornan(line.split()[1])
elif line.startswith('msg: Accepted new minimum.'):
status = 'accepted'
step_almost_over = True
elif line.startswith('msg: Found previously found minimum.'):
status = 'previously found minimum'
step_almost_over = True
elif line.startswith('msg: Re-found last minimum.'):
status = 'previous minimum'
step_almost_over = True
elif line.startswith('msg: Rejected new minimum'):
status = 'rejected'
step_almost_over = True
elif line.startswith('par: '):
temperature = floatornan(line.split()[1])
ediff = floatornan(line.split()[2])
if step_almost_over:
step_over = True
step_almost_over = False
if step_over:
data.append([energy, status, temperature, ediff])
step_over = False
if data[-1][1] != status:
data.append([np.nan, status, temperature, ediff])
self._data = data
def _makecanvas(self):
from matplotlib import pyplot
from matplotlib.ticker import ScalarFormatter
fig = pyplot.figure(figsize=(6., 8.))
lm, rm, bm, tm = 0.22, 0.02, 0.05, 0.04
vg1 = 0.01 # between adjacent energy plots
vg2 = 0.03 # between different types of plots
ratio = 2. # size of an energy plot to a parameter plot
figwidth = 1. - lm - rm
totalfigheight = 1. - bm - tm - vg1 - 2. * vg2
parfigheight = totalfigheight / (2. * ratio + 2)
epotheight = ratio * parfigheight
ax1 = fig.add_axes((lm, bm, figwidth, epotheight))
ax2 = fig.add_axes((lm, bm + epotheight + vg1,
figwidth, epotheight))
for ax in [ax1, ax2]:
ax.yaxis.set_major_formatter(ScalarFormatter(useOffset=False))
ediffax = fig.add_axes((lm, bm + 2. * epotheight + vg1 + vg2,
figwidth, parfigheight))
tempax = fig.add_axes((lm, (bm + 2 * epotheight + vg1 + 2 * vg2 +
parfigheight), figwidth, parfigheight))
for ax in [ax2, tempax, ediffax]:
ax.set_xticklabels([])
ax1.set_xlabel('step')
tempax.set_ylabel('$T$, K')
ediffax.set_ylabel(r'$E_\mathrm{diff}$, eV')
for ax in [ax1, ax2]:
ax.set_ylabel(r'$E_\mathrm{pot}$, eV')
ax = CombinedAxis(ax1, ax2, tempax, ediffax)
self._set_zoomed_range(ax)
ax1.spines['top'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
return fig, ax
def _set_zoomed_range(self, ax):
"""Try to intelligently set the range for the zoomed-in part of the
graph."""
energies = [line[0] for line in self._data
if not np.isnan(line[0])]
dr = max(energies) - min(energies)
if dr == 0.:
dr = 1.
ax.set_ax1_range((min(energies) - 0.2 * dr,
max(energies) + 0.2 * dr))
def _plot_data(self):
for step, line in enumerate(self._data):
self._plot_energy(step, line)
self._plot_qn(step, line)
self._plot_md(step, line)
self._plot_parameters()
self._ax.set_xlim(self._ax.ax1.get_xlim())
def _plot_energy(self, step, line):
"""Plots energy and annotation for acceptance."""
energy, status = line[0], line[1]
if np.isnan(energy):
return
self._ax.plot([step, step + 0.5], [energy] * 2, '-',
color='k', linewidth=2.)
if status == 'accepted':
self._ax.text(step + 0.51, energy, r'$\checkmark$')
elif status == 'rejected':
self._ax.text(step + 0.51, energy, r'$\Uparrow$', color='red')
elif status == 'previously found minimum':
self._ax.text(step + 0.51, energy, r'$\hookleftarrow$',
color='red', va='center')
elif status == 'previous minimum':
self._ax.text(step + 0.51, energy, r'$\leftarrow$',
color='red', va='center')
def _plot_md(self, step, line):
"""Adds a curved plot of molecular dynamics trajectory."""
if step == 0:
return
energies = [self._data[step - 1][0]]
file = os.path.join(self._rundirectory, 'md%05i.traj' % step)
traj = io.Trajectory(file, 'r')
for atoms in traj:
energies.append(atoms.get_potential_energy())
xi = step - 1 + .5
if len(energies) > 2:
xf = xi + (step + 0.25 - xi) * len(energies) / (len(energies) - 2.)
else:
xf = step
if xf > (step + .75):
xf = step
self._ax.plot(np.linspace(xi, xf, num=len(energies)), energies,
'-k')
def _plot_qn(self, index, line):
"""Plots a dashed vertical line for the optimization."""
if line[1] == 'performing MD':
return
file = os.path.join(self._rundirectory, 'qn%05i.traj' % index)
if os.path.getsize(file) == 0:
return
traj = io.Trajectory(file, 'r')
energies = [traj[0].get_potential_energy(),
traj[-1].get_potential_energy()]
if index > 0:
file = os.path.join(self._rundirectory, 'md%05i.traj' % index)
atoms = io.read(file, index=-3)
energies[0] = atoms.get_potential_energy()
self._ax.plot([index + 0.25] * 2, energies, ':k')
def _plot_parameters(self):
"""Adds a plot of temperature and Ediff to the plot."""
steps, Ts, ediffs = [], [], []
for step, line in enumerate(self._data):
steps.extend([step + 0.5, step + 1.5])
Ts.extend([line[2]] * 2)
ediffs.extend([line[3]] * 2)
self._ax.tempax.plot(steps, Ts)
self._ax.ediffax.plot(steps, ediffs)
for ax in [self._ax.tempax, self._ax.ediffax]:
ylim = ax.get_ylim()
yrange = ylim[1] - ylim[0]
ax.set_ylim((ylim[0] - 0.1 * yrange, ylim[1] + 0.1 * yrange))
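# --- Hedged usage sketch (editor's addition): summarizing a finished run from
# its working directory. 'hop.log' matches the default logfile name above; the
# output file name is an assumption.
def _mhplot_example(rundirectory=None):
    plot = MHPlot(rundirectory=rundirectory, logname='hop.log')
    plot.save_figure('summary.pdf')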
def floatornan(value):
"""Converts the argument into a float if possible, np.nan if not."""
try:
output = float(value)
except ValueError:
output = np.nan
return output
class CombinedAxis:
"""Helper class for MHPlot to plot on split y axis and adjust limits
simultaneously."""
def __init__(self, ax1, ax2, tempax, ediffax):
self.ax1 = ax1
self.ax2 = ax2
self.tempax = tempax
self.ediffax = ediffax
self._ymax = None
def set_ax1_range(self, ylim):
self._ax1_ylim = ylim
self.ax1.set_ylim(ylim)
def plot(self, *args, **kwargs):
self.ax1.plot(*args, **kwargs)
self.ax2.plot(*args, **kwargs)
# Re-adjust yrange
for yvalue in args[1]:
if self._ymax is None or yvalue > self._ymax:
self._ymax = yvalue
self.ax1.set_ylim(self._ax1_ylim)
self.ax2.set_ylim((self._ax1_ylim[1], self._ymax))
def set_xlim(self, *args):
self.ax1.set_xlim(*args)
self.ax2.set_xlim(*args)
self.tempax.set_xlim(*args)
self.ediffax.set_xlim(*args)
def text(self, *args, **kwargs):
y = args[1]
if y < self._ax1_ylim[1]:
ax = self.ax1
else:
ax = self.ax2
ax.text(*args, **kwargs)
|
suttond/MODOI
|
ase/optimize/minimahopping.py
|
Python
|
lgpl-3.0
| 29,451
|
[
"ASE"
] |
7990a56f331a194e46697fa35ce36184c7e42c711aa28d7943d5f174fd94e3d4
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import warnings
from almost import Approximate
from pytest import deprecated_call, raises
from conftest import various_backends
from trueskill import *
inf = float('inf')
nan = float('nan')
class almost(Approximate):
def normalize(self, value):
if isinstance(value, Rating):
return self.normalize(tuple(value))
elif isinstance(value, list):
try:
if isinstance(value[0][0], Rating):
# flatten transformed ratings
return list(sum(value, ()))
except (TypeError, IndexError):
pass
return super(almost, self).normalize(value)
@classmethod
def wrap(cls, f, *args, **kwargs):
return lambda *a, **k: cls(f(*a, **k), *args, **kwargs)
_rate = almost.wrap(rate)
_rate_1vs1 = almost.wrap(rate_1vs1)
_quality = almost.wrap(quality)
_quality_1vs1 = almost.wrap(quality_1vs1)
# usage
def test_compatibility_with_another_rating_systems():
"""All rating system modules should implement ``rate_1vs1`` and
``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
"""
r1, r2 = Rating(30, 3), Rating(20, 2)
assert quality_1vs1(r1, r2) == quality([(r1,), (r2,)])
rated = rate([(r1,), (r2,)])
assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
rated = rate([(r1,), (r2,)], [0, 0])
assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])
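# --- Hedged illustration (editor's addition): the 1 vs 1 shortcut exercised
# above, with arbitrary example ratings. After a win the winner's mean should
# rise and the loser's should fall.
def _example_rate_1vs1():
    winner, loser = Rating(30, 3), Rating(20, 2)
    new_winner, new_loser = rate_1vs1(winner, loser)
    return new_winner.mu > winner.mu and new_loser.mu < loser.mu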
def test_compare_ratings():
assert Rating(1, 2) == Rating(1, 2)
assert Rating(1, 2) != Rating(1, 3)
assert Rating(2, 2) > Rating(1, 2)
assert Rating(3, 2) >= Rating(1, 2)
assert Rating(0, 2) < Rating(1, 2)
assert Rating(-1, 2) <= Rating(1, 2)
def test_rating_to_number():
assert int(Rating(1, 2)) == 1
assert float(Rating(1.1, 2)) == 1.1
assert complex(Rating(1.2, 2)) == 1.2 + 0j
try:
assert long(Rating(1, 2)) == long(1)
except NameError:
# Python 3 doesn't have `long` anymore
pass
def test_unsorted_groups():
t1, t2, t3 = generate_teams([1, 1, 1])
rated = rate([t1, t2, t3], [2, 1, 0])
assert almost(rated) == \
[(18.325, 6.656), (25.000, 6.208), (31.675, 6.656)]
def test_custom_environment():
env = TrueSkill(draw_probability=.50)
t1, t2 = generate_teams([1, 1], env=env)
rated = env.rate([t1, t2])
assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
def test_setup_global_environment():
try:
setup(draw_probability=.50)
t1, t2 = generate_teams([1, 1])
rated = rate([t1, t2])
assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
finally:
# rollback
setup()
def test_invalid_rating_groups():
env = TrueSkill()
with raises(ValueError):
env.validate_rating_groups([])
with raises(ValueError):
env.validate_rating_groups([()])
# need multiple groups not just one
with raises(ValueError):
env.validate_rating_groups([(Rating(),)])
# empty group is not allowed
with raises(ValueError):
env.validate_rating_groups([(Rating(),), ()])
# all groups should be same structure
with raises(TypeError):
env.validate_rating_groups([(Rating(),), {0: Rating()}])
def test_deprecated_methods():
env = TrueSkill()
r1, r2, r3 = Rating(), Rating(), Rating()
deprecated_call(transform_ratings, [(r1,), (r2,), (r3,)])
deprecated_call(match_quality, [(r1,), (r2,), (r3,)])
deprecated_call(env.Rating)
deprecated_call(env.transform_ratings, [(r1,), (r2,), (r3,)])
deprecated_call(env.match_quality, [(r1,), (r2,), (r3,)])
deprecated_call(env.rate_1vs1, r1, r2)
deprecated_call(env.quality_1vs1, r1, r2)
deprecated_call(lambda: Rating().exposure)
dyn = TrueSkill(draw_probability=dynamic_draw_probability)
deprecated_call(dyn.rate, [(r1,), (r2,)])
def test_deprecated_individual_rating_groups():
r1, r2, r3 = Rating(50, 1), Rating(10, 5), Rating(15, 5)
with raises(TypeError):
deprecated_call(rate, [r1, r2, r3])
with raises(TypeError):
deprecated_call(quality, [r1, r2, r3])
assert transform_ratings([r1, r2, r3]) == rate([(r1,), (r2,), (r3,)])
assert match_quality([r1, r2, r3]) == quality([(r1,), (r2,), (r3,)])
deprecated_call(transform_ratings, [r1, r2, r3])
deprecated_call(match_quality, [r1, r2, r3])
def test_rating_tuples():
r1, r2, r3 = Rating(), Rating(), Rating()
rated = rate([(r1, r2), (r3,)])
assert len(rated) == 2
assert isinstance(rated[0], tuple)
assert isinstance(rated[1], tuple)
assert len(rated[0]) == 2
assert len(rated[1]) == 1
assert isinstance(rated[0][0], Rating)
def test_rating_dicts():
class Player(object):
def __init__(self, name, rating, team):
self.name = name
self.rating = rating
self.team = team
p1 = Player('Player A', Rating(), 0)
p2 = Player('Player B', Rating(), 0)
p3 = Player('Player C', Rating(), 1)
rated = rate([{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}])
assert len(rated) == 2
assert isinstance(rated[0], dict)
assert isinstance(rated[1], dict)
assert len(rated[0]) == 2
assert len(rated[1]) == 1
assert p1 in rated[0]
assert p2 in rated[0]
assert p3 in rated[1]
assert p1 not in rated[1]
assert p2 not in rated[1]
assert p3 not in rated[0]
assert isinstance(rated[0][p1], Rating)
p1.rating = rated[p1.team][p1]
p2.rating = rated[p2.team][p2]
p3.rating = rated[p3.team][p3]
def test_dont_use_0_for_min_delta():
with raises(ValueError):
rate([(Rating(),), (Rating(),)], min_delta=0)
def test_list_instead_of_tuple():
r1, r2 = Rating(), Rating()
assert rate([[r1], [r2]]) == rate([(r1,), (r2,)])
assert quality([[r1], [r2]]) == quality([(r1,), (r2,)])
def test_backend():
env = TrueSkill(backend=(NotImplemented, NotImplemented, NotImplemented))
with raises(TypeError):
env.rate_1vs1(Rating(), Rating())
with raises(ValueError):
# '__not_defined__' backend is not defined
TrueSkill(backend='__not_defined__')
# algorithm
def generate_teams(sizes, env=None):
rating_cls = Rating if env is None else env.create_rating
rating_groups = []
for size in sizes:
ratings = []
for x in range(size):
ratings.append(rating_cls())
rating_groups.append(tuple(ratings))
return rating_groups
def generate_individual(size, env=None):
return generate_teams([1] * size, env=env)
@various_backends
def test_n_vs_n():
# 1 vs 1
t1, t2 = generate_teams([1, 1])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == [(29.396, 7.171), (20.604, 7.171)]
assert _rate([t1, t2], [0, 0]) == [(25.000, 6.458), (25.000, 6.458)]
# 2 vs 2
t1, t2 = generate_teams([2, 2])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == \
[(28.108, 7.774), (28.108, 7.774), (21.892, 7.774), (21.892, 7.774)]
assert _rate([t1, t2], [0, 0]) == \
[(25.000, 7.455), (25.000, 7.455), (25.000, 7.455), (25.000, 7.455)]
# 4 vs 4
t1, t2 = generate_teams([4, 4])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == \
[(27.198, 8.059), (27.198, 8.059), (27.198, 8.059), (27.198, 8.059),
(22.802, 8.059), (22.802, 8.059), (22.802, 8.059), (22.802, 8.059)]
@various_backends
def test_1_vs_n():
t1, = generate_teams([1])
# 1 vs 2
t2, = generate_teams([2])
assert _quality([t1, t2]) == 0.135
assert _rate([t1, t2]) == \
[(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)]
assert _rate([t1, t2], [0, 0]) == \
[(31.660, 7.138), (18.340, 7.138), (18.340, 7.138)]
# 1 vs 3
t2, = generate_teams([3])
assert _quality([t1, t2]) == 0.012
assert _rate([t1, t2]) == \
[(36.337, 7.527), (13.663, 7.527), (13.663, 7.527), (13.663, 7.527)]
assert almost(rate([t1, t2], [0, 0]), 2) == \
[(34.990, 7.455), (15.010, 7.455), (15.010, 7.455), (15.010, 7.455)]
# 1 vs 7
t2, = generate_teams([7])
assert _quality([t1, t2]) == 0
assert _rate([t1, t2]) == \
[(40.582, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917),
(9.418, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917)]
@various_backends
def test_individual():
# 3 players
players = generate_individual(3)
assert _quality(players) == 0.200
assert _rate(players) == \
[(31.675, 6.656), (25.000, 6.208), (18.325, 6.656)]
assert _rate(players, [0] * 3) == \
[(25.000, 5.698), (25.000, 5.695), (25.000, 5.698)]
# 4 players
players = generate_individual(4)
assert _quality(players) == 0.089
assert _rate(players) == \
[(33.207, 6.348), (27.401, 5.787), (22.599, 5.787), (16.793, 6.348)]
# 5 players
players = generate_individual(5)
assert _quality(players) == 0.040
assert _rate(players) == \
[(34.363, 6.136), (29.058, 5.536), (25.000, 5.420), (20.942, 5.536),
(15.637, 6.136)]
# 8 players
players = generate_individual(8)
assert _quality(players) == 0.004
assert _rate(players, [0] * 8) == \
[(25.000, 4.592), (25.000, 4.583), (25.000, 4.576), (25.000, 4.573),
(25.000, 4.573), (25.000, 4.576), (25.000, 4.583), (25.000, 4.592)]
# 16 players
players = generate_individual(16)
assert _rate(players) == \
[(40.539, 5.276), (36.810, 4.711), (34.347, 4.524), (32.336, 4.433),
(30.550, 4.380), (28.893, 4.349), (27.310, 4.330), (25.766, 4.322),
(24.234, 4.322), (22.690, 4.330), (21.107, 4.349), (19.450, 4.380),
(17.664, 4.433), (15.653, 4.524), (13.190, 4.711), (9.461, 5.276)]
@various_backends
def test_multiple_teams():
# 2 vs 4 vs 2
t1 = (Rating(40, 4), Rating(45, 3))
t2 = (Rating(20, 7), Rating(19, 6), Rating(30, 9), Rating(10, 4))
t3 = (Rating(50, 5), Rating(30, 2))
assert _quality([t1, t2, t3]) == 0.367
assert _rate([t1, t2, t3], [0, 1, 1]) == \
[(40.877, 3.840), (45.493, 2.934), (19.609, 6.396), (18.712, 5.625),
(29.353, 7.673), (9.872, 3.891), (48.830, 4.590), (29.813, 1.976)]
# 1 vs 2 vs 1
t1 = (Rating(),)
t2 = (Rating(), Rating())
t3 = (Rating(),)
assert _quality([t1, t2, t3]) == 0.047
@various_backends
def test_upset():
# 1 vs 1
t1, t2 = (Rating(),), (Rating(50, 12.5),)
assert _quality([t1, t2]) == 0.110
assert _rate([t1, t2], [0, 0]) == [(31.662, 7.137), (35.010, 7.910)]
# 2 vs 2
t1 = (Rating(20, 8), Rating(25, 6))
t2 = (Rating(35, 7), Rating(40, 5))
assert _quality([t1, t2]) == 0.084
assert _rate([t1, t2]) == \
[(29.698, 7.008), (30.455, 5.594), (27.575, 6.346), (36.211, 4.768)]
# 3 vs 2
t1 = (Rating(28, 7), Rating(27, 6), Rating(26, 5))
t2 = (Rating(30, 4), Rating(31, 3))
assert _quality([t1, t2]) == 0.254
assert _rate([t1, t2], [0, 1]) == \
[(28.658, 6.770), (27.484, 5.856), (26.336, 4.917), (29.785, 3.958),
(30.879, 2.983)]
assert _rate([t1, t2], [1, 0]) == \
[(21.840, 6.314), (22.474, 5.575), (22.857, 4.757), (32.012, 3.877),
(32.132, 2.949)]
# 8 players
players = [(Rating(10, 8),), (Rating(15, 7),), (Rating(20, 6),),
(Rating(25, 5),), (Rating(30, 4),), (Rating(35, 3),),
(Rating(40, 2),), (Rating(45, 1),)]
assert _quality(players) == 0.000
assert _rate(players) == \
[(35.135, 4.506), (32.585, 4.037), (31.329, 3.756), (30.984, 3.453),
(31.751, 3.064), (34.051, 2.541), (38.263, 1.849), (44.118, 0.983)]
@various_backends
def test_partial_play():
t1, t2 = (Rating(),), (Rating(), Rating())
# each results from C# Skills:
assert rate([t1, t2], weights=[(1,), (1, 1)]) == rate([t1, t2])
assert _rate([t1, t2], weights=[(1,), (1, 1)]) == \
[(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)]
assert _rate([t1, t2], weights=[(0.5,), (0.5, 0.5)]) == \
[(33.939, 7.312), (16.061, 7.312), (16.061, 7.312)]
assert _rate([t1, t2], weights=[(1,), (0, 1)]) == \
[(29.440, 7.166), (25.000, 8.333), (20.560, 7.166)]
assert _rate([t1, t2], weights=[(1,), (0.5, 1)]) == \
[(32.417, 7.056), (21.291, 8.033), (17.583, 7.056)]
# match quality of partial play
t1, t2, t3 = (Rating(),), (Rating(), Rating()), (Rating(),)
assert _quality([t1, t2, t3], [(1,), (0.25, 0.75), (1,)]) == 0.2
assert _quality([t1, t2, t3], [(1,), (0.8, 0.9), (1,)]) == 0.0809
@various_backends
def test_partial_play_with_weights_dict():
t1, t2 = (Rating(),), (Rating(), Rating())
assert rate([t1, t2], weights={(0, 0): 0.5, (1, 0): 0.5, (1, 1): 0.5}) == \
rate([t1, t2], weights=[[0.5], [0.5, 0.5]])
assert rate([t1, t2], weights={(1, 0): 0}) == \
rate([t1, t2], weights=[[1], [0, 1]])
assert rate([t1, t2], weights={(1, 0): 0.5}) == \
rate([t1, t2], weights=[[1], [0.5, 1]])
@various_backends
def test_microsoft_research_example():
# http://research.microsoft.com/en-us/projects/trueskill/details.aspx
alice, bob, chris, darren, eve, fabien, george, hillary = \
Rating(), Rating(), Rating(), Rating(), \
Rating(), Rating(), Rating(), Rating()
_rated = rate([{'alice': alice}, {'bob': bob}, {'chris': chris},
{'darren': darren}, {'eve': eve}, {'fabien': fabien},
{'george': george}, {'hillary': hillary}])
rated = {}
list(map(rated.update, _rated))
assert almost(rated['alice']) == (36.771, 5.749)
assert almost(rated['bob']) == (32.242, 5.133)
assert almost(rated['chris']) == (29.074, 4.943)
assert almost(rated['darren']) == (26.322, 4.874)
assert almost(rated['eve']) == (23.678, 4.874)
assert almost(rated['fabien']) == (20.926, 4.943)
assert almost(rated['george']) == (17.758, 5.133)
assert almost(rated['hillary']) == (13.229, 5.749)
@various_backends
def test_dynamic_draw_probability():
from trueskillhelpers import calc_dynamic_draw_probability as calc
def assert_predictable_draw_probability(r1, r2, drawn=False):
dyn = TrueSkill(draw_probability=dynamic_draw_probability)
sta = TrueSkill(draw_probability=calc((r1,), (r2,), dyn))
assert dyn.rate_1vs1(r1, r2, drawn) == sta.rate_1vs1(r1, r2, drawn)
assert_predictable_draw_probability(Rating(100), Rating(10))
assert_predictable_draw_probability(Rating(10), Rating(100))
assert_predictable_draw_probability(Rating(10), Rating(100), drawn=True)
assert_predictable_draw_probability(Rating(25), Rating(25))
assert_predictable_draw_probability(Rating(25), Rating(25), drawn=True)
assert_predictable_draw_probability(Rating(-25), Rating(125))
assert_predictable_draw_probability(Rating(125), Rating(-25))
assert_predictable_draw_probability(Rating(-25), Rating(125), drawn=True)
assert_predictable_draw_probability(Rating(25, 10), Rating(25, 0.1))
# functions
@various_backends
def test_exposure():
env = TrueSkill()
assert env.expose(env.create_rating()) == 0
env = TrueSkill(1000, 200)
assert env.expose(env.create_rating()) == 0
# mathematics
def test_valid_gaussian():
from trueskill.mathematics import Gaussian
with raises(TypeError): # sigma argument is needed
Gaussian(0)
with raises(ValueError): # sigma**2 should be greater than 0
Gaussian(0, 0)
def test_valid_matrix():
from trueskill.mathematics import Matrix
with raises(TypeError): # src must be a list or dict or callable
Matrix(None)
with raises(ValueError): # src must be a rectangular array of numbers
Matrix([])
with raises(ValueError): # src must be a rectangular array of numbers
Matrix([[1, 2, 3], [4, 5]])
with raises(TypeError):
# A callable src must return an iterable which generates a tuple
# containing coordinate and value
Matrix(lambda: None)
def test_matrix_from_dict():
from trueskill.mathematics import Matrix
mat = Matrix({(0, 0): 1, (4, 9): 1})
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
def test_matrix_from_item_generator():
from trueskill.mathematics import Matrix
def gen_matrix(height, width):
yield (0, 0), 1
yield (height - 1, width - 1), 1
mat = Matrix(gen_matrix, 5, 10)
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
with raises(TypeError):
# A callable src must call set_height and set_width if the size is
# non-deterministic
Matrix(gen_matrix)
def gen_and_set_size_matrix(set_height, set_width):
set_height(5)
set_width(10)
return [((0, 0), 1), ((4, 9), 1)]
mat = Matrix(gen_and_set_size_matrix)
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
def test_matrix_operations():
from trueskill.mathematics import Matrix
assert Matrix([[1, 2], [3, 4]]).inverse() == \
Matrix([[-2.0, 1.0], [1.5, -0.5]])
assert Matrix([[1, 2], [3, 4]]).determinant() == -2
assert Matrix([[1, 2], [3, 4]]).adjugate() == Matrix([[4, -2], [-3, 1]])
with raises(ValueError): # Bad size
assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6]])
assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6, 7], [8, 9, 10]]) == \
Matrix([[21, 24, 27], [47, 54, 61]])
with raises(ValueError): # Must be same size
Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6, 7], [8, 9, 10]])
assert Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6], [7, 8]]) == \
Matrix([[6, 8], [10, 12]])
# reported bugs
@various_backends
def test_issue3():
"""The `issue #3`_, opened by @youknowone.
These inputs led to ZeroDivisionError before 0.1.4. Other TrueSkill
implementations also cannot calculate this case.
.. _issue #3: https://github.com/sublee/trueskill/issues/3
"""
# @konikos's case 1
t1 = (Rating(42.234, 3.728), Rating(43.290, 3.842))
t2 = (Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500))
rate([t1, t2], [6, 5])
# @konikos's case 2
t1 = (Rating(25.000, 0.500), Rating(25.000, 0.500), Rating(25.000, 0.500),
Rating(25.000, 0.500), Rating(33.333, 0.500), Rating(33.333, 0.500),
Rating(33.333, 0.500), Rating(33.333, 0.500), Rating(41.667, 0.500),
Rating(41.667, 0.500), Rating(41.667, 0.500), Rating(41.667, 0.500))
t2 = (Rating(42.234, 3.728), Rating(43.291, 3.842))
rate([t1, t2], [0, 28])
@various_backends(['scipy'])
def test_issue4():
"""The `issue #4`_, opened by @sublee.
numpy.float64 handles floating-point error by different way. For example,
it can just warn RuntimeWarning on n/0 problem instead of throwing
ZeroDivisionError.
.. _issue #4: https://github.com/sublee/trueskill/issues/4
"""
import numpy
r1, r2 = Rating(105.247, 0.439), Rating(27.030, 0.901)
# make numpy to raise FloatingPointError instead of warning
# RuntimeWarning
old_settings = numpy.seterr(divide='raise')
try:
rate([(r1,), (r2,)])
finally:
numpy.seterr(**old_settings)
@various_backends([None, 'scipy'])
def test_issue5(backend):
"""The `issue #5`_, opened by @warner121.
This error occurs when a winner has a much lower rating than a loser.
Plain Python cannot calculate the correct result, but mpmath_ can. I added
a ``backend`` option to the :class:`TrueSkill` class. If it is set to
'mpmath' then the problem goes away.
The result of Microsoft's TrueSkill calculator is N(-273.092, 2.683) and
N(-75.830, 2.080); that of C# Skills by Moserware is N(NaN, 2.6826) and
N(NaN, 2.0798). I chose Microsoft's result as the expectation for the test
suite.
.. _issue #5: https://github.com/sublee/trueskill/issues/5
.. _mpmath: http://mpmath.googlecode.com/
"""
assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190))
assert _quality_1vs1(Rating(), Rating(1000)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000))
@various_backends(['mpmath'])
def test_issue5_with_mpmath():
_rate_1vs1 = almost.wrap(rate_1vs1, 0)
assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0
assert _rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == \
[(-273.361, 2.683), (-75.683, 2.080)]
assert _quality_1vs1(Rating(), Rating(1000)) == 0
assert _rate_1vs1(Rating(), Rating(1000)) == \
[(415.298, 6.455), (609.702, 6.455)]
@various_backends(['mpmath'])
def test_issue5_with_more_extreme():
"""If the input is more extreme, 'mpmath' backend also made an exception.
But we can avoid the problem with higher precision.
"""
import mpmath
try:
dps = mpmath.mp.dps
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000))
mpmath.mp.dps = 50
assert almost(rate_1vs1(Rating(), Rating(1000000)), prec=-1) == \
[(400016.896, 6.455), (600008.104, 6.455)]
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000000000))
mpmath.mp.dps = 100
assert almost(rate_1vs1(Rating(), Rating(1000000000000)), prec=-7) == \
[(400001600117.693, 6.455), (599998399907.307, 6.455)]
finally:
mpmath.mp.dps = dps
|
amit-bansil/netsci
|
robocompviz/trueskill/trueskilltest.py
|
Python
|
mit
| 22,195
|
[
"Gaussian"
] |
af15818a89b5dade27df7ecc3054fffb7be709988e14d0af30e7555e221b8421
|
"""
Acceptance tests for Video.
"""
import os
from unittest import skipIf
from unittest.mock import patch
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.video.video import VideoPage
from common.test.acceptance.tests.helpers import (
UniqueCourseTest,
YouTubeStubConfig,
is_youtube_available,
)
from openedx.core.lib.tests import attr
VIDEO_SOURCE_PORT = 8777
VIDEO_HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', 'localhost')
HTML5_SOURCES = [
f'http://{VIDEO_HOSTNAME}:{VIDEO_SOURCE_PORT}/gizmo.mp4',
f'http://{VIDEO_HOSTNAME}:{VIDEO_SOURCE_PORT}/gizmo.webm',
f'http://{VIDEO_HOSTNAME}:{VIDEO_SOURCE_PORT}/gizmo.ogv',
]
HTML5_SOURCES_INCORRECT = [
f'http://{VIDEO_HOSTNAME}:{VIDEO_SOURCE_PORT}/gizmo.mp99',
]
HLS_SOURCES = [
f'http://{VIDEO_HOSTNAME}:{VIDEO_SOURCE_PORT}/hls/history.m3u8',
]
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class VideoBaseTest(UniqueCourseTest):
"""
Base class for tests of the Video Player
Sets up the course and provides helper functions for the Video tests.
"""
def setUp(self):
"""
Initialization of pages and course fixture for video tests
"""
super().setUp()
self.longMessage = True
self.video = VideoPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.metadata = None
self.assets = []
self.contents_of_verticals = None
self.youtube_configuration = {}
self.user_info = {}
# reset youtube stub server
self.addCleanup(YouTubeStubConfig.reset)
def navigate_to_video(self):
""" Prepare the course and get to the video and render it """
self._install_course_fixture()
self._navigate_to_courseware_video_and_render()
def navigate_to_video_no_render(self):
"""
Prepare the course and get to the video unit
however do not wait for it to render, because
there has been an error.
"""
self._install_course_fixture()
self._navigate_to_courseware_video_no_render()
def _install_course_fixture(self):
""" Install the course fixture that has been defined """
if self.assets:
self.course_fixture.add_asset(self.assets)
chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section')
chapter_sequential.add_children(*self._add_course_verticals())
chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential)
self.course_fixture.add_children(chapter)
self.course_fixture.install()
if len(self.youtube_configuration) > 0:
YouTubeStubConfig.configure(self.youtube_configuration)
def _add_course_verticals(self):
"""
Create XBlockFixtureDesc verticals
:return: a list of XBlockFixtureDesc
"""
xblock_verticals = []
_contents_of_verticals = self.contents_of_verticals
# Video tests require at least one vertical with a single video.
if not _contents_of_verticals:
_contents_of_verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]]
for vertical_index, vertical in enumerate(_contents_of_verticals):
xblock_verticals.append(self._create_single_vertical(vertical, vertical_index))
return xblock_verticals
def _create_single_vertical(self, vertical_contents, vertical_index):
"""
Create a single course vertical of type XBlockFixtureDesc with category `vertical`.
A single course vertical can contain single or multiple video modules.
:param vertical_contents: a list of items for the vertical to contain
:param vertical_index: index for the vertical display name
:return: XBlockFixtureDesc
"""
xblock_course_vertical = XBlockFixtureDesc('vertical', f'Test Vertical-{vertical_index}')
for video in vertical_contents:
xblock_course_vertical.add_children(
XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata')))
return xblock_course_vertical
def _navigate_to_courseware_video(self):
""" Register for the course and navigate to the video unit """
self.auth_page.visit()
self.user_info = self.auth_page.user_info
self.courseware_page.visit()
def _navigate_to_courseware_video_and_render(self):
""" Wait for the video player to render """
self._navigate_to_courseware_video()
self.video.wait_for_video_player_render()
def _navigate_to_courseware_video_no_render(self):
""" Wait for the video Xmodule but not for rendering """
self._navigate_to_courseware_video()
self.video.wait_for_video_class()
def metadata_for_mode(self, player_mode, additional_data=None):
"""
Create a dictionary for video player configuration according to `player_mode`
:param player_mode (str): Video player mode
:param additional_data (dict): Optional additional metadata.
:return: dict
"""
metadata = {}
youtube_ids = {
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
}
if player_mode == 'html5':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
metadata.update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'hls':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HLS_SOURCES,
})
if player_mode == 'html5_and_hls':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HTML5_SOURCES + HLS_SOURCES,
})
if additional_data:
metadata.update(additional_data)
return metadata
def go_to_sequential_position(self, position):
"""
Navigate to the sequential unit at the given `position`.
"""
self.courseware_page.go_to_sequential_position(position)
self.video.wait_for_video_player_render()
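# --- Hedged illustration (editor's addition): the metadata dict that
# VideoBaseTest.metadata_for_mode('html5') is expected to build -- blanked
# YouTube ids plus the HTML5 sources defined above. Not used by the tests.
def example_html5_metadata():
    return {
        'youtube_id_1_0': '',
        'youtube_id_0_75': '',
        'youtube_id_1_25': '',
        'youtube_id_1_5': '',
        'html5_sources': HTML5_SOURCES,
    }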
@attr('a11y')
class LMSVideoBlockA11yTest(VideoBaseTest):
"""
LMS Video Accessibility Test Class
"""
def setUp(self):
browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
# the a11y tests run in CI under phantomjs which doesn't
# support html5 video or flash player, so the video tests
# don't work in it. We still want to be able to run these
# tests in CI, so override the browser setting if it is
# phantomjs.
if browser == 'phantomjs':
browser = 'firefox'
with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
super().setUp()
def test_video_player_a11y(self):
# load transcripts so we can test skipping to
self.assets.extend(['english_single_transcript.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"en": "english_single_transcript.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
# limit the scope of the audit to the video player only.
self.video.a11y_audit.config.set_scope(
include=["div.video"]
)
self.video.a11y_audit.check_for_accessibility_errors()
|
eduNEXT/edunext-platform
|
common/test/acceptance/tests/video/test_video_module.py
|
Python
|
agpl-3.0
| 8,636
|
[
"VisIt"
] |
a8c91807cdf2a48bde231d66df475e040f9925d3bf49a0b90a59ab7ceb568511
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This sample illustrates how various observables of interest can be checkpointed.
"""
import espressomd
required_features = ["P3M", "WCA"]
espressomd.assert_features(required_features)
from espressomd import checkpointing
import numpy as np
checkpoint = checkpointing.Checkpoint(checkpoint_id="mycheckpoint")
checkpoint.load()
# print out actors
print("\n### current active actors ###")
for act in system.actors.active_actors:
print(act)
# test user variable
print("\n### user variable test ###")
print("myvar = {}".format(myvar))
# test "system"
print("\n### system test ###")
print("system.time = {}".format(system.time))
print("system.box_l = {}".format(system.box_l))
# test "system.non_bonded_inter"
print("\n### system.non_bonded_inter test ###")
print("system.non_bonded_inter[0, 0].wca.get_params() = {}".format(
system.non_bonded_inter[0, 0].wca.get_params()))
# test "system.part"
print("\n### system.part test ###")
print("system.part[:].pos = {}".format(system.part[:].pos))
# test "system.thermostat"
print("\n### system.thermostat test ###")
print("system.thermostat.get_state() = {}".format(
system.thermostat.get_state()))
# test "p3m"
print("\n### p3m test ###")
print("p3m.get_params() = {}".format(p3m.get_params()))
# test registered objects
# all objects that are registered when writing a checkpoint are
# automatically registered after loading this checkpoint
print("\n### checkpoint register test ###")
print("checkpoint.get_registered_objects() = {}".format(
checkpoint.get_registered_objects()))
# integrate system and finally save checkpoint
print("\n### Integrate until user presses ctrl+c ###")
print("Integrating...")
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
while True:
system.integrator.run(1000)
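# --- Hedged sketch (editor's addition): the kind of companion script that
# would have produced "mycheckpoint" in the first place. The exact calls are
# an assumption based on the espressomd.checkpointing API and are not part of
# this sample:
#
#     checkpoint = checkpointing.Checkpoint(checkpoint_id="mycheckpoint")
#     myvar = "some user data"
#     checkpoint.register("system")
#     checkpoint.register("myvar")
#     checkpoint.register("p3m")
#     checkpoint.save()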
|
mkuron/espresso
|
samples/load_checkpoint.py
|
Python
|
gpl-3.0
| 2,567
|
[
"ESPResSo"
] |
d5e1643a7f5c736ef94dadf11a09c5e50537f10d9f71bf15cfa2117073153f44
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import codecs
from collections import OrderedDict
import os
from os.path import abspath
from os.path import join
import unittest
from commoncode import fileutils
from commoncode import text
from licensedcode import saneyaml
from license_test_utils import make_license_test_function
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data/licenses')
# set to True to print matched texts on test failure.
TRACE_TEXTS = True
"""
Data-driven tests using expectations stored in YAML files.
"""
class LicenseTest(object):
"""
A license detection test is used to verify that license detection works
correctly.
It consists of two files with the same base name: a .yml file with test data
and a test file with any other extension that needs to be tested for
detection.
The following data are loaded from the .yml file:
- a test file to scan for licenses,
- a list of expected licenses (with optional positions) to detect,
- optional notes,
- a boolean flag expected_failure, set to True if a test is expected to fail
for now.
If the list of licenses is empty, then this test should not detect any
license in the test file.
"""
def __init__(self, data_file=None, test_file=None):
self.data_file = data_file
self.test_file = test_file
if self.test_file:
self.test_file_name = fileutils.file_name(test_file)
if self.data_file:
with codecs.open(data_file, mode='rb', encoding='utf-8') as df:
data = saneyaml.load(df.read())
self.licenses = data.get('licenses', [])
# TODO: this is for future support of license expressions
self.license = data.get('license')
self.license_choice = data.get('license_choice')
self.notes = data.get('notes')
# True if the test is expected to fail
self.expected_failure = data.get('expected_failure', False)
# True if the test should be skipped
self.skip = data.get('skip', False)
def to_dict(self):
dct = OrderedDict()
if self.licenses:
dct['licenses'] = self.licenses
if self.license:
dct['license'] = self.license
if self.license_choice:
dct['license_choice'] = self.license_choice
if self.expected_failure:
dct['expected_failure'] = self.expected_failure
if self.skip:
dct['skip'] = self.skip
if self.notes:
dct['notes'] = self.notes
return dct
def dump(self):
"""
Dump a representation of self to tgt_dir using two files:
- a .yml for the rule data in YAML block format
- a .RULE: the rule text as a UTF-8 file
"""
as_yaml = saneyaml.dump(self.to_dict())
with codecs.open(self.data_file, 'wb', encoding='utf-8') as df:
df.write(as_yaml)
def load_license_tests(test_dir=TEST_DATA_DIR):
"""
Yield an iterable of LicenseTest loaded from test data files in test_dir.
"""
# first collect files with .yml extension and files with other extensions
# in two maps keyed by file base_name
data_files = {}
test_files = {}
for top, _, files in os.walk(test_dir):
for yfile in files:
if yfile.endswith('~'):
continue
base_name = fileutils.file_base_name(yfile)
file_path = abspath(join(top, yfile))
if yfile.endswith('.yml'):
assert base_name not in data_files
data_files[base_name] = file_path
else:
assert base_name not in test_files
test_files[base_name] = file_path
# ensure that each data file has a corresponding test file
diff = set(data_files.keys()).symmetric_difference(set(test_files.keys()))
assert not diff, ('Orphaned license test file(s) found: '
'test file without its YAML test descriptor '
'or YAML test descriptor without its test file.')
# second, create pairs of corresponding (data_file, test file) for files
# that have the same base_name
for base_name, data_file in data_files.items():
test_file = test_files[base_name]
yield LicenseTest(data_file, test_file)
def build_tests(license_tests, clazz):
"""
Dynamically build test methods from a sequence of LicenseTest and attach
these method to the clazz test class.
"""
# TODO: check that we do not have duplicated tests with same data and text
for test in license_tests:
tfn = test.test_file_name
test_name = 'test_detection_%(tfn)s' % locals()
test_name = text.python_safe_name(test_name)
# closure on the test params
test_method = make_license_test_function(
test.licenses, test.test_file, test.data_file,
test_name=test_name,
expected_failure=test.expected_failure,
skip_test=test.skip and 'Skipping long test' or False,
trace_text=TRACE_TEXTS
)
# attach that method to our test class
setattr(clazz, test_name, test_method)
class TestLicenseDataDriven(unittest.TestCase):
# test functions are attached to this class at module import time
pass
build_tests(license_tests=load_license_tests(), clazz=TestLicenseDataDriven)
|
yashdsaraf/scancode-toolkit
|
tests/licensedcode/test_detection_datadriven.py
|
Python
|
apache-2.0
| 6,867
|
[
"VisIt"
] |
12ca2ae891cecc60abb948e0fc07509f921a21c80c2a49ae553dceb27700d31d
|
"""
Test topological fingerprints.
"""
import unittest
from deepchem.feat import CircularFingerprint
class TestCircularFingerprint(unittest.TestCase):
"""
Tests for CircularFingerprint.
"""
def setUp(self):
"""
Set up tests.
"""
from rdkit import Chem
smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
self.mol = Chem.MolFromSmiles(smiles)
def test_circular_fingerprints(self):
"""
Test CircularFingerprint.
"""
featurizer = CircularFingerprint()
rval = featurizer([self.mol])
assert rval.shape == (1, 2048)
def test_circular_fingerprints_with_1024(self):
"""
Test CircularFingerprint with 1024 size.
"""
featurizer = CircularFingerprint(size=1024)
rval = featurizer([self.mol])
assert rval.shape == (1, 1024)
def test_sparse_circular_fingerprints(self):
"""
Test CircularFingerprint with sparse encoding.
"""
featurizer = CircularFingerprint(sparse=True)
rval = featurizer([self.mol])
assert rval.shape == (1,)
assert isinstance(rval[0], dict)
assert len(rval[0])
def test_sparse_circular_fingerprints_with_smiles(self):
"""
Test CircularFingerprint with sparse encoding and SMILES for each
fragment.
"""
featurizer = CircularFingerprint(sparse=True, smiles=True)
rval = featurizer([self.mol])
assert rval.shape == (1,)
assert isinstance(rval[0], dict)
assert len(rval[0])
# check for separate count and SMILES entries for each fragment
for fragment_id, value in rval[0].items():
assert 'count' in value
assert 'smiles' in value
|
deepchem/deepchem
|
deepchem/feat/tests/test_circular_fingerprints.py
|
Python
|
mit
| 1,648
|
[
"RDKit"
] |
5a891fa42bca97517e8f55cbe30d1dd71013fbfc4f08348e7d1953a77bf13225
|
# Copyright (c) 2003-2016 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Python code format's checker.
By default try to follow Guido's style guide :
http://www.python.org/doc/essays/styleguide.html
Some parts of the process_token method is based from The Tab Nanny std module.
"""
from functools import reduce # pylint: disable=redefined-builtin
import keyword
import tokenize
import sys
import six
from six.moves import zip, map, filter # pylint: disable=redefined-builtin
from astroid import nodes
from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.utils import WarningScope, OPTION_RGX
_CONTINUATION_BLOCK_OPENERS = ['elif', 'except', 'for', 'if', 'while', 'def', 'class']
_KEYWORD_TOKENS = ['assert', 'del', 'elif', 'except', 'for', 'if', 'in', 'not',
'raise', 'return', 'while', 'yield']
if sys.version_info < (3, 0):
_KEYWORD_TOKENS.append('print')
_SPACED_OPERATORS = ['==', '<', '>', '!=', '<>', '<=', '>=',
'+=', '-=', '*=', '**=', '/=', '//=', '&=', '|=', '^=',
'%=', '>>=', '<<=']
_OPENING_BRACKETS = ['(', '[', '{']
_CLOSING_BRACKETS = [')', ']', '}']
_TAB_LENGTH = 8
_EOL = frozenset([tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT])
_JUNK_TOKENS = (tokenize.COMMENT, tokenize.NL)
# Whitespace checking policy constants
_MUST = 0
_MUST_NOT = 1
_IGNORE = 2
# Whitespace checking config constants
_DICT_SEPARATOR = 'dict-separator'
_TRAILING_COMMA = 'trailing-comma'
_EMPTY_LINE = 'empty-line'
_NO_SPACE_CHECK_CHOICES = [_TRAILING_COMMA, _DICT_SEPARATOR, _EMPTY_LINE]
_DEFAULT_NO_SPACE_CHECK_CHOICES = [_TRAILING_COMMA, _DICT_SEPARATOR]
MSGS = {
'C0301': ('Line too long (%s/%s)',
'line-too-long',
'Used when a line is longer than a given number of characters.'),
'C0302': ('Too many lines in module (%s/%s)', # was W0302
'too-many-lines',
'Used when a module has too much lines, reducing its readability.'
),
'C0303': ('Trailing whitespace',
'trailing-whitespace',
'Used when there is whitespace between the end of a line and the '
'newline.'),
'C0304': ('Final newline missing',
'missing-final-newline',
'Used when the last line in a file is missing a newline.'),
'C0305': ('Trailing newlines',
'trailing-newlines',
'Used when there are trailing blank lines in a file.'),
'W0311': ('Bad indentation. Found %s %s, expected %s',
'bad-indentation',
'Used when an unexpected number of indentation\'s tabulations or '
'spaces has been found.'),
'C0330': ('Wrong %s indentation%s%s.\n%s%s',
'bad-continuation',
'TODO'),
'W0312': ('Found indentation with %ss instead of %ss',
'mixed-indentation',
'Used when there are some mixed tabs and spaces in a module.'),
'W0301': ('Unnecessary semicolon', # was W0106
'unnecessary-semicolon',
'Used when a statement is ended by a semi-colon (";"), which \
isn\'t necessary (that\'s python, not C ;).'),
'C0321': ('More than one statement on a single line',
'multiple-statements',
'Used when more than on statement are found on the same line.',
{'scope': WarningScope.NODE}),
'C0325' : ('Unnecessary parens after %r keyword',
'superfluous-parens',
'Used when a single item in parentheses follows an if, for, or '
'other keyword.'),
'C0326': ('%s space %s %s %s\n%s',
'bad-whitespace',
('Used when a wrong number of spaces is used around an operator, '
'bracket or block opener.'),
{'old_names': [('C0323', 'no-space-after-operator'),
('C0324', 'no-space-after-comma'),
('C0322', 'no-space-before-operator')]}),
'W0332': ('Use of "l" as long integer identifier',
'lowercase-l-suffix',
'Used when a lower case "l" is used to mark a long integer. You '
'should use a upper case "L" since the letter "l" looks too much '
'like the digit "1"',
{'maxversion': (3, 0)}),
'C0327': ('Mixed line endings LF and CRLF',
'mixed-line-endings',
'Used when there are mixed (LF and CRLF) newline signs in a file.'),
'C0328': ('Unexpected line ending format. There is \'%s\' while it should be \'%s\'.',
'unexpected-line-ending-format',
'Used when there is different newline than expected.'),
}
def _underline_token(token):
length = token[3][1] - token[2][1]
offset = token[2][1]
referenced_line = token[4]
# If the referenced line does not end with a newline char, fix it
if referenced_line[-1] != '\n':
referenced_line += '\n'
return referenced_line + (' ' * offset) + ('^' * length)
def _column_distance(token1, token2):
if token1 == token2:
return 0
if token2[3] < token1[3]:
token1, token2 = token2, token1
if token1[3][0] != token2[2][0]:
return None
return token2[2][1] - token1[3][1]
def _last_token_on_line_is(tokens, line_end, token):
return (line_end > 0 and tokens.token(line_end-1) == token or
line_end > 1 and tokens.token(line_end-2) == token
and tokens.type(line_end-1) == tokenize.COMMENT)
def _token_followed_by_eol(tokens, position):
return (tokens.type(position+1) == tokenize.NL or
tokens.type(position+1) == tokenize.COMMENT and
tokens.type(position+2) == tokenize.NL)
def _get_indent_length(line):
"""Return the length of the indentation on the given token's line."""
result = 0
for char in line:
if char == ' ':
result += 1
elif char == '\t':
result += _TAB_LENGTH
else:
break
return result
def _get_indent_hint_line(bar_positions, bad_position):
"""Return a line with |s for each of the positions in the given lists."""
if not bar_positions:
return ('', '')
delta_message = ''
markers = [(pos, '|') for pos in bar_positions]
if len(markers) == 1:
# if we have only one marker we'll provide an extra hint on how to fix
expected_position = markers[0][0]
delta = abs(expected_position - bad_position)
direction = 'add' if expected_position > bad_position else 'remove'
delta_message = _CONTINUATION_HINT_MESSAGE % (
direction, delta, 's' if delta > 1 else '')
markers.append((bad_position, '^'))
markers.sort()
line = [' '] * (markers[-1][0] + 1)
for position, marker in markers:
line[position] = marker
return (''.join(line), delta_message)
class _ContinuedIndent(object):
__slots__ = ('valid_outdent_offsets',
'valid_continuation_offsets',
'context_type',
'token',
'position')
def __init__(self,
context_type,
token,
position,
valid_outdent_offsets,
valid_continuation_offsets):
self.valid_outdent_offsets = valid_outdent_offsets
self.valid_continuation_offsets = valid_continuation_offsets
self.context_type = context_type
self.position = position
self.token = token
# The contexts for hanging indents.
# A hanging indented dictionary value after :
HANGING_DICT_VALUE = 'dict-value'
# Hanging indentation in an expression.
HANGING = 'hanging'
# Hanging indentation in a block header.
HANGING_BLOCK = 'hanging-block'
# Continued indentation inside an expression.
CONTINUED = 'continued'
# Continued indentation in a block header.
CONTINUED_BLOCK = 'continued-block'
SINGLE_LINE = 'single'
WITH_BODY = 'multi'
_CONTINUATION_MSG_PARTS = {
HANGING_DICT_VALUE: ('hanging', ' in dict value'),
HANGING: ('hanging', ''),
HANGING_BLOCK: ('hanging', ' before block'),
CONTINUED: ('continued', ''),
CONTINUED_BLOCK: ('continued', ' before block'),
}
_CONTINUATION_HINT_MESSAGE = ' (%s %d space%s)' # Ex: (remove 2 spaces)
def _Offsets(*args):
"""Valid indentation offsets for a continued line."""
return dict((a, None) for a in args)
def _BeforeBlockOffsets(single, with_body):
"""Valid alternative indent offsets for continued lines before blocks.
:param int single: Valid offset for statements on a single logical line.
:param int with_body: Valid offset for statements on several lines.
:returns: A dictionary mapping indent offsets to a string representing
whether the indent if for a line or block.
:rtype: dict
"""
return {single: SINGLE_LINE, with_body: WITH_BODY}
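# For illustration (column values assumed): _Offsets(4, 8) returns
# {4: None, 8: None}, a plain set of acceptable indentation columns, whereas
# _BeforeBlockOffsets(4, 8) returns {4: SINGLE_LINE, 8: WITH_BODY}, letting the
# checker distinguish offsets that are only valid for a single-line statement
# from those valid for a block opener with an indented body.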
class TokenWrapper(object):
"""A wrapper for readable access to token information."""
def __init__(self, tokens):
self._tokens = tokens
def token(self, idx):
return self._tokens[idx][1]
def type(self, idx):
return self._tokens[idx][0]
def start_line(self, idx):
return self._tokens[idx][2][0]
def start_col(self, idx):
return self._tokens[idx][2][1]
def line(self, idx):
return self._tokens[idx][4]
class ContinuedLineState(object):
"""Tracker for continued indentation inside a logical line."""
def __init__(self, tokens, config):
self._line_start = -1
self._cont_stack = []
self._is_block_opener = False
self.retained_warnings = []
self._config = config
self._tokens = TokenWrapper(tokens)
@property
def has_content(self):
return bool(self._cont_stack)
@property
def _block_indent_size(self):
return len(self._config.indent_string.replace('\t', ' ' * _TAB_LENGTH))
@property
def _continuation_size(self):
return self._config.indent_after_paren
def handle_line_start(self, pos):
"""Record the first non-junk token at the start of a line."""
if self._line_start > -1:
return
self._is_block_opener = self._tokens.token(pos) in _CONTINUATION_BLOCK_OPENERS
self._line_start = pos
def next_physical_line(self):
"""Prepares the tracker for a new physical line (NL)."""
self._line_start = -1
self._is_block_opener = False
def next_logical_line(self):
"""Prepares the tracker for a new logical line (NEWLINE).
A new logical line only starts with block indentation.
"""
self.next_physical_line()
self.retained_warnings = []
self._cont_stack = []
def add_block_warning(self, token_position, state, valid_offsets):
self.retained_warnings.append((token_position, state, valid_offsets))
def get_valid_offsets(self, idx):
"""Returns the valid offsets for the token at the given position."""
# The closing brace on a dict or the 'for' in a dict comprehension may
# reset two indent levels because the dict value is ended implicitly
stack_top = -1
if self._tokens.token(idx) in ('}', 'for') and self._cont_stack[-1].token == ':':
stack_top = -2
indent = self._cont_stack[stack_top]
if self._tokens.token(idx) in _CLOSING_BRACKETS:
valid_offsets = indent.valid_outdent_offsets
else:
valid_offsets = indent.valid_continuation_offsets
return indent, valid_offsets.copy()
def _hanging_indent_after_bracket(self, bracket, position):
"""Extracts indentation information for a hanging indent."""
indentation = _get_indent_length(self._tokens.line(position))
if self._is_block_opener and self._continuation_size == self._block_indent_size:
return _ContinuedIndent(
HANGING_BLOCK,
bracket,
position,
_Offsets(indentation + self._continuation_size, indentation),
_BeforeBlockOffsets(indentation + self._continuation_size,
indentation + self._continuation_size * 2))
elif bracket == ':':
# If the dict key was on the same line as the open brace, the new
# correct indent should be relative to the key instead of the
# current indent level
paren_align = self._cont_stack[-1].valid_outdent_offsets
next_align = self._cont_stack[-1].valid_continuation_offsets.copy()
next_align_keys = list(next_align.keys())
next_align[next_align_keys[0] + self._continuation_size] = True
# Note that the continuation of
# d = {
# 'a': 'b'
# 'c'
# }
# is handled by the special-casing for hanging continued string indents.
return _ContinuedIndent(HANGING_DICT_VALUE, bracket, position, paren_align, next_align)
else:
return _ContinuedIndent(
HANGING,
bracket,
position,
_Offsets(indentation, indentation + self._continuation_size),
_Offsets(indentation + self._continuation_size))
def _continuation_inside_bracket(self, bracket, pos):
"""Extracts indentation information for a continued indent."""
indentation = _get_indent_length(self._tokens.line(pos))
token_start = self._tokens.start_col(pos)
next_token_start = self._tokens.start_col(pos + 1)
if self._is_block_opener and next_token_start - indentation == self._block_indent_size:
return _ContinuedIndent(
CONTINUED_BLOCK,
bracket,
pos,
_Offsets(token_start),
_BeforeBlockOffsets(next_token_start, next_token_start + self._continuation_size))
else:
return _ContinuedIndent(
CONTINUED,
bracket,
pos,
_Offsets(token_start),
_Offsets(next_token_start))
def pop_token(self):
self._cont_stack.pop()
def push_token(self, token, position):
"""Pushes a new token for continued indentation on the stack.
Tokens that can modify continued indentation offsets are:
* opening brackets
* 'lambda'
* : inside dictionaries
push_token relies on the caller to filter out those
interesting tokens.
:param int token: The concrete token
:param int position: The position of the token in the stream.
"""
if _token_followed_by_eol(self._tokens, position):
self._cont_stack.append(
self._hanging_indent_after_bracket(token, position))
else:
self._cont_stack.append(
self._continuation_inside_bracket(token, position))
class FormatChecker(BaseTokenChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
"""
__implements__ = (ITokenChecker, IAstroidChecker, IRawChecker)
# configuration section name
name = 'format'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('max-line-length',
{'default' : 100, 'type' : "int", 'metavar' : '<int>',
'help' : 'Maximum number of characters on a single line.'}),
('ignore-long-lines',
{'type': 'regexp', 'metavar': '<regexp>',
'default': r'^\s*(# )?<?https?://\S+>?$',
'help': ('Regexp for a line that is allowed to be longer than '
'the limit.')}),
('single-line-if-stmt',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : ('Allow the body of an if to be on the same '
'line as the test if there is no else.')}),
('no-space-check',
{'default': ','.join(_DEFAULT_NO_SPACE_CHECK_CHOICES),
'metavar': ','.join(_NO_SPACE_CHECK_CHOICES),
'type': 'multiple_choice',
'choices': _NO_SPACE_CHECK_CHOICES,
'help': ('List of optional constructs for which whitespace '
'checking is disabled. '
'`'+ _DICT_SEPARATOR + '` is used to allow tabulation '
'in dicts, etc.: {1 : 1,\\n222: 2}. '
'`'+ _TRAILING_COMMA + '` allows a space between comma '
'and closing bracket: (a, ). '
'`'+ _EMPTY_LINE + '` allows space-only lines.')}),
('max-module-lines',
{'default' : 1000, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of lines in a module'}
),
('indent-string',
{'default' : ' ', 'type' : "string", 'metavar' : '<string>',
'help' : 'String used as indentation unit. This is usually '
'" " (4 spaces) or "\\t" (1 tab).'}),
('indent-after-paren',
{'type': 'int', 'metavar': '<int>', 'default': 4,
'help': 'Number of spaces of indent required inside a hanging '
' or continued line.'}),
('expected-line-ending-format',
{'type': 'choice', 'metavar': '<empty or LF or CRLF>', 'default': '',
'choices': ['', 'LF', 'CRLF'],
'help': ('Expected format of line ending, '
'e.g. empty (any line ending), LF or CRLF.')}),
)
def __init__(self, linter=None):
BaseTokenChecker.__init__(self, linter)
self._lines = None
self._visited_lines = None
self._bracket_stack = [None]
def _pop_token(self):
self._bracket_stack.pop()
self._current_line.pop_token()
def _push_token(self, token, idx):
self._bracket_stack.append(token)
self._current_line.push_token(token, idx)
def new_line(self, tokens, line_end, line_start):
"""a new line has been encountered, process it if necessary"""
if _last_token_on_line_is(tokens, line_end, ';'):
self.add_message('unnecessary-semicolon', line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split('\n')[0]
self.check_lines(line, line_num)
def process_module(self, module):
self._keywords_with_parens = set()
if 'print_function' in module.future_imports:
self._keywords_with_parens.add('print')
def _check_keyword_parentheses(self, tokens, start):
"""Check that there are not unnecessary parens after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line, and it is followed by a colon, and contains no commas (i.e. is not a
tuple).
Args:
tokens: list of Tokens; the entire list of Tokens.
start: int; the position of the keyword in the token list.
"""
# If the next token is not a paren, we're fine.
if self._inside_brackets(':') and tokens[start][1] == 'for':
self._pop_token()
if tokens[start+1][1] != '(':
return
found_and_or = False
depth = 0
keyword_token = tokens[start][1]
line_num = tokens[start][2][0]
for i in range(start, len(tokens) - 1):
token = tokens[i]
# If we hit a newline, then assume any parens were for continuation.
if token[0] == tokenize.NL:
return
if token[1] == '(':
depth += 1
elif token[1] == ')':
depth -= 1
if depth:
continue
# ')' can't happen after if (foo), since it would be a syntax error.
if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
tokens[i+1][0] in (tokenize.NEWLINE,
tokenize.ENDMARKER,
tokenize.COMMENT)):
# The empty tuple () is always accepted.
if i == start + 2:
return
if keyword_token == 'not':
if not found_and_or:
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token in ('return', 'yield'):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token not in self._keywords_with_parens:
if not (tokens[i+1][1] == 'in' and found_and_or):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
return
elif depth == 1:
# This is a tuple, which is always acceptable.
if token[1] == ',':
return
# 'and' and 'or' are the only boolean operators with lower precedence
# than 'not', so parens are only required when they are found.
elif token[1] in ('and', 'or'):
found_and_or = True
# A yield inside an expression must always be in parentheses,
# quit early without error.
elif token[1] == 'yield':
return
# A generator expression always has a 'for' token in it, and
# the 'for' token is only legal inside parens when it is in a
# generator expression. The parens are necessary here, so bail
# without an error.
elif token[1] == 'for':
return
def _opening_bracket(self, tokens, i):
self._push_token(tokens[i][1], i)
# Special case: ignore slices
if tokens[i][1] == '[' and tokens[i+1][1] == ':':
return
if (i > 0 and (tokens[i-1][0] == tokenize.NAME and
not (keyword.iskeyword(tokens[i-1][1]))
or tokens[i-1][1] in _CLOSING_BRACKETS)):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_IGNORE, _MUST_NOT))
def _closing_bracket(self, tokens, i):
if self._inside_brackets(':'):
self._pop_token()
self._pop_token()
# Special case: ignore slices
if tokens[i-1][1] == ':' and tokens[i][1] == ']':
return
policy_before = _MUST_NOT
if tokens[i][1] in _CLOSING_BRACKETS and tokens[i-1][1] == ',':
if _TRAILING_COMMA in self.config.no_space_check:
policy_before = _IGNORE
self._check_space(tokens, i, (policy_before, _IGNORE))
def _check_equals_spacing(self, tokens, i):
"""Check the spacing of a single equals sign."""
if self._inside_brackets('(') or self._inside_brackets('lambda'):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_MUST, _MUST))
def _open_lambda(self, tokens, i): # pylint:disable=unused-argument
self._push_token('lambda', i)
def _handle_colon(self, tokens, i):
# Special case: ignore slices
if self._inside_brackets('['):
return
if (self._inside_brackets('{') and
_DICT_SEPARATOR in self.config.no_space_check):
policy = (_IGNORE, _IGNORE)
else:
policy = (_MUST_NOT, _MUST)
self._check_space(tokens, i, policy)
if self._inside_brackets('lambda'):
self._pop_token()
elif self._inside_brackets('{'):
self._push_token(':', i)
def _handle_comma(self, tokens, i):
# Only require a following whitespace if this is
# not a hanging comma before a closing bracket.
if tokens[i+1][1] in _CLOSING_BRACKETS:
self._check_space(tokens, i, (_MUST_NOT, _IGNORE))
else:
self._check_space(tokens, i, (_MUST_NOT, _MUST))
if self._inside_brackets(':'):
self._pop_token()
def _check_surrounded_by_space(self, tokens, i):
"""Check that a binary operator is surrounded by exactly one space."""
self._check_space(tokens, i, (_MUST, _MUST))
def _check_space(self, tokens, i, policies):
def _policy_string(policy):
if policy == _MUST:
return 'Exactly one', 'required'
else:
return 'No', 'allowed'
def _name_construct(token):
if token[1] == ',':
return 'comma'
elif token[1] == ':':
return ':'
elif token[1] in '()[]{}':
return 'bracket'
elif token[1] in ('<', '>', '<=', '>=', '!=', '=='):
return 'comparison'
else:
if self._inside_brackets('('):
return 'keyword argument assignment'
else:
return 'assignment'
good_space = [True, True]
token = tokens[i]
pairs = [(tokens[i-1], token), (token, tokens[i+1])]
for other_idx, (policy, token_pair) in enumerate(zip(policies, pairs)):
if token_pair[other_idx][0] in _EOL or policy == _IGNORE:
continue
distance = _column_distance(*token_pair)
if distance is None:
continue
good_space[other_idx] = (
(policy == _MUST and distance == 1) or
(policy == _MUST_NOT and distance == 0))
warnings = []
if not any(good_space) and policies[0] == policies[1]:
warnings.append((policies[0], 'around'))
else:
for ok, policy, position in zip(good_space, policies, ('before', 'after')):
if not ok:
warnings.append((policy, position))
for policy, position in warnings:
construct = _name_construct(token)
count, state = _policy_string(policy)
self.add_message('bad-whitespace', line=token[2][0],
args=(count, state, position, construct,
_underline_token(token)))
def _inside_brackets(self, left):
return self._bracket_stack[-1] == left
def _prepare_token_dispatcher(self):
raw = [
(_KEYWORD_TOKENS,
self._check_keyword_parentheses),
(_OPENING_BRACKETS, self._opening_bracket),
(_CLOSING_BRACKETS, self._closing_bracket),
(['='], self._check_equals_spacing),
(_SPACED_OPERATORS, self._check_surrounded_by_space),
([','], self._handle_comma),
([':'], self._handle_colon),
(['lambda'], self._open_lambda),
]
dispatch = {}
for tokens, handler in raw:
for token in tokens:
dispatch[token] = handler
return dispatch
def process_tokens(self, tokens):
"""process tokens and search for :
_ non strict indentation (i.e. not always using the <indent> parameter as
indent unit)
_ too long lines (i.e. longer than <max_chars>)
_ optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
self._bracket_stack = [None]
indents = [0]
check_equal = False
line_num = 0
self._lines = {}
self._visited_lines = {}
token_handlers = self._prepare_token_dispatcher()
self._last_line_ending = None
last_blank_line_num = 0
self._current_line = ContinuedLineState(tokens, self.config)
for idx, (tok_type, token, start, _, line) in enumerate(tokens):
if start[0] != line_num:
line_num = start[0]
# A tokenizer oddity: if an indented line contains a multi-line
# docstring, the line member of the INDENT token does not contain
# the full line; therefore we check the next token on the line.
if tok_type == tokenize.INDENT:
self.new_line(TokenWrapper(tokens), idx-1, idx+1)
else:
self.new_line(TokenWrapper(tokens), idx-1, idx)
if tok_type == tokenize.NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = True
self._process_retained_warnings(TokenWrapper(tokens), idx)
self._current_line.next_logical_line()
self._check_line_ending(token, line_num)
elif tok_type == tokenize.INDENT:
check_equal = False
self.check_indent_level(token, indents[-1]+1, line_num)
indents.append(indents[-1]+1)
elif tok_type == tokenize.DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = True
if len(indents) > 1:
del indents[-1]
elif tok_type == tokenize.NL:
if not line.strip('\r\n'):
last_blank_line_num = line_num
self._check_continued_indentation(TokenWrapper(tokens), idx+1)
self._current_line.next_physical_line()
elif tok_type != tokenize.COMMENT:
self._current_line.handle_line_start(idx)
# This is the first concrete token following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
if check_equal:
check_equal = False
self.check_indent_level(line, indents[-1], line_num)
if tok_type == tokenize.NUMBER and token.endswith('l'):
self.add_message('lowercase-l-suffix', line=line_num)
try:
handler = token_handlers[token]
except KeyError:
pass
else:
handler(tokens, idx)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.config.max_module_lines:
# Get the line where the too-many-lines (or its message id)
# was disabled or default to 1.
symbol = self.linter.msgs_store.check_message_id('too-many-lines')
names = (symbol.msgid, 'too-many-lines')
line = next(filter(None,
map(self.linter._pragma_lineno.get, names)), 1)
self.add_message('too-many-lines',
args=(line_num, self.config.max_module_lines),
line=line)
# See if there are any trailing lines. Do not complain about empty
# files like __init__.py markers.
if line_num == last_blank_line_num and line_num > 0:
self.add_message('trailing-newlines', line=line_num)
def _check_line_ending(self, line_ending, line_num):
# check if line endings are mixed
if self._last_line_ending is not None:
if line_ending != self._last_line_ending:
self.add_message('mixed-line-endings', line=line_num)
self._last_line_ending = line_ending
# check if line ending is as expected
expected = self.config.expected_line_ending_format
if expected:
# reduce multiple \n\n\n\n to one \n
line_ending = reduce(lambda x, y: x + y if x != y else x, line_ending, "")
line_ending = 'LF' if line_ending == '\n' else 'CRLF'
if line_ending != expected:
self.add_message('unexpected-line-ending-format', args=(line_ending, expected),
line=line_num)
def _process_retained_warnings(self, tokens, current_pos):
single_line_block_stmt = not _last_token_on_line_is(tokens, current_pos, ':')
for indent_pos, state, offsets in self._current_line.retained_warnings:
block_type = offsets[tokens.start_col(indent_pos)]
hints = dict((k, v) for k, v in six.iteritems(offsets)
if v != block_type)
if single_line_block_stmt and block_type == WITH_BODY:
self._add_continuation_message(state, hints, tokens, indent_pos)
elif not single_line_block_stmt and block_type == SINGLE_LINE:
self._add_continuation_message(state, hints, tokens, indent_pos)
def _check_continued_indentation(self, tokens, next_idx):
def same_token_around_nl(token_type):
return (tokens.type(next_idx) == token_type and
tokens.type(next_idx-2) == token_type)
# Do not issue any warnings if the next line is empty.
if not self._current_line.has_content or tokens.type(next_idx) == tokenize.NL:
return
state, valid_offsets = self._current_line.get_valid_offsets(next_idx)
# Special handling for hanging comments and strings. If the last line ended
# with a comment (string) and the new line contains only a comment, the line
# may also be indented to the start of the previous token.
if same_token_around_nl(tokenize.COMMENT) or same_token_around_nl(tokenize.STRING):
valid_offsets[tokens.start_col(next_idx-2)] = True
# We can only decide if the indentation of a continued line before opening
# a new block is valid once we know whether the body of the block is on the
# same line as the block opener. Since the token processing is single-pass,
# emitting those warnings is delayed until the block opener is processed.
if (state.context_type in (HANGING_BLOCK, CONTINUED_BLOCK)
and tokens.start_col(next_idx) in valid_offsets):
self._current_line.add_block_warning(next_idx, state, valid_offsets)
elif tokens.start_col(next_idx) not in valid_offsets:
self._add_continuation_message(state, valid_offsets, tokens, next_idx)
def _add_continuation_message(self, state, offsets, tokens, position):
readable_type, readable_position = _CONTINUATION_MSG_PARTS[state.context_type]
hint_line, delta_message = _get_indent_hint_line(offsets, tokens.start_col(position))
self.add_message(
'bad-continuation',
line=tokens.start_line(position),
args=(readable_type, readable_position, delta_message,
tokens.line(position), hint_line))
@check_messages('multiple-statements')
def visit_default(self, node):
"""check the node line number and check it if not yet done"""
if not node.is_statement:
return
if not node.root().pure_python:
return # XXX block visit of child nodes
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
else:
# The line on which a finally: occurs in a try/finally
# is not directly represented in the AST. We infer it
# by taking the last line of the body and adding 1, which
# should be the line of finally:
if (isinstance(node.parent, nodes.TryFinally)
and node in node.parent.finalbody):
prev_line = node.parent.body[0].tolineno + 1
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
self._check_multi_statement_line(node, line)
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in range(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append('')
def _check_multi_statement_line(self, node, line):
"""Check for lines containing multiple statements."""
# Do not warn about multiple nested context managers
# in with statements.
if isinstance(node, nodes.With):
return
# For try... except... finally..., the two nodes
# appear to be on the same line due to how the AST is built.
if (isinstance(node, nodes.TryExcept) and
isinstance(node.parent, nodes.TryFinally)):
return
if (isinstance(node.parent, nodes.If) and not node.parent.orelse
and self.config.single_line_if_stmt):
return
self.add_message('multiple-statements', node=node)
self._visited_lines[line] = 2
def check_lines(self, lines, i):
"""check lines have less than a maximum number of characters
"""
max_chars = self.config.max_line_length
ignore_long_line = self.config.ignore_long_lines
for line in lines.splitlines(True):
if not line.endswith('\n'):
self.add_message('missing-final-newline', line=i)
else:
stripped_line = line.rstrip()
if not stripped_line and _EMPTY_LINE in self.config.no_space_check:
# allow empty lines
pass
elif line[len(stripped_line):] not in ('\n', '\r\n'):
self.add_message('trailing-whitespace', line=i)
# Don't count excess whitespace in the line length.
line = stripped_line
mobj = OPTION_RGX.search(line)
if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable':
line = line.split('#')[0].rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
self.add_message('line-too-long', line=i, args=(len(line), max_chars))
i += 1
def check_indent_level(self, string, expected, line_num):
"""return the indent level of the string
"""
indent = self.config.indent_string
if indent == '\\t': # \t is not interpreted in the configuration file
indent = '\t'
level = 0
unit_size = len(indent)
while string[:unit_size] == indent:
string = string[unit_size:]
level += 1
suppl = ''
while string and string[0] in ' \t':
if string[0] != indent[0]:
if string[0] == '\t':
args = ('tab', 'space')
else:
args = ('space', 'tab')
self.add_message('mixed-indentation', args=args, line=line_num)
return level
suppl += string[0]
string = string[1:]
if level != expected or suppl:
i_type = 'spaces'
if indent[0] == '\t':
i_type = 'tabs'
self.add_message('bad-indentation', line=line_num,
args=(level * unit_size + len(suppl), i_type,
expected * unit_size))
def register(linter):
"""required method to auto register this checker """
linter.register_checker(FormatChecker(linter))
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/pylint/checkers/format.py
|
Python
|
gpl-3.0
| 41,232
|
[
"VisIt"
] |
581f29d28fc357bc9ce0df8ee4e8e5f86be8e33a6470fc8e1fe827b1cf2020c0
|
from string import Template
import logging
from hs_core.hydroshare.utils import get_resource_types
logger = logging.getLogger(__name__)
def parse_app_url_template(url_template_string, term_dict_list=()):
"""
This function replaces pre-defined HS Terms in url_template_string with real values.
Example: http://www.myapps.com/app1/?res_type=${HS_RES_TYPE}
--> http://www.myapps.com/app1/?res_type=GenericResource
:param url_template_string: The url template string contains HS Terms
:param term_dict_list: a list of dict that stores pairs of Term Name and Term Value
:return: the updated url string, or None if template contains undefined terms
"""
new_url_string = url_template_string
merged_term_dic = {}
try:
for term_dict in term_dict_list:
merged_term_dic.update(term_dict)
new_url_string = Template(new_url_string).substitute(merged_term_dic)
except Exception:
logger.exception("[WebApp] '{0}' cannot be parsed by term_dict {1}.".
format(new_url_string, str(merged_term_dic)))
new_url_string = None
finally:
return new_url_string
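# Usage sketch (term names and values are hypothetical, not defined here):
# with term_dict_list = [{'HS_RES_ID': 'abc123'}, {'HS_RES_TYPE': 'GenericResource'}],
# the template "https://www.myapps.com/app1/?id=${HS_RES_ID}&type=${HS_RES_TYPE}"
# is returned as "https://www.myapps.com/app1/?id=abc123&type=GenericResource";
# if the template references a term missing from every dict, the exception is
# logged and None is returned instead.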
def get_SupportedResTypes_choices():
"""
This function harvests all existing resource types in system,
and puts them in a list (except for WebApp (ToolResource) Resource type):
[
["RESOURCE_CLASS_NAME_1", "RESOURCE_VERBOSE_NAME_1"],
["RESOURCE_CLASS_NAME_2", "RESOURCE_VERBOSE_NAME_2"],
...
["RESOURCE_CLASS_NAME_N", "RESOURCE_VERBOSE_NAME_N"],
]
"""
xdci_excluded_types = ["Geographic Raster",
"HIS Referenced Time Series",
"Time Series",
"Multidimensional (NetCDF)",
"Model Program Resource",
"Model Instance Resource",
"SWAT Model Instance Resource",
"Geographic Feature (ESRI Shapefiles)",
"Script Resource",
"MODFLOW Model Instance Resource",
]
result_list = []
res_types_list = get_resource_types()
for r_type in res_types_list:
class_name = r_type.__name__
verbose_name = r_type._meta.verbose_name
if "toolresource" != class_name.lower():
if verbose_name not in xdci_excluded_types:
result_list.append([class_name, verbose_name])
return result_list
def get_SupportedSharingStatus_choices():
return [['Published', 'Published'],
['Public', 'Public'],
['Discoverable', 'Discoverable'],
['Private', 'Private'],
]
|
ResearchSoftwareInstitute/MyHPOM
|
hs_tools_resource/utils.py
|
Python
|
bsd-3-clause
| 2,737
|
[
"NetCDF"
] |
1057b95605578f12e66fe03ab6a8edabd268abbb37236ed8e10430d2acc93c04
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The set of functions to generate random 2D/3D phantoms
The TomoPhantom package is released under Apache License, Version 2.0
@author: Daniil Kazantsev
"""
def rand_init2D(x0min, x0max, y0min, y0max, c0min, c0max, ab_min, ab_max):
import numpy as np
x0 = np.random.uniform(low = x0min, high=x0max)
y0 = np.random.uniform(low = y0min, high=y0max)
c0 = np.random.uniform(low = c0min, high=c0max)
ab = np.random.uniform(low = ab_min, high=ab_max)
return (x0,y0,c0,ab)
def rand_init3D(x0min, x0max, y0min, y0max, z0min, z0max, c0min, c0max, ab_min, ab_max):
import numpy as np
x0 = np.random.uniform(low = x0min, high=x0max)
y0 = np.random.uniform(low = y0min, high=y0max)
z0 = np.random.uniform(low = z0min, high=z0max)
c0 = np.random.uniform(low = c0min, high=c0max)
ab = np.random.uniform(low = ab_min, high=ab_max)
return (x0,y0,z0,c0,ab)
# Function to generate 2D foam-like structures using randomly located circles
def foam2D(x0min, x0max, y0min, y0max, c0min, c0max, ab_min, ab_max, N_size, tot_objects, object_type):
import numpy as np
import math
import random
#2D functions
from tomophantom import TomoP2D
from tomophantom.TomoP2D import Objects2D
attemptsNo = 2000 # the number of attempts to fit the object
# objects accepted: 'ellipse', 'parabola', 'gaussian', 'mix'
mix_objects = False
if (object_type == 'ellipse'):
object_type = Objects2D.ELLIPSE
elif (object_type == 'parabola'):
object_type = Objects2D.PARABOLA
elif (object_type == 'gaussian'):
object_type = Objects2D.GAUSSIAN
elif (object_type == 'mix'):
mix_objects = True
else:
raise TypeError('object_type can be only ellipse, parabola, gaussian or mix')
X0 = np.float32(np.zeros(tot_objects))
Y0 = np.float32(np.zeros(tot_objects))
AB = np.float32(np.zeros(tot_objects))
C0_var = np.float32(np.zeros(tot_objects))
for i in range(0,tot_objects):
(x0,y0,c0,ab) = rand_init2D(x0min, x0max, y0min, y0max, c0min, c0max, ab_min, ab_max)
if (i > 0):
breakj = False
for j in range(0,attemptsNo):
if (breakj == True):
(x0,y0,c0,ab) = rand_init2D(x0min, x0max, y0min, y0max, c0min, c0max, ab_min, ab_max)
breakj = False
else:
for l in range(0,i): # checks consistency with previously created objects
dist = math.sqrt((X0[l]-x0)**2 + (Y0[l]-y0)**2)
if (dist < (ab + AB[l])) or ((abs(x0) + ab)**2 + (abs(y0) + ab)**2 > 1.0):
breakj = True
break
if (breakj == False): # re-initialise if doesn't fit the criteria
X0[i] = x0
Y0[i] = y0
AB[i] = ab
C0_var[i] = c0
break
if (AB[i] == 0.0):
X0[i] = x0
Y0[i] = y0
AB[i] = 0.0001
C0_var[i] = c0
myObjects = [] # dictionary of objects
for obj in range(0,len(X0)):
if (mix_objects == True):
rand_obj = random.randint(0,2)
if (rand_obj == 0):
object_type = Objects2D.ELLIPSE
if (rand_obj == 1):
object_type = Objects2D.PARABOLA
if (rand_obj == 2):
object_type = Objects2D.GAUSSIAN
curr_obj = {'Obj': object_type,
'C0' : C0_var[obj],
'x0' : X0[obj],
'y0' : Y0[obj],
'a' : AB[obj],
'b' : AB[obj],
'phi': 0.0}
myObjects.append(curr_obj)
Object = TomoP2D.Object(N_size, myObjects)
return (Object,myObjects)
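# A minimal usage sketch (the parameter values below are assumptions, not
# recommended defaults):
#
#     phantom, objects = foam2D(x0min=-0.3, x0max=0.3, y0min=-0.3, y0max=0.3,
#                               c0min=0.2, c0max=1.0, ab_min=0.01, ab_max=0.07,
#                               N_size=256, tot_objects=100, object_type='mix')
#
# "phantom" is the rasterised N_size x N_size image and "objects" is the list
# of object dictionaries it was built from, reusable with TomoP2D.Object.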
# Function to generate 3D foam-like structures using randomly located spheres
def foam3D(x0min, x0max, y0min, y0max, z0min, z0max, c0min, c0max, ab_min, ab_max, N_size, tot_objects, object_type):
import numpy as np
import math
import random
#3D functions
from tomophantom import TomoP3D
from tomophantom.TomoP3D import Objects3D
attemptsNo = 2000
# objects accepted: 'ellipsoid', 'paraboloid', 'gaussian', 'mix'
mix_objects = False
if (object_type == 'ellipsoid'):
object_type = Objects3D.ELLIPSOID
elif (object_type == 'paraboloid'):
object_type = Objects3D.PARABOLOID
elif (object_type == 'gaussian'):
object_type = Objects3D.GAUSSIAN
elif (object_type == 'mix'):
mix_objects = True
else:
raise TypeError('object_type can be only ellipsoid, paraboloid, gaussian or mix')
X0 = np.float32(np.zeros(tot_objects))
Y0 = np.float32(np.zeros(tot_objects))
Z0 = np.float32(np.zeros(tot_objects))
AB = np.float32(np.zeros(tot_objects))
C0_var = np.float32(np.zeros(tot_objects))
for i in range(0,tot_objects):
(x0,y0,z0,c0,ab) = rand_init3D(x0min, x0max, y0min, y0max, z0min, z0max, c0min, c0max, ab_min, ab_max)
if (i > 0):
breakj = False
for j in range(0,attemptsNo):
if breakj:
(x0,y0,z0,c0,ab) = rand_init3D(x0min, x0max, y0min, y0max, z0min, z0max, c0min, c0max, ab_min, ab_max)
breakj = False
else:
for l in range(0,i): # checks consistency with previously created objects
dist = math.sqrt((X0[l]-x0)**2 + (Y0[l]-y0)**2 + (Z0[l]-z0)**2)
if (dist < (ab + AB[l])) or ((abs(x0) + ab)**2 + (abs(y0) + ab)**2 + (abs(z0) + ab)**2 > 1.0):
breakj = True
break
if (breakj == False): # re-initialise if doesn't fit the criteria
X0[i] = x0
Y0[i] = y0
Z0[i] = z0
AB[i] = ab
C0_var[i] = c0
break
if (AB[i] == 0.0):
X0[i] = x0
Y0[i] = y0
Z0[i] = z0
AB[i] = 0.0001
C0_var[i] = c0
myObjects = [] # dictionary of objects
for obj in range(0,len(X0)):
if (mix_objects == True):
rand_obj = random.randint(0,2)
if (rand_obj == 0):
object_type = Objects3D.ELLIPSOID
if (rand_obj == 1):
object_type = Objects3D.PARABOLOID
if (rand_obj == 2):
object_type = Objects3D.GAUSSIAN
curr_obj = {'Obj': object_type,
'C0' : C0_var[obj],
'x0' : X0[obj],
'y0' : Y0[obj],
'z0' : Z0[obj],
'a' : AB[obj],
'b' : AB[obj],
'c' : AB[obj],
'phi1': 0.0}
myObjects.append(curr_obj)
Object3D = TomoP3D.Object(N_size, myObjects)
return (Object3D,myObjects)
|
dkazanc/TomoPhantom
|
Wrappers/Python/tomophantom/randphant/generator.py
|
Python
|
apache-2.0
| 7,191
|
[
"Gaussian"
] |
1c5d208b6b1cb66ee10ec57d499ea53e44eb04c17b5c1442f8ba78f65920b2ed
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
espressopp.integrator.ExtAnalyze
********************************
This class can be used to execute nearly all analysis objects
within the main integration loop, which allows time averages
(with standard deviation error bars) to be accumulated automatically.
Example Usage:
>>> pt = espressopp.analysis.PressureTensor(system)
>>> extension_pt = espressopp.integrator.ExtAnalyze(pt , interval=100)
>>> integrator.addExtension(extension_pt)
>>> integrator.run(10000)
>>>
>>> pt_ave = pt.getAverageValue()
>>> print "average Pressure Tensor = ", pt_ave[:6]
>>> print " std deviation = ", pt_ave[6:]
>>> print "number of measurements = ", pt.getNumberOfMeasurements()
.. function:: espressopp.integrator.ExtAnalyze(action_obj, interval)
:param action_obj:
:param interval: (default: 1)
:type action_obj:
:type interval: int
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_ExtAnalyze
class ExtAnalyzeLocal(ExtensionLocal, integrator_ExtAnalyze):
def __init__(self, action_obj, interval=1):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_ExtAnalyze, action_obj, interval)
if pmi.isController :
class ExtAnalyze(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.ExtAnalyzeLocal',
)
|
govarguz/espressopp
|
src/integrator/ExtAnalyze.py
|
Python
|
gpl-3.0
| 2,409
|
[
"ESPResSo"
] |
61b1f626c678803884e58b5d719887fd748cd617a41208622a5bd0ff540b0eed
|
from Circ import circ
from Enemy import enemy
from Particle import particle
from Poly import poly
from Projectile import projectile
from gameFunctions import *
class missile(projectile):
def __init__(this, pos, aim, speed):
'''initializes a missile projectile, used for the missileLauncher weapon'''
projectile.__init__(this, True, pos, aim, speed)
this.form = poly((10, 0), (-5, 5), (-5, -5))
this.form.color = (255, 180, 0)
this.form.thickness = 2
this.life = 110
this.lock = None
def update(this):
'''handles the logic step for the current instance'''
projectile.update(this)
if (this.life <= 100):
if (this.lock == None):
this.search()
else:
this.seek(this.lock)
if (not this.lock in GlobalVariables.enemies):
this.lock = None
def search(this):
'''seeks out an enemy to lock onto'''
for en in GlobalVariables.enemies:
if (distance(en.pos, this.pos) <= 150):
this.lock = en
def seek(this, en):
'''homes in to an enemy'''
if (not en in GlobalVariables.enemies):
return
# dampens the velocity to account for course readjustment
this.vel = multPoint(this.vel, 0.92)
# adds velocity toward the target
this.vel = addPoints(this.vel, multPoint(normal(this.pos, en.pos), 1))
this.thrustParticle()
def thrustParticle(this):
'''emits particles to show that it is seeking an enemy'''
force = multPoint(xyComponent(this.form.angle - math.pi), 0.7)
part = particle(addPoints(this.pos, randPoint(randRange(4, 6))), multPoint(force, 5), (255, 255, 0))
part.vel = addPoints(part.vel, this.vel)
part.life = random.randrange(5, 10)
part.damping = 0.8
if (randChance(50)):
part.color = (200, 200, 0)
part.thickness = 2
GlobalVariables.particles.append(part)
def hit(this, en):
'''collides with the specified enemy'''
projectile.hit(this, en)
for cols in collidingColchecks(this.pos, 30):
for en in cols:
if (not baseIs(en, enemy) or en.dead()):
continue
# damages other enemies in within a radius, acts as splash damage
if (distance(this.pos, en.pos) < 30):
en.health -= 2
if (en.health <= 0):
en.kill()
def burst(this):
'''creates a small flash and emits some explosion particles'''
GlobalVariables.sounds[13].play()
for i in range(random.randrange(5, 10)):
part = particle(this.pos, randCirc(5), (255, 255, 0))
GlobalVariables.particles.append(part)
blast = circ()
blast.pos = this.pos
blast.scale = 30
blast.color = (255, 255, 100)
GlobalVariables.maincam.toDraw(blast)
|
reid-vollett/Software_Quality_Project
|
Missile.py
|
Python
|
gpl-3.0
| 3,006
|
[
"BLAST"
] |
e162b196b4fe6834155f024d728ebdc10be0b541e32b0294c97f80ba1855f84f
|
########################################################################
# $HeadURL$
# File : LCGPilotDirector.py
# Author : Ricardo Graciani
########################################################################
"""
LCGPilotDirector class,
It includes:
- basic configuration for LCG
- submit and monitor methods for LCG MiddleWare.
"""
__RCSID__ = "$Id$"
from DIRAC.WorkloadManagementSystem.private.GridPilotDirector import GridPilotDirector
from DIRAC.ConfigurationSystem.Client.Helpers import getVO
from DIRAC.Core.Utilities import List
import os
# Some default values
BROKERS = ['rb123.cern.ch']
class LCGPilotDirector( GridPilotDirector ):
def __init__( self, submitPool ):
"""
Define some defaults and call parent __init__
"""
self.gridMiddleware = 'LCG'
GridPilotDirector.__init__( self, submitPool )
self.resourceBrokers = BROKERS
self.loggingServers = []
def configure( self, csSection, submitPool ):
"""
Here goes specific configuration for LCG PilotDirectors
"""
GridPilotDirector.configure( csSection, submitPool )
self.log.info( '' )
self.log.info( '===============================================' )
def _prepareJDL( self, taskQueueDict, workingDirectory, pilotOptions, pilotsToSubmit, ceMask, submitPrivatePilot, privateTQ ):
"""
Write JDL for Pilot Submission
"""
# RB = List.randomize( self.resourceBrokers )[0]
LDs = []
NSs = []
LBs = []
# Select Randomly one RB from the list
RB = List.randomize( self.resourceBrokers )[0]
LDs.append( '"%s:9002"' % RB )
LBs.append( '"%s:9000"' % RB )
for LB in self.loggingServers:
NSs.append( '"%s:7772"' % LB )
LD = ', '.join( LDs )
NS = ', '.join( NSs )
LB = ', '.join( LBs )
vo = getVO()
if privateTQ or vo not in ['lhcb']:
extraReq = "True"
else:
if submitPrivatePilot:
extraReq = "! AllowsGenericPilot"
else:
extraReq = "AllowsGenericPilot"
rbJDL = """
AllowsGenericPilot = Member( "VO-lhcb-pilot" , other.GlueHostApplicationSoftwareRunTimeEnvironment );
Requirements = pilotRequirements && other.GlueCEStateStatus == "Production" && %s;
RetryCount = 0;
ErrorStorage = "%s/pilotError";
OutputStorage = "%s/pilotOutput";
# ListenerPort = 44000;
ListenerStorage = "%s/Storage";
VirtualOrganisation = "lhcb";
LoggingTimeout = 30;
LoggingSyncTimeout = 30;
LoggingDestination = { %s };
# Default NS logger level is set to 0 (null)
# max value is 6 (very ugly)
NSLoggerLevel = 0;
DefaultLogInfoLevel = 0;
DefaultStatusLevel = 0;
NSAddresses = { %s };
LBAddresses = { %s };
MyProxyServer = "no-myproxy.cern.ch";
""" % ( extraReq, workingDirectory, workingDirectory, workingDirectory, LD, NS, LB )
pilotJDL, pilotRequirements = self._JobJDL( taskQueueDict, pilotOptions, ceMask )
jdl = os.path.join( workingDirectory, '%s.jdl' % taskQueueDict['TaskQueueID'] )
jdl = self._writeJDL( jdl, [pilotJDL, rbJDL] )
return {'JDL':jdl, 'Requirements':pilotRequirements + " && " + extraReq, 'Pilots': pilotsToSubmit, 'RB':RB }
def _listMatch( self, proxy, jdl, taskQueueID, rb ):
"""
Check the number of available queues for the pilots to prevent submission
if there are no matching resources.
"""
cmd = ['edg-job-list-match', '-c', '%s' % jdl , '--config-vo', '%s' % jdl, '%s' % jdl]
return self.parseListMatchStdout( proxy, cmd, taskQueueID, rb )
def _submitPilot( self, proxy, pilotsToSubmit, jdl, taskQueueID, rb ):
"""
Submit pilot and get back the reference
"""
result = []
for _i in range( pilotsToSubmit ):
cmd = [ 'edg-job-submit', '-c', '%s' % jdl, '--config-vo', '%s' % jdl, '%s' % jdl ]
ret = self.parseJobSubmitStdout( proxy, cmd, taskQueueID, rb )
if ret:
result.append( ret )
return result
|
Sbalbp/DIRAC
|
WorkloadManagementSystem/private/LCGPilotDirector.py
|
Python
|
gpl-3.0
| 3,871
|
[
"DIRAC"
] |
5518b6497331373273a5f4f9de140808283c1f5a6d7a5c8ebdd5ca047b5b6f81
|
import unittest
from splinter import Browser
class GoogleTestCase(unittest.TestCase):
def setUp(self):
self.browser = Browser('phantomjs')
self.browser.visit('http://google.com')
def test_check_title(self):
assert self.browser.title == 'Google'
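def tearDown(self):
# Added for completeness (not in the original snippet): close the
# PhantomJS instance so each test starts from a clean browser.
self.browser.quit()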
if __name__ in ('main', '__main__'):
unittest.main()
|
josemazo/learning-splinter-phantomjs
|
the_phantom.py
|
Python
|
mit
| 338
|
[
"VisIt"
] |
8837622746c74359d3a139f7f5179ce1b1909ec1b0533db91795333419ba6143
|
#ImportModules
import ShareYourSystem as SYS
from ShareYourSystem.Specials.Simulaters import Populater,Brianer,Moniter
#Definition
MyBrianer=Brianer.BrianerClass(
).update(
{
#Set here the global net parameters
'StimulatingStepTimeFloat':0.1
}
).produce(
['E','I'],
Populater.PopulaterClass,
{
#Here are defined the brian classic shared arguments between pops
'brian.NeuronGroupInspectDict':SYS.InspectDict().update(
{
'LiargVariablesList':[
0,
'''
dv/dt = (ge+gi-(v+49*mV))/(20*ms) : volt
dge/dt = -ge/(5*ms) : volt
dgi/dt = -gi/(10*ms) : volt
'''
],
'KwargVariablesDict':
{
'threshold':'v>-50*mV',
'reset':'v=-60*mV'
}
}
),
#Here are the settig of future brian monitors
'push':
{
'LiargVariablesList':
[
[
Moniter.MoniterClass.update(
{
'brian.SpikeMonitorInspectDict':SYS.InspectDict()
}
)
],
],
'KwargVariablesDict':{'CollectingCollectionStr':'Monitome'}
},
#Init conditions
'PopulatingInitDict':
{
'v':-60.
}
},
**{'CollectingCollectionStr':'Populatome'}
).__setitem__(
'Dis_<Populatome>',
#Here are defined the brian classic specific arguments for each pop
[
{
'Exec_NeuronGroupInspectDict["LiargVariablesList"][0]':3200,
'ConnectingGraspClueVariablesList':
[
SYS.GraspDictClass(
{
'HintVariable':'/NodePointDeriveNoder/<Populatome>IPopulater',
'SynapseArgumentVariable':
{
'pre':'ge+=1.62*mV',
'connect':{'p':0.02}
}
}
)
]
},
{
'Exec_NeuronGroupInspectDict["LiargVariablesList"][0]':800,
'ConnectingGraspClueVariablesList':
[
SYS.GraspDictClass(
{
'HintVariable':'/NodePointDeriveNoder/<Populatome>EPopulater',
'SynapseArgumentVariable':
{
'pre':'gi-=9*mV',
'connect':{'p':0.02}
}
}
)
]
}
]
).brian()
#Definition the AttestedStr
SYS._attest(
[
'MyBrianer is '+SYS._str(
MyBrianer,
**{
'RepresentingBaseKeyStrsList':False,
'RepresentingAlineaIsBool':False
}
),
]
)
#SYS._print(MyBrianer.BrianedMonitorsList[0].__dict__)
#SYS._print(
# MyBrianer.BrianedNeuronGroupsList[0].__dict__
#)
#import matplotlib
#plot(MyBrianer['<Connectome>FirstRater'].)
#Print
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Simulaters/Brianer/draft/01_ExampleCell copy 4.py
|
Python
|
mit
| 2,372
|
[
"Brian"
] |
d074c0987a2a2b3021093b0b0585d776cbb3e7fcfcca2225bbe8673c950a655f
|
from __future__ import division, print_function
import numpy as np
import xarray as xr
import numpy.ma as ma
from scipy.interpolate import LinearNDInterpolator as lndi
import matplotlib.dates as dates
import copy
def get_nv(self):
"""
    Finds the element vertices.
"""
neighbours_orig = self._neighbours
nnodes = self.nnodes
maxnei = self.max_neighbours
try:
import pyximport; pyximport.install()
import get_nv as gnv
nv = gnv.get_nvc(neighbours_orig, nnodes, maxnei)
except:
        print('There was an issue using cython; falling back to pure python.')
nv = np.empty((len(neighbours_orig)*2, 3))
neighbours = copy.deepcopy(neighbours_orig)
kk=0
for i in range(nnodes-2):
nei_cnt = 1
for ii in range(maxnei-1):
if neighbours[i, ii+1]==0:
break
nei_cnt = ii+1
if neighbours[i, ii] <= (i+1):
continue
if neighbours[i, ii+1] <= (i+1):
continue
for j in range(maxnei):
if neighbours[neighbours[i, ii]-1, j] != neighbours[i, ii+1]:
continue
nv[kk, :] = [i+1, neighbours[i,ii], neighbours[i, ii+1]]
kk = kk+1
break
if (nei_cnt > 1):
for j in range(maxnei):
if neighbours[i, 0] <= (i+1):
break
if neighbours[i, nei_cnt] <= (i+1):
break
if neighbours[neighbours[i, 0]-1, j] == 0:
break
if neighbours[neighbours[i, 0]-1, j] != neighbours[i, nei_cnt]:
continue
nv[kk, :] = [i+1, neighbours[i, nei_cnt], neighbours[i, 0]]
kk = kk+1
break
nv = np.delete(nv, np.s_[kk:], axis=0)
nv = (nv-1).astype(int)
return nv
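# Editor's note: nv is the element connectivity table, one row of three
# zero-based node indices per triangle, assembled from the one-based,
# zero-padded node-neighbour lists; the optional cython module (get_nv,
# assumed to ship with this package) only accelerates the same loop.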
def load_nemo(maskname, coordsname, outputname, varlist=[]):
"""
    Loads NEMO mask, coordinate, and output files into a single dictionary.
"""
nemo = {}
nemo['coords']={}
nemo['mask']={}
nemo['rawdata']={}
nemo['mask_filename']=maskname
nemo['coordinate_filename']=coordsname
nemo['output_filename']=outputname
# Load mask with xr netcdf
ncid = xr.open_dataset(nemo['mask_filename'])
for key in ncid.variables.keys():
nemo['mask'][key]=ncid.variables[key]
# Load coordinates with xr netcdf
ncid = xr.open_dataset(nemo['coordinate_filename'])
for key in ncid.variables.keys():
nemo['coords'][key]=ncid.variables[key]
    # Load output with xr netcdf
ncid = xr.open_dataset(nemo['output_filename'])
for key in ncid.variables.keys():
nemo['rawdata'][key]=ncid.variables[key]
# Set convenience fields
nemo['LON']=nemo['coords']['nav_lon']
nemo['LAT']=nemo['coords']['nav_lat']
nemo['lon']=np.ravel(nemo['LON'])
nemo['lat']=np.ravel(nemo['LAT'])
nemo['x'],nemo['y'],nemo['proj']=lcc(nemo['lon'],nemo['lat'])
nemo['xy']=np.array([nemo['x'],nemo['y']]).T
nemo['ll']=np.array([nemo['lon'],nemo['lat']]).T
# Don't set mask for all data. Causes large ram usage.
# Not sure if possible to have memory mapped masks....
return nemo
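# Hedged usage sketch for load_nemo (the filenames below are hypothetical and
# only illustrate the expected call signature):
#
#     nemo = load_nemo('mesh_mask.nc', 'coordinates.nc', 'output_grid_T.nc')
#     print(nemo['LON'].shape, list(nemo['rawdata'].keys()))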
def interp_nemo(nemo, points, fieldname, time, layer=None, mymask=None):
"""
    Interpolates a NEMO data field onto a set of points.
"""
# If no specified mask try and pick mask based on fieldname.
if mymask is None:
masks = makemasks(nemo)
mymask = masks[fieldname]
# Sigh... slicing multi-dim arrays is not working the same as ipython...
# Grrrrrrr....
# So if we have layers then slice layer and time together else just time...
# Then slice layer if specified
if layer is not None:
tdata = nemo['rawdata'][fieldname][time,layer,]
mymask = ~mymask[layer,:].astype(bool)
else:
tdata = nemo['rawdata'][fieldname][time,]
datain = ma.masked_array(tdata, mask=mymask, fill_value=np.nan)
interpf = lndi(nemo['ll'], np.ravel(datain), fill_value=np.nan)
values = interpf(points[:,0], points[:,1])
return values
def lcc(lon,lat):
"""
    Given lon/lat arrays, converts them to x, y and returns them along with the projection.
"""
try:
import pyproj as pyp
except ImportError:
print("pyproj is not installed, please install pyproj.")
return
# Define the lcc projection
xmax = np.nanmax(lon)
xmin = np.nanmin(lon)
ymax = np.nanmax(lat)
ymin = np.nanmin(lat)
xavg = ( xmax + xmin ) * 0.5;
yavg = ( ymax + ymin ) * 0.5;
ylower = ( ymax - ymin ) * 0.25 + ymin;
yupper = ( ymax - ymin ) * 0.75 + ymin;
projstr = 'lcc +lon_0='+str(xavg)+' +lat_0='+str(yavg)+' +lat_1='+str(ylower)+' +lat_2='+str(yupper)
proj = pyp.Proj(proj=projstr)
x, y = proj(lon,lat)
return x, y, proj
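# Hedged usage example for lcc (coordinates are made up for illustration; the
# inverse call relies on pyproj's standard Proj(..., inverse=True) behaviour):
#
#     x, y, proj = lcc(np.array([-66.0, -63.5]), np.array([44.0, 45.5]))
#     lon_back, lat_back = proj(x, y, inverse=True)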
def makemasks(nemo):
"""
Create dictionary of masks.
"""
masks={}
try:
masks['votemper']=nemo['mask']['tmask'][0,]
except KeyError:
print('Missing tmask, no mask set for votemper.')
try:
masks['vosaline']=nemo['mask']['tmask'][0,]
except KeyError:
print('Missing tmask, no mask set for vosaline.')
try:
        # This is a hair hacky; should find the real mask
a = np.min(nemo['rawdata']['sossheig'],axis=0)
b = np.max(nemo['rawdata']['sossheig'],axis=0)
masks['sossheig'] = a==b
except KeyError:
        print("Can't create sossheig mask, no mask set for sossheig.")
return masks
def find_depth(nemo, depth):
"""
Find the layer that is closest to each depth
"""
h = nemo['rawdata']['deptht']
idx = np.array([],dtype=np.int)
for val in depth:
tidx = np.argmin(np.fabs(h-val))
idx=np.append(idx,tidx)
return idx
def find_time(nemo, time):
"""
Find the time that is closest to each time
"""
if type(time) is not np.ndarray:
time = np.array(time, dtype=np.datetime64)
if type(time.flat[0]) is not np.datetime64:
time = time.astype(np.datetime64)
times = nemo['rawdata']['time_counter']
idx = np.array([],dtype=np.int)
for val in time:
tidx = np.argmin(np.abs(times-val))
idx=np.append(idx,tidx)
return idx
def fix_nanzero(mesh, datain, fixnan=True, fixzero=True, cutoff=0):
"""
    Given a data set on a grid, spreads neighbouring data into NaN and below-cutoff nodes.
Note: only works with node data currently.
"""
data = copy.deepcopy(datain)
def idx_nanzero(data, fixnan, fixzero):
idx=np.array([],dtype=int)
if fixnan==True:
idx = np.append(idx,np.argwhere(np.isnan(data)==1))
if fixzero==True:
idx = np.append(idx,np.argwhere(data<cutoff))
return idx
idx = idx_nanzero(data, fixnan, fixzero)
while len(idx)>0:
for i in idx:
neis = (mesh._neighbours[i,]-1).astype(int)
neis = neis[neis!=-1]
nodes = data[neis]
nidx = idx_nanzero(nodes, fixnan, fixzero)
bidx = np.ones(neis.shape,dtype=bool)
bidx[nidx]=False
if np.sum(bidx)>0:
data[i] = np.mean(nodes[bidx])
idx = idx_nanzero(data, fixnan, fixzero)
return data
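# Hedged usage note: fix_nanzero expects `mesh` to expose `_neighbours` in the
# same one-based, zero-padded layout used by get_nv above; a call might look like
#
#     cleaned = fix_nanzero(mesh, node_values, fixzero=True, cutoff=0)
#
# where mesh and node_values are hypothetical objects built elsewhere in this package.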
def sort_boundary(mesh):
"""
Given a mesh it sorts the external boundary and returns it as an array.
"""
boundary = 1
bcode = mesh._boundary_code
nodenumber = mesh._nodenumber
neighbours = mesh._neighbours
nn = copy.deepcopy(nodenumber[bcode==boundary]).astype(int)
nnei = copy.deepcopy(neighbours[bcode==boundary]).astype(int)
#find the neighbour of the first node
idx = np.argwhere(nnei==nn[0])[0][0]
#have to use temp values with copy as the standard swap doesn't work when things are swapped again and again.
    #there must be a more pythonic way to handle that....
tmpval = nn[1].copy()
nn[1] = nn[idx]
nn[idx] = tmpval
tmpval = nnei[1,:].copy()
nnei[1,:] = nnei[idx,:]
nnei[idx,:] = tmpval
for i in range(1,len(nn)-1):
for j in range(mesh.max_neighbours):
nei = nnei[i,j]
if nei==0: continue
idx = np.argwhere(nn[(i+1):]==nei)
if len(idx)==1:
tmpval = nn[(i+1)].copy()
nn[(i+1)] = nn[(idx+i+1)]
nn[(idx+i+1)] = tmpval
tmpval = nnei[(i+1),:].copy()
nnei[(i+1),:] = nnei[(idx+i+1),:]
nnei[(idx+i+1),:] = tmpval
break
return nn
|
moflaher/fvcom_pyprocessor
|
fvcom_pyprocessor/utilities.py
|
Python
|
agpl-3.0
| 9,163
|
[
"NetCDF"
] |
f12003eb4bd39195be6b29da3990319aaa549dd4bab41155ca658bd4be80d577
|
import urllib.parse
import pytest
import tldextract
import cocrawler.urls as urls
from cocrawler.urls import URL
def test_urllib_parse():
# This is just here so I can understand what urllib is doing with these:
assert urllib.parse.urljoin('http://example.com/foo', '///bar') == 'http://example.com/bar'
assert urllib.parse.urljoin('http://example.com/foo', '////bar') == 'http://example.com//bar'
assert urllib.parse.urljoin('http://example.com/foo', '/////bar') == 'http://example.com///bar'
assert urllib.parse.urljoin('https://example.com/foo', '///bar') == 'https://example.com/bar'
assert urllib.parse.urljoin('https://example.com/foo', '////bar') == 'https://example.com//bar'
assert urllib.parse.urljoin('https://example.com/foo', '/////bar') == 'https://example.com///bar'
assert urllib.parse.urljoin('http://example.com/foo', '///bar.com') == 'http://example.com/bar.com'
assert urllib.parse.urljoin('http://example.com/foo', '////bar.com') == 'http://example.com//bar.com'
assert urllib.parse.urljoin('http://example.com/foo', '/////bar.com') == 'http://example.com///bar.com'
# this round-trips; I canonicalize these urls the same
assert urllib.parse.urljoin('http://example.com', '?q=123') == 'http://example.com?q=123'
assert urllib.parse.urljoin('http://example.com/', '?q=123') == 'http://example.com/?q=123'
def test_clean_webpage_links():
cwl = urls.clean_webpage_links
assert cwl(' foo ') == 'foo'
assert cwl(' foo\t ') == 'foo'
assert cwl('\x01 foo ') == 'foo'
assert cwl('///foo ') == '//foo'
assert cwl('////foo ') == '//foo'
assert cwl('http:///foo/bar') == 'http://foo/bar'
assert cwl('https:///foo\\bar') == 'https://foo/bar'
assert cwl('\\\\\\foo ') == '//foo'
assert cwl('\\\\\\\\foo ') == '//foo'
assert cwl('http:\\\\\\foo') == 'http://foo'
assert cwl('https:\\\\\\foo\\bar') == 'https://foo/bar'
assert cwl('h\nt\r\ntp://ex\r\nample.com') == 'http://example.com'
# short urls don't mess with this
assert cwl('"') == '"'
assert cwl('http://foo.com">') == 'http://foo.com">' # although maybe this should flag
# long urls
assert cwl('x'*100 + ' ' + 'x'*400) == 'x' * 100
assert cwl('x'*100 + '\r >"' + 'x'*400) == 'x' * 100
assert cwl('x'*100 + '\n' + 'x'*400) == 'x' * 100
assert cwl('x'*2001) == '' # throw-up-hands error case
assert cwl('&') == '&'
assert cwl('&amp;') == '&'
assert cwl('&') == '&'
def test_remove_dot_segments():
rds = urls.remove_dot_segments
# examples from rfc 3986
assert rds('/a/b/c/./../../g') == '/a/g'
assert rds('/mid/content=5/../6') == '/mid/6'
# and a few test cases of our own
assert rds('foo') == 'foo' # we used to raise ValueError, but it's too common
assert rds('/') == '/'
assert rds('/..') == '/'
assert rds('/.') == '/'
assert rds('/../foo') == '/foo'
assert rds('/../foo/') == '/foo/'
assert rds('/.././foo/./') == '/foo/'
assert rds('/./.././../foo/') == '/foo/'
assert rds('/./.././../foo/./') == '/foo/'
assert rds('/./.././../foo/../bar/') == '/bar/'
assert rds('/./.././../foo/../bar') == '/bar'
# urljoin examples from RFC 3986 -- joined 'by hand' and then ./.. processed
# kept only the ones with ./..
assert rds('/b/c/./g') == '/b/c/g'
assert rds('/b/c/.') == '/b/c'
assert rds('/b/c/./') == '/b/c/'
assert rds('/b/c/..') == '/b'
assert rds('/b/c/../') == '/b/'
assert rds('/b/c/../g') == '/b/g'
assert rds('/b/c/../..') == '/'
assert rds('/b/c/../../') == '/'
assert rds('/b/c/../../g') == '/g'
def test_safe_url_canonicalization():
suc = urls.safe_url_canonicalization
assert suc('http://example.com/?') == ('http://example.com/', '')
assert suc('http://Example%2ECom?') == ('http://example.com/', '')
assert suc('http://example.com/?foo=bar') == ('http://example.com/?foo=bar', '')
assert suc('http://example.com?foo=bar') == ('http://example.com/?foo=bar', '')
assert suc('HTTP://EXAMPLE.COM/') == ('http://example.com/', '')
assert suc('HTTP://EXAMPLE.COM:80/') == ('http://example.com/', '')
assert suc('httpS://EXAMPLE.COM:443/') == ('https://example.com/', '')
assert suc('HTTP://EXAMPLE.COM:81/') == ('http://example.com:81/', '')
assert suc('http://example.com#frag') == ('http://example.com/', '#frag')
assert suc('http://example.com#!frag') == ('http://example.com/', '#!frag')
assert suc('http://example.com/#frag') == ('http://example.com/', '#frag')
assert suc('http://example.com/?foo=bar#frag') == ('http://example.com/?foo=bar', '#frag')
assert suc('http://bücher.com/?') == ('http://xn--bcher-kva.com/', '')
assert suc('http://example.com/%2a%3Doof%20%%2f') == ('http://example.com/*=oof%20%%2f', '')
assert suc('http://example.com/foo%2a%3D%20%%2ffoo') == ('http://example.com/foo*=%20%%2ffoo', '')
# unreserved
assert suc('http://example.com/%41%5a%61%7a%30%39%2d%2e%5f%7e') == ('http://example.com/AZaz09-._~', '')
assert suc('http://example.com/%5b%7b%3C') == ('http://example.com/%5B%7B%3C', '')
# path
assert suc('http://example.com/%21%24%3b%3d%3a%40') == ('http://example.com/!$;=:@', '')
assert suc('http://example.com/?%21%24%3b%3d%3a%40') == ('http://example.com/?!$;%3D:@', '')
assert suc('http://example.com/#%21%24%3b%3d%3a%40') == ('http://example.com/', '#!$;%3D:@')
assert suc('http://example.com/foo bar') == ('http://example.com/foo%20bar', '')
# query/fragment
assert suc('http://example.com/%3a%40%2f%3f%40') == ('http://example.com/:@%2F%3F@', '')
assert suc('http://example.com/?%3a%40%2f%3f%40') == ('http://example.com/?:@/?@', '')
assert suc('http://example.com/?foo bar') == ('http://example.com/?foo+bar', '')
assert suc('http://example.com/#%3a%40%2f%3f%40') == ('http://example.com/', '#:@/?@')
    # Bug report from Sebastian Nagel of CC to IA:
assert suc('http://visit.webhosting.yahoo.com/visit.gif?&r=http%3A//web.archive.org/web/20090517140029/http%3A//anthonystewarthead.electric-chi.com/&b=Netscape%205.0%20%28Windows%3B%20en-US%29&s=1366x768&o=Win32&c=24&j=true&v=1.2') == \
('http://visit.webhosting.yahoo.com/visit.gif?&r=http://web.archive.org/web/20090517140029/http://anthonystewarthead.electric-chi.com/&b=Netscape%205.0%20(Windows;%20en-US)&s=1366x768&o=Win32&c=24&j=true&v=1.2', '')
def test_special_redirect():
sr = urls.special_redirect
assert sr(URL('http://example.com/'), URL('http://example.com/foo')) is None
assert sr(URL('http://example.com/'), URL('https://example.com/foo')) is None
assert sr(URL('http://example.com/'), URL('https://www.example.com/foo')) is None
assert sr(URL('http://example.com/'), URL('http://example.com/?foo=1')) is None
assert sr(URL('http://example.com/'), URL('http://example.com/bar?foo=1')) is None
url1 = URL('http://example.com/')
assert sr(url1, url1) == 'same'
assert sr(url1, URL('https://example.com/')) == 'tohttps'
assert sr(url1, URL('http://www.example.com/')) == 'towww'
assert sr(url1, URL('https://www.example.com/')) == 'towww+tohttps'
url2str = 'http://www.example.com/'
url2 = URL(url2str)
assert sr(url2, URL('https://www.example.com/')) == 'tohttps'
assert sr(url2, URL('http://example.com/')) == 'tononwww'
assert sr(url2, URL('https://example.com/')) == 'tononwww+tohttps'
assert sr(url2str, 'https://www.example.com/') == 'tohttps'
assert sr(url2str, 'http://example.com/') == 'tononwww'
assert sr(url2str, 'https://example.com/') == 'tononwww+tohttps'
url3 = URL('https://www.example.com/')
assert sr(url3, URL('http://www.example.com/')) == 'tohttp'
assert sr(url3, URL('https://example.com/')) == 'tononwww'
assert sr(url3, URL('http://example.com/')) == 'tononwww+tohttp'
url4 = URL('https://example.com/')
assert sr(url4, URL('http://www.example.com/')) == 'towww+tohttp'
url5 = URL('https://example.com/foo')
url6 = URL('https://example.com/foo/')
assert sr(url5, url6) == 'addslash'
assert sr(url6, url5) == 'removeslash'
def test_get_domain():
assert urls.get_domain('http://www.bbc.co.uk') == 'bbc.co.uk'
assert urls.get_domain('http://www.nhs.uk') == 'www.nhs.uk' # nhs.uk is a public suffix, surprise
assert urls.get_domain('http://sub.nhs.uk') == 'sub.nhs.uk' # ditto
assert urls.get_domain('http://www.example.com') == 'example.com'
assert urls.get_domain('http://sub.example.com') == 'example.com'
assert urls.get_domain('http://sub.blogspot.com') == 'sub.blogspot.com', "make sure private domains are included"
# if the blogspot test doesn't work, try this from the shell: "tldextract -u -p"
# unfortunately, all tldextract users use the same cache
# https://github.com/john-kurkowski/tldextract/issues/66
assert urls.get_domain('http://www.com') == 'www.com'
def test_get_hostname():
assert urls.get_hostname('http://www.bbc.co.uk') == 'www.bbc.co.uk'
assert urls.get_hostname('http://www.bbc.co.uk', remove_www=True) == 'bbc.co.uk'
assert urls.get_hostname('http://bbc.co.uk') == 'bbc.co.uk'
assert urls.get_hostname('http://www.example.com') == 'www.example.com'
assert urls.get_hostname('http://www.example.com:80') == 'www.example.com'
assert urls.get_hostname('http://www.sub.example.com') == 'www.sub.example.com'
assert urls.get_hostname('http://sub.example.com') == 'sub.example.com'
assert urls.get_hostname('http://www.com') == 'www.com'
assert urls.get_hostname('http://www.com', remove_www=True) == 'www.com'
def test_tldextract():
'''
verify that tldextract parses just the netloc
    This is neither documented nor tested by tldextract (!)
'''
assert tldextract.extract('example.com').registered_domain == 'example.com'
assert tldextract.extract('www.example.com').registered_domain == 'example.com'
def test_URL():
url = URL('http://www.example.com/')
assert url.url == 'http://www.example.com/'
assert list(url.urlsplit) == ['http', 'www.example.com', '/', '', '']
assert url.netloc == 'www.example.com'
assert url.hostname == 'www.example.com'
assert url.hostname_without_www == 'example.com'
assert url.registered_domain == 'example.com'
assert url.original_frag is None
url = URL('http://www.example.com/#foo#foo')
assert url.original_frag == '#foo#foo'
url = URL('http://www.example.com/#')
assert url.original_frag is None
# canonicalization
url = URL('http://www.example.com/?')
assert url.url == 'http://www.example.com/'
url = URL('http://www.example.com')
assert url.url == 'http://www.example.com/'
url = URL('http://www.example.com/?#')
assert url.url == 'http://www.example.com/'
url = URL('http://www.example.com/foo')
assert url.url == 'http://www.example.com/foo'
url = URL('http://www.example.com/foo/')
assert url.url == 'http://www.example.com/foo/'
# urljoin
urlj1 = URL('http://www.example.com/foo/')
urlj2 = 'http://www.example.com/foo/'
url = URL('foo', urljoin=urlj1)
assert url.url == 'http://www.example.com/foo/foo'
url = URL('foo', urljoin=urlj1)
assert url.url == 'http://www.example.com/foo/foo'
url = URL('/bar', urljoin=urlj1)
assert url.url == 'http://www.example.com/bar'
url = URL('/bar', urljoin=urlj2)
assert url.url == 'http://www.example.com/bar'
url = URL('http://sub.example.com/', urljoin=urlj1)
assert url.url == 'http://sub.example.com/'
url = URL('http://sub.example.com/', urljoin=urlj2)
assert url.url == 'http://sub.example.com/'
url = URL('foo', urljoin='http://example.com/subdir/') # base can cause this
assert url.url == 'http://example.com/subdir/foo'
# read-only
with pytest.raises(AttributeError):
url.url = 'foo'
# urljoin examples from RFC 3986 -- python takes care of . and ..
urlj = URL('http://a/b/c/d;p?q')
# assert URL('g:h', urljoin=urlj).url == 'g:h' # absolute url missing hostname
assert URL('g', urljoin=urlj).url == 'http://a/b/c/g'
assert URL('./g', urljoin=urlj).url == 'http://a/b/c/g'
assert URL('g/', urljoin=urlj).url == 'http://a/b/c/g/'
assert URL('/g', urljoin=urlj).url == 'http://a/g'
assert URL('//g', urljoin=urlj).url == 'http://g/' # altered because I insist on the trailing /
assert URL('?y', urljoin=urlj).url == 'http://a/b/c/d;p?y'
assert URL('g?y', urljoin=urlj).url == 'http://a/b/c/g?y'
assert URL('#s', urljoin=urlj).url == 'http://a/b/c/d;p?q' # I drop the frag
assert URL('g#s', urljoin=urlj).url == 'http://a/b/c/g' # I drop the frag
assert URL('g?y#s', urljoin=urlj).url == 'http://a/b/c/g?y' # I drop the frag
assert URL(';x', urljoin=urlj).url == 'http://a/b/c/;x'
assert URL('g;x', urljoin=urlj).url == 'http://a/b/c/g;x'
assert URL('g;x?y#s', urljoin=urlj).url == 'http://a/b/c/g;x?y' # I drop the frag
assert URL('', urljoin=urlj).url == 'http://a/b/c/d;p?q'
assert URL('.', urljoin=urlj).url == 'http://a/b/c/'
assert URL('./', urljoin=urlj).url == 'http://a/b/c/'
assert URL('..', urljoin=urlj).url == 'http://a/b/'
assert URL('../', urljoin=urlj).url == 'http://a/b/'
assert URL('../g', urljoin=urlj).url == 'http://a/b/g'
assert URL('../..', urljoin=urlj).url == 'http://a/'
assert URL('../../', urljoin=urlj).url == 'http://a/'
assert URL('../../g', urljoin=urlj).url == 'http://a/g'
|
cocrawler/cocrawler
|
tests/unit/test_urls.py
|
Python
|
apache-2.0
| 13,568
|
[
"VisIt"
] |
b0bc21a8eaffa4add3305c2b7b8b568afa378e013f94e4e153ccfae31f8e3639
|
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.six import iteritems, string_types
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.helpers import load_list_of_roles
from ansible.playbook.role.include import RoleInclude
__all__ = ['RoleMetadata']
class RoleMetadata(Base):
'''
This class wraps the parsing and validation of the optional metadata
within each Role (meta/main.yml).
'''
_allow_duplicates = FieldAttribute(isa='bool', default=False)
_dependencies = FieldAttribute(isa='list', default=[])
_galaxy_info = FieldAttribute(isa='GalaxyInfo')
def __init__(self, owner=None):
self._owner = owner
super(RoleMetadata, self).__init__()
@staticmethod
def load(data, owner, variable_manager=None, loader=None):
'''
Returns a new RoleMetadata object based on the datastructure passed in.
'''
if not isinstance(data, dict):
raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
return m
def _load_dependencies(self, attr, ds):
'''
This is a helper loading function for the dependencies list,
which returns a list of RoleInclude objects
'''
if ds is None:
ds = []
current_role_path = None
if self._owner:
current_role_path = os.path.dirname(self._owner._role_path)
try:
return load_list_of_roles(ds, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds)
def _load_galaxy_info(self, attr, ds):
'''
This is a helper loading function for the galaxy info entry
in the metadata, which returns a GalaxyInfo object rather than
a simple dictionary.
'''
return ds
def serialize(self):
return dict(
allow_duplicates = self._allow_duplicates,
dependencies = self._dependencies,
)
def deserialize(self, data):
setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
setattr(self, 'dependencies', data.get('dependencies', []))
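# Editor's illustration (not part of the Ansible source): the datastructure
# handed to RoleMetadata.load() is a role's parsed meta/main.yml and, for the
# attributes declared above, looks roughly like the hypothetical dict
#
#     {
#         'allow_duplicates': False,
#         'dependencies': [{'role': 'common'}],
#         'galaxy_info': {'author': 'someone', 'license': 'GPLv3'},
#     }
#
# The concrete keys inside 'dependencies' and 'galaxy_info' are examples only.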
|
goozbach/ansible
|
lib/ansible/playbook/role/metadata.py
|
Python
|
gpl-3.0
| 3,373
|
[
"Galaxy"
] |
a11ace556f623b6271e05507f8c81e2d40f048b76bf7aff60b4a17eb102809d2
|
#! /usr/bin/env python2.7
"""
This script performs event averaging for particle
spectra and anisotropic flow coefficients calculated
from event-by-event simulations.
v_n is analyzed up to n = 6
Format for particle_XXX_vndata.dat file:
n_order real_part real_part_err imag_part imag_part_err
Format for particle_XXX_vndata_diff.dat file:
pT(GeV) pT_err(GeV) dN/(2pi dy pT dpT)(GeV^-2) dN/(2pi dy pT dpT)_err(GeV^-2)
vn_real vn_real_err vn_imag vn_imag_err
All the errors are only statistic errors
"""
from sys import argv, exit
from os import path, mkdir
from glob import glob
from numpy import *
import shutil
# define colors
purple = "\033[95m"
green = "\033[92m"
blue = "\033[94m"
yellow = "\033[93m"
red = "\033[91m"
normal = "\033[0m"
try:
working_folder = path.abspath(argv[1])
avg_folder = path.join(path.abspath(argv[2]),
working_folder.split('/')[-1])
print("output folder: %s" % avg_folder)
if(path.isdir(avg_folder)):
print("folder %s already exists!" % avg_folder)
var = raw_input("do you want to delete it? [y/N]")
if 'y' in var:
shutil.rmtree(avg_folder)
else:
print("please choose another folder path~")
exit(0)
mkdir(avg_folder)
except IndexError:
print("Usage: average_event_spvn.py working_folder results_folder")
exit(1)
particle_list = ['pion_p', 'pion_m', 'Kaon_p', 'Kaon_m', 'proton',
'anti_proton', 'Lambda', 'Xi_m', 'anti_Xi',
'Omega', 'Charged_eta']
nonlinear_reponse_correlator_name_list = [
'v4_L', 'v4(Psi2)', 'rho_422', 'chi_422',
'v5_L', 'v5(Psi23)', 'rho_523', 'chi_523',
'v6_L', 'v6(Psi2)', 'v6(Psi3)',
'rho_6222', 'rho_633', 'chi_6222', 'chi_633']
n_order = 7
def calcualte_inte_vn(pT_low, pT_high, data):
npT = 50
pT_inte_array = linspace(pT_low, pT_high, npT)
dN_event = data[:, 2]
pT_event = data[:, 0]
dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))
temp_vn_array = []
for iorder in range(1, n_order):
vn_real_event = data[:, 3*iorder]
vn_imag_event = data[:, 3*iorder+1]
vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)
vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)
vn_real_inte = (
sum(vn_real_interp*dN_interp*pT_inte_array)
/sum(dN_interp*pT_inte_array))
vn_imag_inte = (
sum(vn_imag_interp*dN_interp*pT_inte_array)
/sum(dN_interp*pT_inte_array))
vn_inte = vn_real_inte + 1j*vn_imag_inte
temp_vn_array.append(vn_inte)
return(temp_vn_array)
def calculate_chi_422(vn_array):
v2_array = vn_array[:, 1]
nev = len(v2_array)
v4_array = vn_array[:, 3]
chi_422_num = v4_array*(v2_array.conjugate())**2
chi_422_den = (abs(v2_array))**4
chi_422_num_ave = mean(chi_422_num)
chi_422_den_ave = mean(chi_422_den)
chi_422_num_err = std(chi_422_num)/sqrt(nev)
chi_422_den_err = std(chi_422_den)/sqrt(nev)
chi_422 = chi_422_num_ave/chi_422_den_ave
chi_422_err = sqrt(
(chi_422_num_err/chi_422_den_ave)**2.
+ (chi_422_num_ave*chi_422_den_err/(chi_422_den_ave)**2.)**2.)
return(chi_422.real, chi_422_err.real)
def calculate_chi_523(vn_array):
v2_array = vn_array[:, 1]
nev = len(v2_array)
v3_array = vn_array[:, 2]
v5_array = vn_array[:, 4]
chi_523_num = v5_array*(v2_array.conjugate()*v3_array.conjugate())
chi_523_den = (abs(v2_array))**2*(abs(v3_array))**2
chi_523_num_ave = mean(chi_523_num)
chi_523_den_ave = mean(chi_523_den)
chi_523_num_err = std(chi_523_num)/sqrt(nev)
chi_523_den_err = std(chi_523_den)/sqrt(nev)
chi_523 = chi_523_num_ave/chi_523_den_ave
chi_523_err = sqrt(
(chi_523_num_err/chi_523_den_ave)**2.
+ (chi_523_num_ave*chi_523_den_err/(chi_523_den_ave)**2.)**2.)
return(chi_523.real, chi_523_err.real)
def calculate_chi_6222(vn_array):
v6_array = vn_array[:, 5]
v2_array = vn_array[:, 1]
nev = len(v2_array)
chi_6222_num = v6_array*((v2_array.conjugate())**3)
chi_6222_den = (abs(v2_array))**6
chi_6222_num_ave = mean(chi_6222_num)
chi_6222_den_ave = mean(chi_6222_den)
chi_6222_num_err = std(chi_6222_num)/sqrt(nev)
chi_6222_den_err = std(chi_6222_den)/sqrt(nev)
chi_6222 = chi_6222_num_ave/chi_6222_den_ave
chi_6222_err = sqrt(
(chi_6222_num_err/chi_6222_den_ave)**2.
+ (chi_6222_num_ave*chi_6222_den_err/(chi_6222_den_ave)**2.)**2.)
return(chi_6222.real, chi_6222_err.real)
def calculate_chi_633(vn_array):
v6_array = vn_array[:, 5]
v3_array = vn_array[:, 2]
nev = len(v3_array)
chi_633_num = v6_array*((v3_array.conjugate())**2)
chi_633_den = (abs(v3_array))**4
chi_633_num_ave = mean(chi_633_num)
chi_633_den_ave = mean(chi_633_den)
chi_633_num_err = std(chi_633_num)/sqrt(nev)
chi_633_den_err = std(chi_633_den)/sqrt(nev)
chi_633 = chi_633_num_ave/chi_633_den_ave
chi_633_err = sqrt(
(chi_633_num_err/chi_633_den_ave)**2.
+ (chi_633_num_ave*chi_633_den_err/(chi_633_den_ave)**2.)**2.)
return(chi_633.real, chi_633_err.real)
def calculate_v4_Psi2(chi_422, chi_422_err, vn_array):
"""
v4(Psi2) = chi_422*sqrt(<abs(V2)**4>)
"""
v2_array = vn_array[:, 1]
nev = len(v2_array)
temp_array = abs(v2_array)**4
v2_factor = sqrt(mean(temp_array))
v2_factor_err = std(temp_array)/(2.*v2_factor)/sqrt(nev)
v4_Psi2 = chi_422*v2_factor
v4_Psi2_err = sqrt((chi_422_err*v2_factor)**2.
+ (chi_422*v2_factor_err)**2.)
return(v4_Psi2, v4_Psi2_err)
def calculate_v5_Psi23(chi_523, chi_523_err, vn_array):
"""
v5(Psi23) = chi_523*sqrt(<abs(V2)**2*abs(V3)**2>)
"""
v2_array = vn_array[:, 1]
v3_array = vn_array[:, 2]
nev = len(v2_array)
temp_array = abs(v2_array)**2.*abs(v3_array)**2.
v23_factor = sqrt(mean(temp_array))
v23_factor_err = std(temp_array)/(2.*v23_factor)/sqrt(nev)
v5_Psi23 = chi_523*v23_factor
v5_Psi23_err = sqrt((chi_523_err*v23_factor)**2.
+ (chi_523*v23_factor_err)**2.)
return(v5_Psi23, v5_Psi23_err)
def calculate_v6_Psi2(chi_6222, chi_6222_err, vn_array):
"""
v6(Psi2) = chi_6222*sqrt(<abs(V2)**6>)
"""
v2_array = vn_array[:, 1]
nev = len(v2_array)
temp_array = abs(v2_array)**6.
v2_factor = sqrt(mean(temp_array))
v2_factor_err = std(temp_array)/(2.*v2_factor)/sqrt(nev)
v6_Psi2 = chi_6222*v2_factor
v6_Psi2_err = sqrt((chi_6222_err*v2_factor)**2.
+ (chi_6222*v2_factor_err)**2.)
return(v6_Psi2, v6_Psi2_err)
def calculate_v6_Psi3(chi_633, chi_633_err, vn_array):
"""
v6(Psi3) = chi_633*sqrt(<abs(V3)**4>)
"""
v3_array = vn_array[:, 2]
nev = len(v3_array)
temp_array = abs(v3_array)**4.
v3_factor = sqrt(mean(temp_array))
v3_factor_err = std(temp_array)/(2.*v3_factor)/sqrt(nev)
v6_Psi3 = chi_633*v3_factor
v6_Psi3_err = sqrt((chi_633_err*v3_factor)**2.
+ (chi_633*v3_factor_err)**2.)
return(v6_Psi3, v6_Psi3_err)
def calculate_rho_422(v4_Psi2, v4_Psi2_err, vn_array):
"""
rho_422 = v4(Psi2)/v4(Psi4)
"""
v4_array = vn_array[:, 3]
nev = len(v4_array)
v4_Psi4 = sqrt(mean(abs(v4_array)**2.))
v4_Psi4_err = std(abs(v4_array)**2.)/(2.*v4_Psi4)/sqrt(nev)
rho_422 = v4_Psi2/v4_Psi4
rho_422_err = sqrt((v4_Psi2_err/v4_Psi4)**2.
+ (v4_Psi2*v4_Psi4_err/v4_Psi4**2.)**2.)
return(rho_422, rho_422_err)
def calculate_rho_523(v5_Psi23, v5_Psi23_err, vn_array):
"""
rho_523 = v5(Psi23)/v5(Psi5)
"""
v5_array = vn_array[:, 4]
nev = len(v5_array)
v5_Psi5 = sqrt(mean(abs(v5_array)**2.))
v5_Psi5_err = std(abs(v5_array)**2.)/(2.*v5_Psi5)/sqrt(nev)
rho_523 = v5_Psi23/v5_Psi5
rho_523_err = sqrt((v5_Psi23_err/v5_Psi5)**2.
+ (v5_Psi23*v5_Psi5_err/v5_Psi5**2.)**2.)
return(rho_523, rho_523_err)
def calculate_rho_6222(v6_Psi2, v6_Psi2_err, vn_array):
"""
rho_6222 = v6(Psi2)/v6(Psi6)
"""
v6_array = vn_array[:, 5]
nev = len(v6_array)
v6_Psi6 = sqrt(mean(abs(v6_array)**2.))
v6_Psi6_err = std(abs(v6_array)**2.)/(2.*v6_Psi6)/sqrt(nev)
rho_6222 = v6_Psi2/v6_Psi6
rho_6222_err = sqrt((v6_Psi2_err/v6_Psi6)**2.
+ (v6_Psi2*v6_Psi6_err/v6_Psi6**2.)**2.)
return(rho_6222, rho_6222_err)
def calculate_rho_633(v6_Psi3, v6_Psi3_err, vn_array):
"""
rho_633 = v6(Psi3)/v6(Psi6)
"""
v6_array = vn_array[:, 5]
nev = len(v6_array)
v6_Psi6 = sqrt(mean(abs(v6_array)**2.))
v6_Psi6_err = std(abs(v6_array)**2.)/(2.*v6_Psi6)/sqrt(nev)
rho_633 = v6_Psi3/v6_Psi6
rho_633_err = sqrt((v6_Psi3_err/v6_Psi6)**2.
+ (v6_Psi3*v6_Psi6_err/v6_Psi6**2.)**2.)
return(rho_633, rho_633_err)
def calculate_v4_L(v4_Psi2, v4_Psi2_err, vn_array):
"""
v4_L = sqrt(v4(Psi4)^2 - v4(Psi2)^2)
"""
v4_array = vn_array[:, 3]
nev = len(v4_array)
v4_Psi4_sq = mean(abs(v4_array)**2.)
v4_Psi4_sq_err = std(abs(v4_array)**2.)/sqrt(nev)
v4_L = sqrt(v4_Psi4_sq - v4_Psi2**2.)
v4_L_err = (sqrt(v4_Psi4_sq_err**2. + (2.*v4_Psi2*v4_Psi2_err)**2.)
/(2.*v4_L))
return(v4_L, v4_L_err)
def calculate_v5_L(v5_Psi23, v5_Psi23_err, vn_array):
"""
v5_L = sqrt(v5(Psi5)^2 - v5(Psi23)^2)
"""
v5_array = vn_array[:, 4]
nev = len(v5_array)
v5_Psi5_sq = mean(abs(v5_array)**2.)
v5_Psi5_sq_err = std(abs(v5_array)**2.)/sqrt(nev)
v5_L = sqrt(v5_Psi5_sq - v5_Psi23**2.)
v5_L_err = (sqrt(v5_Psi5_sq_err**2. + (2.*v5_Psi23*v5_Psi23_err)**2.)
/(2.*v5_L))
return(v5_L, v5_L_err)
def calculate_v6_L(chi_6222, chi_6222_err, chi_633, chi_633_err, vn_array):
"""
v6_L = sqrt(v6(Psi6)^2 - chi_6222^2 v2^6
- chi_633^2 v3^4 - 2 Re(chi_6222*chi_633*v2^3 v3^{2*}))
"""
v6_array = vn_array[:, 5]
v2_array = vn_array[:, 1]
v3_array = vn_array[:, 2]
nev = len(v6_array)
v6_Psi6_sq = mean(abs(v6_array)**2.)
v6_Psi6_sq_err = std(abs(v6_array)**2.)/sqrt(nev)
v2_6 = mean(abs(v2_array)**6.)
v2_6_err = std(abs(v2_array)**6.)/sqrt(nev)
v3_4 = mean(abs(v3_array)**4.)
v3_4_err = std(abs(v3_array)**4.)/sqrt(nev)
v23 = real(mean(v2_array**3.*conj(v3_array)**2.))
v23_err = real(std(v2_array**3.*conj(v3_array)**2.))/sqrt(nev)
v6_L = (v6_Psi6_sq - chi_6222**2.*v2_6 - chi_633**2.*v3_4
- 2.*chi_6222*chi_633*v23)
v6_L_err = sqrt(
v6_Psi6_sq_err**2.
+ (2.*chi_6222*chi_6222_err*v2_6)**2. + (chi_6222**2.*v2_6_err)**2.
+ (2.*chi_633*chi_633_err*v3_4)**2. + (chi_633**2.*v3_4_err)**2.
+ (2.*chi_6222_err*chi_633*v23)**2.
+ (2.*chi_6222*chi_633_err*v23)**2.
+ (2.*chi_6222*chi_633*v23_err)**2.)
return(v6_L, v6_L_err)
def calculate_nonlinear_reponse(vn_array):
"""
this function computes all the nonlinear response coefficients
proposed in the paper arXiv: 1502.02502 up to v6
"""
chi_422, chi_422_err = calculate_chi_422(vn_array)
v4_Psi2, v4_Psi2_err = calculate_v4_Psi2(chi_422, chi_422_err, vn_array)
rho_422, rho_422_err = calculate_rho_422(v4_Psi2, v4_Psi2_err, vn_array)
v4_L, v4_L_err = calculate_v4_L(v4_Psi2, v4_Psi2_err, vn_array)
chi_523, chi_523_err = calculate_chi_523(vn_array)
v5_Psi23, v5_Psi23_err = calculate_v5_Psi23(chi_523, chi_523_err, vn_array)
rho_523, rho_523_err = calculate_rho_523(v5_Psi23, v5_Psi23_err, vn_array)
v5_L, v5_L_err = calculate_v5_L(v5_Psi23, v5_Psi23_err, vn_array)
chi_6222, chi_6222_err = calculate_chi_6222(vn_array)
v6_Psi2, v6_Psi2_err = calculate_v6_Psi2(chi_6222, chi_6222_err, vn_array)
rho_6222, rho_6222_err = calculate_rho_6222(v6_Psi2, v6_Psi2_err, vn_array)
chi_633, chi_633_err = calculate_chi_633(vn_array)
v6_Psi3, v6_Psi3_err = calculate_v6_Psi3(chi_633, chi_633_err, vn_array)
rho_633, rho_633_err = calculate_rho_633(v6_Psi3, v6_Psi3_err, vn_array)
v6_L, v6_L_err = calculate_v6_L(chi_6222, chi_6222_err,
chi_633, chi_633_err, vn_array)
results = [v4_L, v4_L_err, v4_Psi2, v4_Psi2_err, rho_422, rho_422_err,
chi_422, chi_422_err,
v5_L, v5_L_err, v5_Psi23, v5_Psi23_err, rho_523, rho_523_err,
chi_523, chi_523_err,
v6_L, v6_L_err, v6_Psi2, v6_Psi2_err, v6_Psi3, v6_Psi3_err,
rho_6222, rho_6222_err, rho_633, rho_633_err,
chi_6222, chi_6222_err, chi_633, chi_633_err]
return(results)
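# Hedged usage sketch (synthetic input, not part of the original analysis):
# vn_array is a complex 2D array with one row per event and columns V_1..V_6,
# so a self-test using numpy.random.normal could look like
#
#     fake_vn = (random.normal(0., 0.03, (1000, 6))
#                + 1j*random.normal(0., 0.03, (1000, 6)))
#     res = calculate_nonlinear_reponse(fake_vn)
#
# The returned list interleaves each value with its statistical error, in the
# order given by nonlinear_reponse_correlator_name_list.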
def calcualte_vn_2(vn_data_array):
vn_data_array = array(vn_data_array)
nev = len(vn_data_array[:, 0])
vn_2 = sqrt(mean(abs(vn_data_array)**2., 0)) + 1e-30
vn_2_err = std(abs(vn_data_array)**2., 0)/sqrt(nev)/2./vn_2
return(vn_2, vn_2_err)
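# Editor's note: the estimator above is v_n{2} = sqrt(<|V_n|^2>) averaged over
# events, with its statistical error obtained from std(|V_n|^2)/sqrt(nev) via
# the propagation d(sqrt(x)) = dx / (2 sqrt(x)).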
def calculate_diff_vn_single_event(pT_ref_low, pT_ref_high, data):
npT = 50
pT_inte_array = linspace(pT_ref_low, pT_ref_high, npT)
dN_event = data[:, 2]
pT_event = data[:, 0]
dN_interp = exp(interp(pT_inte_array, pT_event, log(dN_event+1e-30)))
dN_ref = sum(dN_interp*pT_inte_array)
temp_vn_real_array = []
temp_vn_imag_array = []
temp_vn_denorm_array = []
for iorder in range(1, n_order):
vn_real_event = data[:, 3*iorder]
vn_imag_event = data[:, 3*iorder+1]
vn_real_interp = interp(pT_inte_array, pT_event, vn_real_event)
vn_imag_interp = interp(pT_inte_array, pT_event, vn_imag_event)
vn_real_inte = (
sum(vn_real_interp*dN_interp*pT_inte_array)
/sum(dN_interp*pT_inte_array))
vn_imag_inte = (
sum(vn_imag_interp*dN_interp*pT_inte_array)
/sum(dN_interp*pT_inte_array))
vn_ref = vn_real_inte + 1j*vn_imag_inte
vn_pt = vn_real_event + 1j*vn_imag_event
numerator_real = real(dN_event*vn_pt*dN_ref*conj(vn_ref))
numerator_imag = imag(dN_event*vn_pt*dN_ref*conj(vn_ref))
denorm = dN_event*dN_ref
temp_vn_real_array.append(numerator_real)
temp_vn_imag_array.append(numerator_imag)
temp_vn_denorm_array.append(denorm)
return(temp_vn_real_array, temp_vn_imag_array, temp_vn_denorm_array)
def get_vn_diff_2PC_from_single_event(data):
dN_event = data[:, 2]
temp_vn_real_array = []
temp_vn_imag_array = []
temp_vn_denorm_array = []
for iorder in range(1, n_order):
vn_real_event = data[:, 3*iorder]
vn_imag_event = data[:, 3*iorder+1]
vn_pt = vn_real_event + 1j*vn_imag_event
numerator_real = real(dN_event*vn_pt)
numerator_imag = imag(dN_event*vn_pt)
denorm = dN_event
temp_vn_real_array.append(numerator_real)
temp_vn_imag_array.append(numerator_imag)
temp_vn_denorm_array.append(denorm)
return(temp_vn_real_array, temp_vn_imag_array, temp_vn_denorm_array)
def calculate_vn_diff_SP(vn_diff_real, vn_diff_imag, vn_diff_denorm,
vn_2, vn_2_err):
"""
    this function calculates the scalar-product vn
"""
vn_diff_real = array(vn_diff_real)
vn_diff_imag = array(vn_diff_imag)
vn_diff_denorm = array(vn_diff_denorm) + 1e-30
nev = len(vn_diff_denorm[:, 0])
vn_denorm = vn_2.reshape(len(vn_2), 1)
vn_denorm_err = vn_2_err.reshape(len(vn_2_err), 1)
vn_diff_SP = (
mean(vn_diff_real, 0)/mean(vn_diff_denorm, 0)/vn_denorm)
vn_diff_SP_err = sqrt(
( std(vn_diff_real, 0)/sqrt(nev)/mean(vn_diff_denorm, 0)
/vn_denorm)**2.
+ (vn_diff_SP*vn_denorm_err/vn_denorm)**2.)
return(vn_diff_SP, vn_diff_SP_err)
def calculate_vn_diff_2PC(vn_diff_real, vn_diff_imag, vn_diff_denorm):
"""
    this function calculates the rms vn[2](pT)
"""
vn_diff_real = array(vn_diff_real)
vn_diff_imag = array(vn_diff_imag)
nev = len(vn_diff_real[:, 0])
vn_diff_2PC = sqrt(mean((vn_diff_real**2. + vn_diff_imag**2.), 0))
vn_diff_2PC_err = (std((vn_diff_real**2. + vn_diff_imag**2.), 0)/sqrt(nev)
/(2.*vn_diff_2PC + 1e-15))
return(vn_diff_2PC, vn_diff_2PC_err)
def calculate_vn_distribution(vn_array):
nbin = 20
vn_array = array(vn_array)
vn_dim = len(vn_array[0, :])
output = []
for vn_order in range(vn_dim):
vn_mag_array = abs(vn_array[:, vn_order])
vn_min = min(vn_mag_array)
vn_max = max(vn_mag_array) + 1e-10
bin_boundaries = linspace(vn_min, vn_max, nbin+1)
bin_width = bin_boundaries[1] - bin_boundaries[0]
bin_center = zeros([nbin])
bin_value = zeros([nbin])
for vn_elem in vn_mag_array:
vn_idx = int((vn_elem - vn_min)/bin_width)
bin_value[vn_idx] += 1.
bin_center[vn_idx] += vn_elem
bin_center = bin_center/(bin_value + 1e-15)
bin_value = bin_value/len(vn_array)
bin_value_err = sqrt(bin_value/len(vn_array))
bin_value = bin_value/bin_width
bin_value_err = bin_value_err/bin_width
for i in range(nbin):
if abs(bin_center[i]) < 1e-15:
bin_center[i] = (bin_boundaries[i] + bin_boundaries[i+1])/2.
output.append(bin_center)
output.append(bin_value)
output.append(bin_value_err)
output = array(output)
return(output.transpose())
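# Editor's note: the returned array carries, for every harmonic order, three
# columns (bin centre, dP(vn)/dvn, statistical error), which is the layout the
# *_vn_distribution_*.dat writers further below rely on.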
def calcualte_event_plane_correlations(vn_array):
"""
    this function computes the scalar-product event-plane correlations
vn_array is a matrix [event_idx, vn_order]
"""
vn_array = array(vn_array)
nev = len(vn_array[:, 0])
# cos(4(Psi_2 - Psi_4))
v2_array = vn_array[:, 1]
v4_array = vn_array[:, 3]
v2_2 = mean(abs(v2_array)**2.)
v4_2 = mean(abs(v4_array)**2.)
v2_2_err = std(abs(v2_array)**2.)/sqrt(nev)
v4_2_err = std(abs(v4_array)**2.)/sqrt(nev)
corr_224_num = mean(real(v2_array**2.*conj(v4_array)))
corr_224_num_err = std(real(v2_array**2.*conj(v4_array)))/sqrt(nev)
corr_224_denorm = sqrt(v2_2**2.*v4_2)
corr_224_denorm_err = sqrt((v2_2_err/(v2_2**2.*sqrt(v4_2)))**2.
+ (v4_2_err/(2.*v2_2*v4_2**1.5))**2.)
corr_224 = corr_224_num/sqrt(v2_2*v2_2*v4_2)
corr_224_err = sqrt(
(corr_224_num_err/corr_224_denorm)**2.
+ (corr_224_denorm_err*corr_224_num)**2.)
# cos(6(Psi_2 - Psi_3))
v3_array = vn_array[:, 2]
v3_2 = mean(abs(v3_array)**2.)
v3_2_err = std(abs(v3_array)**2.)/sqrt(nev)
corr_22233_num = mean(real(v2_array**3.*conj(v3_array)**2.))
corr_22233_num_err = std(real(v2_array**3.*conj(v3_array)**2.))/sqrt(nev)
corr_22233_denorm = sqrt(v2_2**3.*v3_2**2.)
corr_22233_denorm_err = sqrt((3.*v2_2_err/(2.*v2_2**2.5*v3_2))**2.
+ (v3_2_err/(v2_2**1.5*v3_2**2.))**2.)
corr_22233 = corr_22233_num/corr_22233_denorm
corr_22233_err = sqrt(
(corr_22233_num_err/corr_22233_denorm)**2.
+ (corr_22233_denorm_err*corr_22233_num)**2.)
# cos(6(Psi_2 - Psi_6))
v6_array = vn_array[:, 5]
v6_2 = mean(abs(v6_array)**2.)
v6_2_err = std(abs(v6_array)**2.)/sqrt(nev)
corr_2226_num = mean(real(v2_array**3.*conj(v6_array)))
corr_2226_num_err = std(real(v2_array**3.*conj(v6_array)))/sqrt(nev)
corr_2226_denorm = sqrt(v2_2**3.*v6_2)
corr_2226_denorm_err = sqrt(
(3.*v2_2_err/(2.*v2_2**2.5*sqrt(v6_2)))**2.
+ (v6_2_err/(2.*v2_2**1.5*v6_2**1.5))**2.)
corr_2226 = corr_2226_num/corr_2226_denorm
corr_2226_err = sqrt(
(corr_2226_num_err/corr_2226_denorm)**2.
+ (corr_2226_num*corr_2226_denorm_err)**2.)
# cos(6(Psi_3 - Psi_6))
corr_336_num = mean(real(v3_array**2.*conj(v6_array)))
corr_336_num_err = std(real(v3_array**2.*conj(v6_array)))/sqrt(nev)
corr_336_denorm = sqrt(v3_2**2.*v6_2)
corr_336_denorm_err = sqrt(
(v3_2_err/(v3_2**2.*sqrt(v6_2)))**2.
+ (v6_2_err/(2.*v3_2*v6_2**1.5))**2.)
corr_336 = corr_336_num/corr_336_denorm
corr_336_err = sqrt(
(corr_336_num_err/corr_336_denorm)**2.
+ (corr_336_num*corr_336_denorm_err)**2.)
# cos(2Psi_2 + 3Psi_3 - 5Psi_5)
v5_array = vn_array[:, 4]
v5_2 = mean(abs(v5_array)**2.)
v5_2_err = std(abs(v5_array)**2.)/sqrt(nev)
corr_235_num = mean(real(v2_array*v3_array*conj(v5_array)))
corr_235_num_err = std(real(v2_array*v3_array*conj(v5_array)))/sqrt(nev)
corr_235_denorm = sqrt(v2_2*v3_2*v5_2)
corr_235_denorm_err = sqrt((v2_2_err/(2.*v2_2*sqrt(v3_2*v5_2)))**2.
+ (v3_2_err/(2.*v3_2*sqrt(v2_2*v5_2)))**2.
+ (v5_2_err/(2.*v5_2*sqrt(v2_2*v3_2)))**2.)
corr_235 = corr_235_num/corr_235_denorm
corr_235_err = sqrt(
(corr_235_num_err/corr_235_denorm)**2.
+ (corr_235_num*corr_235_denorm_err)**2.)
# cos(2Psi_2 + 4Psi_4 - 6Psi_6)
corr_246_num = mean(real(v2_array*v4_array*conj(v6_array)))
corr_246_num_err = std(real(v2_array*v4_array*conj(v6_array)))/sqrt(nev)
corr_246_denorm = sqrt(v2_2*v4_2*v6_2)
corr_246_denorm_err = sqrt((v2_2_err/(2.*v2_2*sqrt(v4_2*v6_2)))**2.
+ (v4_2_err/(2.*v4_2*sqrt(v2_2*v6_2)))**2.
+ (v6_2_err/(2.*v6_2*sqrt(v2_2*v4_2)))**2.)
corr_246 = corr_246_num/corr_246_denorm
corr_246_err = sqrt(
(corr_246_num_err/corr_246_denorm)**2.
+ (corr_246_num*corr_246_denorm_err)**2.)
# cos(2Psi_2 - 6Psi_3 + 4Psi_4)
corr_234_num = mean(real(v2_array*conj(v3_array)**2.*v4_array))
corr_234_num_err = std(real(v2_array*conj(v3_array)**2.*v4_array))/sqrt(nev)
corr_234_denorm = sqrt(v2_2*v3_2**2.*v4_2)
corr_234_denorm_err = sqrt(
(v2_2_err/(2.*v2_2**1.5*v3_2*sqrt(v4_2)))**2.
+ (v3_2_err/(sqrt(v2_2*v4_2)*v3_2**2.))**2.
+ (v4_2_err/(2.*sqrt(v2_2)*v3_2*v4_2**1.5))**2.)
corr_234 = corr_234_num/corr_234_denorm
corr_234_err = sqrt(
(corr_234_num_err/corr_234_denorm)**2.
+ (corr_234_num*corr_234_denorm_err)**2.)
results = [corr_224, corr_22233, corr_2226, corr_336,
corr_235, corr_246, corr_234]
results_err = [corr_224_err, corr_22233_err, corr_2226_err, corr_336_err,
corr_235_err, corr_246_err, corr_234_err]
return(results, results_err)
def calculate_vn_arrays_for_rn_ratios(data):
    # this function computes the complex pT-integrated Vn vector
# in different pT ranges for a single event
# it returns a 2d matrix vn_arrays[pT_idx, n_order_idx]
pT_boundaries = [0.3, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0]
npT = 50
vn_arrays = []
for ipT in range(len(pT_boundaries)-1):
pT_low = pT_boundaries[ipT]
pT_high = pT_boundaries[ipT + 1]
pT_mid = (pT_low + pT_high)/2.
vn_array = calcualte_inte_vn(pT_low, pT_high, data)
vn_array.insert(0, pT_mid)
vn_arrays.append(vn_array)
return(vn_arrays)
def calculate_rn_ratios(vn_event_arrays):
    # this function computes rn ratios in different pT bins
# according to the CMS measurements
# it reads in a 3d data cube
# vn_event_arrays[event_idx, pT_idx, n_order_idx]
# it returns rn_arrays[iorder, pT_idx, 3]
vn_event_arrays = array(vn_event_arrays)
rn_arrays = []
for iorder in range(2, 5):
# compute r2, r3, r4
rn_array = []
for itrig in range(3, len(vn_event_arrays[0, :, 0])):
pT_trig = real(vn_event_arrays[0, itrig, 0])
vn_trig_array = vn_event_arrays[:, itrig, iorder]
nev = len(vn_trig_array)
denorm2_array = abs(vn_trig_array)**2.
denorm2 = sqrt(mean(denorm2_array))
denorm2_err = std(denorm2_array)/sqrt(nev)/(2.*denorm2)
for iasso in range(0, itrig+1):
pT_asso = real(vn_event_arrays[0, iasso, 0])
vn_asso_array = vn_event_arrays[:, iasso, iorder]
num_array = real(vn_asso_array*conj(vn_trig_array))
num = mean(num_array)
num_err = std(num_array)/sqrt(nev)
denorm1_array = abs(vn_asso_array)**2.
denorm1 = sqrt(mean(denorm1_array))
denorm1_err = std(denorm1_array)/sqrt(nev)/(2.*denorm1)
rn_temp = num/(denorm1*denorm2)
rn_temp_err = sqrt(
(num_err/(denorm1*denorm2))**2.
+ (num*denorm1_err/((denorm1**2.)*denorm2))**2.
+ (num*denorm2_err/(denorm1*(denorm2**2.)))**2.)
rn_array.append([pT_trig - pT_asso, rn_temp, rn_temp_err])
rn_arrays.append(rn_array)
rn_arrays = array(rn_arrays)
return(rn_arrays)
file_folder_list = glob(path.join(working_folder, '*'))
nev = len(file_folder_list)
for ipart, particle_id in enumerate(particle_list):
print("processing %s ..." % particle_list[ipart])
# first particle yield dN/dy
file_name = '%s_integrated_vndata.dat' % particle_id
dN_dy = []
for ifolder in range(nev):
results_folder = path.abspath(file_folder_list[ifolder])
temp_data = loadtxt(path.join(results_folder, file_name))
dN_dy.append(temp_data[0, 1])
dN_dy = array(dN_dy)
dN_dy_avg = mean(dN_dy)
dN_dy_avg_err = std(dN_dy)/sqrt(nev)
# then <pT>, vn, dN/(2pi dy pT dpT), vn{SP}(pT)
file_name = '%s_vndata.dat' % particle_id
pT_array = []
dN_array = []
vn_phenix_array = []
vn_star_array = []
vn_alice_array = []
vn_cms_array = []
vn_cms_arrays_for_rn = []
vn_atlas_array = []
vn_diff_phenix_real = []; vn_diff_phenix_imag = [];
vn_diff_phenix_denorm = []
vn_diff_star_real = []; vn_diff_star_imag = []; vn_diff_star_denorm = []
vn_diff_alice_real = []; vn_diff_alice_imag = []; vn_diff_alice_denorm = []
vn_diff_2PC_real = []; vn_diff_2PC_imag = []; vn_diff_2PC_denorm = []
vn_diff_cms_real = []; vn_diff_cms_imag = []; vn_diff_cms_denorm = []
vn_diff_atlas_real = []; vn_diff_atlas_imag = []; vn_diff_atlas_denorm = []
for ifolder in range(nev):
results_folder = path.abspath(file_folder_list[ifolder])
temp_data = loadtxt(path.join(results_folder, file_name))
dN_event = temp_data[:, 2] # dN/(2pi dy pT dpT)
pT_event = temp_data[:, 0]
# record particle spectra
pT_array.append(pT_event)
dN_array.append(dN_event)
# pT-integrated vn
# vn with PHENIX pT cut
temp_vn_array = calcualte_inte_vn(0.2, 2.0, temp_data)
vn_phenix_array.append(temp_vn_array)
# vn with STAR pT cut
temp_vn_array = calcualte_inte_vn(0.15, 2.0, temp_data)
vn_star_array.append(temp_vn_array)
# vn with ALICE pT cut
temp_vn_array = calcualte_inte_vn(0.2, 3.0, temp_data)
vn_alice_array.append(temp_vn_array)
# vn with CMS pT cut
temp_vn_array = calcualte_inte_vn(0.3, 3.0, temp_data)
vn_cms_array.append(temp_vn_array)
if particle_id == "Charged_eta":
temp_vn_arrays = (
calculate_vn_arrays_for_rn_ratios(temp_data))
vn_cms_arrays_for_rn.append(temp_vn_arrays)
# vn with ATLAS pT cut
temp_vn_array = calcualte_inte_vn(0.5, 3.0, temp_data)
vn_atlas_array.append(temp_vn_array)
# pT-differential vn using scalar-product method
# vn{SP}(pT) with PHENIX pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff = (
calculate_diff_vn_single_event(0.15, 2.0, temp_data))
vn_diff_phenix_real.append(temp_vn_diff_real);
vn_diff_phenix_imag.append(temp_vn_diff_imag);
vn_diff_phenix_denorm.append(temp_dn_diff);
# vn{SP}(pT) with STAR pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff = (
calculate_diff_vn_single_event(0.15, 2.0, temp_data))
vn_diff_star_real.append(temp_vn_diff_real);
vn_diff_star_imag.append(temp_vn_diff_imag);
vn_diff_star_denorm.append(temp_dn_diff);
# vn{SP}(pT) with ALICE pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff = (
calculate_diff_vn_single_event(0.2, 3.0, temp_data))
vn_diff_alice_real.append(temp_vn_diff_real);
vn_diff_alice_imag.append(temp_vn_diff_imag);
vn_diff_alice_denorm.append(temp_dn_diff);
# vn{SP}(pT) with CMS pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff = (
calculate_diff_vn_single_event(0.3, 3.0, temp_data))
vn_diff_cms_real.append(temp_vn_diff_real);
vn_diff_cms_imag.append(temp_vn_diff_imag);
vn_diff_cms_denorm.append(temp_dn_diff);
# vn{SP}(pT) with ATLAS pT cut
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff = (
calculate_diff_vn_single_event(0.5, 3.0, temp_data))
vn_diff_atlas_real.append(temp_vn_diff_real);
vn_diff_atlas_imag.append(temp_vn_diff_imag);
vn_diff_atlas_denorm.append(temp_dn_diff);
# pT-differential vn using 2PC method
# vn[2](pT)
temp_vn_diff_real, temp_vn_diff_imag, temp_dn_diff = (
get_vn_diff_2PC_from_single_event(temp_data))
vn_diff_2PC_real.append(temp_vn_diff_real)
vn_diff_2PC_imag.append(temp_vn_diff_imag)
vn_diff_2PC_denorm.append(temp_dn_diff)
# now we perform event average
dN_array = array(dN_array)
pT_array = array(pT_array)
n_pT = len(pT_array[0, :])
pT_spectra = zeros([n_pT])
for ipT in range(len(pT_array[0, :])):
dN_temp = sum(dN_array[:, ipT]*pT_array[:, ipT])
if(dN_temp > 0):
pT_spectra[ipT] = (
sum(pT_array[:, ipT]**2.*dN_array[:, ipT])/dN_temp)
else:
pT_spectra[ipT] = mean(pT_array[:, ipT])
dN_spectra = mean(pT_array*dN_array, 0)/pT_spectra # dN/(2pi dy pT dpT)
dN_spectra_err = std(pT_array*dN_array, 0)/pT_spectra/sqrt(nev)
# calculate mean pT
pT_interp = linspace(0.05, 2.95, 30)
dN_interp = exp(interp(pT_interp, pT_spectra, log(dN_spectra+1e-30)))
dN_interp_err = interp(pT_interp, pT_spectra, dN_spectra_err)
mean_pT = sum(pT_interp**2.*dN_interp)/sum(pT_interp*dN_interp)
mean_pT_upper = (sum(pT_interp**2.*(dN_interp+dN_interp_err))
/sum(pT_interp*(dN_interp+dN_interp_err)))
mean_pT_lower = (sum(pT_interp**2.*(dN_interp-dN_interp_err))
/sum(pT_interp*(dN_interp-dN_interp_err)))
mean_pT_err = max(abs(mean_pT_upper - mean_pT),
abs(mean_pT - mean_pT_lower))
# calcualte vn{2}
vn_phenix_2, vn_phenix_2_err = calcualte_vn_2(vn_phenix_array)
vn_star_2, vn_star_2_err = calcualte_vn_2(vn_star_array)
vn_alice_2, vn_alice_2_err = calcualte_vn_2(vn_alice_array)
vn_cms_2, vn_cms_2_err = calcualte_vn_2(vn_cms_array)
vn_atlas_2, vn_atlas_2_err = calcualte_vn_2(vn_atlas_array)
if (particle_id == 'Charged_eta'):
vn_alice_array2 = array(vn_alice_array)
vn_cms_array2 = array(vn_cms_array)
vn_atlas_array2 = array(vn_atlas_array)
# calculate non-linear response coefficents with ALICE pT cut
nonlinear_response_alice = calculate_nonlinear_reponse(vn_alice_array2)
# calculate non-linear response coefficents with CMS pT cut
nonlinear_response_cms = calculate_nonlinear_reponse(vn_cms_array2)
# calculate non-linear response coefficents with ATLAS pT cut
nonlinear_response_atlas = calculate_nonlinear_reponse(vn_atlas_array2)
# calculate vn distribution for charged hadrons
vn_phenix_dis = calculate_vn_distribution(vn_phenix_array)
vn_star_dis = calculate_vn_distribution(vn_star_array)
vn_alice_dis = calculate_vn_distribution(vn_alice_array)
vn_cms_dis = calculate_vn_distribution(vn_cms_array)
vn_atlas_dis = calculate_vn_distribution(vn_atlas_array)
# calculate rn ratios
rn_cms = calculate_rn_ratios(vn_cms_arrays_for_rn)
# calculate flow event-plane correlation
vn_corr_alice, vn_corr_alice_err = (
calcualte_event_plane_correlations(vn_alice_array))
vn_corr_atlas, vn_corr_atlas_err = (
calcualte_event_plane_correlations(vn_atlas_array))
# calcualte vn{SP}(pT)
vn_diff_SP_phenix, vn_diff_SP_phenix_err = calculate_vn_diff_SP(
vn_diff_phenix_real, vn_diff_phenix_imag, vn_diff_phenix_denorm,
vn_phenix_2, vn_phenix_2_err)
vn_diff_SP_star, vn_diff_SP_star_err = calculate_vn_diff_SP(
vn_diff_star_real, vn_diff_star_imag, vn_diff_star_denorm,
vn_star_2, vn_star_2_err)
vn_diff_SP_alice, vn_diff_SP_alice_err = calculate_vn_diff_SP(
vn_diff_alice_real, vn_diff_alice_imag, vn_diff_alice_denorm,
vn_alice_2, vn_alice_2_err)
vn_diff_SP_cms, vn_diff_SP_cms_err = calculate_vn_diff_SP(
vn_diff_cms_real, vn_diff_cms_imag, vn_diff_cms_denorm,
vn_cms_2, vn_cms_2_err)
vn_diff_SP_atlas, vn_diff_SP_atlas_err = calculate_vn_diff_SP(
vn_diff_atlas_real, vn_diff_atlas_imag, vn_diff_atlas_denorm,
vn_atlas_2, vn_atlas_2_err)
# calcualte vn[2](pT)
vn_diff_2PC, vn_diff_2PC_err = calculate_vn_diff_2PC(
vn_diff_2PC_real, vn_diff_2PC_imag, vn_diff_2PC_denorm)
###########################################################################
# finally, output all the results
###########################################################################
if (particle_id =='Charged_eta'):
# output non-linear response coefficients chi_n for CMS pt cut
output_filename = ("non_linear_response_coefficients_CMS.dat")
f = open(output_filename, 'w')
f.write("# type value stat. err\n")
for i in range(len(nonlinear_reponse_correlator_name_list)):
f.write("%s %.10e %.10e\n"
% (nonlinear_reponse_correlator_name_list[i],
nonlinear_response_cms[2*i],
nonlinear_response_cms[2*i+1]))
f.close()
shutil.move(output_filename, avg_folder)
# output non-linear response coefficients chi_n for ALICE pt cut
output_filename = ("non_linear_response_coefficients_ALICE.dat")
f = open(output_filename, 'w')
f.write("# type value stat. err\n")
for i in range(len(nonlinear_reponse_correlator_name_list)):
f.write("%s %.10e %.10e\n"
% (nonlinear_reponse_correlator_name_list[i],
nonlinear_response_alice[2*i],
nonlinear_response_alice[2*i+1]))
f.close()
shutil.move(output_filename, avg_folder)
# output non-linear response coefficients chi_n for ATLAS pt cut
output_filename = ("non_linear_response_coefficients_ATLAS.dat")
f = open(output_filename, 'w')
f.write("# type value stat. err\n")
for i in range(len(nonlinear_reponse_correlator_name_list)):
f.write("%s %.10e %.10e\n"
% (nonlinear_reponse_correlator_name_list[i],
nonlinear_response_atlas[2*i],
nonlinear_response_atlas[2*i+1]))
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_integrated_observables.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("dN/dy= %.10e +/- %.10e\n" % (dN_dy_avg, dN_dy_avg_err))
f.write("<pT>= %.10e +/- %.10e\n" % (mean_pT, mean_pT_err))
for iorder in range(1, n_order):
f.write("v_%d{2}(phenix)= %.10e +/- %.10e\n"
% (iorder, vn_phenix_2[iorder-1], vn_phenix_2_err[iorder-1]))
f.write("v_%d{2}(STAR)= %.10e +/- %.10e\n"
% (iorder, vn_star_2[iorder-1], vn_star_2_err[iorder-1]))
f.write("v_%d{2}(ALICE)= %.10e +/- %.10e\n"
% (iorder, vn_alice_2[iorder-1], vn_alice_2_err[iorder-1]))
f.write("v_%d{2}(CMS)= %.10e +/- %.10e\n"
% (iorder, vn_cms_2[iorder-1], vn_cms_2_err[iorder-1]))
f.write("v_%d{2}(ATLAS)= %.10e +/- %.10e\n"
% (iorder, vn_atlas_2[iorder-1], vn_atlas_2_err[iorder-1]))
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_PHENIX.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_phenix[iorder-1, ipT],
vn_diff_SP_phenix_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_STAR.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_star[iorder-1, ipT],
vn_diff_SP_star_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_ALICE.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_alice[iorder-1, ipT],
vn_diff_SP_alice_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_2PC.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn[2] vn[2]_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_2PC[iorder-1, ipT],
vn_diff_2PC_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_CMS.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_cms[iorder-1, ipT],
vn_diff_SP_cms_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_differential_observables_ATLAS.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#pT dN/(2pi dy pT dpT) dN/(2pi dy pT dpT)_err "
"vn{SP} vn{SP}_err\n")
for ipT in range(len(pT_spectra)):
f.write("%.10e %.10e %.10e "
% (pT_spectra[ipT], dN_spectra[ipT], dN_spectra_err[ipT]))
for iorder in range(1, n_order):
f.write("%.10e %.10e " % (vn_diff_SP_atlas[iorder-1, ipT],
vn_diff_SP_atlas_err[iorder-1, ipT]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
if (particle_id == 'Charged_eta'):
output_filename = ("%s_vn_distribution_PHENIX.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_phenix_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_phenix_dis[ipT, 3*(iorder-1)],
vn_phenix_dis[ipT, 3*(iorder-1)+1],
vn_phenix_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_STAR.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_star_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_star_dis[ipT, 3*(iorder-1)],
vn_star_dis[ipT, 3*(iorder-1)+1],
vn_star_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_ALICE.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_alice_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_alice_dis[ipT, 3*(iorder-1)],
vn_alice_dis[ipT, 3*(iorder-1)+1],
vn_alice_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_CMS.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_cms_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_cms_dis[ipT, 3*(iorder-1)],
vn_cms_dis[ipT, 3*(iorder-1)+1],
vn_cms_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_vn_distribution_ATLAS.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#vn dP(vn)/dvn dP(vn)/dvn_err\n")
for ipT in range(len(vn_atlas_dis[:, 0])):
for iorder in range(1, n_order):
f.write("%.10e %.10e %.10e "
% (vn_atlas_dis[ipT, 3*(iorder-1)],
vn_atlas_dis[ipT, 3*(iorder-1)+1],
vn_atlas_dis[ipT, 3*(iorder-1)+2]))
f.write("\n")
f.close()
shutil.move(output_filename, avg_folder)
# output rn ratios
pT_trig = ['1.0', '1.5', '2.0', '2.5', '3.0']
ipTtrig = 0
output_filename = ("%s_rn_ratios_CMS_pTtrig_%s_%s.dat"
% (particle_list[ipart],
pT_trig[ipTtrig], pT_trig[ipTtrig+1]))
f = open(output_filename, 'w')
f.write("#pT_mid rn rn_err (n = 2, 3, 4)\n")
for ipT in range(len(rn_cms[0, :, 0])):
for iorder in range(len(rn_cms[:, 0, 0])):
f.write("%.5e %.5e %.5e "
% (rn_cms[iorder, ipT, 0],
rn_cms[iorder, ipT, 1],
rn_cms[iorder, ipT, 2]))
f.write("\n")
if rn_cms[0, ipT, 0] == 0.0:
f.close()
shutil.move(output_filename, avg_folder)
ipTtrig += 1
if ipTtrig < (len(pT_trig) - 1):
output_filename = ("%s_rn_ratios_CMS_pTtrig_%s_%s.dat"
% (particle_list[ipart],
pT_trig[ipTtrig],
pT_trig[ipTtrig+1]))
f = open(output_filename, 'w')
f.write("#pT_mid rn rn_err (n = 2, 3, 4)\n")
# output flow event-plane correlation
output_filename = ("%s_event_plane_correlation_ALICE.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#correlator value value_err\n")
f.write("4(24) %.5e %.5e\n"
% (vn_corr_alice[0], vn_corr_alice_err[0]))
f.write("6(23) %.5e %.5e\n"
% (vn_corr_alice[1], vn_corr_alice_err[1]))
f.write("6(26) %.5e %.5e\n"
% (vn_corr_alice[2], vn_corr_alice_err[2]))
f.write("6(36) %.5e %.5e\n"
% (vn_corr_alice[3], vn_corr_alice_err[3]))
f.write("(235) %.5e %.5e\n"
% (vn_corr_alice[4], vn_corr_alice_err[4]))
f.write("(246) %.5e %.5e\n"
% (vn_corr_alice[5], vn_corr_alice_err[5]))
f.write("(234) %.5e %.5e\n"
% (vn_corr_alice[6], vn_corr_alice_err[6]))
f.close()
shutil.move(output_filename, avg_folder)
output_filename = ("%s_event_plane_correlation_ATLAS.dat"
% particle_list[ipart])
f = open(output_filename, 'w')
f.write("#correlator value value_err\n")
f.write("4(24) %.5e %.5e\n"
% (vn_corr_atlas[0], vn_corr_atlas_err[0]))
f.write("6(23) %.5e %.5e\n"
% (vn_corr_atlas[1], vn_corr_atlas_err[1]))
f.write("6(26) %.5e %.5e\n"
% (vn_corr_atlas[2], vn_corr_atlas_err[2]))
f.write("6(36) %.5e %.5e\n"
% (vn_corr_atlas[3], vn_corr_atlas_err[3]))
f.write("(235) %.5e %.5e\n"
% (vn_corr_atlas[4], vn_corr_atlas_err[4]))
f.write("(246) %.5e %.5e\n"
% (vn_corr_atlas[5], vn_corr_atlas_err[5]))
f.write("(234) %.5e %.5e\n"
% (vn_corr_atlas[6], vn_corr_atlas_err[6]))
f.close()
shutil.move(output_filename, avg_folder)
print("Analysis is done.")
|
chunshen1987/HBTcorrelation_MCafterburner
|
ebe_scripts/average_event_spvn_pure_hydro.py
|
Python
|
mit
| 47,473
|
[
"Psi4"
] |
275a6dfb167736fa793ad39d0a948266b87aa37f36ea9825109e45b9a42daa67
|
import re
import numpy as np
def parse_file(self):
"""Extract important attributes from the Gaussian realtime logfile."""
filename = self.logfile
lines = [line.rstrip('\n') for line in open(filename)]
muX = []
muY = []
muZ = []
mX = []
mY = []
mZ = []
eX = []
eY = []
eZ = []
bX = []
bY = []
bZ = []
t = []
en = []
#FIXME: FOR H2+ RABI ONLY
HOMO= []
LUMO= []
for idx, line in enumerate(lines):
r = re.findall(r'5/.*/12',line)
if line[1:26] == 'External field Parameters':
self.envelope['Field'] = True
for jdx in range(1,15):
# control for newlines (length zero)
#print lines[idx+jdx].split()
if not len(lines[idx+jdx]):
continue
elif 'Envelope' in lines[idx+jdx].split()[0]:
self.envelope['Envelope'] = lines[idx+jdx].split()[2] # string
elif 'Gauge' in lines[idx+jdx].split()[0]:
self.envelope['Gauge'] = lines[idx+jdx].split()[2] # string
elif 'Ex' in lines[idx+jdx].split()[0]:
self.envelope['Ex'] = float(lines[idx+jdx].split()[2]) # au
elif 'Ey' in lines[idx+jdx].split()[0]:
self.envelope['Ey'] = float(lines[idx+jdx].split()[2]) # au
elif 'Ez' in lines[idx+jdx].split()[0]:
self.envelope['Ez'] = float(lines[idx+jdx].split()[2]) # au
elif 'Bx' in lines[idx+jdx].split()[0]:
self.envelope['Bx'] = float(lines[idx+jdx].split()[2]) # au
elif 'By' in lines[idx+jdx].split()[0]:
self.envelope['By'] = float(lines[idx+jdx].split()[2]) # au
elif 'Bz' in lines[idx+jdx].split()[0]:
self.envelope['Bz'] = float(lines[idx+jdx].split()[2]) # au
elif 'Frequency' in lines[idx+jdx].split()[0]:
self.envelope['Frequency'] = float(lines[idx+jdx].split()[2]) # au
elif 'Phase' in lines[idx+jdx].split()[0]:
self.envelope['Phase'] = float(lines[idx+jdx].split()[2]) # au
elif 't(on)' in lines[idx+jdx].split()[0]:
self.envelope['TOn'] = float(lines[idx+jdx].split()[2]) # au
elif 't(off)' in lines[idx+jdx].split()[0]:
# Exception to fix user setting Toff to obscenely large values
try:
self.envelope['TOff'] = float(lines[idx+jdx].split()[2]) # au
except ValueError:
self.envelope['TOff'] = 100000000.000 # au
elif 'Terms' in lines[idx+jdx].split()[0]:
self.envelope['Terms'] = lines[idx+jdx].split()[3:] # multistring
#break
elif line[1:27] == 'No external field applied.':
self.envelope['Field'] = False
elif r:
iops = r[0].split('/')[1:-1][0].split(',')
for iop in iops:
key = iop.split('=')[0]
val = iop.split('=')[1]
self.iops[key] = [val]
elif line[1:33] == ' Number of steps =':
self.total_steps = int(lines[idx].split()[4])
elif line[1:33] == ' Step size =':
self.step_size = float(lines[idx].split()[3])
elif line[1:33] == ' Orthonormalization method =':
self.orthonorm = lines[idx].split()[3]
elif line[1:27] == 'Alpha occupation numbers:':
#FIXME ONLY FOR H2+ RABI
HOMO.append(float(lines[idx+2].split()[1]))
try:
LUMO.append(float(lines[idx+2].split()[2]))
except IndexError:
LUMO.append(0.0)
elif line[1:7] == 'Time =':
time = line.split()
t.append(float(time[2]))
elif line[1:22] == 'Dipole Moment (Debye)':
dipole = lines[idx+1].split()
muX.append(float(dipole[1])*0.393456)
muY.append(float(dipole[3])*0.393456)
muZ.append(float(dipole[5])*0.393456)
elif line[1:31] == 'Magnetic Dipole Moment (a.u.):':
dipole = lines[idx+1].split()
mX.append(float(dipole[1]))
mY.append(float(dipole[3]))
mZ.append(float(dipole[5]))
elif line[1:9] == 'Energy =':
energy = line.split()
en.append(float(energy[2]))
elif line[1:38] == 'Current electromagnetic field (a.u.):':
efield = lines[idx+1].split()
bfield = lines[idx+2].split()
eX.append(float(efield[1]))
eY.append(float(efield[3]))
eZ.append(float(efield[5]))
bX.append(float(bfield[1]))
bY.append(float(bfield[3]))
bZ.append(float(bfield[5]))
elif line[1:27] == ' Restart MMUT every':
self.mmut_restart = line.split()[3]
# Save to object, if it exists
if(muX and muY and muZ):
self.electricDipole.x = np.asarray(muX)
self.electricDipole.y = np.asarray(muY)
self.electricDipole.z = np.asarray(muZ)
if(mX and mY and mZ):
self.magneticDipole.x = np.asarray(mX)
self.magneticDipole.y = np.asarray(mY)
self.magneticDipole.z = np.asarray(mZ)
if(eX and eY and eZ):
self.electricField.x = np.asarray(eX)
self.electricField.y = np.asarray(eY)
self.electricField.z = np.asarray(eZ)
if(bX and bY and bZ):
self.magneticField.x = np.asarray(bX)
self.magneticField.y = np.asarray(bY)
self.magneticField.z = np.asarray(bZ)
if(t):
self.time = np.asarray(t)
if(en):
self.energy = np.asarray(en)
#FIXME FOR H2+ RABI ONLY
if(HOMO):
self.HOMO = np.asarray(HOMO)
if(LUMO):
self.LUMO = np.asarray(LUMO)
def clean_data(self):
"""Make all the data arrays the same length, in case the log file
did not finish a full time step (e.g. you killed the job early or are
    monitoring a job in progress). Furthermore, delete redundant time steps
    corresponding to when MMUT restarts."""
def get_length(data):
"""Get length of array. If array is 'None', make it seem impossibly
large"""
if data.size:
return len(data)
else:
return 1e100
# if doMMUT == True, we will delete duplicate data from MMUT restart
doMMUT = False
lengths = []
for x in self.propertyarrays:
try:
# If it is an array, remove MMUT steps, and grab its length
#FIXME Not sure if MMUT steps are actually double printed in latest
if (doMMUT):
self.__dict__[x] = np.delete(self.__dict__[x],
list(range(int(self.mmut_restart)-1,
self.__dict__[x].shape[0],
int(self.mmut_restart))),
axis=0)
lengths.append(get_length(self.__dict__[x]))
except AttributeError:
try:
# Dipoles, fields, etc., are objects and we want their x/y/z
for q in ['_x','_y','_z']:
#FIXME Again, not sure about MMUT duplicates
if (doMMUT):
self.__dict__[x].__dict__[q] = \
np.delete(self.__dict__[x].__dict__[q],
list(range(int(self.mmut_restart)-1,
self.__dict__[x].__dict__[q].shape[0],
int(self.mmut_restart))),
axis=0)
lengths.append(get_length(self.__dict__[x].__dict__[q]))
except:
#print "Unknown data type: "+str(x)+str(q)
pass
self.min_length = min(lengths)
# truncate all the arrays so they are the same length
truncate(self,self.min_length)
def truncate(self,length):
""" Truncates the property arrays to a given *length* (integer) """
for x in self.propertyarrays:
try:
# If it is an array, truncate its length
self.__dict__[x] = self.__dict__[x][:length]
except TypeError:
try:
# Dipoles, fields, etc., are objects and we want their x/y/z
for q in ['_x','_y','_z']:
self.__dict__[x].__dict__[q] = \
self.__dict__[x].__dict__[q][:length]
except:
#print "Unknown data type: "+str(x)+str(q)
pass
def decode_iops(self):
for iop in self.iops:
# OLD
#if iop == '132':
# key = int(self.iops[iop][0])
# if key == 0:
# self.iops[iop].append('No Fock updates')
# else:
# self.iops[iop].append(str(key)+' Fock updates per nuclear step')
if iop == '134':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('0.05 au step size')
else:
self.iops[iop].append(str(key*0.00001)+' au step size')
elif iop == '133':
key = int(self.iops[iop][0])
if (key % 10) == 0:
self.iops[iop].append('First call to l512')
elif (key % 10) == 1:
self.iops[iop].append('First call to l512')
elif (key % 10) == 2:
self.iops[iop].append('Not first call to l512')
elif iop == '132':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Propagation for 15 steps')
else:
self.iops[iop].append('Propagation for '+str(abs(key))+' steps')
elif iop == '136':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Lowdin')
elif key == 1:
self.iops[iop].append('Lowdin')
elif key == 2:
self.iops[iop].append('Cholesky')
elif iop == '137':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('')
else:
self.iops[iop].append('')
elif iop == '138':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('No external field')
elif (key % 10) == 1:
self.iops[iop].append('Electric Dipole')
elif iop == '139':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('')
else:
self.iops[iop].append('')
elif iop == '140':
key = int(self.iops[iop][0])
self.iops[iop].append('Pop. analysis (N/A)')
elif iop == '141':
key = int(self.iops[iop][0])
if key%10 == 0:
self.iops[iop].append('No additional print')
elif key%10 == 1:
self.iops[iop].append('Print orbital occu. num')
elif (key % 10) == 2:
self.iops[iop].append('Print orbital energy + orbital occu. num')
elif (key % 100)/10 == 1:
self.iops[iop].append('Print electron density difference')
elif (key % 100)/100 == 1:
self.iops[iop].append('Debug print')
elif iop == '142':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Print every step')
else:
self.iops[iop].append('Print every '+str(key)+' steps')
elif iop == '143':
key = int(self.iops[iop][0])
if key <= 0:
self.iops[iop].append('Do not restart MMUT')
else:
self.iops[iop].append('Restart MMUT every '+str(key)+' steps')
elif iop == '144':
key = int(self.iops[iop][0])
if key == 0:
self.iops[iop].append('Print HOMO-6 to LUMO+10')
elif key == -1:
self.iops[iop].append('Print all orbitals')
else:
self.iops[iop].append('Print HOMO-6*N to LUMO+6*N+4')
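
# --------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# The functions above are written to be bound to an object that already provides a
# `logfile` path, empty `envelope`/`iops` dicts and attribute containers for the dipole
# and field components.  The minimal holder and the logfile name below are assumptions.
if __name__ == '__main__':
    import types
    demo = types.SimpleNamespace(
        logfile='h2plus_rabi.log',                 # hypothetical Gaussian realtime log
        envelope={}, iops={},
        electricDipole=types.SimpleNamespace(),
        magneticDipole=types.SimpleNamespace(),
        electricField=types.SimpleNamespace(),
        magneticField=types.SimpleNamespace())
    parse_file(demo)     # fills time, energy, dipole and field arrays from the log
    decode_iops(demo)    # adds human-readable descriptions to any parsed IOp values
    print(demo.envelope)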
|
wavefunction91/gaussian_realtime_parse
|
parse_file.py
|
Python
|
mit
| 12,348
|
[
"Gaussian"
] |
5299bcc66296613fbe81803096d445f40dbcc0ab349ac7695080bb6a40c6ad8e
|
"""
Demonstration application for range search using R tree.
Left mouse press starts adding a rectangle; left mouse release completes the add.
Right mouse press (or drag) selects a rectangular region for a range query.
"""
import tkinter
from adk.R import RTree
from adk.region import Region, minValue, maxValue, X, Y
RectangleSize = 4
class RTreeApp:
def __init__(self):
"""App for creating R tree dynamically and executing range queries"""
self.tree = RTree(m=2, M=4)
self.ready = False
# for range query
self.selectedRegion = None
self.newRegionStart = None
self.newRegion = None
# for identifiers
self.counter = 0
self.master = tkinter.Tk()
self.master.title('R Tree Range Query Application')
self.w = tkinter.Frame(self.master, width=512, height=512)
self.canvas = tkinter.Canvas(self.w, width=512, height=512)
self.paint()
self.canvas.bind("<Button-1>", self.startAdd)
self.canvas.bind("<B1-Motion>", self.extendAdd) # only when right mouse dragged
self.canvas.bind("<ButtonRelease-1>", self.endAdd)
self.canvas.bind("<Button-3>", self.range) # when right mouse clicked
self.canvas.bind("<ButtonRelease-3>", self.clear)
self.canvas.bind("<B3-Motion>", self.range) # only when right mouse dragged
self.w.pack()
def startAdd(self, event):
"""End of range search."""
x = event.x
y = self.toCartesian(event.y)
self.newRegionStart = (x,y)
def extendAdd(self, event):
"""End of range search."""
if self.newRegionStart:
x = event.x
y = self.toCartesian(event.y)
self.newRegion = Region (x,y,x,y).unionPoint(self.newRegionStart)
self.paint()
def endAdd(self, event):
"""End of range search."""
if self.newRegionStart:
self.newRegionStart = None
self.counter += 1
if self.newRegion:
self.tree.add(self.newRegion, str(self.counter))
self.newRegion = None
self.paint()
def toCartesian(self, y):
"""Convert tkinter point into Cartesian."""
return self.w.winfo_height() - y
def toTk(self,y):
"""Convert Cartesian into tkinter point."""
if y == maxValue: return 0
tk_y = self.w.winfo_height()
if y != minValue:
tk_y -= y
return tk_y
def clear(self, event):
"""End of range search."""
self.selectedRegion = None
self.paint()
def range(self, event):
"""Initiate a range search using a selected rectangular region."""
p = (event.x, self.toCartesian(event.y))
if self.selectedRegion is None:
self.selectedStart = Region(p[X],p[Y], p[X],p[Y])
self.selectedRegion = self.selectedStart.unionPoint(p)
self.paint()
# return (node,0,True) where status is True if draining entire tree rooted at node. If False,
# then (rect,id,False). Draw these
# as shaded red rectangle to identify whole sub-tree is selected.
for triple in self.tree.range(self.selectedRegion):
if triple[2]:
r = triple[0].region
self.canvas.create_rectangle(r.x_min, self.toTk(r.y_min), r.x_max, self.toTk(r.y_max),
fill='Red', stipple='gray12')
else:
r = triple[0]
self.canvas.create_rectangle(r.x_min, self.toTk(r.y_min), r.x_max, self.toTk(r.y_max),
fill='Red')
def click(self, event):
"""Add point to KDtree."""
p = (event.x, self.toCartesian(event.y))
self.tree.add(p)
self.paint()
def visit (self, n):
""" Visit node to paint properly."""
        if n is None: return
if n.level == 0:
for idx in range(n.count):
r = n.children[idx].region
self.canvas.create_rectangle(r.x_min, self.toTk(r.y_min), r.x_max, self.toTk(r.y_max),
fill='Gray')
for idx in range(n.count):
self.visit(n.children[idx])
# Do after all children so we can see interior nodes, too.
r = n.region
self.canvas.create_rectangle(r.x_min, self.toTk(r.y_min), r.x_max, self.toTk(r.y_max),
outline='Black', dash=(2, 4))
color = 'Gray'
if n.level == 0:
color='Black'
self.canvas.create_text(r.x_max - 16*len(n.id), self.toTk(r.y_max) + 16, anchor = tkinter.W,
font = "Times 16 bold", fill=color, text=n.id)
def prepare(self, event):
"""prepare to add points."""
if self.label:
self.label.destroy()
self.label = None
self.canvas.pack()
def paint(self):
"""Paint R tree by visiting all nodes, or show introductory message."""
if self.ready:
self.canvas.delete(tkinter.ALL)
self.visit(self.tree.root)
if self.newRegion:
self.canvas.create_rectangle(self.newRegion.x_min, self.toTk(self.newRegion.y_min),
self.newRegion.x_max, self.toTk(self.newRegion.y_max),
outline='Black', dash=(2, 4))
if self.selectedRegion:
self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min),
self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max),
outline='Red', dash=(2, 4))
else:
self.label = tkinter.Label(self.w, width=100, height = 40, text="Click To Add Points")
self.label.bind("<Button-1>", self.prepare)
self.label.pack()
self.ready = True
if __name__ == "__main__":
app = RTreeApp()
app.w.mainloop()
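
# Hedged usage sketch (illustration only): the GUI above drives the tree through mouse
# events, but the same adk.R interface can be exercised directly, assuming add(region, id)
# and range(region) behave as used in RTreeApp.range() (triples of node-or-rectangle, id,
# and a flag marking a fully contained subtree).
#
#     tree = RTree(m=2, M=4)
#     tree.add(Region(10, 10, 40, 30), 'a')
#     tree.add(Region(50, 60, 90, 80), 'b')
#     for triple in tree.range(Region(0, 0, 100, 100)):
#         print(triple)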
|
heineman/algorithms-nutshell-2ed
|
PythonCode/demo/app_R_range.py
|
Python
|
mit
| 6,569
|
[
"VisIt"
] |
d3f8380afcefb77bf1a8aca41a191fde647d0945465ee7ce108462986368ac56
|
"""
PDBProp
=======
"""
import gzip
import json
import logging
import os.path as op
import mmtf
import os
from cobra.core import DictList
import pandas as pd
import requests
import deprecation
from Bio.PDB import PDBList
from lxml import etree
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import ssbio.databases.pisa as pisa
import ssbio.utils
from ssbio.protein.structure.structprop import StructProp
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
log = logging.getLogger(__name__)
class PDBProp(StructProp):
"""Store information about a protein structure from the Protein Data Bank.
Extends the :class:`~ssbio.protein.structure.structprop.StructProp` class to allow initialization of the structure
by its PDB ID, and then enabling downloads of the structure file as well as parsing its metadata.
Args:
        ident (str): PDB ID
        description (str): Optional description of this structure
        chains (str): Chain ID(s) to keep track of
        mapped_chains (str): Chain ID(s) that map to a protein sequence of interest
        structure_path (str): Path to the structure file, if it is already available locally
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
"""
def __init__(self, ident, description=None, chains=None, mapped_chains=None, structure_path=None, file_type=None):
StructProp.__init__(self, ident, description=description, chains=chains, mapped_chains=mapped_chains,
is_experimental=True, structure_path=structure_path, file_type=file_type)
self.experimental_method = None
self.resolution = None
self.date = None
self.taxonomy_name = None
self.biological_assemblies = DictList()
"""DictList: A list for storing Bioassembly objects related to this PDB ID"""
def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False):
"""Download a structure file from the PDB, specifying an output directory and a file type. Optionally download
the mmCIF header file and parse data from it to store within this object.
Args:
outdir (str): Path to output directory
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files
force_rerun (bool): If structure file should be downloaded even if it already exists
"""
ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type',
custom_error_text='Please set file type to be downloaded from the PDB: '
'pdb, mmCif, xml, or mmtf')
# XTODO: check if outfile exists using ssbio.utils.force_rerun, pdblist seems to take long if it exists
# I know why - it's because we're renaming the ent to pdb. need to have mapping from file type to final extension
# Then check if file exists, if not then download again
p = PDBList()
with ssbio.utils.suppress_stdout():
structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun)
if not op.exists(structure_file):
log.debug('{}: {} file not available'.format(self.id, file_type))
raise URLError('{}.{}: file not available to download'.format(self.id, file_type))
else:
log.debug('{}: {} file saved'.format(self.id, file_type))
# Rename .ent files to .pdb
if file_type == 'pdb':
new_name = structure_file.replace('pdb', '').replace('ent', 'pdb')
os.rename(structure_file, new_name)
structure_file = new_name
self.load_structure_path(structure_file, file_type)
if load_header_metadata and file_type == 'mmtf':
self.update(parse_mmtf_header(structure_file))
if load_header_metadata and file_type != 'mmtf':
self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun)))
def get_pisa_complex_predictions(self, outdir, existing_pisa_multimer_xml=None):
if not existing_pisa_multimer_xml:
pisa_xmls = pisa.download_pisa_multimers_xml(pdb_ids=self.id, outdir=outdir,
save_single_xml_files=True)
else:
pisa_xmls = {}
pisa_xmls[self.id] = existing_pisa_multimer_xml
pisa_dict = pisa.parse_pisa_multimers_xml(pisa_xmls[self.id], download_structures=True,
outdir=outdir)
def __json_encode__(self):
# TODO: investigate why saving with # does not work!
to_return = {}
for x in self.__dict__.keys():
            if x == 'pdb_title' or x == 'description':
                sanitized = ssbio.utils.force_string(getattr(self, x)).replace('#', '-')
                to_return.update({x: sanitized})
            else:
                to_return.update({x: getattr(self, x)})
return to_return
def parse_mmtf_header(infile):
"""Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed.
"""
infodict = {}
mmtf_decoder = mmtf.parse(infile)
infodict['date'] = mmtf_decoder.deposition_date
infodict['release_date'] = mmtf_decoder.release_date
try:
infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
except AttributeError:
infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
infodict['resolution'] = mmtf_decoder.resolution
infodict['description'] = mmtf_decoder.title
group_name_exclude = ['HOH']
chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
    chemicals = list(set([mmtf_decoder.group_list[idx]['groupName']
                          for idx in mmtf_decoder.group_type_list
                          if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude
                          and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))
infodict['chemicals'] = chemicals
return infodict
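# Hedged usage sketch (illustrative; the file path is an assumption):
#
#     header = parse_mmtf_header('/tmp/1kf6.mmtf')
#     print(header['experimental_method'], header['resolution'], header['chemicals'])
#
# Note that mmtf.parse decodes the whole structure, so for metadata alone the header-only
# mmCIF route below (download_mmcif_header + parse_mmcif_header) can be the cheaper option.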
def download_mmcif_header(pdb_id, outdir='', force_rerun=False):
"""Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = 'cif'
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
log.debug('{}: saved header file'.format(outfile))
else:
log.debug('{}: header file already saved'.format(outfile))
return outfile
def parse_mmcif_header(infile):
"""Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header
"""
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
newdict = {}
try:
mmdict = MMCIF2Dict(infile)
except ValueError as e:
log.exception(e)
return newdict
chemical_ids_exclude = ['HOH']
chemical_types_exclude = ['l-peptide linking','peptide linking']
if '_struct.title' in mmdict:
newdict['pdb_title'] = mmdict['_struct.title']
else:
log.debug('{}: No title field'.format(infile))
if '_struct.pdbx_descriptor' in mmdict:
newdict['description'] = mmdict['_struct.pdbx_descriptor']
else:
log.debug('{}: no description field'.format(infile))
if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
elif '_database_PDB_rev.date' in mmdict:
newdict['date'] = mmdict['_database_PDB_rev.date']
else:
log.debug('{}: no date field'.format(infile))
if '_exptl.method' in mmdict:
newdict['experimental_method'] = mmdict['_exptl.method']
else:
log.debug('{}: no experimental method field'.format(infile))
# TODO: refactor how to get resolutions based on experimental method
if '_refine.ls_d_res_high' in mmdict:
try:
if isinstance(mmdict['_refine.ls_d_res_high'], list):
newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
else:
newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
except:
try:
newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
except:
log.debug('{}: no resolution field'.format(infile))
else:
log.debug('{}: no resolution field'.format(infile))
if '_chem_comp.id' in mmdict:
chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],
ssbio.utils.not_find(mmdict['_chem_comp.type'],
chemical_types_exclude,
case_sensitive=False))
        chemicals_filtered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
        newdict['chemicals'] = chemicals_filtered
else:
log.debug('{}: no chemical composition field'.format(infile))
if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
else:
log.debug('{}: no organism field'.format(infile))
return newdict
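# Hedged usage sketch (illustrative; the PDB ID and output directory are assumptions):
#
#     header_file = download_mmcif_header('1kf6', outdir='/tmp')
#     metadata = parse_mmcif_header(header_file)
#
# `metadata` may contain pdb_title, description, date, experimental_method, resolution,
# chemicals and taxonomy_name; each key is present only if the mmCIF header provides it.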
def download_sifts_xml(pdb_id, outdir='', force_rerun=False):
"""Download the SIFTS file for a PDB ID.
Args:
pdb_id (str): PDB ID
outdir (str): Output directory, current working directory if not specified.
force_rerun (bool): If the file should be downloaded again even if it exists
Returns:
str: Path to downloaded file
"""
baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
filename = '{}.xml.gz'.format(pdb_id.lower())
outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = urlopen(baseURL + filename)
with open(outfile, 'wb') as f:
f.write(gzip.decompress(response.read()))
return outfile
def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file):
"""Map a UniProt residue number to its corresponding PDB residue number.
This function requires that the SIFTS file be downloaded,
and also a chain ID (as different chains may have different mappings).
Args:
uniprot_resnum (int): integer of the residue number you'd like to map
chain_id (str): string of the PDB chain to map to
sifts_file (str): Path to the SIFTS XML file
Returns:
(tuple): tuple containing:
mapped_resnum (int): Mapped residue number
is_observed (bool): Indicates if the 3D structure actually shows the residue
"""
# Load the xml with lxml
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(sifts_file, parser)
root = tree.getroot()
my_pdb_resnum = None
# TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that
my_pdb_annotation = False
# Find the right chain (entities in the xml doc)
ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity'
for chain in root.findall(ent):
# TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order!
if chain.attrib['entityId'] == chain_id:
# Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here"
# Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum"
# Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue)
ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum
my_uniprot_residue = chain.findall(ures)
if len(my_uniprot_residue) == 1:
# Get crossRefDb dbSource="PDB"
parent = my_uniprot_residue[0].getparent()
pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]'
my_pdb_residue = parent.findall(pres)
my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum'])
# Get <residueDetail dbSource="PDBe" property="Annotation">
# Will be Not_Observed if it is not seen in the PDB
anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]'
my_pdb_annotation = parent.findall(anno)
if len(my_pdb_annotation) == 1:
my_pdb_annotation = my_pdb_annotation[0].text
if my_pdb_annotation == 'Not_Observed':
my_pdb_annotation = False
else:
my_pdb_annotation = True
else:
return None, False
return my_pdb_resnum, my_pdb_annotation
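# Hedged usage sketch (illustrative; the residue number, chain ID and paths are assumptions):
#
#     sifts_xml = download_sifts_xml('1kf6', outdir='/tmp')
#     pdb_resnum, observed = map_uniprot_resnum_to_pdb(100, 'A', sifts_xml)
#
# `observed` is False when the SIFTS annotation is Not_Observed (the residue is mapped but
# not resolved in the deposited coordinates); (None, False) means no mapping was found.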
def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False):
"""Use the PDBe REST service to query for the best PDB structures for a UniProt ID.
More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution.
Here is the ranking algorithm described by the PDB paper:
https://nar.oxfordjournals.org/content/44/D1/D385.full
"Finally, a single quality indicator is also calculated for each entry by taking the harmonic average
of all the percentile scores representing model and model-data-fit quality measures and then subtracting
10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays
a role in characterising the quality of a structure. This single empirical 'quality measure' value is used
by the PDBe query system to sort results and identify the 'best' structure in a given context. At present,
entries determined by methods other than X-ray crystallography do not have similar data quality information
available and are not considered as 'best structures'."
Args:
uniprot_id (str): UniProt Accession ID
outname (str): Basename of the output file of JSON results
outdir (str): Path to output directory of JSON results
seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form)
force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results
Returns:
list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are:
* pdb_id: the PDB ID which maps to the UniProt ID
* chain_id: the specific chain of the PDB which maps to the UniProt ID
* coverage: the percent coverage of the entire UniProt sequence
* resolution: the resolution of the structure
* start: the structure residue number which maps to the start of the mapped sequence
* end: the structure residue number which maps to the end of the mapped sequence
* unp_start: the sequence residue number which maps to the structure start
* unp_end: the sequence residue number which maps to the structure end
* experimental_method: type of experiment used to determine structure
* tax_id: taxonomic ID of the protein's original organism
"""
outfile = ''
if not outdir:
outdir = ''
# if output dir is specified but not outname, use the uniprot
if not outname and outdir:
outname = uniprot_id
if outname:
outname = op.join(outdir, outname)
outfile = '{}.json'.format(outname)
# Load a possibly existing json file
if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(outfile, 'r') as f:
raw_data = json.load(f)
log.debug('{}: loaded existing json file'.format(uniprot_id))
# Otherwise run the web request
else:
# TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored
response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id),
data={'key': 'value'})
if response.status_code == 404:
log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id))
raw_data = {uniprot_id: {}}
else:
log.debug('{}: Obtained best structures'.format(uniprot_id))
raw_data = response.json()
# Write the json file if specified
if outfile:
with open(outfile, 'w') as f:
json.dump(raw_data, f)
log.debug('{}: Saved json file of best structures'.format(uniprot_id))
data = dict(raw_data)[uniprot_id]
# Filter for sequence identity percentage
if seq_ident_cutoff != 0:
        # build a new list instead of calling remove() while iterating over `data`
        data = [result for result in data if result['coverage'] >= seq_ident_cutoff]
return data
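# Hedged usage sketch (illustrative; the UniProt accession and cutoff are arbitrary examples):
#
#     ranked = best_structures('P00363', seq_ident_cutoff=0.5, outdir='/tmp')
#     for entry in ranked:
#         print(entry['pdb_id'], entry['chain_id'], entry['coverage'], entry['resolution'])
#
# Despite its name, `seq_ident_cutoff` is compared against the 'coverage' field returned by
# the PDBe service, so it filters on sequence coverage expressed as a decimal fraction.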
def blast_pdb(seq, outfile='', outdir='', evalue=0.0001, seq_ident_cutoff=0.0, link=False, force_rerun=False):
"""Returns a list of BLAST hits of a sequence to available structures in the PDB.
Args:
seq (str): Your sequence, in string format
outfile (str): Name of output file
outdir (str, optional): Path to output directory. Default is the current directory.
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default).
seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
link (bool, optional): Set to True if a link to the HTML results should be displayed
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False
Returns:
list: Rank ordered list of BLAST hits in dictionaries.
"""
if len(seq) < 12:
raise ValueError('Sequence must be at least 12 residues long.')
if link:
page = 'PDB results page: http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=HTML'.format(seq, evalue)
print(page)
parser = etree.XMLParser(ns_clean=True)
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(force_rerun, outfile):
        # Run the BLAST query against the PDB REST service (re-run if force_rerun=True)
page = 'http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=XML'.format(
seq, evalue)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if outfile:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded BLAST results from REST server')
else:
            log.error('BLAST request failed (status code {})'.format(req.status_code))
return []
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing BLAST XML results'.format(outfile))
# Get length of original sequence to calculate percentages
len_orig = float(len(seq))
root = tree.getroot()
hit_list = []
for hit in root.findall('BlastOutput_iterations/Iteration/Iteration_hits/Hit'):
info = {}
hitdef = hit.find('Hit_def')
if hitdef is not None:
info['hit_pdb'] = hitdef.text.split('|')[0].split(':')[0].lower()
info['hit_pdb_chains'] = hitdef.text.split('|')[0].split(':')[2].split(',')
# One PDB can align to different parts of the sequence
# Will just choose the top hit for this single PDB
hsp = hit.findall('Hit_hsps/Hsp')[0]
# Number of identical residues
hspi = hsp.find('Hsp_identity')
if hspi is not None:
info['hit_num_ident'] = int(hspi.text)
info['hit_percent_ident'] = int(hspi.text)/len_orig
if int(hspi.text)/len_orig < seq_ident_cutoff:
log.debug('{}: does not meet sequence identity cutoff'.format(hitdef.text.split('|')[0].split(':')[0]))
continue
# Number of similar residues (positive hits)
hspp = hsp.find('Hsp_positive')
if hspp is not None:
info['hit_num_similar'] = int(hspp.text)
info['hit_percent_similar'] = int(hspp.text) / len_orig
# Total number of gaps (unable to align in either query or subject)
hspg = hsp.find('Hsp_gaps')
if hspg is not None:
info['hit_num_gaps'] = int(hspg.text)
info['hit_percent_gaps'] = int(hspg.text) / len_orig
# E-value of BLAST
hspe = hsp.find('Hsp_evalue')
if hspe is not None:
info['hit_evalue'] = float(hspe.text)
# Score of BLAST
hsps = hsp.find('Hsp_score')
if hsps is not None:
info['hit_score'] = float(hsps.text)
hit_list.append(info)
log.debug("{}: Number of BLAST hits".format(len(hit_list)))
return hit_list
def blast_pdb_df(blast_results):
"""Make a dataframe of BLAST results"""
cols = ['hit_pdb', 'hit_pdb_chains', 'hit_evalue', 'hit_score', 'hit_num_ident', 'hit_percent_ident',
'hit_num_similar', 'hit_percent_similar', 'hit_num_gaps', 'hit_percent_gaps']
return pd.DataFrame.from_records(blast_results, columns=cols)
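# Hedged usage sketch (illustrative; the query sequence is a made-up placeholder):
#
#     hits = blast_pdb('MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ', evalue=0.001, seq_ident_cutoff=0.5)
#     df = blast_pdb_df(hits)   # rank-ordered hits as a pandas DataFrame
#
# Each hit carries hit_pdb and hit_pdb_chains plus the E-value, score and the identity,
# similarity and gap statistics, both as counts and as fractions of the query length.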
def _property_table():
"""Download the PDB -> resolution table directly from the RCSB PDB REST service.
See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do
Returns:
Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns
"""
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'
r = requests.get(url)
p = pd.read_csv(StringIO(r.text)).set_index('structureId')
return p
def get_resolution(pdb_id):
"""Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
        resolution = _property_table().loc[pdb_id, 'resolution']
        if pd.isnull(resolution):
            log.debug('{}: no resolution available, probably not an X-ray crystal structure'.format(pdb_id))
resolution = float('inf')
return resolution
def get_release_date(pdb_id):
"""Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
        str: Release date of a PDB ID
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
        release_date = _property_table().loc[pdb_id, 'releaseDate']
        if pd.isnull(release_date):
            log.debug('{}: no release date available'.format(pdb_id))
release_date = None
return release_date
def get_num_bioassemblies(pdb_id, cache=False, outdir=None, force_rerun=False):
"""Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies
available.
See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies'
Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to
recreate a biological assembly from the asymmetric unit can be accessed from the following requests.
- Number of biological assemblies associated with a PDB entry
- Access the transformation information needed to generate a biological assembly (nr=0 will return information
for the asymmetric unit, nr=1 will return information for the first assembly, etc.)
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this::
<nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/>
Args:
pdb_id (str): PDB ID
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
if not outdir:
outdir = os.getcwd()
outfile = op.join(outdir, '{}_nrbiomols.xml'.format(pdb_id))
if ssbio.utils.force_rerun(force_rerun, outfile):
page = 'https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId={}'.format(pdb_id)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if cache:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded bioassembly information from REST server')
else:
log.error('Request timed out')
req.raise_for_status()
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing XML results'.format(outfile))
r = tree.getroot()
has_biomols = r.get('hasAssemblies')
if has_biomols == 'true':
has_biomols = True
else:
has_biomols = False
if has_biomols:
num_biomols = r.get('count')
else:
num_biomols = 0
num_biomols = int(num_biomols)
return num_biomols
def get_bioassembly_info(pdb_id, biomol_num, cache=False, outdir=None, force_rerun=False):
"""Get metadata about a bioassembly from the RCSB PDB's REST API.
See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1
The API returns an XML file containing the information on a biological assembly that looks like this::
<bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly">
<transformations operator="1" chainIds="A,B,C,D">
<transformation index="1">
<matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/>
<shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/>
</transformation>
</transformations>
</bioassembly>
Args:
pdb_id (str): PDB ID
biomol_num (int): Biological assembly number you are interested in
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
#
# if not outdir:
# outdir = os.getcwd()
# outfile = op.join(outdir, '{}.xml'.format(self.id))
#
# if ssbio.utils.force_rerun(force_rerun, outfile):
# page = 'https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId={}&nr={}'.format(
# self.original_pdb_id, biomol_num)
# req = requests.get(page)
#
# if req.status_code == 200:
# response = req.text
#
# # Save the XML file
# if cache:
# with open(outfile, 'w') as f:
# f.write(response)
#
# # Parse the XML string
# r = xmltodict.parse(response)
# log.debug('Loaded bioassembly information from REST server')
# else:
# log.error('Request timed out')
# req.raise_for_status()
# else:
# with open(outfile, 'r') as f:
# r = xmltodict.parse(f.read())
# log.debug('{}: Loaded existing XML results'.format(outfile))
#
# self.biomol_to_chain_dict[biomol_num] = {'chains': r['bioassembly']['transformations']['@chainIds'],
# 'multiplier': len(r['bioassembly']['transformations']['transformation'])}
# # TODO: figure out how to store matrices etc.
#
# log.info('{}_{}: ')
def download_biomol(pdb_id, biomol_num, outdir, file_type='pdb', force_rerun=False):
import zlib
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import contextlib
ssbio.utils.make_dir(outdir)
server_folder = pdb_id[1:3]
if file_type == 'pdb':
# server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/divided/{}/'.format(server_folder)
server = 'https://files.rcsb.org/download/'
server_filename = pdb_id + '.pdb%i.gz' % biomol_num
local_filename = pdb_id + '_bio%i.pdb' % biomol_num
outfile = op.join(outdir, local_filename)
elif file_type.lower() == 'mmcif' or file_type.lower() == 'cif':
server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/mmCIF/divided/{}/'.format(server_folder)
server_filename = pdb_id + '-assembly%i.cif.gz' % biomol_num
local_filename = pdb_id + '_bio%i.cif' % biomol_num
outfile = op.join(outdir, local_filename)
else:
raise ValueError('Biological assembly only available in PDB or mmCIF file types.')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = op.join(server, server_filename)
try:
with contextlib.closing(urlopen(download_link)) as f:
decompressed_data = zlib.decompress(f.read(), 16 + zlib.MAX_WBITS)
with open(op.join(outdir, local_filename), 'wb') as f:
f.write(decompressed_data)
except URLError as e:
print(e)
return None
return outfile
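# Hedged usage sketch (illustrative; the PDB ID, assembly number and directory are assumptions):
#
#     n_assemblies = get_num_bioassemblies('1hv4')
#     if n_assemblies:
#         biounit = download_biomol('1hv4', biomol_num=1, outdir='/tmp', file_type='pdb')
#
# download_biomol returns the path to the decompressed biological assembly file, or None if
# the download failed (the URLError is printed rather than raised).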
########################################################################################################################
########################################################################################################################
# DEPRECATED FUNCTIONS
########################################################################################################################
########################################################################################################################
@deprecation.deprecated(deprecated_in="1.0", removed_in="2.0",
details="Use Biopython's PDBList.retrieve_pdb_file function instead")
def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
"""Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
    # TODO: keep an eye on https://github.com/biopython/biopython/pull/943 for functionality of this
    # method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = file_type.lower()
file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
if file_type not in file_types:
raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
if file_type == 'mmtf':
file_type = 'mmtf.gz'
if file_type.endswith('.gz'):
gzipped = True
else:
gzipped = False
if file_type == 'mmcif':
file_type = 'cif'
if only_header:
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
else:
folder = 'download'
outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if file_type == 'mmtf.gz' or file_type == 'mmtf':
mmtf_api = '1.0'
download_link = 'http://mmtf.rcsb.org/v{}/full/{}.mmtf.gz'.format(mmtf_api, pdb_id)
else:
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
if gzipped:
outfile = ssbio.utils.gunzip_file(infile=outfile,
outfile=outfile.strip('.gz'),
outdir=outdir,
delete_original=False,
force_rerun_flag=force_rerun)
log.debug('{}: saved structure file'.format(outfile))
else:
if file_type == 'mmtf.gz':
outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
log.debug('{}: structure file already saved'.format(outfile))
return outfile
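
# ------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the identifiers and output directory are
# arbitrary examples, and network access to the RCSB/PDBe services is required).
if __name__ == '__main__':
    demo_dir = '/tmp/ssbio_pdb_demo'
    ssbio.utils.make_dir(demo_dir)
    my_structure = PDBProp(ident='1kf6', description='example structure')
    my_structure.download_structure_file(outdir=demo_dir, file_type='mmtf')
    print(my_structure.experimental_method, my_structure.resolution)
    for hit in best_structures('P00363', outdir=demo_dir)[:3]:
        print(hit['pdb_id'], hit['chain_id'], hit['coverage'])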
|
nmih/ssbio
|
ssbio/databases/pdb.py
|
Python
|
mit
| 34,959
|
[
"BLAST",
"Biopython",
"CRYSTAL"
] |
2d9925d8198346200be5fcde45d5547f45767c7cdb08c5d6267ae95e36bd268a
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasnotebase import HasNoteBase
#-------------------------------------------------------------------------
# "Families having notes"
#-------------------------------------------------------------------------
class HasNote(HasNoteBase):
"""Families having notes"""
name = _('Families having <count> notes')
description = _("Matches families having a certain number notes")
|
arunkgupta/gramps
|
gramps/gen/filters/rules/family/_hasnote.py
|
Python
|
gpl-2.0
| 1,722
|
[
"Brian"
] |
1f9ec1ba211eb3524e48a3c76e84c169bf22586a90b47a68c2bb92f2a0d8cb36
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-11 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960-2020/0.5x0.5/combined_sources_MeCHO_lumped_1960-2020_greg.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i308: MeCHO surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i308'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='MeCHO'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name=str.strip(species_name)+' surf emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_acetaldehyde_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
ocube.attributes['lumped_species']='acetaldehyde and other non-CH2O aldehydes'
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_MeCHO_lumped_1960-2020_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of acetaldehyde lumped with other non-CH2O aldehydes from 1960 to 2020'
ocube.attributes['File_version']='v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945 ])
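# The mid-month points above (and the matching forecast_reference_time values
# further below) follow a regular pattern on the 360-day calendar: day 15 of
# every month from 1960-01 to 2020-12, i.e. 15 + 30*n for n = 0..731.
# A minimal sketch of an equivalent generated array (kept only as a cross-check;
# the hard-coded list above is what actually gets written):
midmonth_sketch = 15.0 + 30.0*numpy.arange(61*12)  # 732 values: 15, 45, ..., 21945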
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
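# dimension order is now (time, model_level_number, latitude, longitude)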
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945 ], dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be cattable (i.e. an unlimited dimension), as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
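# (iris deliberately rejects 'missing_value', like '_FillValue', as an ordinary cube
# attribute, which is presumably why dict.__setitem__ is used above to slip it past
# that check; the _FillValue itself comes from the masked array's fill_value set above)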
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name','lumped_species'])
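# optional sanity check (a sketch; run once this script has finished, so that the
# Saver above has flushed and closed the file):
#   check_cube = iris.load_cube(outpath)
#   print(check_cube.summary(shorten=True))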
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1960-2020/regrid_MeCHO_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 17,485
|
[
"NetCDF"
] |
d9763d78913a47310100047cdb3cee016b8fbdfd05bfeaf88fdd1cac177641d7
|
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Parser for ORCA output files"""
from __future__ import print_function
import numpy
from . import logfileparser
from . import utils
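# In typical use this class is not instantiated directly; cclib's generic entry
# points pick the right parser from the output file. A minimal, hedged usage
# sketch (the file name is a placeholder):
#
#     from cclib.parser import ccopen
#     data = ccopen("orca_calc.out").parse()
#     print(data.scfenergies)   # SCF energies in eV, as extracted below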
class ORCA(logfileparser.Logfile):
"""An ORCA log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(ORCA, self).__init__(logname="ORCA", *args, **kwargs)
self.package = "ORCA"
def __str__(self):
"""Return a string representation of the object."""
return "ORCA log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'ORCA("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of Gaussian labels.
To normalise:
(1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta]
(2) replace any G or U by their lowercase equivalent
>>> sym = Gaussian("dummyfile").normalisesym
>>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG']
>>> map(sym, labels)
['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g']
"""
def before_parsing(self):
# A geometry optimization is started only when
        # we parse a cycle (so it will be larger than zero).
self.gopt_cycle = 0
# Keep track of whether this is a relaxed scan calculation
self.is_relaxed_scan = False
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
if line[0:15] == "Number of atoms":
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
if line[1:13] == "Total Charge":
charge = int(line.split()[-1])
self.set_attribute('charge', charge)
line = next(inputfile)
mult = int(line.split()[-1])
self.set_attribute('mult', mult)
# SCF convergence output begins with:
#
# --------------
# SCF ITERATIONS
# --------------
#
# However, there are two common formats which need to be handled, implemented as separate functions.
if "SCF ITERATIONS" in line:
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
colums = line.split()
if colums[1] == "Energy":
self.parse_scf_condensed_format(inputfile, colums)
elif colums[1] == "Starting":
self.parse_scf_expanded_format(inputfile, colums)
# Information about the final iteration, which also includes the convergence
# targets and the convergence values, is printed separately, in a section like this:
#
# *****************************************************
# * SUCCESS *
# * SCF CONVERGED AFTER 9 CYCLES *
# *****************************************************
#
# ...
#
# Total Energy : -382.04963064 Eh -10396.09898 eV
#
# ...
#
# ------------------------- ----------------
# FINAL SINGLE POINT ENERGY -382.049630637
# ------------------------- ----------------
#
# We cannot use this last message as a stop condition in general, because
# often there is vibrational output before it. So we use the 'Total Energy'
# line. However, what comes after that is different for single point calculations
# and in the inner steps of geometry optimizations.
if "SCF CONVERGED AFTER" in line:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
if not hasattr(self, "scfvalues"):
self.scfvalues = []
if not hasattr(self, "scftargets"):
self.scftargets = []
while not "Total Energy :" in line:
line = next(inputfile)
energy = float(line.split()[5])
self.scfenergies.append(energy)
self._append_scfvalues_scftargets(inputfile, line)
        # Sometimes the SCF does not converge, but does not halt
        # the run (like in bug 3184890). In this case, we should
        # remain consistent and use the energy from the last reported
        # SCF cycle. In this case, ORCA prints a banner like this:
#
# *****************************************************
# * ERROR *
# * SCF NOT CONVERGED AFTER 8 CYCLES *
# *****************************************************
if "SCF NOT CONVERGED AFTER" in line:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
if not hasattr(self, "scfvalues"):
self.scfvalues = []
if not hasattr(self, "scftargets"):
self.scftargets = []
energy = self.scfvalues[-1][-1][0]
self.scfenergies.append(energy)
self._append_scfvalues_scftargets(inputfile, line)
# The convergence targets for geometry optimizations are printed at the
# beginning of the output, although the order and their description is
# different than later on. So, try to standardize the names of the criteria
# and save them for later so that we can get the order right.
#
# *****************************
# * Geometry Optimization Run *
# *****************************
#
# Geometry optimization settings:
# Update method Update .... BFGS
# Choice of coordinates CoordSys .... Redundant Internals
# Initial Hessian InHess .... Almoef's Model
#
# Convergence Tolerances:
# Energy Change TolE .... 5.0000e-06 Eh
# Max. Gradient TolMAXG .... 3.0000e-04 Eh/bohr
# RMS Gradient TolRMSG .... 1.0000e-04 Eh/bohr
# Max. Displacement TolMAXD .... 4.0000e-03 bohr
# RMS Displacement TolRMSD .... 2.0000e-03 bohr
#
if line[25:50] == "Geometry Optimization Run":
stars = next(inputfile)
blank = next(inputfile)
line = next(inputfile)
while line[0:23] != "Convergence Tolerances:":
line = next(inputfile)
if hasattr(self, 'geotargets'):
self.logger.warning('The geotargets attribute should not exist yet. There is a problem in the parser.')
self.geotargets = []
self.geotargets_names = []
# There should always be five tolerance values printed here.
for i in range(5):
line = next(inputfile)
name = line[:25].strip().lower().replace('.','').replace('displacement', 'step')
target = float(line.split()[-2])
self.geotargets_names.append(name)
self.geotargets.append(target)
# The convergence targets for relaxed surface scan steps are printed at the
# beginning of the output, although the order and their description is
# different than later on. So, try to standardize the names of the criteria
# and save them for later so that we can get the order right.
#
# *************************************************************
# * RELAXED SURFACE SCAN STEP 12 *
# * *
# * Dihedral ( 11, 10, 3, 4) : 180.00000000 *
# *************************************************************
#
# Geometry optimization settings:
# Update method Update .... BFGS
# Choice of coordinates CoordSys .... Redundant Internals
# Initial Hessian InHess .... Almoef's Model
#
# Convergence Tolerances:
# Energy Change TolE .... 5.0000e-06 Eh
# Max. Gradient TolMAXG .... 3.0000e-04 Eh/bohr
# RMS Gradient TolRMSG .... 1.0000e-04 Eh/bohr
# Max. Displacement TolMAXD .... 4.0000e-03 bohr
# RMS Displacement TolRMSD .... 2.0000e-03 bohr
if line[25:50] == "RELAXED SURFACE SCAN STEP":
self.is_relaxed_scan = True
blank = next(inputfile)
info = next(inputfile)
stars = next(inputfile)
blank = next(inputfile)
line = next(inputfile)
while line[0:23] != "Convergence Tolerances:":
line = next(inputfile)
self.geotargets = []
self.geotargets_names = []
# There should always be five tolerance values printed here.
for i in range(5):
line = next(inputfile)
name = line[:25].strip().lower().replace('.','').replace('displacement', 'step')
target = float(line.split()[-2])
self.geotargets_names.append(name)
self.geotargets.append(target)
# After each geometry optimization step, ORCA prints the current convergence
# parameters and the targets (again), so it is a good idea to check that they
# have not changed. Note that the order of these criteria here are different
# than at the beginning of the output, so make use of the geotargets_names created
# before and save the new geovalues in correct order.
#
# ----------------------|Geometry convergence|---------------------
# Item value Tolerance Converged
# -----------------------------------------------------------------
# Energy change 0.00006021 0.00000500 NO
# RMS gradient 0.00031313 0.00010000 NO
# RMS step 0.01596159 0.00200000 NO
# MAX step 0.04324586 0.00400000 NO
# ....................................................
# Max(Bonds) 0.0218 Max(Angles) 2.48
# Max(Dihed) 0.00 Max(Improp) 0.00
# -----------------------------------------------------------------
#
if line[33:53] == "Geometry convergence":
if not hasattr(self, "geovalues"):
self.geovalues = []
headers = next(inputfile)
dashes = next(inputfile)
names = []
values = []
targets = []
line = next(inputfile)
while list(set(line.strip())) != ["."]:
name = line[10:28].strip().lower()
value = float(line.split()[2])
target = float(line.split()[3])
names.append(name)
values.append(value)
targets.append(target)
line = next(inputfile)
# The energy change is normally not printed in the first iteration, because
# there was no previous energy -- in that case assume zero, but check that
# no previous geovalues were parsed.
newvalues = []
for i, n in enumerate(self.geotargets_names):
if (n == "energy change") and (n not in names):
if not self.is_relaxed_scan:
assert len(self.geovalues) == 0
newvalues.append(0.0)
else:
newvalues.append(values[names.index(n)])
assert targets[names.index(n)] == self.geotargets[i]
self.geovalues.append(newvalues)
#if not an optimization, determine structure used
if line[0:21] == "CARTESIAN COORDINATES" and not hasattr(self, "atomcoords"):
self.skip_line(inputfile, 'dashes')
atomnos = []
atomcoords = []
line = next(inputfile)
while len(line) > 1:
broken = line.split()
atomnos.append(self.table.number[broken[0]])
atomcoords.append(list(map(float, broken[1:4])))
line = next(inputfile)
self.set_attribute('natom', len(atomnos))
self.set_attribute('atomnos', atomnos)
self.atomcoords = [atomcoords]
# There's always a banner announcing the next geometry optimization cycle,
# which looks something like this:
#
# *************************************************************
# * GEOMETRY OPTIMIZATION CYCLE 2 *
# *************************************************************
if "GEOMETRY OPTIMIZATION CYCLE" in line:
            # Keep track of the current cycle just in case, because some things
# are printed differently inside the first/last and other cycles.
self.gopt_cycle = int(line.split()[4])
self.skip_lines(inputfile, ['s', 'd', 'text', 'd'])
if not hasattr(self,"atomcoords"):
self.atomcoords = []
atomnos = []
atomcoords = []
for i in range(self.natom):
line = next(inputfile)
broken = line.split()
atomnos.append(self.table.number[broken[0]])
atomcoords.append(list(map(float, broken[1:4])))
self.atomcoords.append(atomcoords)
self.set_attribute('atomnos', atomnos)
if line[21:68] == "FINAL ENERGY EVALUATION AT THE STATIONARY POINT":
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.atomcoords))
self.skip_lines(inputfile, ['text', 's', 'd', 'text', 'd'])
atomcoords = []
for i in range(self.natom):
line = next(inputfile)
broken = line.split()
atomcoords.append(list(map(float, broken[1:4])))
self.atomcoords.append(atomcoords)
if "The optimization did not converge" in line:
if not hasattr(self, 'optdone'):
self.optdone = []
if line[0:16] == "ORBITAL ENERGIES":
self.skip_lines(inputfile, ['d', 'text', 'text'])
self.moenergies = [[]]
self.homos = [[0]]
line = next(inputfile)
while len(line) > 20: #restricted calcs are terminated by ------
info = line.split()
self.moenergies[0].append(float(info[3]))
if float(info[1]) > 0.00: #might be 1 or 2, depending on restricted-ness
self.homos[0] = int(info[0])
line = next(inputfile)
line = next(inputfile)
#handle beta orbitals
if line[17:35] == "SPIN DOWN ORBITALS":
text = next(inputfile)
self.moenergies.append([])
self.homos.append(0)
line = next(inputfile)
while len(line) > 20: #actually terminated by ------
info = line.split()
self.moenergies[1].append(float(info[3]))
if float(info[1]) == 1.00:
self.homos[1] = int(info[0])
line = next(inputfile)
# So nbasis was parsed at first with the first pattern, but it turns out that
# semiempirical methods (at least AM1 as reported by Julien Idé) do not use this.
        # For this reason, also check for the second pattern, and use it as an assert
# if nbasis was already parsed. Regression PCB_1_122.out covers this test case.
if line[1:32] == "# of contracted basis functions":
self.set_attribute('nbasis', int(line.split()[-1]))
if line[1:27] == "Basis Dimension Dim":
self.set_attribute('nbasis', int(line.split()[-1]))
if line[0:14] == "OVERLAP MATRIX":
self.skip_line(inputfile, 'dashes')
self.aooverlaps = numpy.zeros( (self.nbasis, self.nbasis), "d")
for i in range(0, self.nbasis, 6):
self.updateprogress(inputfile, "Overlap")
header = next(inputfile)
size = len(header.split())
for j in range(self.nbasis):
line = next(inputfile)
broken = line.split()
self.aooverlaps[j, i:i+size] = list(map(float, broken[1:size+1]))
# Molecular orbital coefficients.
# This is also where atombasis is parsed.
if line[0:18] == "MOLECULAR ORBITALS":
self.skip_line(inputfile, 'dashes')
mocoeffs = [ numpy.zeros((self.nbasis, self.nbasis), "d") ]
self.aonames = []
self.atombasis = []
for n in range(self.natom):
self.atombasis.append([])
for spin in range(len(self.moenergies)):
if spin == 1:
self.skip_line(inputfile, 'blank')
mocoeffs.append(numpy.zeros((self.nbasis, self.nbasis), "d"))
for i in range(0, self.nbasis, 6):
self.updateprogress(inputfile, "Coefficients")
self.skip_lines(inputfile, ['numbers', 'energies', 'occs'])
dashes = next(inputfile)
broken = dashes.split()
size = len(broken)
for j in range(self.nbasis):
line = next(inputfile)
broken = line.split()
#only need this on the first time through
if spin == 0 and i == 0:
atomname = line[3:5].split()[0]
num = int(line[0:3])
orbital = broken[1].upper()
self.aonames.append("%s%i_%s"%(atomname, num+1, orbital))
self.atombasis[num].append(j)
temp = []
vals = line[16:-1] #-1 to remove the last blank space
for k in range(0, len(vals), 10):
temp.append(float(vals[k:k+10]))
mocoeffs[spin][i:i+size, j] = temp
self.mocoeffs = mocoeffs
if line[0:18] == "TD-DFT/TDA EXCITED":
# Could be singlets or triplets
if line.find("SINGLETS") >= 0:
sym = "Singlet"
elif line.find("TRIPLETS") >= 0:
sym = "Triplet"
else:
sym = "Not specified"
if not hasattr(self, "etenergies"):
self.etsecs = []
self.etenergies = []
self.etsyms = []
lookup = {'a':0, 'b':1}
line = next(inputfile)
while line.find("STATE") < 0:
line = next(inputfile)
# Contains STATE or is blank
while line.find("STATE") >= 0:
broken = line.split()
self.etenergies.append(float(broken[-2]))
self.etsyms.append(sym)
line = next(inputfile)
sec = []
# Contains SEC or is blank
while line.strip():
start = line[0:8].strip()
start = (int(start[:-1]), lookup[start[-1]])
end = line[10:17].strip()
end = (int(end[:-1]), lookup[end[-1]])
contrib = float(line[35:47].strip())
sec.append([start, end, contrib])
line = next(inputfile)
self.etsecs.append(sec)
line = next(inputfile)
# This will parse etoscs for TD calculations, but note that ORCA generally
# prints two sets, one based on the length form of transition dipole moments,
# the other based on the velocity form. Although these should be identical
# in the basis set limit, in practice they are rarely the same. Here we will
# effectively parse just the spectrum based on the length-form.
if (line[25:44] == "ABSORPTION SPECTRUM" or \
line[9:28] == "ABSORPTION SPECTRUM") and not hasattr(self,
"etoscs"):
self.skip_lines(inputfile, ['d', 'header', 'header', 'd'])
self.etoscs = []
for x in self.etsyms:
osc = next(inputfile).split()[3]
if osc == "spin": # "spin forbidden"
osc = 0
else:
osc = float(osc)
self.etoscs.append(osc)
if line[0:23] == "VIBRATIONAL FREQUENCIES":
self.skip_lines(inputfile, ['d', 'b'])
self.vibfreqs = numpy.zeros((3 * self.natom,),"d")
for i in range(3 * self.natom):
line = next(inputfile)
self.vibfreqs[i] = float(line.split()[1])
if numpy.any(self.vibfreqs[0:6] != 0):
msg = "Modes corresponding to rotations/translations "
msg += "may be non-zero."
self.logger.warning(msg)
self.vibfreqs = self.vibfreqs[6:]
if line[0:12] == "NORMAL MODES":
""" Format:
NORMAL MODES
------------
These modes are the cartesian displacements weighted by the diagonal matrix
M(i,i)=1/sqrt(m[i]) where m[i] is the mass of the displaced atom
Thus, these vectors are normalized but *not* orthogonal
0 1 2 3 4 5
0 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
1 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
2 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
...
"""
self.vibdisps = numpy.zeros(( 3 * self.natom, self.natom, 3), "d")
self.skip_lines(inputfile, ['d', 'b', 'text', 'text', 'text', 'b'])
for mode in range(0, 3 * self.natom, 6):
header = next(inputfile)
for atom in range(self.natom):
x = next(inputfile).split()[1:]
y = next(inputfile).split()[1:]
z = next(inputfile).split()[1:]
self.vibdisps[mode:mode + 6, atom, 0] = x
self.vibdisps[mode:mode + 6, atom, 1] = y
self.vibdisps[mode:mode + 6, atom, 2] = z
self.vibdisps = self.vibdisps[6:]
if line[0:11] == "IR SPECTRUM":
self.skip_lines(inputfile, ['d', 'b', 'header', 'd'])
self.vibirs = numpy.zeros((3 * self.natom,),"d")
line = next(inputfile)
while len(line) > 2:
num = int(line[0:4])
self.vibirs[num] = float(line.split()[2])
line = next(inputfile)
self.vibirs = self.vibirs[6:]
if line[0:14] == "RAMAN SPECTRUM":
self.skip_lines(inputfile, ['d', 'b', 'header', 'd'])
self.vibramans = numpy.zeros((3 * self.natom,),"d")
line = next(inputfile)
while len(line) > 2:
num = int(line[0:4])
self.vibramans[num] = float(line.split()[2])
line = next(inputfile)
self.vibramans = self.vibramans[6:]
# ORCA will print atomic charges along with the spin populations,
# so care must be taken about choosing the proper column.
# Population analyses are performed usually only at the end
# of a geometry optimization or other run, so we want to
# leave just the final atom charges.
# Here is an example for Mulliken charges:
# --------------------------------------------
# MULLIKEN ATOMIC CHARGES AND SPIN POPULATIONS
# --------------------------------------------
# 0 H : 0.126447 0.002622
# 1 C : -0.613018 -0.029484
# 2 H : 0.189146 0.015452
# 3 H : 0.320041 0.037434
# ...
# Sum of atomic charges : -0.0000000
# Sum of atomic spin populations: 1.0000000
if line[:23] == "MULLIKEN ATOMIC CHARGES":
has_spins = "AND SPIN POPULATIONS" in line
if not hasattr(self, "atomcharges"):
self.atomcharges = { }
if has_spins and not hasattr(self, "atomspins"):
self.atomspins = {}
self.skip_line(inputfile, 'dashes')
charges = []
if has_spins:
spins = []
line = next(inputfile)
while line[:21] != "Sum of atomic charges":
charges.append(float(line[8:20]))
if has_spins:
spins.append(float(line[20:]))
line = next(inputfile)
self.atomcharges["mulliken"] = charges
if has_spins:
self.atomspins["mulliken"] = spins
# Things are the same for Lowdin populations, except that the sums
# are not printed (there is a blank line at the end).
if line[:22] == "LOEWDIN ATOMIC CHARGES":
has_spins = "AND SPIN POPULATIONS" in line
if not hasattr(self, "atomcharges"):
self.atomcharges = { }
if has_spins and not hasattr(self, "atomspins"):
self.atomspins = {}
self.skip_line(inputfile, 'dashes')
charges = []
if has_spins:
spins = []
line = next(inputfile)
while line.strip():
charges.append(float(line[8:20]))
if has_spins:
spins.append(float(line[20:]))
line = next(inputfile)
self.atomcharges["lowdin"] = charges
if has_spins:
self.atomspins["lowdin"] = spins
        # It is not stated explicitly, but the dipole moment components printed by ORCA
        # seem to be in atomic units, so they will need to be converted. Also, they
        # are most probably calculated with respect to the origin.
#
# -------------
# DIPOLE MOMENT
# -------------
# X Y Z
# Electronic contribution: 0.00000 -0.00000 -0.00000
# Nuclear contribution : 0.00000 0.00000 0.00000
# -----------------------------------------
# Total Dipole Moment : 0.00000 -0.00000 -0.00000
# -----------------------------------------
# Magnitude (a.u.) : 0.00000
# Magnitude (Debye) : 0.00000
#
if line.strip() == "DIPOLE MOMENT":
self.skip_lines(inputfile, ['d', 'XYZ', 'electronic', 'nuclear', 'd'])
total = next(inputfile)
assert "Total Dipole Moment" in total
reference = [0.0, 0.0, 0.0]
dipole = numpy.array([float(d) for d in total.split()[-3:]])
dipole = utils.convertor(dipole, "ebohr", "Debye")
if not hasattr(self, 'moments'):
self.moments = [reference, dipole]
else:
try:
assert numpy.all(self.moments[1] == dipole)
except AssertionError:
self.logger.warning('Overwriting previous multipole moments with new values')
self.moments = [reference, dipole]
def parse_scf_condensed_format(self, inputfile, line):
""" Parse the SCF convergence information in condensed format """
# This is what it looks like
# ITER Energy Delta-E Max-DP RMS-DP [F,P] Damp
# *** Starting incremental Fock matrix formation ***
# 0 -384.5203638934 0.000000000000 0.03375012 0.00223249 0.1351565 0.7000
# 1 -384.5792776162 -0.058913722842 0.02841696 0.00175952 0.0734529 0.7000
# ***Turning on DIIS***
# 2 -384.6074211837 -0.028143567475 0.04968025 0.00326114 0.0310435 0.0000
# 3 -384.6479682063 -0.040547022616 0.02097477 0.00121132 0.0361982 0.0000
# 4 -384.6571124353 -0.009144228947 0.00576471 0.00035160 0.0061205 0.0000
# 5 -384.6574659959 -0.000353560584 0.00191156 0.00010160 0.0025838 0.0000
# 6 -384.6574990782 -0.000033082375 0.00052492 0.00003800 0.0002061 0.0000
# 7 -384.6575005762 -0.000001497987 0.00020257 0.00001146 0.0001652 0.0000
# 8 -384.6575007321 -0.000000155848 0.00008572 0.00000435 0.0000745 0.0000
# **** Energy Check signals convergence ****
assert line[2] == "Delta-E"
assert line[3] == "Max-DP"
if not hasattr(self, "scfvalues"):
self.scfvalues = []
self.scfvalues.append([])
# Try to keep track of the converger (NR, DIIS, SOSCF, etc.).
diis_active = True
while not line == []:
if 'Newton-Raphson' in line:
diis_active = False
elif 'SOSCF' in line:
diis_active = False
elif line[0].isdigit() and diis_active:
energy = float(line[1])
deltaE = float(line[2])
maxDP = float(line[3])
rmsDP = float(line[4])
self.scfvalues[-1].append([deltaE, maxDP, rmsDP])
elif line[0].isdigit() and not diis_active:
energy = float(line[1])
deltaE = float(line[2])
maxDP = float(line[5])
rmsDP = float(line[6])
self.scfvalues[-1].append([deltaE, maxDP, rmsDP])
line = next(inputfile).split()
def parse_scf_expanded_format(self, inputfile, line):
""" Parse SCF convergence when in expanded format. """
# The following is an example of the format
# -----------------------------------------
#
# *** Starting incremental Fock matrix formation ***
#
# ----------------------------
# ! ITERATION 0 !
# ----------------------------
# Total Energy : -377.960836651297 Eh
# Energy Change : -377.960836651297 Eh
# MAX-DP : 0.100175793695
# RMS-DP : 0.004437973661
# Actual Damping : 0.7000
# Actual Level Shift : 0.2500 Eh
# Int. Num. El. : 43.99982197 (UP= 21.99991099 DN= 21.99991099)
# Exchange : -34.27550826
# Correlation : -2.02540957
#
#
# ----------------------------
# ! ITERATION 1 !
# ----------------------------
# Total Energy : -378.118458080109 Eh
# Energy Change : -0.157621428812 Eh
# MAX-DP : 0.053240648588
# RMS-DP : 0.002375092508
# Actual Damping : 0.7000
# Actual Level Shift : 0.2500 Eh
# Int. Num. El. : 43.99994143 (UP= 21.99997071 DN= 21.99997071)
# Exchange : -34.00291075
# Correlation : -2.01607243
#
# ***Turning on DIIS***
#
# ----------------------------
# ! ITERATION 2 !
# ----------------------------
# ....
#
if not hasattr(self, "scfvalues"):
self.scfvalues = []
self.scfvalues.append([])
line = "Foo" # dummy argument to enter loop
while line.find("******") < 0:
line = next(inputfile)
info = line.split()
if len(info) > 1 and info[1] == "ITERATION":
dashes = next(inputfile)
energy_line = next(inputfile).split()
energy = float(energy_line[3])
deltaE_line = next(inputfile).split()
deltaE = float(deltaE_line[3])
if energy == deltaE:
deltaE = 0
maxDP_line = next(inputfile).split()
maxDP = float(maxDP_line[2])
rmsDP_line = next(inputfile).split()
rmsDP = float(rmsDP_line[2])
self.scfvalues[-1].append([deltaE, maxDP, rmsDP])
return
# end of parse_scf_expanded_format
def _append_scfvalues_scftargets(self, inputfile, line):
# The SCF convergence targets are always printed after this, but apparently
# not all of them always -- for example the RMS Density is missing for geometry
# optimization steps. So, assume the previous value is still valid if it is
# not found. For additional certainty, assert that the other targets are unchanged.
while not "Last Energy change" in line:
line = next(inputfile)
deltaE_value = float(line.split()[4])
deltaE_target = float(line.split()[7])
line = next(inputfile)
if "Last MAX-Density change" in line:
maxDP_value = float(line.split()[4])
maxDP_target = float(line.split()[7])
line = next(inputfile)
if "Last RMS-Density change" in line:
rmsDP_value = float(line.split()[4])
rmsDP_target = float(line.split()[7])
else:
rmsDP_value = self.scfvalues[-1][-1][2]
rmsDP_target = self.scftargets[-1][2]
assert deltaE_target == self.scftargets[-1][0]
assert maxDP_target == self.scftargets[-1][1]
self.scfvalues[-1].append([deltaE_value, maxDP_value, rmsDP_value])
self.scftargets.append([deltaE_target, maxDP_target, rmsDP_target])
if __name__ == "__main__":
import sys
import doctest, orcaparser
if len(sys.argv) == 1:
doctest.testmod(orcaparser, verbose=False)
if len(sys.argv) == 2:
parser = orcaparser.ORCA(sys.argv[1])
data = parser.parse()
if len(sys.argv) > 2:
for i in range(len(sys.argv[2:])):
if hasattr(data, sys.argv[2 + i]):
print(getattr(data, sys.argv[2 + i]))
|
ChemSem/cclib
|
src/cclib/parser/orcaparser.py
|
Python
|
lgpl-2.1
| 35,788
|
[
"Gaussian",
"ORCA",
"cclib"
] |
1782e84d1f9655c024ee434b66f95562a44b7f2d6b96ebb0d6bc07aed839f7e7
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2014, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Vic Iglesias <vic.iglesias@eucalyptus.com>
# Imran Hossain Shaon <imran.hossain@hpe.com>
# qemu-img modifications: Brian Thomason <brian.thomason@hp.com>
#
#
# TODO: add check for env variables for user credentials
#
import argparse
import json
import os
import re
import sys
import ConfigParser
import urllib
import time
from subprocess import Popen, PIPE, call
class EmiManager:
def __init__(self, user, region,
catalog_url="http://shaon.me/catalog-web"):
self.user = user
self.region = region
self.catalog_url = catalog_url
self.temp_dir_prefix = "emis"
def check_dependencies(self):
deps_list = ["wget", "xz", "bzip2"]
print
sys.stdout.write("\t\t%s\r" % "checking euca2ools...")
try:
cmd = call("type " + "euca-describe-instances", shell=True,
stdout=PIPE, stderr=PIPE)
if cmd:
raise RuntimeError
except (ImportError, RuntimeError):
sys.stdout.flush()
time.sleep(0.5)
print_error("\r\t\tchecking euca2ools... failed")
time.sleep(0.5)
print_error("Euca2ools not found.\n")
print_info("Install instructions can be found here:\n"
"https://www.eucalyptus.com/docs/eucalyptus/4.2/"
"index.html#shared/installing_euca2ools.html")
sys.exit("Bye")
sys.stdout.flush()
time.sleep(0.5)
print_success("\t\tchecking euca2ools... ok")
time.sleep(0.5)
for package in deps_list:
print
sys.stdout.write("\t\tchecking %s\r" % (package + "..."))
if call(["which", package], stdout=PIPE, stderr=PIPE):
sys.stdout.flush()
time.sleep(0.5)
print_error("\t\tchecking %s" % (package + "... failed"))
time.sleep(0.5)
else:
sys.stdout.flush()
time.sleep(0.5)
print_success("\t\tchecking %s" % (package + "... ok"))
time.sleep(0.5)
for pkg in deps_list:
self.install_package(pkg)
def install_package(self, package_name, nogpg=False):
while call(["which", package_name], stdout=PIPE, stderr=PIPE):
if call(["yum", "install", "-y", package_name]):
print_error("Failed to install " + package_name + ".")
sys.exit("Bye")
def install_qemu_img(self):
if call(["yum", "install", "-y", "--nogpgcheck", "qemu-img"]):
print_error("Failed to install qemu-img.")
sys.exit("Bye")
def get_catalog(self):
return json.loads(urllib.urlopen(self.catalog_url).read())["images"]
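    # The catalog JSON fetched above is expected to look roughly like this
    # (keys inferred from how they are used elsewhere in this file; the values
    # shown are purely illustrative):
    #   {"images": [{"os": "...", "version": "...", "image-format": "qcow2",
    #                "created-date": "...", "description": "...", "url": "http://..."}]}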
def print_catalog(self):
print "Select an image Id from the following table: "
print
catalog = self.get_catalog()
format_spec = '{0:3} {1:20} {2:20} {3:20} {4:10}'
print_bold(
format_spec.format("id", "version", "image-format", "created-date",
"description"))
image_number = 1
for image in catalog:
if not image["image-format"]:
image["image-format"] = "None"
print format_spec.format(str(image_number), image["version"],
image["image-format"],
image["created-date"],
image["description"])
image_number += 1
print
def get_image(self, retry=2):
self.print_catalog()
while retry > 0:
try:
number = int(raw_input(
"Enter the image ID you would like to install: "))
if (number - 1 < 0) or (number - 1 > len(self.get_catalog())):
print_error(
"Invalid image Id. "
"Please select an Id from the table.")
raise ValueError
image = self.get_catalog()[number - 1]
return image
except (ValueError, KeyError, IndexError):
retry -= 1
sys.exit("Bye")
def install_image(self, image):
if image["image-format"] == "qcow2":
print_info(
"This image is available in 'qcow2' "
"format and requires qemu-img "
"package for 'raw' conversion.\n")
# Check that qemu-img is installed
sys.stdout.write("\t\t%s\r" % "checking qemu-img...")
if call(["which", "qemu-img"], stdout=PIPE, stderr=PIPE):
sys.stdout.flush()
time.sleep(1)
print_error("\t\tchecking %s" % "qemu-img... failed")
time.sleep(1)
install_qemu = "Install 'qemu-img' package? (Y/n): ".strip()
if check_response(install_qemu):
self.install_qemu_img()
else:
sys.exit("Bye")
else:
sys.stdout.flush()
time.sleep(1)
print_success("\t\tchecking %s" % "qemu-img... ok")
time.sleep(1)
image_name = image["os"] + "-" + image["created-date"]
describe_images = "euca-describe-images --filter name={0} " \
"--region {1}@{2}".format(image_name,
self.user, self.region)
(stdout, stderr) = self.check_output(describe_images)
if re.search(image_name, stdout):
print_warning(
"Warning: An image with name '" + image_name +
"' is already install.")
print_warning(stdout)
install_image = "Continue? (Y/n) : ".strip()
if check_response(install_image):
image_name = image['os'] + "-" + str(time.time())
else:
sys.exit("Bye")
directory_format = '{0}-{1}.XXXXXXXX'.format(self.temp_dir_prefix,
image["os"])
# Make temp directory
(tmpdir, stderr) = self.check_output('mktemp -d ' + directory_format)
# Download image
download_path = tmpdir.strip() + "/" + image["url"].rsplit("/", 1)[-1]
print_info(
"Downloading " + image['url'] + " image to: " + download_path)
if call(["wget", image["url"], "-O", download_path]):
print_error(
"Image download failed attempting to download:\n" + image[
"url"])
sys.exit("Bye")
# Decompress image, if necessary
if image["url"].endswith(".xz"):
print_info("Decompressing image...")
if call(["xz", "-d", download_path]):
print_error(
"Unable to decompress image downloaded to: " +
download_path)
sys.exit("Bye")
image_path = download_path.strip(".xz")
print_info("Decompressed image can be found at: " + image_path)
elif image["url"].endswith(".bz2"):
print_info("Decompressing image...")
if call(["bzip2", "-d", download_path]):
print_error(
"Unable to decompress image downloaded to: " +
download_path)
sys.exit("Bye")
image_path = download_path.strip(".bz2")
print_info("Decompressed image can be found at: " + image_path)
else:
image_path = download_path
# Convert image to raw format, if necessary
if image["image-format"] == "qcow2":
print_info("Converting image...")
image_basename = image_path[0:image_path.rindex(".")]
if call(["qemu-img", "convert", "-O", "raw", image_path,
image_basename + ".raw"]):
print_error("Unable to convert image")
sys.exit("Bye")
image_path = image_path[0:image_path.rindex(".")] + ".raw"
print_info("Converted image can be found at: " + image_path)
print_info("Installing image to bucket: " + image_name + "\n")
install_cmd = "euca-install-image -r x86_64 -i {0} --virt hvm " \
"-b {1} -n {1} --region {2}@{3}". \
format(image_path, image_name, self.user, self.region)
print_info("Running installation command: ")
print_info(install_cmd)
if call(install_cmd.split()):
print_error("Unable to install image that was downloaded to: \n" +
download_path)
sys.exit("Bye")
def check_output(self, command):
process = Popen(command.split(), stdout=PIPE)
return process.communicate()
class EucaCredentials(object):
def __init__(self, home_dir='/root', conf_dir='.euca', ext='.ini'):
self.ext = ext
self.home_dir = home_dir
self.conf_dir = conf_dir
config = self.get_config()
sections = config.sections()
self.region = self.select_region(sections)
self.user = self.select_user(sections)
def select_user(self, sections):
users = self.get_sections('user', sections)
print_success("Found " + str(len(users)) + " available user/s in " +
os.path.join(self.home_dir, self.conf_dir))
self.print_info(users)
try:
number = int(raw_input("\nSelect User ID: "))
user = (users[number - 1]).split(' ')[1]
return user
except (ValueError, KeyError, IndexError):
print "Invalid user selected\n"
sys.exit("Bye")
def select_region(self, sections):
regions = self.get_sections('region', sections)
print_success("Found " + str(len(regions)) +
" available region/s in " +
os.path.join(self.home_dir, self.conf_dir))
self.print_info(regions)
try:
number = int(raw_input("\nSelect Region ID: "))
region = (regions[number - 1]).split(' ')[1]
return region
except (ValueError, KeyError, IndexError):
print_error("Invalid region selected\n")
sys.exit("Bye")
def get_config(self):
print_info("Reading user credentials...\n")
try:
directory = os.path.join(self.home_dir, self.conf_dir)
files = os.listdir(directory)
abs_files = []
for f in files:
abs_files.append(os.path.join(directory, f))
res = filter(lambda x: x.endswith(self.ext), abs_files)
config = ConfigParser.ConfigParser()
config.read(res)
return config
except Exception, e:
print e
print_error("Error: Cannot find directory or .ini file " +
os.path.join(self.home_dir, self.conf_dir))
print_error("Create admin config file: "
"eval `clcadmin-assume-system-credentials`; "
"euare-useraddkey admin -wd <dns domain name> > "
".euca/admin.ini\n")
exit(1)
def get_sections(self, name, section_list):
return filter(lambda x: x.startswith(name), section_list)
def print_info(self, key_val_list):
index = 1
format_spec = '{0:3} {1:10} {2:30}'
print
print_bold(format_spec.format("id", "type", "value"))
for kvl in key_val_list:
kv = kvl.split(' ')
try:
print format_spec.format(str(index), kv[0], kv[1])
except IndexError, e:
print_error("Incorrect syntaxt for: " + kv)
index += 1
class bcolors:
"""
Courtesy: http://stackoverflow.com/questions/287871
"""
HEADER = '\033[37m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
ENDC = '\033[0m'
def check_response(query_string):
response = raw_input(query_string)
if response.lower() == 'y' or response == '':
return True
else:
return False
def print_error(message):
print bcolors.FAIL + message + bcolors.ENDC
def print_warning(message):
print bcolors.WARNING + message + bcolors.ENDC
def print_success(message):
print bcolors.OKGREEN + message + bcolors.ENDC
def print_debug(message):
print bcolors.HEADER + "[debug] " + message + bcolors.ENDC
def print_info(message):
print bcolors.HEADER + message + bcolors.ENDC
def print_bold(message):
print bcolors.BOLD + message + bcolors.ENDC
def print_title():
title = """
______ _ _
| ____| | | | |
| |__ _ _ ___ __ _| |_ _ _ __ | |_ _ _ ___
| __|| | | |/ __/ _` | | | | | '_ \| __| | | / __|
| |___| |_| | (_| (_| | | |_| | |_) | |_| |_| \__ \\
|______\__,_|\___\__,_|_|\__, | .__/ \__|\__,_|___/
__/ | |
__ __ _ |___/|_| _____
| \/ | | | (_) |_ _|
| \ / | __ _ ___| |__ _ _ __ ___ | | _ __ ___ __ _ __ _ ___ ___
| |\/| |/ _` |/ __| '_ \| | '_ \ / _ \ | | | '_ ` _ \ / _` |/ _` |/ _ \/ __|
| | | | (_| | (__| | | | | | | | __/ _| |_| | | | | | (_| | (_| | __/\__ \\
|_| |_|\__,_|\___|_| |_|_|_| |_|\___| |_____|_| |_| |_|\__,_|\__, |\___||___/
__/ |
|___/ """
print_success(title)
def exit_message():
print
print "For more information visit:\n\thttp://emis.eucalyptus.com"
def main():
parser = argparse.ArgumentParser(description='Process Arguments.')
parser.add_argument('-c', '--catalog',
default="https://raw.githubusercontent.com/shaon/emis/master/catalog-web",
help='Image catalog json file')
args = parser.parse_args()
print_title()
euca_creds = EucaCredentials()
emi_manager = EmiManager(euca_creds.user, euca_creds.region,
catalog_url=args.catalog)
emi_manager.check_dependencies()
image = emi_manager.get_image()
emi_manager.install_image(image)
exit_message()
if __name__ == "__main__":
main()
|
shaon/emis
|
install-emis.py
|
Python
|
gpl-3.0
| 15,926
|
[
"Brian",
"VisIt"
] |
eddee345f16661a997be77a4c6e3091de40a1c16811077ab4df49f0e753a9e74
|
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
from pandas.core.index import Index, MultiIndex, ensure_index
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
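    # Usage sketch for the method above: df.rolling(3).agg(["sum", "mean"]) is
    # routed through SelectionMixin._aggregate; when that cannot map ``func`` to
    # a window routine it returns None, and aggregate falls back to
    # ``apply(func, raw=False)`` as coded above.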
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
For compatibility with other %(name)s methods. Has no effect
on the computed value.
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it's an offset then this will be the time period of each window. Each
window will be a variable size based on the observations included in
the time-period. This is only valid for datetimelike indexes.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type; return the window
that has already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
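# Illustration (hypothetical parameter values): for win_type="kaiser" with
# beta=4.0 and an integer window of 5, _validate_win_type returns
# ("kaiser", 4.0) and the call above becomes
#   sig.get_window(("kaiser", 4.0), 5, False)
# i.e. a symmetric (non-periodic) 5-point array of float weights.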
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``win_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
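# Illustrative usage (hypothetical data; scipy is required for the weights):
# extra keywords such as std/beta are consumed by _prep_window for the chosen
# win_type, e.g.
#   pd.Series(range(10)).rolling(5, win_type="gaussian").mean(std=2)
# computes a Gaussian-weighted moving average over each 5-observation window.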
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
Apply an arbitrary function to each %(name)s window.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic; this is not needed for
groupby.rolling because validation has already been done at a
higher level.
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
other = self.min_periods or -1
return max(length, other)
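# Illustration (values follow from the code above): the expanding "window" is
# simply the length of the axis, doubled when a second object is involved (as
# in covariance), and never smaller than min_periods; e.g. for a 10-row frame
# with the default min_periods=1 this returns max(10, 1) == 10, so the window
# always covers everything seen so far.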
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show an expanding calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> import scipy.stats
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
4.999874
>>> s = pd.Series(arr)
>>> s.expanding(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 4.999874
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="expanding")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
"""
Provide an expanding groupby implementation.
"""
@property
def _constructor(self):
return Expanding
_bias_template = """
Parameters
----------
bias : bool, default False
Use a standard estimation bias correction.
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
bias : bool, default False
Use a standard estimation bias correction.
**kwargs
Keyword arguments to be passed into func.
"""
class EWM(_Rolling):
r"""
Provide exponential weighted functions.
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`.
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`.
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - \exp(\log(0.5) / halflife),\text{ for } halflife > 0`.
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`.
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
adjust : bool, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings
(viewing EWMA as a moving average).
ignore_na : bool, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. The value 0 identifies the rows, and 1
identifies the columns.
Returns
-------
DataFrame
A Window sub-classed for the particular operation.
See Also
--------
rolling : Provides rolling window calculations.
expanding : Provides expanding transformations.
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
"""
_attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"]
def __init__(
self,
obj,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
Returns
-------
y : same type as input argument
"""
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg):
return cfunc(
arg,
self.com,
int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
)
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, block_list, obj, exclude)
@Substitution(name="ewm")
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Exponential weighted moving average.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
nv.validate_window_func("mean", args, kwargs)
return self._apply("ewma", **kwargs)
@Substitution(name="ewm")
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""
Exponential weighted moving stddev.
"""
nv.validate_window_func("std", args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name="ewm")
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""
Exponential weighted moving variance.
"""
nv.validate_window_func("var", args, kwargs)
def f(arg):
return libwindow.ewmcov(
arg,
arg,
self.com,
int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
int(bias),
)
return self._apply(f, **kwargs)
@Substitution(name="ewm")
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""
Exponential weighted sample covariance.
"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = libwindow.ewmcov(
X._prep_values(),
Y._prep_values(),
self.com,
int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
int(bias),
)
return X._wrap_result(cov)
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
@Substitution(name="ewm")
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""
Exponential weighted sample correlation.
"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return libwindow.ewmcov(
x,
y,
self.com,
int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
1,
)
x_values = X._prep_values()
y_values = Y._prep_values()
with np.errstate(all="ignore"):
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (
isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame))
and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))
):
raise TypeError(
"arguments to moment function must be of type "
"np.ndarray/Series/DataFrame"
)
if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance(
arg2, (np.ndarray, ABCSeries)
):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, ABCDataFrame):
from pandas import DataFrame
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, ABCDataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
X, Y = arg1.align(arg2, join="outer")
X = X + 0 * Y
Y = Y + 0 * X
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index, columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(
*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])
)
from pandas import concat
result_index = arg1.index.union(arg2.index)
if len(result_index):
# construct result frame
result = concat(
[
concat(
[results[i][j] for j, c in enumerate(arg2.columns)],
ignore_index=True,
)
for i, c in enumerate(arg1.columns)
],
ignore_index=True,
axis=1,
)
result.columns = arg1.columns
# set the index and reorder
if arg2.columns.nlevels > 1:
result.index = MultiIndex.from_product(
arg2.columns.levels + [result_index]
)
result = result.reorder_levels([2, 0, 1]).sort_index()
else:
result.index = MultiIndex.from_product(
[range(len(arg2.columns)), range(len(result_index))]
)
result = result.swaplevel(1, 0).sort_index()
result.index = MultiIndex.from_product(
[result_index] + [arg2.columns]
)
else:
# empty result
result = DataFrame(
index=MultiIndex(
levels=[arg1.index, arg2.columns], codes=[[], []]
),
columns=arg2.columns,
dtype="float64",
)
# reset our index names to arg1 names
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = result.columns.set_names(arg1.columns.names)
result.index = result.index.set_names(
result_index.names + arg2.columns.names
)
return result
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {
i: f(*_prep_binary(arg1.iloc[:, i], arg2))
for i, col in enumerate(arg1.columns)
}
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(comass, span, halflife, alpha):
valid_count = com.count_not_none(comass, span, halflife, alpha)
if valid_count > 1:
raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if comass is not None:
if comass < 0:
raise ValueError("comass must satisfy: comass >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
comass = (span - 1) / 2.0
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
comass = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
comass = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of comass, span, halflife, or alpha")
return float(comass)
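# Worked examples of the conversions above (values follow directly from the code):
#   span = 9     ->  comass = (9 - 1) / 2.0 == 4.0
#   alpha = 0.2  ->  comass = (1.0 - 0.2) / 0.2 == 4.0
#   halflife = h ->  comass = 1 / (1 - exp(log(0.5) / h)) - 1
# so EWM(span=9) and EWM(alpha=0.2) describe the same decay rate.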
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2.0 if center else 0
try:
return int(offset)
except TypeError:
return offset.astype(int)
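# Illustration: _offset(5, True) == 2 while _offset(5, False) == 0; for an
# array-like window (a weighted Window) the number of weights is used as the
# window length.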
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all="ignore"):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, ABCDataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
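# Illustration: unlike a bare np.sqrt, small negative inputs (which can arise
# from floating point round-off in variance calculations) are clipped to 0
# instead of becoming NaN, e.g. _zsqrt(np.array([4.0, -1e-12])) -> [2.0, 0.0].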
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception("Input arrays must be of the same type!")
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
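# Illustration: adding 0 * other aligns both operands on the union of their
# indexes and propagates NaN wherever either side is missing; e.g. two Series
# indexed [0, 1] and [1, 2] both come back indexed [0, 1, 2] with NaN at 0 and 2.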
# Top-level exports
def rolling(obj, win_type=None, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError("invalid type: %s" % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError("invalid type: %s" % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError("invalid type: %s" % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
|
toobaz/pandas
|
pandas/core/window.py
|
Python
|
bsd-3-clause
| 84,775
|
[
"Gaussian"
] |
f628c9acb492f85bca85fdb778f7c879b906cd0c831ab721bad61ff742ec7412
|
""" GraphUtilities is a a collection of utility functions and classes used
in the DIRAC Graphs package.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
import os
import time
import datetime
import calendar
import math
import pytz
import numpy
from matplotlib.ticker import ScalarFormatter
from matplotlib.dates import AutoDateLocator, AutoDateFormatter, DateFormatter, RRuleLocator, \
rrulewrapper, HOURLY, MINUTELY, SECONDLY, YEARLY, MONTHLY, DAILY
from dateutil.relativedelta import relativedelta
__RCSID__ = "$Id$"
def evalPrefs( *args, **kw ):
""" Interpret arguments as preferencies dictionaries or key-value pairs. The overriding order
is right most - most important one. Returns a single dictionary of preferencies
"""
prefs = {}
for pDict in list( args ) + [kw]:
if isinstance(pDict, dict):
for key in pDict:
if key == "metadata":
for mkey in pDict[key]:
prefs[mkey] = pDict[key][mkey]
else:
prefs[key] = pDict[key]
return prefs
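# Hedged usage sketch (illustrative only): later arguments override earlier ones,
# and a nested "metadata" dictionary is flattened into the result.
#
#   prefs = evalPrefs( {'width':800, 'metadata':{'title':'CPU'}}, {'width':1024}, height = 600 )
#   # -> {'width': 1024, 'title': 'CPU', 'height': 600}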
def pixelToPoint( size, dpi ):
""" Convert size expressed in pixels into points for a given dpi resolution
"""
return float( size ) * 100. / float( dpi )
datestrings = ['%x %X', '%x', '%Y-%m-%d %H:%M:%S']
def convert_to_datetime( dstring ):
orig_string = str( dstring )
try:
if isinstance( dstring, datetime.datetime ):
      results = dstring
else:
results = eval( str( dstring ), {'__builtins__':None, 'time':time, 'math':math}, {} )
if isinstance(results, (int, float)):
results = datetime.datetime.fromtimestamp( int( results ) )
elif isinstance( results, datetime.datetime ):
pass
else:
raise ValueError( "Unknown datetime type!" )
except Exception as e:
t = None
for dateformat in datestrings:
try:
        t = time.strptime( orig_string, dateformat )
timestamp = calendar.timegm( t ) #-time.timezone
results = datetime.datetime.fromtimestamp( timestamp )
break
except:
pass
if t is None:
try:
        short_string = orig_string.split( '.', 1 )[0]
        t = time.strptime( short_string, dateformat )
timestamp = time.mktime( t ) #-time.timezone
results = datetime.datetime.fromtimestamp( timestamp )
except:
raise ValueError( "Unable to create time from string!\nExpecting " \
"format of: '12/06/06 12:54:67'\nRecieved:%s" % orig_string )
return results
def to_timestamp( val ):
try:
v = float( val )
if v > 1000000000 and v < 1900000000:
return v
except:
pass
val = convert_to_datetime( val )
#return calendar.timegm( val.timetuple() )
return time.mktime( val.timetuple() )
# If the graph has more than `hour_switch` minutes, we print
# out hours in the subtitle.
hour_switch = 7
# If the graph has more than `day_switch` hours, we print
# out days in the subtitle.
day_switch = 7
# If the graph has more than `week_switch` days, we print
# out the weeks in the subtitle.
week_switch = 7
def add_time_to_title( begin, end, metadata = {} ):
""" Given a title and two times, adds the time info to the title.
Example results::
"Number of Attempted Transfers
(24 Hours from 4:45 12-14-2006 to 5:56 12-15-2006)"
There are two important pieces to the subtitle we add - the duration
(i.e., '48 Hours') and the time interval (i.e., 11:00 07-02-2007 to
11:00 07-04-2007).
We attempt to make the duration match the size of the span (for a bar
graph, this would be the width of the individual bar) in order for it
to make the most sense. The formatting of the time interval is based
upon how much real time there is from the beginning to the end.
We made the distinction because some would want to show graphs
representing 168 Hours, but needed the format to show the date as
well as the time.
"""
if 'span' in metadata:
interval = metadata['span']
else:
interval = time_interval( begin, end )
formatting_interval = time_interval( begin, end )
if formatting_interval == 600:
format_str = '%H:%M:%S'
elif formatting_interval == 3600:
format_str = '%Y-%m-%d %H:%M'
elif formatting_interval == 86400:
format_str = '%Y-%m-%d'
elif formatting_interval == 86400 * 7:
format_str = 'Week %U of %Y'
if interval < 600:
format_name = 'Seconds'
time_slice = 1
elif interval < 3600 and interval >= 600:
format_name = 'Minutes'
time_slice = 60
elif interval >= 3600 and interval < 86400:
format_name = 'Hours'
time_slice = 3600
elif interval >= 86400 and interval < 86400 * 7:
format_name = 'Days'
time_slice = 86400
elif interval >= 86400 * 7:
format_name = 'Weeks'
time_slice = 86400 * 7
else:
format_str = '%x %X'
format_name = 'Seconds'
time_slice = 1
begin_tuple = time.localtime( begin )
end_tuple = time.localtime( end )
added_title = '%i %s from ' % ( int( ( end - begin ) / time_slice ), format_name )
added_title += time.strftime( '%s to' % format_str, begin_tuple )
if time_slice < 86400:
add_utc = ' UTC'
else:
add_utc = ''
added_title += time.strftime( ' %s%s' % ( format_str, add_utc ), end_tuple )
return added_title
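# Hedged usage sketch (illustrative only): for a 24 hour range the subtitle is
# expressed in hours, e.g. roughly "24 Hours from 2006-12-14 04:45 to 2006-12-15 04:45 UTC".
#
#   begin = to_timestamp( '2006-12-14 04:45:00' )
#   subtitle = add_time_to_title( begin, begin + 86400 )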
def time_interval( begin, end ):
"""
Determine the appropriate time interval based upon the length of
time as indicated by the `starttime` and `endtime` keywords.
"""
if end - begin < 600 * hour_switch:
return 600
if end - begin < 86400 * day_switch:
return 3600
elif end - begin < 86400 * 7 * week_switch:
return 86400
else:
return 86400 * 7
def comma_format( x_orig ):
x = float( x_orig )
if x >= 1000:
after_comma = x % 1000
before_comma = int( x ) / 1000
return '%s,%03g' % ( comma_format( before_comma ), after_comma )
else:
return str( x_orig )
class PrettyScalarFormatter( ScalarFormatter ):
def _set_orderOfMagnitude( self, range ):
# if scientific notation is to be used, find the appropriate exponent
    # if using a numerical offset, find the exponent after applying the offset
    locs = numpy.absolute( self.locs )
    if self.offset:
      oom = math.floor( math.log10( range ) )
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor( math.log10( val ) )
if oom <= -7:
self.orderOfMagnitude = oom
elif oom >= 9:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def pprint_val( self, x ):
pstring = ScalarFormatter.pprint_val( self, x )
return comma_format( pstring )
class PrettyDateFormatter( AutoDateFormatter ):
""" This class provides a formatter which conforms to the
      desired date formats for the Phedex system.
"""
def __init__( self, locator ):
tz = pytz.timezone( 'UTC' )
AutoDateFormatter.__init__( self, locator, tz = tz )
def __call__( self, x, pos = 0 ):
scale = float( self._locator._get_unit() )
if scale == 365.0:
self._formatter = DateFormatter( "%Y", self._tz )
elif scale == 30.0:
self._formatter = DateFormatter( "%b %Y", self._tz )
elif ( scale >= 1.0 ) and ( scale <= 7.0 ):
self._formatter = DateFormatter( "%Y-%m-%d", self._tz )
elif scale == ( 1.0 / 24.0 ):
self._formatter = DateFormatter( "%H:%M", self._tz )
elif scale == ( 1.0 / ( 24 * 60 ) ):
self._formatter = DateFormatter( "%H:%M", self._tz )
elif scale == ( 1.0 / ( 24 * 3600 ) ):
self._formatter = DateFormatter( "%H:%M:%S", self._tz )
else:
self._formatter = DateFormatter( "%b %d %Y %H:%M:%S", self._tz )
return self._formatter( x, pos )
class PrettyDateLocator( AutoDateLocator ):
def get_locator( self, dmin, dmax ):
'pick the best locator based on a distance'
delta = relativedelta( dmax, dmin )
numYears = ( delta.years * 1.0 )
numMonths = ( numYears * 12.0 ) + delta.months
numDays = ( numMonths * 31.0 ) + delta.days
numHours = ( numDays * 24.0 ) + delta.hours
numMinutes = ( numHours * 60.0 ) + delta.minutes
numSeconds = ( numMinutes * 60.0 ) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if numYears >= numticks:
self._freq = YEARLY
elif numMonths >= numticks:
self._freq = MONTHLY
bymonth = range( 1, 13 )
if ( 0 <= numMonths ) and ( numMonths <= 14 ):
interval = 1 # show every month
elif ( 15 <= numMonths ) and ( numMonths <= 29 ):
interval = 3 # show every 3 months
elif ( 30 <= numMonths ) and ( numMonths <= 44 ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif numDays >= numticks:
self._freq = DAILY
bymonth = None
bymonthday = range( 1, 32 )
if ( 0 <= numDays ) and ( numDays <= 9 ):
interval = 1 # show every day
elif ( 10 <= numDays ) and ( numDays <= 19 ):
interval = 2 # show every 2 days
elif ( 20 <= numDays ) and ( numDays <= 35 ):
interval = 3 # show every 3 days
elif ( 36 <= numDays ) and ( numDays <= 80 ):
interval = 7 # show every 1 week
else: # 100 <= numDays <= ~150
interval = 14 # show every 2 weeks
elif numHours >= numticks:
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range( 0, 24 ) # show every hour
if ( 0 <= numHours ) and ( numHours <= 14 ):
interval = 1 # show every hour
elif ( 15 <= numHours ) and ( numHours <= 30 ):
interval = 2 # show every 2 hours
elif ( 30 <= numHours ) and ( numHours <= 45 ):
interval = 3 # show every 3 hours
elif ( 45 <= numHours ) and ( numHours <= 68 ):
interval = 4 # show every 4 hours
elif ( 68 <= numHours ) and ( numHours <= 90 ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif numMinutes >= numticks:
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range( 0, 60 )
if numMinutes > ( 10.0 * numticks ):
interval = 10
# end if
elif numSeconds >= numticks:
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range( 0, 60 )
if numSeconds > ( 10.0 * numticks ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval = interval, \
dtstart = dmin, until = dmax, \
bymonth = bymonth, bymonthday = bymonthday, \
byhour = byhour, byminute = byminute, \
bysecond = bysecond )
locator = RRuleLocator( rrule, self.tz )
locator.set_axis( self.axis )
locator.set_view_interval( *self.axis.get_view_interval() )
locator.set_data_interval( *self.axis.get_data_interval() )
return locator
def pretty_float( num ):
if num > 1000:
return comma_format( int( num ) )
try:
floats = int( max( 2 - max( numpy.floor( numpy.log( abs( num ) + 1e-3 ) / numpy.log( 10. ) ), 0 ), 0 ) )
except:
floats = 2
format = "%." + str( floats ) + "f"
if isinstance(num, tuple):
return format % float( num[0] )
else:
try:
retval = format % float( num )
except:
raise Exception( "Unable to convert %s into a float." % ( str( num ) ) )
return retval
def statistics( results, span = None, is_timestamp = False ):
results = dict( results )
  if span is not None:
parsed_data = {}
min_key = min( results.keys() )
max_key = max( results.keys() )
for i in range( min_key, max_key + span, span ):
if i in results:
parsed_data[i] = results[i]
del results[i]
else:
parsed_data[i] = 0.0
if len( results ) > 0:
raise Exception( "Unable to use all the values for the statistics" )
else:
parsed_data = results
values = parsed_data.values()
data_min = min( values )
data_max = max( values )
data_avg = numpy.average( values )
if is_timestamp:
current_time = max( parsed_data.keys() )
data_current = parsed_data[ current_time ]
return data_min, data_max, data_avg, data_current
else:
return data_min, data_max, data_avg
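# Hedged usage sketch (illustrative only): with span=10 the keys are expected to
# lie on a regular 10-unit grid; grid points missing from the input count as 0.0.
#
#   dmin, dmax, davg = statistics( { 0:1.0, 10:3.0, 30:2.0 }, span = 10 )
#   # -> (0.0, 3.0, 1.5) since the missing key 20 is filled in as 0.0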
def makeDataFromCSV( csv ):
""" Generate plot data dictionary from a csv file or string
"""
if os.path.exists( csv ):
with open( csv, 'r' ) as fdata:
flines = fdata.readlines()
else:
flines = csv.split( '\n' )
graph_data = {}
labels = flines[0].strip().split( ',' )
if len( labels ) == 2:
# simple plot data
for line in flines:
line = line.strip()
      if line and line[0] != '#':
key, value = line.split( ',' )
graph_data[key] = value
elif len( flines ) == 2:
values = flines[1].strip().split( ',' )
for key,value in zip(labels,values):
graph_data[key] = value
elif len( labels ) > 2:
# stacked graph data
del labels[0]
del flines[0]
for label in labels:
plot_data = {}
index = labels.index( label ) + 1
for line in flines:
values = line.strip().split( ',' )
value = values[index].strip()
#if value:
plot_data[values[0]] = values[index]
#else:
#plot_data[values[0]] = '0.'
#pass
graph_data[label] = dict( plot_data )
return graph_data
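# Hedged usage sketch (illustrative only): the argument can be a path to a CSV
# file or the CSV content itself; a two-column input with a commented header
# produces a simple { key: value } plot dictionary.
#
#   data = makeDataFromCSV( "#time,value\n1,10\n2,20" )
#   # -> {'1': '10', '2': '20'}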
def darkenColor( color, factor=2 ):
c1 = int( color[1:3], 16 )
c2 = int( color[3:5], 16 )
c3 = int( color[5:7], 16 )
  c1 //= factor
  c2 //= factor
  c3 //= factor
result = '#' + (str( hex( c1) ).replace( '0x', '' ).zfill( 2 ) +
str( hex( c2) ).replace( '0x', '' ).zfill( 2 ) +
str( hex( c3) ).replace( '0x', '' ).zfill( 2 ) )
return result
|
Andrew-McNab-UK/DIRAC
|
Core/Utilities/Graphs/GraphUtilities.py
|
Python
|
gpl-3.0
| 14,278
|
[
"DIRAC"
] |
d3b66f822a1f8ff5634b5ce7372b26c6a6d681356e77bfce177374c714a44283
|
"""This file contains very superficial tests of the PISM Python
wrappers. The goal is to be able to detect changes in the API
(function signatures, etc), not to test correctness.
Use with nose (https://pypi.python.org/pypi/nose/) and coverage.py
(https://pypi.python.org/pypi/coverage)
Run this to get a coverage report:
nosetests --with-coverage --cover-branches --cover-html --cover-package=PISM test/nosetests.py
"""
import PISM
import sys
import numpy as np
def create_dummy_grid():
"Create a dummy grid"
ctx = PISM.Context()
params = PISM.GridParameters(ctx.config)
params.ownership_ranges_from_options(ctx.size)
return PISM.IceGrid(ctx.ctx, params)
def context_test():
"Test creating a new PISM context"
ctx = PISM.Context()
config = ctx.config
us = ctx.unit_system
EC = ctx.enthalpy_converter
def context_missing_attribute_test():
"Test the handling of missing attributes"
ctx = PISM.Context()
try:
config = ctx.foo # there is no "foo", this should fail
return False
except AttributeError:
return True
def create_grid_test():
"Test the creation of the IceGrid object"
grid1 = create_dummy_grid()
grid2 = PISM.model.initGrid(PISM.Context(), 100e3, 100e3, 4000, 11, 11, 21, PISM.CELL_CORNER)
def algorithm_failure_exception_test():
"Test the AlgorithmFailureException class"
try:
raise PISM.AlgorithmFailureException("no good reason")
return False # should not be reached
except PISM.AlgorithmFailureException as e:
print "calling e.reason(): ", e.reason()
print "{}".format(e)
return True
def printing_test():
"Test verbPrintf"
ctx = PISM.Context()
PISM.verbPrintf(1, ctx.com, "hello %s!\n", "world")
def random_vec_test():
"Test methods creating random fields"
grid = create_dummy_grid()
vec_scalar = PISM.vec.randVectorS(grid, 1.0)
vec_vector = PISM.vec.randVectorV(grid, 2.0)
vec_scalar_ghosted = PISM.vec.randVectorS(grid, 1.0, 2)
vec_vector_ghosted = PISM.vec.randVectorV(grid, 2.0, 2)
def vec_metadata_test():
"Test accessing IceModelVec metadata"
grid = create_dummy_grid()
vec_scalar = PISM.vec.randVectorS(grid, 1.0)
m = vec_scalar.metadata()
m.set_string("units", "kg")
print m.get_string("units")
def vars_ownership_test():
"Test passing IceModelVec ownership from Python to C++ (i.e. PISM)."
grid = create_dummy_grid()
variables = PISM.Vars()
print "Adding 'thk'..."
variables.add(PISM.model.createIceThicknessVec(grid))
print "Returned from add_thk()..."
print "Getting 'thk' from variables..."
thk = variables.get("thk")
print thk
thk.begin_access()
print "thickness at 0,0 is", thk[0, 0]
thk.end_access()
def vec_access_test():
"Test the PISM.vec.Access class and IceGrid::points, points_with_ghosts, coords"
grid = create_dummy_grid()
vec_scalar = PISM.vec.randVectorS(grid, 1.0)
vec_scalar_ghosted = PISM.vec.randVectorS(grid, 1.0, 2)
with PISM.vec.Access(comm=[vec_scalar_ghosted], nocomm=vec_scalar):
for (i, j) in grid.points_with_ghosts():
pass
with PISM.vec.Access(comm=vec_scalar_ghosted, nocomm=[vec_scalar]):
for (i, j) in grid.points():
# do something
pass
for (i, j, x, y) in grid.coords():
# do something with coordinates
pass
# try with nocomm=None
with PISM.vec.Access(comm=vec_scalar_ghosted):
pass
def toproczero_test():
"Test communication to processor 0"
grid = create_dummy_grid()
vec_scalar = PISM.vec.randVectorS(grid, 1.0)
vec_vector = PISM.vec.randVectorV(grid, 2.0)
tz = PISM.vec.ToProcZero(grid)
array_scalar_0 = tz.communicate(vec_scalar)
tz2 = PISM.vec.ToProcZero(grid, dof=2, dim=2)
array_vector_0 = tz2.communicate(vec_vector)
try:
tz3 = PISM.vec.ToProcZero(grid, dof=2, dim=3)
return False
except NotImplementedError:
# 3D fields are not supported (yet)
pass
def create_modeldata_test():
"Test creating the ModelData class"
grid = create_dummy_grid()
md = PISM.model.ModelData(grid)
md2 = PISM.model.ModelData(grid, config=grid.ctx().config())
def grid_from_file_test():
"Intiialize a grid from a file"
grid = create_dummy_grid()
enthalpy = PISM.model.createEnthalpyVec(grid)
enthalpy.set(80e3)
output_file = "test_grid_from_file.nc"
pio = PISM.util.prepare_output(output_file)
enthalpy.write(pio)
pio = PISM.PIO(grid.com, "netcdf3", output_file, PISM.PISM_READONLY)
grid2 = PISM.IceGrid.FromFile(grid.ctx(), pio, "enthalpy", PISM.CELL_CORNER)
def create_special_vecs_test():
"Test helpers used to create standard PISM fields"
grid = create_dummy_grid()
usurf = PISM.model.createIceSurfaceVec(grid)
thk = PISM.model.createIceThicknessVec(grid)
usurfstore = PISM.model.createIceSurfaceStoreVec(grid)
thkstore = PISM.model.createIceThicknessStoreVec(grid)
bed = PISM.model.createBedrockElevationVec(grid)
tauc = PISM.model.createYieldStressVec(grid)
strainheat = PISM.model.createStrainHeatingVec(grid)
u, v, w = PISM.model.create3DVelocityVecs(grid)
hardav = PISM.model.createAveragedHardnessVec(grid)
enthalpy = PISM.model.createEnthalpyVec(grid)
age = PISM.model.createAgeVec(grid)
bmr = PISM.model.createBasalMeltRateVec(grid)
tillphi = PISM.model.createTillPhiVec(grid)
cell_area = PISM.model.createCellAreaVec(grid)
basal_water = PISM.model.createBasalWaterVec(grid)
gl_mask = PISM.model.createGroundingLineMask(grid)
vel = PISM.model.create2dVelocityVec(grid)
taudx = PISM.model.createDrivingStressXVec(grid)
taudy = PISM.model.createDrivingStressYVec(grid)
vel_misfit_weight = PISM.model.createVelocityMisfitWeightVec(grid)
cbar = PISM.model.createCBarVec(grid)
mask = PISM.model.createIceMaskVec(grid)
bcmask = PISM.model.createBCMaskVec(grid)
no_model_mask = PISM.model.createNoModelMaskVec(grid)
zeta_fixed_mask = PISM.model.createZetaFixedMaskVec(grid)
lon = PISM.model.createLongitudeVec(grid)
lat = PISM.model.createLatitudeVec(grid)
# test ModelVecs.add()
modeldata = PISM.model.ModelData(grid)
vecs = modeldata.vecs
vecs.add(mask)
print vecs
# test getattr
vecs.mask
return True
def options_test():
"Test command-line option handling"
ctx = PISM.Context()
o = PISM.PETSc.Options()
M = PISM.optionsInt("-M", "description", default=100)
M = PISM.optionsInt("-M", "description", default=None)
S = PISM.optionsString("-S", "description", default="string")
S = PISM.optionsString("-S", "description", default=None)
R = PISM.optionsReal("-R", "description", default=1.5)
R = PISM.optionsReal("-R", "description", default=None)
o.setValue("-B", "on")
B = PISM.optionsFlag("-B", "description", default=False)
B = PISM.optionsFlag("B", "description", default=False)
B = PISM.optionsFlag("-B", "description", default=None)
o.setValue("-no_C", "on")
C = PISM.optionsFlag("C", "description", default=None)
D = PISM.optionsFlag("D", "description", default=None)
D = PISM.optionsFlag("D", "description", default=True)
o.setValue("-no_D", "on")
o.setValue("-D", "on")
try:
# should throw RuntimeError
D = PISM.optionsFlag("D", "description", default=None)
return False
except RuntimeError:
pass
o.setValue("-IA", "1,2,3")
IA = PISM.optionsIntArray("-IA", "description", default=[1, 2])
IA = PISM.optionsIntArray("-IA", "description", default=None)
IA2 = PISM.optionsIntArray("-IA2", "description", default=None)
IA2 = PISM.optionsIntArray("-IA2", "description", default=[1, 2])
o.setValue("-RA", "1,2,3")
RA = PISM.optionsRealArray("-RA", "description", default=[2, 3])
RA = PISM.optionsRealArray("-RA", "description", default=None)
RA2 = PISM.optionsRealArray("-RA2", "description", default=[2, 3])
RA2 = PISM.optionsRealArray("-RA2", "description", default=None)
o.setValue("-SA", "1,2,3")
SA = PISM.optionsStringArray("-SA", "description", default="one,two")
SA = PISM.optionsStringArray("-SA", "description", default=None)
SA2 = PISM.optionsStringArray("-SA2", "description", default="two,three")
SA2 = PISM.optionsStringArray("-SA2", "description", default=None)
M = PISM.optionsList("-L", "description", choices="one,two", default="one")
M = PISM.optionsList("-L", "description", choices="one,two", default=None)
def pism_vars_test():
"""Test adding fields to and getting them from pism::Vars."""
grid = create_dummy_grid()
v = grid.variables()
v.add(PISM.model.createIceThicknessVec(grid))
# test getting by short name
print v.get("thk").metadata().get_string("units")
# test getting by standard name
print v.get("land_ice_thickness").metadata().get_string("units")
def modelvecs_test():
"Test the ModelVecs class"
grid = create_dummy_grid()
mask = PISM.model.createIceMaskVec(grid)
mask.set(PISM.MASK_GROUNDED)
modeldata = PISM.model.ModelData(grid)
vecs = modeldata.vecs
vecs.add(mask, "ice_mask", writing=True)
# use the default name, no writing
vecs.add(PISM.model.createIceThicknessVec(grid))
try:
vecs.add(mask, "ice_mask")
return False
except RuntimeError:
# should fail: mask was added already
pass
# get a field:
print "get() method: ice mask: ", vecs.get("ice_mask").metadata().get_string("long_name")
print "dot notation: ice mask: ", vecs.ice_mask.metadata().get_string("long_name")
try:
vecs.invalid
return False
except AttributeError:
# should fail
pass
try:
vecs.get("invalid")
return False
except RuntimeError:
# should fail
pass
# test __repr__
print vecs
# test has()
print "Has thickness?", vecs.has("thickness")
# test markForWriting
vecs.markForWriting("ice_mask")
vecs.markForWriting(mask)
vecs.markForWriting("thk")
# test write()
output_file = "test_ModelVecs.nc"
pio = PISM.util.prepare_output(output_file)
pio.close()
vecs.write(output_file)
# test writeall()
vecs.writeall(output_file)
def sia_test():
"Test the PISM.sia module"
ctx = PISM.Context()
params = PISM.GridParameters(ctx.config)
params.Lx = 1e5
params.Ly = 1e5
params.Lz = 1000
params.Mx = 100
params.My = 100
params.Mz = 11
params.registration = PISM.CELL_CORNER
params.periodicity = PISM.NOT_PERIODIC
params.ownership_ranges_from_options(ctx.size)
grid = PISM.IceGrid(ctx.ctx, params)
enthalpyconverter = PISM.EnthalpyConverter(ctx.config)
mask = PISM.model.createIceMaskVec(grid)
mask.set(PISM.MASK_GROUNDED)
thk = PISM.model.createIceThicknessVec(grid)
thk.set(1000.0)
surface = PISM.model.createIceSurfaceVec(grid)
surface.set(1000.0)
bed = PISM.model.createBedrockElevationVec(grid)
bed.set(0.0)
enthalpy = PISM.model.createEnthalpyVec(grid)
enthalpy.set(enthalpyconverter.enthalpy(270.0, 0.0, 0.0))
modeldata = PISM.model.ModelData(grid)
modeldata.setPhysics(enthalpyconverter)
vecs = grid.variables()
fields = [thk, surface, mask, bed, enthalpy]
for field in fields:
vecs.add(field)
vel_sia = PISM.sia.computeSIASurfaceVelocities(modeldata)
def util_test():
"Test the PISM.util module"
grid = create_dummy_grid()
output_file = "test_pism_util.nc"
pio = PISM.PIO(grid.com, "netcdf3", output_file, PISM.PISM_READWRITE_MOVE)
pio.close()
PISM.util.writeProvenance(output_file)
PISM.util.writeProvenance(output_file, message="history string")
PISM.util.fileHasVariable(output_file, "data")
# Test PISM.util.Bunch
b = PISM.util.Bunch(a=1, b="string")
b.update(c=3.0)
print b.a, b["b"], b.has_key("b"), b
def logging_test():
"Test the PISM.logging module"
grid = create_dummy_grid()
import PISM.logging as L
PISM.PIO(grid.com, "netcdf3", "log.nc", PISM.PISM_READWRITE_MOVE)
c = L.CaptureLogger("log.nc")
L.clear_loggers()
L.add_logger(L.print_logger)
L.add_logger(c)
L.log("log message\n", L.kError)
L.logError("error message\n")
L.logWarning("warning message\n")
L.logMessage("log message (again)\n")
L.logDebug("debug message\n")
L.logPrattle("prattle message\n")
c.write() # default arguments
c.readOldLog()
PISM.PIO(grid.com, "netcdf3", "other_log.nc", PISM.PISM_READWRITE_MOVE)
c.write("other_log.nc", "other_log") # non-default arguments
def column_interpolation_test(plot=False):
"""Test ColumnInterpolation by interpolating from the coarse grid to the
fine grid and back."""
import numpy as np
import pylab as plt
Lz = 1000.0
Mz = 41
def z_quadratic(Mz, Lz):
"Compute levels of a quadratic coarse grid."
result = np.zeros(Mz)
z_lambda = 4.0
for k in xrange(Mz - 1):
zeta = float(k) / (Mz - 1)
result[k] = Lz * ((zeta / z_lambda) * (1.0 + (z_lambda - 1.0) * zeta))
result[Mz - 1] = Lz
return result
def fine_grid(z_coarse):
"Compute levels of the fine grid corresponding to a given coarse grid."
Lz = z_coarse[-1]
dz = np.min(np.diff(z_coarse))
Mz = int(np.ceil(Lz / dz) + 1)
dz = Lz / (Mz - 1.0)
result = np.zeros(Mz)
for k in range(1, Mz):
result[k] = z_coarse[0] + k * dz
return result
def test_quadratic_interp():
z_coarse = z_quadratic(Mz, Lz)
f_coarse = (z_coarse / Lz) ** 2
z_fine = fine_grid(z_coarse)
print "Testing quadratic interpolation"
return test_interp(z_coarse, f_coarse, z_fine, "Quadratic interpolation")
def test_linear_interp():
z_coarse = np.linspace(0, Lz, Mz)
f_coarse = (z_coarse / Lz) ** 2
z_fine = fine_grid(z_coarse)
print "Testing linear interpolation"
return test_interp(z_coarse, f_coarse, z_fine, "Linear interpolation")
def test_interp(z, f, z_fine, title):
interp = PISM.ColumnInterpolation(z, z_fine)
f_fine = interp.coarse_to_fine(f, interp.Mz_fine())
f_fine_numpy = np.interp(z_fine, z, f)
f_roundtrip = interp.fine_to_coarse(f_fine)
def plot():
plt.figure()
plt.hold(True)
plt.plot(z, f, 'o-', label="original coarse-grid data")
plt.plot(z_fine, f_fine, 'o-', label="interpolated onto the fine grid")
plt.plot(z, f_roundtrip, 'o-', label="interpolated back onto the coarse grid")
plt.plot(z, f_roundtrip - f, 'o-', label="difference after the roundtrip")
plt.legend(loc="best")
plt.title(title)
plt.grid(True)
if plot:
plot()
delta = np.linalg.norm(f - f_roundtrip, ord=1)
delta_numpy = np.linalg.norm(f_fine - f_fine_numpy, ord=1)
print "norm1(fine_to_coarse(coarse_to_fine(f)) - f) = %f" % delta
print "norm1(PISM - NumPy) = %f" % delta_numpy
return delta, delta_numpy
linear_delta, linear_delta_numpy = test_linear_interp()
quadratic_delta, _ = test_quadratic_interp()
if plot:
plt.show()
if (linear_delta > 1e-12 or
linear_delta_numpy > 1e-12 or
quadratic_delta > 1e-3):
return False
return True
def pism_join_test():
"Test PISM.join()"
assert PISM.join(["one", "two"], ':') == "one:two"
def pism_split_test():
"Test PISM.split()"
assert PISM.split("one,two,three", ',') == ("one", "two", "three")
def pism_ends_with_test():
"Test PISM.ends_with()"
assert PISM.ends_with("foo.nc", ".nc") == True
assert PISM.ends_with("foo.nc and more text", ".nc") == False
assert PISM.ends_with("short_string", "longer_suffix") == False
def linear_interpolation_test(plot=False):
"Test linear interpolation code used to regrid fields"
import numpy as np
M_in = 11
M_out = 101
a = 0.0
b = 10.0
padding = 1.0
x_input = np.linspace(a, b, M_in)
x_output = np.sort(((b + padding) - (a - padding)) * np.random.rand(M_out) + (a - padding))
def F(x):
return x * 2.0 + 5.0
values = F(x_input)
i = PISM.LinearInterpolation(x_input, x_output)
F_interpolated = i.interpolate(values)
F_desired = F(x_output)
F_desired[x_output < a] = F(a)
F_desired[x_output > b] = F(b)
if plot:
import pylab as plt
plt.hold(True)
plt.plot(x_output, F_interpolated, 'o-', color='blue', label="interpolated result")
plt.plot(x_output, F_desired, 'x-', color='green', label="desired result")
plt.plot(x_input, values, 'o-', color='red', label="input")
plt.grid(True)
plt.legend(loc="best")
plt.show()
assert np.max(np.fabs(F_desired - F_interpolated)) < 1e-16
def pism_context_test():
"Test creating and using a C++-level Context"
com = PISM.PETSc.COMM_WORLD
system = PISM.UnitSystem("")
logger = PISM.Logger(com, 2)
config = PISM.DefaultConfig(com, "pism_config", "-config", system)
config.init_with_default(logger)
EC = PISM.EnthalpyConverter(config)
time = PISM.Time(config, "360_day", system)
ctx = PISM.cpp.Context(com, system, config, EC, time, logger, "greenland")
print ctx.com().Get_size()
print ctx.config().get_double("constants.standard_gravity")
print ctx.enthalpy_converter().L(273.15)
print ctx.time().current()
print PISM.convert(ctx.unit_system(), 1, "km", "m")
print ctx.prefix()
def check_flow_law(factory, flow_law_name, EC, stored_data):
factory.set_default(flow_law_name)
law = factory.create()
depth = 2000
gs = 1e-3
sigma = [1e4, 5e4, 1e5, 1.5e5]
T_pa = [-30, -5, 0, 0]
omega = [0.0, 0.0, 0.0, 0.005]
assert len(T_pa) == len(omega)
p = EC.pressure(depth)
Tm = EC.melting_temperature(p)
data = []
print " Flow table for %s" % law.name()
print "| Sigma | Temperature | Omega | Flow factor |"
print "|--------------+--------------+--------------+--------------|"
for S in sigma:
for Tpa, O in zip(T_pa, omega):
T = Tm + Tpa
E = EC.enthalpy(T, O, p)
F = law.flow(S, E, p, gs)
data.append(F)
print "| %e | %e | %e | %e |" % (S, T, O, F)
print "|--------------+--------------+--------------+--------------|"
print ""
data = np.array(data)
assert np.max(np.fabs(data - stored_data)) < 1e-16
def flowlaw_test():
data = {}
data["arr"] = [3.91729503e-18, 6.42803396e-17, 1.05746828e-16, 1.05746828e-16,
9.79323757e-17, 1.60700849e-15, 2.64367070e-15, 2.64367070e-15,
3.91729503e-16, 6.42803396e-15, 1.05746828e-14, 1.05746828e-14,
8.81391381e-16, 1.44630764e-14, 2.37930363e-14, 2.37930363e-14]
data["arrwarm"] = [1.59798478e-19, 1.04360343e-16, 3.30653997e-16, 3.30653997e-16,
3.99496194e-18, 2.60900856e-15, 8.26634991e-15, 8.26634991e-15,
1.59798478e-17, 1.04360343e-14, 3.30653997e-14, 3.30653997e-14,
3.59546574e-17, 2.34810771e-14, 7.43971492e-14, 7.43971492e-14]
data["gk"] = [7.32439717e-17, 5.49629815e-15, 2.41713799e-14, 2.41713799e-14,
2.16360102e-16, 1.93446849e-14, 9.04428380e-14, 9.04428380e-14,
4.06191746e-16, 3.39770143e-14, 1.60574708e-13, 1.60574708e-13,
6.68976826e-16, 4.80704753e-14, 2.27816175e-13, 2.27816175e-13]
data["gpbld"] = [4.65791754e-18, 1.45114704e-16, 4.54299921e-16, 8.66009225e-16,
1.16447938e-16, 3.62786761e-15, 1.13574980e-14, 2.16502306e-14,
4.65791754e-16, 1.45114704e-14, 4.54299921e-14, 8.66009225e-14,
1.04803145e-15, 3.26508084e-14, 1.02217482e-13, 1.94852076e-13]
data["hooke"] = [5.26775897e-18, 2.12325906e-16, 5.32397091e-15, 5.32397091e-15,
1.31693974e-16, 5.30814764e-15, 1.33099273e-13, 1.33099273e-13,
5.26775897e-16, 2.12325906e-14, 5.32397091e-13, 5.32397091e-13,
1.18524577e-15, 4.77733287e-14, 1.19789346e-12, 1.19789346e-12]
data["isothermal_glen"] = [3.16890000e-16, 3.16890000e-16, 3.16890000e-16, 3.16890000e-16,
7.92225000e-15, 7.92225000e-15, 7.92225000e-15, 7.92225000e-15,
3.16890000e-14, 3.16890000e-14, 3.16890000e-14, 3.16890000e-14,
7.13002500e-14, 7.13002500e-14, 7.13002500e-14, 7.13002500e-14]
data["pb"] = [4.65791754e-18, 1.45114704e-16, 4.54299921e-16, 4.54299921e-16,
1.16447938e-16, 3.62786761e-15, 1.13574980e-14, 1.13574980e-14,
4.65791754e-16, 1.45114704e-14, 4.54299921e-14, 4.54299921e-14,
1.04803145e-15, 3.26508084e-14, 1.02217482e-13, 1.02217482e-13]
data["gpbld3"] = [4.65791754e-18, 1.45114704e-16, 4.54299921e-16, 8.66009225e-16,
1.16447938e-16, 3.62786761e-15, 1.13574980e-14, 2.16502306e-14,
4.65791754e-16, 1.45114704e-14, 4.54299921e-14, 8.66009225e-14,
1.04803145e-15, 3.26508084e-14, 1.02217482e-13, 1.94852076e-13]
ctx = PISM.context_from_options(PISM.PETSc.COMM_WORLD, "flowlaw_test")
EC = ctx.enthalpy_converter()
factory = PISM.FlowLawFactory("stress_balance.sia.", ctx.config(), EC)
for flow_law_name, data in data.iteritems():
check_flow_law(factory, flow_law_name, EC, np.array(data))
def gpbld3_vs_gpbld_test():
"Test the optimized version of GPBLD by comparing it to the one that uses libm."
ctx = PISM.context_from_options(PISM.PETSc.COMM_WORLD, "GPBLD3_test")
EC = ctx.enthalpy_converter()
gpbld = PISM.GPBLD("stress_balance.sia.", ctx.config(), EC)
gpbld3 = PISM.GPBLD3("stress_balance.sia.", ctx.config(), EC)
import numpy as np
N = 11
T_pa = np.linspace(-30, 0, N)
depth = np.linspace(0, 4000, N)
omega = np.linspace(0, 0.02, N)
sigma = [1e4, 5e4, 1e5, 1.5e5]
gs = 1e-3
for d in depth:
p = EC.pressure(d)
Tm = EC.melting_temperature(p)
for Tpa in T_pa:
T = Tm + Tpa
for o in omega:
if T >= Tm:
E = EC.enthalpy(T, o, p)
else:
E = EC.enthalpy(T, 0.0, p)
for s in sigma:
regular = gpbld.flow(s, E, p, gs)
optimized = gpbld3.flow(s, E, p, gs)
assert np.fabs(regular - optimized) / regular < 2e-14
def gpbld3_hardness_test():
"Test the hardness implementation in the optimized version of GPBLD."
ctx = PISM.context_from_options(PISM.PETSc.COMM_WORLD, "GPBLD3_test")
EC = ctx.enthalpy_converter()
gpbld = PISM.GPBLD("stress_balance.sia.", ctx.config(), EC)
gpbld3 = PISM.GPBLD3("stress_balance.sia.", ctx.config(), EC)
import numpy as np
N = 11
T_pa = np.linspace(-30, 0, N)
depth = np.linspace(0, 4000, N)
omega = np.linspace(0, 0.02, N)
for d in depth:
p = EC.pressure(d)
Tm = EC.melting_temperature(p)
for Tpa in T_pa:
T = Tm + Tpa
for o in omega:
if T >= Tm:
E = EC.enthalpy(T, o, p)
else:
E = EC.enthalpy(T, 0.0, p)
regular = gpbld.hardness(E, p)
optimized = gpbld3.hardness(E, p)
assert np.fabs(regular - optimized) / regular < 4e-15
def gpbld3_error_report():
"""Print max. absolute and relative difference between GPBLD and
GPBLD3. Uses 101*101*101*101 samples in a "reasonable" range of
pressure-adjusted temperatures, depth, water fraction, and
effective stress. This takes about 15 minutes to complete.
"""
ctx = PISM.context_from_options(PISM.PETSc.COMM_WORLD, "GPBLD3_test")
EC = ctx.enthalpy_converter()
gpbld = PISM.GPBLD("stress_balance.sia.", ctx.config(), EC)
gpbld3 = PISM.GPBLD3("stress_balance.sia.", ctx.config(), EC)
import numpy as np
N = 31
T_pa = np.linspace(-30, 0, N)
depth = np.linspace(0, 5000, N)
omega = np.linspace(0, 0.02, N)
sigma = np.linspace(0, 5e5, N)
gs = 1e-3
max_difference = 0.0
max_rel_difference = 0.0
for d in depth:
p = EC.pressure(d)
Tm = EC.melting_temperature(p)
for Tpa in T_pa:
T = Tm + Tpa
for o in omega:
if T >= Tm:
E = EC.enthalpy(T, o, p)
else:
E = EC.enthalpy(T, 0.0, p)
for s in sigma:
regular = gpbld.flow(s, E, p, gs)
optimized = gpbld3.flow(s, E, p, gs)
max_difference = max(np.fabs(regular - optimized), max_difference)
if regular > 0.0:
max_rel_difference = max(np.fabs(regular - optimized) / regular,
max_rel_difference)
print "%d (%e) samples" % (N**4, N**4)
print "max difference", max_difference
print "max relative difference", max_rel_difference
def ssa_trivial_test():
"Test the SSA solver using a trivial setup."
context = PISM.Context()
unit_system = context.unit_system
L = 50.e3 # // 50km half-width
H0 = 500 # // m
dhdx = 0.005 # // pure number, slope of surface & bed
nu0 = PISM.convert(unit_system, 30.0, "MPa year", "Pa s")
tauc0 = 1.e4 # // 1kPa
class TrivialSSARun(PISM.ssa.SSAExactTestCase):
def _initGrid(self):
self.grid = PISM.IceGrid.Shallow(PISM.Context().ctx, L, L, 0, 0,
self.Mx, self.My, PISM.CELL_CORNER, PISM.NOT_PERIODIC)
def _initPhysics(self):
self.modeldata.setPhysics(context.enthalpy_converter)
def _initSSACoefficients(self):
self._allocStdSSACoefficients()
self._allocateBCs()
vecs = self.modeldata.vecs
vecs.land_ice_thickness.set(H0)
vecs.surface_altitude.set(H0)
vecs.bedrock_altitude.set(0.0)
vecs.tauc.set(tauc0)
            # zero Dirichlet B.C. everywhere
vecs.vel_bc.set(0.0)
vecs.bc_mask.set(1.0)
def _initSSA(self):
# The following ensure that the strength extension is used everywhere
se = self.ssa.strength_extension
se.set_notional_strength(nu0 * H0)
se.set_min_thickness(4000 * 10)
# For the benefit of SSAFD on a non-periodic grid
self.config.set_boolean("ssa.compute_surface_gradient_inward", True)
def exactSolution(self, i, j, x, y):
return [0, 0]
Mx = 11
My = 11
test_case = TrivialSSARun(Mx, My)
test_case.run("ssa_trivial.nc")
def epsg_test():
"Test EPSG to CF conversion."
l = PISM.StringLogger(PISM.PETSc.COMM_WORLD, 2)
system = PISM.Context().unit_system
# test supported EPSG codes
for code in [3413, 3031]:
print "Trying code {}".format(code)
l.reset()
# +init at the beginning
v = PISM.epsg_to_cf(system, "+init=epsg:%d" % code)
# +init not at the beginning of the string
v = PISM.epsg_to_cf(system, "+units=m +init=epsg:%d" % code)
# +init followed by more options
v = PISM.epsg_to_cf(system, "+init=epsg:%d +units=m" % code)
v.report_to_stdout(l, 2)
print l.get(),
print "done."
# test that unsupported codes trigger an exception
try:
v = PISM.epsg_to_cf(system, "+init=epsg:3032")
raise AssertionError("should fail with 3032: only 3413 and 3031 are supported")
except RuntimeError as e:
print "unsupported codes trigger exceptions: {}".format(e)
# test that an invalid PROJ.4 string (e.g. an EPSG code is not a
# number) triggers an exception
try:
v = PISM.epsg_to_cf(system, "+init=epsg:not-a-number +units=m")
# raise AssertionError("an invalid PROJ.4 string failed to trigger an exception")
except RuntimeError as e:
print "invalid codes trigger exceptions: {}".format(e)
def regridding_test():
"Test 2D regridding: same input and target grids."
import numpy as np
ctx = PISM.Context()
params = PISM.GridParameters(ctx.config)
params.Mx = 3
params.My = 3
params.ownership_ranges_from_options(1)
grid = PISM.IceGrid(ctx.ctx, params)
thk1 = PISM.model.createIceThicknessVec(grid)
thk2 = PISM.model.createIceThicknessVec(grid)
x = grid.x()
x_min = np.min(x)
x_max = np.max(x)
y = grid.y()
y_min = np.min(y)
y_max = np.max(y)
with PISM.vec.Access(nocomm=[thk1]):
for (i, j) in grid.points():
F_x = (x[i] - x_min) / (x_max - x_min)
F_y = (y[j] - y_min) / (y_max - y_min)
thk1[i, j] = (F_x + F_y) / 2.0
thk1.dump("thk1.nc")
thk2.regrid("thk1.nc", critical=True)
with PISM.vec.Access(nocomm=[thk1, thk2]):
for (i, j) in grid.points():
v1 = thk1[i,j]
v2 = thk2[i,j]
if np.abs(v1 - v2) > 1e-12:
raise AssertionError("mismatch at {},{}: {} != {}".format(i, j, v1, v2))
import os
os.remove("thk1.nc")
def po_constant_test():
"""Test that the basal melt rate computed by ocean::Constant is the
same regardless of whether it is set using
ocean.sub_shelf_heat_flux_into_ice or the command-line option."""
grid = create_dummy_grid()
config = grid.ctx().config()
L = config.get_double("constants.fresh_water.latent_heat_of_fusion")
rho = config.get_double("constants.ice.density")
# prescribe a heat flux that corresponds to a mass flux which is
# an integer multiple of m / year so that we can easily specify it
# using a command-line option
M = PISM.convert(grid.ctx().unit_system(), 1, "m / year", "m / second")
Q_default = config.get_double("ocean.sub_shelf_heat_flux_into_ice")
Q = M * L * rho
config.set_double("ocean.sub_shelf_heat_flux_into_ice", Q)
# without the command-line option
ocean_constant = PISM.OceanConstant(grid)
ocean_constant.init()
mass_flux_1 = PISM.model.createIceThicknessVec(grid)
ocean_constant.shelf_base_mass_flux(mass_flux_1)
# reset Q
config.set_double("ocean.sub_shelf_heat_flux_into_ice", Q_default)
# with the command-line option
o = PISM.PETSc.Options()
o.setValue("-shelf_base_melt_rate", 1.0)
ocean_constant = PISM.OceanConstant(grid)
ocean_constant.init()
mass_flux_2 = PISM.model.createIceThicknessVec(grid)
ocean_constant.shelf_base_mass_flux(mass_flux_2)
import numpy as np
with PISM.vec.Access(nocomm=[mass_flux_1, mass_flux_2]):
assert np.fabs(mass_flux_1[0, 0] - M * rho) < 1e-16
assert np.fabs(mass_flux_2[0, 0] - M * rho) < 1e-16
def netcdf_string_attribute_test():
"Test reading a NetCDF-4 string attribute."
import os
basename = "string_attribute_test"
attribute = "string attribute"
def setup():
cdl = """
netcdf string_attribute_test {
string :string_attribute = "%s" ;
:text_attribute = "%s" ;
}
""" % (attribute, attribute)
with open(basename + ".cdl", "w") as f:
f.write(cdl)
os.system("ncgen -4 %s.cdl" % basename)
def teardown():
# remove the temporary file
os.remove(basename + ".nc")
os.remove(basename + ".cdl")
def compare(backend):
try:
pio = PISM.PIO(PISM.PETSc.COMM_WORLD, backend, basename + ".nc", PISM.PISM_READONLY)
except:
# Don't fail if backend creation failed: PISM may not have
# been compiled with parallel I/O enabled.
return
read_string = pio.get_att_text("PISM_GLOBAL", "string_attribute")
read_text = pio.get_att_text("PISM_GLOBAL", "text_attribute")
# check that written and read strings are the same
print "written string: '%s'" % attribute
print "read string: '%s'" % read_string
print "read text: '%s'" % read_text
assert read_string == attribute
assert read_text == attribute
setup()
compare("netcdf3")
compare("netcdf4_parallel")
teardown()
def interpolation_weights_test():
"Test 2D interpolation weights."
def interp2d(grid, F, x, y):
i_left, i_right, j_bottom, j_top = grid.compute_point_neighbors(x, y)
        w = grid.compute_interp_weights(x, y)
i = [i_left, i_right, i_right, i_left]
j = [j_bottom, j_bottom, j_top, j_top]
result = 0.0
for k in range(4):
result += w[k] * F[j[k], i[k]]
        return result
Mx = 100
My = 200
Lx = 20
Ly = 10
grid = PISM.IceGrid_Shallow(PISM.Context().ctx,
Lx, Ly, 0, 0, Mx, My,
PISM.CELL_CORNER,
PISM.NOT_PERIODIC)
x = grid.x()
y = grid.y()
X,Y = np.meshgrid(x,y)
Z = 2 * X + 3 * Y
N = 1000
np.random.seed(1)
x_pts = np.random.rand(N) * (2 * Lx) - Lx
y_pts = np.random.rand(N) * (2 * Ly) - Ly
# a linear function should be recovered perfectly
exact = 2 * x_pts + 3 * y_pts
result = np.array([interp2d(grid, Z, x_pts[k], y_pts[k]) for k in range(N)])
np.testing.assert_almost_equal(result, exact)
def vertical_extrapolation_during_regridding_test():
"Test extrapolation in the vertical direction"
# create a grid with 11 levels, 1000m thick
ctx = PISM.Context()
params = PISM.GridParameters(ctx.config)
params.Lx = 1e5
params.Ly = 1e5
params.Mx = 3
params.My = 3
params.Mz = 11
params.Lz = 1000
params.registration = PISM.CELL_CORNER
params.periodicity = PISM.NOT_PERIODIC
params.ownership_ranges_from_options(ctx.size)
z = np.linspace(0, params.Lz, params.Mz)
params.z[:] = z
grid = PISM.IceGrid(ctx.ctx, params)
# create an IceModelVec that uses this grid
v = PISM.IceModelVec3()
v.create(grid, "test", PISM.WITHOUT_GHOSTS)
v.set(0.0)
# set a column
with PISM.vec.Access(nocomm=[v]):
v.set_column(1, 1, z)
# save to a file
v.dump("test.nc")
# create a taller grid (to 2000m):
params.Lz = 2000
params.Mz = 41
z_tall = np.linspace(0, params.Lz, params.Mz)
params.z[:] = z_tall
tall_grid = PISM.IceGrid(ctx.ctx, params)
# create an IceModelVec that uses this grid
v_tall = PISM.IceModelVec3()
v_tall.create(tall_grid, "test", PISM.WITHOUT_GHOSTS)
# Try regridding without extrapolation. This should fail.
try:
ctx.ctx.log().disable()
v_tall.regrid("test.nc", PISM.CRITICAL)
ctx.ctx.log().enable()
raise AssertionError("Should not be able to regrid without extrapolation")
except RuntimeError as e:
pass
# allow extrapolation during regridding
ctx.config.set_boolean("grid.allow_extrapolation", True)
# regrid from test.nc
ctx.ctx.log().disable()
v_tall.regrid("test.nc", PISM.CRITICAL)
ctx.ctx.log().enable()
# get a column
with PISM.vec.Access(nocomm=[v_tall]):
column = np.array(v_tall.get_column_vector(1, 1))
# compute the desired result
desired = np.r_[np.linspace(0, 1000, 21), np.zeros(20) + 1000]
# compare
np.testing.assert_almost_equal(column, desired)
# clean up
import os
os.remove("test.nc")
|
talbrecht/pism_pik
|
test/nosetests.py
|
Python
|
gpl-3.0
| 36,151
|
[
"NetCDF"
] |
1cb88782144e1a2e2a2c54e19082ff7324b15a4a62274a0e2aea2d6368fad457
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
dimensions= 2
size1d = 5
halfSize1 = [size1d,size1d,0.1]
halfSize2 = halfSize1
GRIDSIZE = [16,16]
# wavepacket parameters
k0_x = 0
k0_y = 0
gaussWidth_x = 1.5
gaussWidth_y = 1.5
potentialCoefficient1= [-2.0,0,0]
potentialCoefficient2= [ 2.0,0,0]
O.engines=[
StateDispatcher([
St1_QMPacketGaussianWave(),
]),
SpatialQuickSortCollider([
Bo1_Box_Aabb(),
]),
InteractionLoop(
[Ig2_2xQMGeometry_QMIGeom()],
[Ip2_QMParticleCoulomb_QMParticleCoulomb_QMIPhysCoulombParticles()],
[Law2_QMIGeom_QMIPhysCoulombParticles()]
),
SchrodingerKosloffPropagator(threadNum=8),
]
displayOptions1 = { 'partsScale':70,'partsSquared':0
,'partAbsolute':['default surface', 'hidden', 'nodes', 'points', 'wire', 'surface']
,'partImaginary':['default hidden', 'hidden', 'nodes', 'points', 'wire', 'surface']
,'partReal':['default hidden', 'hidden', 'nodes', 'points', 'wire', 'surface']
,'renderMaxTime':0.5}
displayOptions2 = { 'partsScale':70,'partsSquared':0
,'partAbsolute':['default wire', 'hidden', 'nodes', 'points', 'wire', 'surface']
,'partImaginary':['default hidden', 'hidden', 'nodes', 'points', 'wire', 'surface']
,'partReal':['default hidden', 'hidden', 'nodes', 'points', 'wire', 'surface']
,'renderMaxTime':0.5}
body0 = QMBody()
body0.shape = QMGeometry(extents=halfSize1,color=[1,1,1],displayOptions=[QMDisplayOptions(**displayOptions1)])
body0.material = QMParticleCoulomb(dim=dimensions,hbar=1,m=1,coefficient=potentialCoefficient1)
# FFTW is best at handling sizes of the form 2ᵃ 3ᵇ 5ᶜ 7ᵈ 11ᵉ 13ᶠ , where e+f is either 0 or 1 ## http://www.nanophys.kth.se/nanophys/fftw-info/fftw_3.html
body0.state = QMPacketGaussianWave(x0=[-1,0,0],t0=0,k0=[k0_x,k0_y,0],a0=[gaussWidth_x,gaussWidth_y,0],gridSize=GRIDSIZE) #,se3=[[0.5,0.5,0.5],Quaternion((1,0,0),0)])
nid=O.bodies.append(body0)
O.bodies[nid].state.setNumeric()
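# Hedged sketch (illustrative only; helper name invented here): verify that the grid
# sizes above have the FFTW-friendly form 2^a 3^b 5^c 7^d 11^e 13^f with e+f <= 1,
# as noted in the comment above body0.state.
def _is_fftw_friendly(n):
    for small in (2, 3, 5, 7):
        while n % small == 0:
            n //= small
    return n in (1, 11, 13)
assert all(_is_fftw_friendly(n) for n in GRIDSIZE)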
body1 = QMBody()
body1.shape = QMGeometry(extents=halfSize2,color=[0.6,0.6,0.0],displayOptions=[QMDisplayOptions(**displayOptions2)])
body1.material = QMParticleCoulomb(dim=dimensions,hbar=1,m=1,coefficient=potentialCoefficient2)
body1.state = QMPacketGaussianWave(x0=[1,0,0],t0=0,k0=[-k0_x,-k0_y,0],a0=[gaussWidth_x,gaussWidth_y,0],gridSize=GRIDSIZE) #,se3=[[0.5,0.5,0.5],Quaternion((1,0,0),0)])
nid=O.bodies.append(body1)
O.bodies[nid].state.setNumeric()
O.dt=.1
O.save('/tmp/a.xml.bz2');
#o.run(100000); o.wait(); print o.iter/o.realtime,'iterations/sec'
try:
from yade import qt
qt.Controller()
qt.controller.setViewAxes(dir=(0,1,0),up=(0,0,1))
qt.controller.setWindowTitle("Two Gaussian packets connected via Coulomb potential in 2D")
qt.Renderer().blinkHighlight=False
qt.View()
qt.views()[0].center(False,5) # median=False, suggestedRadius = 5
except ImportError:
pass
#O.run(20000)
|
cosurgi/trunk
|
examples/qm/2d-coulomb-interaction.py
|
Python
|
gpl-2.0
| 3,067
|
[
"Gaussian"
] |
2bfcc50d29cc728b1c645019513e25333d305fa81dd021adf8bcebfe7da0ea25
|
import json
import os
from shutil import copytree
from subprocess import CalledProcessError, check_call, check_output
import pytest
import pkgpanda.build
import pkgpanda.build.cli
from pkgpanda.util import expect_fs
def get_tar_contents(filename):
return set(check_output(["tar", "-tf", filename]).decode().splitlines())
def package(resource_dir, name, tmpdir):
# Build once using command line interface
pkg_dir = tmpdir.join(name)
copytree(resource_dir, str(pkg_dir))
with pkg_dir.as_cwd():
check_call(["mkpanda"])
# Build once using programmatic interface
pkg_dir_2 = str(tmpdir.join("api-build/" + name))
copytree(resource_dir, pkg_dir_2)
package_store = pkgpanda.build.PackageStore(str(tmpdir.join("api-build")), None)
pkgpanda.build.build_package_variants(package_store, name, True)
def test_build(tmpdir):
package("resources/base", "base", tmpdir)
# TODO(cmaloney): Check the package exists with the right contents.
def test_build_bad_sha1(tmpdir):
package("resources/base", "base", tmpdir)
def test_url_extract_tar(tmpdir):
package("resources/url_extract-tar", "url_extract-tar", tmpdir)
def test_url_extract_zip(tmpdir):
package("resources/url_extract-zip", "url_extract-zip", tmpdir)
def test_single_source_with_extra(tmpdir):
package("resources/single_source_extra", "single_source_extra", tmpdir)
# remove the built package tarball because that has a variable filename
cache_dir = tmpdir.join("cache/packages/single_source_extra/")
packages = [str(x) for x in cache_dir.visit(fil="single_source_extra*.tar.xz")]
assert len(packages) == 1, "should have built exactly one package: {}".format(packages)
os.remove(packages[0])
expect_fs(str(cache_dir), {
"latest": None,
"single_source_extra": ["foo"]})
def test_bad_buildinfo(tmpdir):
def tmp_pkg(name, buildinfo):
pkg_dir = tmpdir.join(name)
pkg_dir.ensure(dir=True)
pkg_dir.join('buildinfo.json').write(json.dumps(buildinfo).encode())
pkg_dir.join('build').ensure()
with pytest.raises(pkgpanda.build.BuildError):
package_store = pkgpanda.build.PackageStore(str(tmpdir), None)
pkgpanda.build.build_package_variants(package_store, name, True)
package(str(pkg_dir), name, tmpdir.join('build'))
tmp_pkg('unknown_field', {'user': 'dcos_user', 'docker': 'ubuntu:14.04.4'})
tmp_pkg('disallowed_field', {'name': 'disallowed_field', 'docker': 'ubuntu:14.04.4'})
# TODO(cmaloney): Re-enable once we build a dcos-builder docker as part of this test. Currently the
# default docker is dcos-builder, and that isn't built here so these tests fail.
# def test_no_buildinfo(tmpdir):
# package("resources/no_buildinfo", "no_buildinfo", tmpdir)
def test_restricted_services(tmpdir):
with pytest.raises(CalledProcessError):
package("resources-nonbootstrapable/restricted_services", "restricted_services", tmpdir)
def test_single_source_corrupt(tmpdir):
with pytest.raises(CalledProcessError):
package("resources-nonbootstrapable/single_source_corrupt", "single_source", tmpdir)
# Check the corrupt file got moved to the right place
expect_fs(str(tmpdir.join("cache/packages/single_source/single_source")), ["foo.corrupt"])
def test_bootstrap(tmpdir):
pkg_dir = tmpdir.join("bootstrap_test")
copytree("resources/", str(pkg_dir))
with pkg_dir.as_cwd():
treeinfo = {
'variants': {
'variant': 'downstream',
'non_bootstrap_variant': 'downstream',
},
# All packages in resources/ except non_bootstrap*
'bootstrap_package_list': [
'base',
'single_source',
'single_source_extra',
'url_extract-tar',
'url_extract-zip',
'variant',
]
}
pkg_dir.join("treeinfo.json").write(json.dumps(treeinfo), ensure=True)
check_call(["mkpanda", "tree", "--mkbootstrap"])
cache_dir = str(pkg_dir.join("cache/bootstrap")) + "/"
bootstrap_id = open(cache_dir + "bootstrap.latest", 'r').read().strip()
bootstrap_files = get_tar_contents(cache_dir + bootstrap_id + ".bootstrap.tar.xz")
    # Separate files that come from individual packages from those in the root directory
package_files = dict()
merged_files = set()
for path in bootstrap_files:
if not path.startswith("./packages/"):
merged_files.add(path)
continue
# Skip the packages folder itself
if path == './packages/':
continue
# Figure out the package name, file inside the package
path_parts = path.split('/')
package_name = path_parts[2].split('--')[0]
file_path = '/'.join(path_parts[3:])
file_set = package_files.get(package_name, set())
# don't add the package directory / empty path.
if len(file_path) == 0:
continue
file_set.add(file_path)
package_files[package_name] = file_set
# Check that the root has exactly the right set of files.
assert merged_files == {
'./',
'./active.buildinfo.full.json',
'./bootstrap',
'./environment',
'./environment.export',
'./active/',
'./active/base',
'./active/url_extract-tar',
'./active/url_extract-zip',
'./active/variant',
'./active/single_source',
'./active/single_source_extra',
'./bin/',
'./bin/mesos-master',
'./etc/',
'./etc/dcos-service-configuration.json',
'./lib/',
        './lib/',
'./include/'}
assert package_files == {
'url_extract-zip': {'pkginfo.json', 'buildinfo.full.json'},
'url_extract-tar': {'pkginfo.json', 'buildinfo.full.json'},
'single_source': {'pkginfo.json', 'buildinfo.full.json'},
'single_source_extra': {'pkginfo.json', 'buildinfo.full.json'},
'variant': {'pkginfo.json', 'buildinfo.full.json'},
'base': {
'base',
'bin/',
'dcos.target.wants/',
'dcos.target.wants/dcos-foo.service',
'version',
'buildinfo.full.json',
'bin/mesos-master',
'pkginfo.json',
'lib/',
'lib/libmesos.so'}}
|
asridharan/dcos
|
pkgpanda/build/tests/build_integration_test.py
|
Python
|
apache-2.0
| 6,700
|
[
"VisIt"
] |
bb2ee658971d9d9f0a76a8e4ad11940f3941305dd86856cd2b0c97db519f79e6
|
#!/usr/bin/env python
"""
remove FileCatalog directories
"""
import os
import DIRAC
from COMDIRAC.Interfaces import critical
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import createCatalog
from COMDIRAC.Interfaces import pathFromArguments
if __name__ == "__main__":
import sys
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [Path]...' % Script.scriptName,
'Arguments:',
' Path: directory path',
'', 'Examples:',
' $ drmdir ./some_lfn_directory',
] )
)
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
session = DSession()
if len( args ) < 1:
print "Error: No argument provided\n%s:" % Script.scriptName
Script.showHelp()
DIRAC.exit( -1 )
Script.enableCS()
catalog = createCatalog()
result = catalog.removeDirectory( pathFromArguments( session, args ) )
if result["OK"]:
if result["Value"]["Failed"]:
for p in result["Value"]["Failed"]:
print "ERROR - \"%s\": %s" % ( p, result["Value"]["Failed"][p] )
else:
print "ERROR: %s" % result["Message"]
|
pigay/COMDIRAC
|
Interfaces/scripts/drmdir.py
|
Python
|
gpl-3.0
| 1,444
|
[
"DIRAC"
] |
e8b53465fb3587c4390f6af8a4a8f0216222716a0352ea8548e0e0cfa1aed076
|
import mxnet as mx
import logging
import os
import time
def _get_lr_scheduler(args, kv, MF=True):
if 'lr_factor' not in args or args.lr_factor >= 1:
return (args.lr, None)
epoch_size = args.num_examples / args.batch_size
if 'dist' in args.kv_store:
epoch_size /= kv.num_workers
begin_epoch = args.load_epoch if args.load_epoch else 0
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
lr = args.lr
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d' %(lr, begin_epoch))
steps = [epoch_size * (x-begin_epoch) for x in step_epochs if x-begin_epoch > 0]
if MF:
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
else:
return (lr, mx.lr_scheduler.FactorScheduler(int(0.75*epoch_size),args.lr_factor))
def _load_model(args, rank=0):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
model_prefix += "-%d" % (rank)
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
return (sym, arg_params, aux_params)
def _save_model(args, rank=0):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
return mx.callback.do_checkpoint(args.model_prefix if rank == 0 else "%s-%d" % (
args.model_prefix, rank))
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=5,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
train.add_argument('--dtype', type=str, default='float32',
help='precision: float32 or float16')
return train
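# Hedged usage sketch (illustrative only): build a parser, attach the training
# arguments defined above, then parse a command line.
#
#   import argparse
#   parser = argparse.ArgumentParser(description="train a network")
#   add_fit_args(parser)
#   args = parser.parse_args(['--network', 'resnet', '--num-layers', '50', '--gpus', '0'])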
def fit(args, network, data_loader, **kwargs):
"""
train a model
args : argparse returns
    network : the symbol definition of the neural network
data_loader : function that returns the train and val data iterators
"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
(train, val) = data_loader(args, kv)
if args.test_io:
tic = time.time()
for i, batch in enumerate(train):
for j in batch.data:
j.wait_to_read()
if (i+1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec' % (
i, args.disp_batches*args.batch_size/(time.time()-tic)))
tic = time.time()
return
# load model
if 'arg_params' in kwargs and 'aux_params' in kwargs:
arg_params = kwargs['arg_params']
aux_params = kwargs['aux_params']
else:
sym, arg_params, aux_params = _load_model(args, kv.rank)
if sym is not None:
assert sym.tojson() == network.tojson()
# save model
checkpoint = _save_model(args, kv.rank)
# devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv, MF=(args.MF is None or args.MF != 0))
# create model
model = mx.mod.Module(
context = devs,
symbol = network
)
optimizer_params = {
'learning_rate': lr,
'momentum' : args.mom,
'wd' : args.wd,
'lr_scheduler': lr_scheduler,
#'multi_precision': True
}
if args.optimizer == 'rmsprop':
optimizer_params = {
'learning_rate': lr,
#'momentum' : args.mom,
#'wd' : args.wd,
'lr_scheduler': lr_scheduler,
#'multi_precision': True
}
monitor = mx.mon.Monitor(args.monitor, pattern=".*") if args.monitor > 0 else None
if args.network == 'alexnet':
# AlexNet will not converge using Xavier
initializer = mx.init.Normal()
else:
#initializer = mx.init.Xavier(
# rnd_type='gaussian', factor_type="in", magnitude=2)
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
# evaluation metrics
eval_metrics = ['accuracy']
if args.top_k > 0:
eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=args.top_k))
# callbacks that run after each batch
batch_end_callbacks = [mx.callback.Speedometer(args.batch_size, args.disp_batches)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
# run
model.fit(train,
begin_epoch = args.load_epoch if args.load_epoch else 0,
num_epoch = args.num_epochs,
eval_data = val,
eval_metric = eval_metrics,
kvstore = kv,
optimizer = args.optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
arg_params = arg_params,
aux_params = aux_params,
batch_end_callback = batch_end_callbacks,
epoch_end_callback = checkpoint,
allow_missing = True,
monitor = monitor)
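# --- Hedged usage sketch (not part of the original file) ---
# In the image-classification examples this module is driven by a small train
# script; the helper names below (get_symbol, data.get_rec_iter) are
# illustrative assumptions, not definitions from this file:
#
#   import argparse
#   from common import data, fit
#   parser = argparse.ArgumentParser()
#   fit.add_fit_args(parser)
#   args = parser.parse_args()
#   sym = get_symbol(num_layers=args.num_layers)
#   fit.fit(args, sym, data.get_rec_iter)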
|
deepinsight/Deformable-ConvNets
|
deeplab/common/fit.py
|
Python
|
apache-2.0
| 8,087
|
[
"Gaussian"
] |
3afc10cdbd7827f83ff010a75f6279ae9f60be96b6da4465e4cfac3d562b6977
|
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygameui as ui
from osci.StarMapWidget import StarMapWidget
from osci import gdata, res, client
import ige.ospace.Const as Const
from ige.ospace import Rules
from ColorDefinitionDlg import ColorDefinitionDlg
import ige
# pact actions
ACTION_NONE = 0
ACTION_CANCEL = 1
ACTION_CONFIRM = 3
ACTION_OFFER = 4
class DiplomacyDlg:
def __init__(self, app):
self.app = app
self.createUI()
self.selectedPartyID = Const.OID_NONE
self.selectedPactID = Const.OID_NONE
self.galaxyScenario = None
self.cDlg = ColorDefinitionDlg(self.app)
def display(self):
self.show()
self.win.show()
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
def hide(self):
self.win.setStatus(_("Ready."))
self.win.hide()
self.galaxyScenario = None
# unregister updates
if self in gdata.updateDlgs:
gdata.updateDlgs.remove(self)
def update(self):
if not self.galaxyScenario:
galaxyID = client.getPlayer().galaxy
galaxy = client.get(galaxyID)
self.galaxyScenario = galaxy.scenario
self.show()
def _getContactEntry(self, contactID):
contact = client.get(contactID, publicOnly=1)
dipl = client.getDiplomacyWith(contactID)
if dipl.relChng > 0:
suffix = _(" +")
elif dipl.relChng < 0:
suffix = _(" -")
else:
suffix = _("")
relation = _("%s%s") % (_(gdata.relationNames[int(dipl.relation / 125)]), suffix)
contactName = _("%s [elect]") % contact.name if client.getPlayer().voteFor == contactID else contact.name
if getattr(dipl, "stats", None):
return ui.Item(contactName,
tContactID=contactID,
tRelation=relation,
tRelation_raw=dipl.relation,
tPopulation=dipl.stats.storPop,
tPlanets=dipl.stats.planets,
tStructures=dipl.stats.structs,
tProduction=dipl.stats.prodProd,
tScience=dipl.stats.prodSci,
tFleetPwr=dipl.stats.fleetPwr,
tContact=(_("-"), _("Mobile"), _("Static"))[dipl.contactType],
foreground=res.getPlayerColor(contactID),
tooltipTitle=_("Relation"),
tooltip=_("Relation %d, change %+d") % (dipl.relation, dipl.relChng),
statustip=_("Relation %d, change %+d") % (dipl.relation, dipl.relChng))
else:
return ui.Item(contactName,
tContactID=contactID,
tRelation=relation,
tRelation_raw=dipl.relation,
tPopulation="-",
tPlanets="-",
tStructures="-",
tProduction="-",
tScience="-",
tFleetPwr="-",
tContact=(_("None"), _("Mobile"), _("Static"))[dipl.contactType],
foreground=res.getPlayerColor(contactID))
def _getPlayerEntry(self):
player = client.getPlayer()
contactName = _("%s [elect]") % player.name if player.voteFor == player.oid else player.name
return ui.Item(contactName,
tContactID=player.oid,
tRelation="-",
tRelation_raw=10000,
tPopulation=getattr(player.stats, "storPop", "?"),
tPlanets=getattr(player.stats, "planets", "?"),
tStructures=getattr(player.stats, "structs", "?"),
tProduction=getattr(player.stats, "prodProd", "?"),
tScience=getattr(player.stats, "prodSci", "?"),
tFleetPwr=getattr(player.stats, "fleetPwr", "?"),
tContact="-",
foreground=res.getFFColorCode(Const.REL_UNITY))
def _buildContactList(self):
player = client.getPlayer()
items = []
selected = None
for contactID in player.diplomacyRels:
item = self._getContactEntry(contactID)
items.append(item)
if self.selectedPartyID == contactID:
selected = item
# player
item = self._getPlayerEntry()
items.append(item)
if self.selectedPartyID == player.oid:
selected = item
self.win.vContacts.items = items
self.win.vContacts.selectItem(selected)
self.win.vContacts.itemsChanged()
return selected
def _processVoting(self, selected):
player = client.getPlayer()
if self.galaxyScenario == Const.SCENARIO_OUTERSPACE:
# this is just in case we reloged
self.win.vAbstain.visible = 1
self.win.vVoteFor.visible = 1
self.win.vAbstain.enabled = player.voteFor != Const.OID_NONE
if selected:
self.win.vVoteFor.enabled = selected.tContactID != player.voteFor
else:
self.win.vVoteFor.enabled = 0
else:
self.win.vAbstain.visible = 0
self.win.vVoteFor.visible = 0
def _getPactsEntry(self, pactID, dipl):
pactSpec = Rules.pactDescrs[pactID]
if pactID in dipl.pacts:
pactState1 = dipl.pacts[pactID][0]
if self.partyDipl:
pactState2 = self.partyDipl.pacts.get(pactID, [Const.PACT_OFF])[0]
pactState2Text = _(gdata.pactStates[pactState2])
else:
pactState2 = Const.PACT_OFF
pactState2Text = _("N/A")
item = ui.Item(_(gdata.pactNames[pactID]),
tState1=_(gdata.pactStates[pactState1]),
tState2=pactState2Text,
tPactState=pactState1,
foreground=gdata.sevColors[(gdata.DISABLED, gdata.INFO, gdata.MIN)[min(pactState1, pactState2)]])
else:
if self.partyDipl:
pactState2 = self.partyDipl.pacts.get(pactID, [Const.PACT_OFF])[0]
pactState2Text = _(gdata.pactStates[pactState2])
else:
pactState2 = Const.PACT_OFF
pactState2Text = _("N/A")
item = ui.Item(_(gdata.pactNames[pactID]),
tState1=_(gdata.pactStates[Const.PACT_OFF]),
tState2=pactState2Text,
tPactState=Const.PACT_OFF,
foreground=gdata.sevColors[gdata.DISABLED])
item.tPactID = pactID
return item
def _processPacts(self):
player = client.getPlayer()
items = []
selected = None
if self.selectedPartyID and self.selectedPartyID != player.oid:
dipl = client.cmdProxy.getPartyDiplomacyRels(player.oid, self.selectedPartyID)[0]
if not dipl:
dipl = client.getDiplomacyWith(self.selectedPartyID)
for pactID in gdata.pacts:
pactSpec = Rules.pactDescrs[pactID]
if not pactSpec.validityInterval[0] < dipl.relation < pactSpec.validityInterval[1]:
continue
items.append(self._getPactsEntry(pactID, dipl))
self.win.vPacts.items = items
self.win.vPacts.selectItem(selected)
self.win.vPacts.itemsChanged()
def show(self):
selected = self._buildContactList()
self._processVoting(selected)
self._processPacts()
# Highlight buttons
self.win.vHighlight.enabled = 1 - int(gdata.config.defaults.highlights == 'yes')
self.win.vUHighlight.enabled = int(gdata.config.defaults.highlights == 'yes')
def onContactSelected(self, widget, action, data):
if self.win.vContacts.selection:
self.selectedPartyID = self.win.vContacts.selection[0].tContactID
else:
self.selectedPartyID = None
self.partyDipl = client.cmdProxy.getPartyDiplomacyRels(client.getPlayerID(), self.selectedPartyID)[1]
self.update()
self.onPactSelected(None, None, None)
def onPactSelected(self, widget, action, data):
if self.win.vPacts.selection:
self.selectedPactID = self.win.vPacts.selection[0].tPactID
else:
self.selectedPactID = None
self.win.vChangePactState.enabled = 0
self.win.vPactConditions.enabled = 0
self.win.vPactCondReset.enabled = 0
self.win.vConditions.items = []
self.win.vConditions.itemsChanged()
return
self.win.vChangePactState.enabled = 1
self.win.vPactConditions.enabled = 1
self.win.vPactCondReset.enabled = 1
item = self.win.vPacts.selection[0]
if item.tPactState == Const.PACT_OFF:
self.win.vChangePactState.text = _("Enable")
self.win.vChangePactState.data = "ENABLE"
else:
self.win.vChangePactState.text = _("Disable")
self.win.vChangePactState.data = "DISABLE"
# show conditions
items = []
selected = []
dipl = client.getDiplomacyWith(self.selectedPartyID)
conditions = dipl.pacts.get(item.tPactID, [0, item.tPactID])[1:]
if self.partyDipl:
partnerConditions = self.partyDipl.pacts.get(item.tPactID, [0])[1:]
else:
partnerConditions = []
states = (_(" "), _("Required"))
self.win.vCondTitle.text = _('Conditions for pact: %s') % _(gdata.pactNames[item.tPactID])
for pactID in gdata.pacts:
item = ui.Item(_(gdata.pactNames[pactID]),
tState1=states[pactID in conditions],
tState2=states[pactID in partnerConditions],
tPactID=pactID,
foreground=gdata.sevColors[(gdata.NONE, gdata.MAJ, gdata.MIN)[(pactID in conditions) + (pactID in partnerConditions)]])
items.append(item)
if pactID in conditions:
selected.append(item)
self.win.vConditions.items = items
for item in selected:
self.win.vConditions.selectItem(item)
self.win.vConditions.itemsChanged()
def onPactChange(self, widget, action, data):
citem = self.win.vContacts.selection[0]
pitem = self.win.vPacts.selection[0]
pactState = pitem.tPactState
if widget.data == "ENABLE":
pactState = Const.PACT_INACTIVE
elif widget.data == "DISABLE":
pactState = Const.PACT_OFF
if widget.data == "CONDSRESET":
conditions = [pitem.tPactID]
else:
conditions = []
for item in self.win.vConditions.selection:
conditions.append(item.tPactID)
try:
self.win.setStatus(_('Executing CHANGE PACT CONDITIONS command...'))
player = client.getPlayer()
player.diplomacyRels = client.cmdProxy.changePactCond(player.oid,
citem.tContactID, pitem.tPactID, pactState, conditions)
self.win.setStatus(_('Command has been executed.'))
except ige.GameException, e:
self.win.setStatus(e.args[0])
return
self.update()
def onVoteFor(self, widget, action, data):
citem = self.win.vContacts.selection[0]
try:
self.win.setStatus(_('Executing ELECT command...'))
player = client.getPlayer()
player.voteFor = client.cmdProxy.setVoteFor(player.oid,
citem.tContactID)
self.win.setStatus(_('Command has been executed.'))
except ige.GameException, e:
self.win.setStatus(e.args[0])
return
self.update()
def onAbstain(self, widget, action, data):
try:
self.win.setStatus(_('Executing ELECT command...'))
player = client.getPlayer()
player.voteFor = client.cmdProxy.setVoteFor(player.oid, Const.OID_NONE)
self.win.setStatus(_('Command has been executed.'))
except ige.GameException, e:
self.win.setStatus(e.args[0])
return
self.update()
def onHighlight(self, widget, action, data):
gdata.config.defaults.highlights = 'yes'
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
gdata.mainGameDlg.update()
self.update()
def onUHighlight(self, widget, action, data):
gdata.config.defaults.highlights = 'no'
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
gdata.mainGameDlg.update()
self.update()
def onDeleteHighlight(self, widget, action, data):
playerID = self.win.vContacts.selection[0].tContactID
if gdata.playersHighlightColors.has_key(playerID):
del gdata.playersHighlightColors[playerID]
self.update()
gdata.mainGameDlg.update()
def onColorDefinition(self, widget, action, data):
playerID = self.win.vContacts.selection[0].tContactID
if gdata.playersHighlightColors.has_key(playerID):
self.cDlg.display(color = gdata.playersHighlightColors[playerID], confirmAction = self.onColorDefinitionConfirmed)
else:
self.cDlg.display(confirmAction = self.onColorDefinitionConfirmed)
def onColorDefinitionConfirmed(self):
playerID = self.win.vContacts.selection[0].tContactID
gdata.playersHighlightColors[playerID] = self.cDlg.color
self.update()
gdata.mainGameDlg.update()
def onClose(self, widget, action, data):
self.hide()
def onHighlightMenu(self, widget, action, data):
self.eventPopup.show()
def createUI(self):
w, h = gdata.scrnSize
self.win = ui.Window(self.app,
modal=1,
escKeyClose=1,
titleOnly=(w == 800 and h == 600),
movable=0,
title=_('Diplomacy'),
rect=ui.Rect((w - 800 - 4 * (w != 800)) / 2,
(h - 600 - 4 * (h != 600)) / 2,
800 + 4 * (w != 800),
580 + 4 * (h != 600)),
layoutManager=ui.SimpleGridLM())
self.win.subscribeAction('*', self)
# player listing
ui.Listbox(self.win, layout=(0, 0, 40, 14), id='vContacts',
columns=((_('Name'), 'text', 8, ui.ALIGN_W),
(_('Relation'), 'tRelation', 4, ui.ALIGN_E),
(_('Population'), 'tPopulation', 4, ui.ALIGN_E),
(_('Planets'), 'tPlanets', 4, ui.ALIGN_E),
(_('Structures'), 'tStructures', 4, ui.ALIGN_E),
(_('Production'), 'tProduction', 4, ui.ALIGN_E),
(_('Research'), 'tScience', 4, ui.ALIGN_E),
(_('Military pwr'), 'tFleetPwr', 4, ui.ALIGN_E),
(_("Contact"), "tContact", 4, ui.ALIGN_E)),
columnLabels=1, action="onContactSelected", rmbAction="onHighlightMenu")
# Voting
ui.Button(self.win, layout=(0, 14, 5, 1), text=_("Elect"),
id="vVoteFor", action="onVoteFor")
ui.Button(self.win, layout=(5, 14, 5, 1), text=_("Abstain"),
id="vAbstain", action="onAbstain")
# Highlights
ui.Button(self.win, layout=(24, 14, 8, 1), text=_("Highlights On"),
id="vHighlight", action="onHighlight")
ui.Button(self.win, layout=(32, 14, 8, 1), text=_("Highlight Off"),
id="vUHighlight", action="onUHighlight")
# pacts
ui.Title(self.win, layout=(0, 15, 20, 1), text=_('Pacts'),
font='normal-bold', align=ui.ALIGN_W)
ui.Listbox(self.win, layout=(0, 16, 20, 10), id='vPacts',
columns=((_('I'), 'tState1', 3, ui.ALIGN_W),
(_('Partner'), 'tState2', 3, ui.ALIGN_W),
(_('Pact'), 'text', 13, ui.ALIGN_W)),
columnLabels=1, action="onPactSelected")
ui.Button(self.win, layout=(0, 26, 20, 1), text=_("On"),
id="vChangePactState", action="onPactChange", enabled=0)
# conditions
ui.Title(self.win, layout=(20, 15, 20, 1), text=_('Conditions'),
id="vCondTitle", font='normal-bold', align=ui.ALIGN_W)
ui.Listbox(self.win, layout=(20, 16, 20, 10), id='vConditions',
columns=((_('I'), 'tState1', 3, ui.ALIGN_W),
(_('Partner'), 'tState2', 3, ui.ALIGN_W),
(_('Pact'), 'text', 13, ui.ALIGN_W)),
columnLabels=1, multiselection=1)
ui.Button(self.win, layout=(20, 26, 15, 1), text=_("Change"),
id="vPactConditions", action="onPactChange", enabled=0, data="CONDS")
ui.Button(self.win, layout=(35, 26, 5, 1), text=_("Reset"),
id="vPactCondReset", action="onPactChange", enabled=0, data="CONDSRESET")
# status bar + submit/cancel
ui.TitleButton(self.win, layout=(35, 27, 5, 1), text=_('Close'), action='onClose')
ui.Title(self.win, id='vStatusBar', layout=(0, 27, 35, 1), align=ui.ALIGN_W)
# highlight menu
self.eventPopup = ui.Menu(self.app, title=_("Highlight actions"),
items=[ui.Item(_("Define color"), action="onColorDefinition"),
ui.Item(_("Disable highlight"), action="onDeleteHighlight")])
self.eventPopup.subscribeAction("*", self)
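# --- Hedged usage sketch (not part of the original file) ---
# The dialog follows the client's display()/hide() lifecycle; `app` here is
# assumed to be the running pygameui application object passed in by the
# main game dialog:
#
#   dlg = DiplomacyDlg(app)
#   dlg.display()   # shows the window and registers for gdata updates
#   dlg.hide()      # hides the window and unregisters again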
|
ospaceteam/outerspace
|
client/osci/dialog/DiplomacyDlg.py
|
Python
|
gpl-2.0
| 19,031
|
[
"Galaxy"
] |
bc5841467524d681344730a9c563b28eb09c38c5228b9294ca3bfcc3ab3723ba
|
from __future__ import print_function, division
import numpy as np
from pymatgen import units as pymatgen_units
from ..core import Writable
from .abidata import input_variable_blocks
from .utils import listify
from .variable import InputVariable, SpecialInputVariable
__all__ = ['AbinitInput']
class AbinitInput(Writable):
"""Abinit input file."""
def __init__(self, **kwargs):
super(AbinitInput, self).__init__(**kwargs)
self.variables = dict()
self.variables_blocks = list()
self.decimals = dict()
for (name, register) in input_variable_blocks.items():
self.variables_blocks.append(VariableBlock(name, register))
self.variables_blocks.append(VariableBlock('Other'))
def __str__(self):
lines = list()
# Clear blocks
for block in self.variables_blocks:
block.clear()
# Sort variables in blocks
for name, value in self.variables.items():
variable = SpecialInputVariable(name, value)
if name in self.decimals:
variable.decimals = self.decimals[name]
for block in self.variables_blocks:
if variable.basename in block.register:
block.append(variable)
break
else:
self.variables_blocks[-1].append(variable)
# Make the string
for block in self.variables_blocks:
if block:
lines.append(str(block))
lines.append('')
block.clear()
return '\n'.join(lines) + '\n'
def clear(self):
"""Clear variables."""
self.variables.clear()
for block in self.variables_blocks:
block.clear()
def set_variable(self, name, value, decimals=None): # TODO ndecimal or ndigits
"""Set a single variable."""
self.variables[name] = value
if value is None:
del self.variables[name]
return
if decimals is not None:
self.decimals[name] = decimals
def set_variables(self, variables, dataset=0, **kwargs):
"""
Sets variables by providing a dictionary, or expanding a dictionary,
and possibly append them by a dataset index.
Example::
>> kpoint_grid_shifted = {
>> 'kptopt' : 1,
>> 'ngkpt' : 3*[4],
>> 'nshiftk' : 4,
>> 'shiftk' : [[0.5,0.5,0.5],
>> [0.5,0.0,0.0],
>> [0.0,0.5,0.0],
>> [0.0,0.0,0.5]],}
>>
>> kpoint_grid_unshifted = {
>> 'kptopt' : 1,
>> 'ngkpt' : 3*[4],
>> 'nshiftk' : 1,
>> 'shiftk' : [0,0,0],}
>>
>> cell = {
>> 'ntypat' : 1
>> 'znucl' : 6.0
>> 'natom' : 2
>> 'typat' : [1, 1]
>> 'xred' : [[0,0,0],[0.25,0.25,0.25]]
>> 'acell' : 3*[6.9]
>> 'rprim' : [[0.0,0.5,0.5],
>> [0.5,0.0,0.5],
>> [0.5,0.5,0.0]]}
>>
>> f = InputFile()
>> f.set_variables(ndtset=3, ecut=4.0, ecutsm=0.5)
>>
>> f.set_variables(cell) # These two lines
>> f.set_variables(**cell) # are equivalent.
>>
>> # Here we append a dataset index at the end of all variables.
>> f.set_variables(kpoint_grid_shifted, dataset=1)
>> f.set_variables(kpoint_grid_unshifted, dataset=[2, 3])
>>
>> f.write('myfile.in') # The name was not set at initialization.
"""
#variables.update(kwargs)
if not dataset:
dataset = ['']
for ds in listify(dataset):
for (key, val) in variables.items():
newkey = key + str(ds)
self.set_variable(newkey, val, **kwargs)
def set_structure(self, structure):
variables = structure_to_abivars(structure)
self.set_variables(variables)
# =========================================================================== #
class VariableBlock(list):
"""A block of abinit variables."""
def __init__(self, title, register=''):
# The block title
self.title = title
# A register of all possible input variable.
if isinstance(register, str):
self.register = register.split()
else:
self.register = list(register)
def clear(self):
del self[:]
def __str__(self):
lines = ['#== {} ==#'.format(self.title)]
for variable in sorted(self):
svar = str(variable)
if svar:
lines.append(svar)
return '\n'.join(lines)
# =========================================================================== #
def structure_to_abivars(structure):
"""Get abinit variables from a pymatgen.Structure object."""
rprim = structure.lattice.matrix / pymatgen_units.bohr_to_ang
xred = list()
for site in structure.sites:
xred.append(site.frac_coords.round(14).tolist())
natom = structure.num_sites
ntypat = structure.ntypesp
znucl_atom = structure.atomic_numbers
itypat = 0
typat = list()
znucl = list()
for z in znucl_atom:
if z not in znucl:
itypat += 1
znucl.append(z)
typat.append(itypat)
else:
i = znucl.index(z)
typat.append(i+1)
d = dict(
rprim=rprim.tolist(),
acell=np.ones(3, dtype=float).tolist(),
natom=natom,
ntypat=ntypat,
znucl=znucl,
typat=typat,
xred=xred,
)
return d
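# --- Hedged usage sketch (not part of the original module) ---
# Mirrors the set_variables docstring; it assumes the Writable base class
# needs no required constructor arguments, which is not guaranteed here.
#
#   inp = AbinitInput()
#   inp.set_variables({'ecut': 10.0, 'kptopt': 1}, dataset=[1, 2])
#   inp.set_variable('ngkpt', 3 * [4])
#   print(str(inp))   # variables grouped into their input_variable_blocks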
|
trangel/OPTpy
|
OPTpy/Abinit/abinitinput.py
|
Python
|
gpl-3.0
| 5,902
|
[
"ABINIT",
"pymatgen"
] |
e06a4209e829b80451e512a24dd0cbb03c17722e1446a505eb4effeb1f86acaa
|
from setuptools import setup
setup(
name='CLQC',
version='1.1',
author='Sven H. Giese',
author_email='sven.giese@tu-berlin.de',
packages=['CLQC'],
scripts=['bin/CLQC_ContactMap.py','bin/CLQC_Distogram.py', 'bin/CLQC_CoverageProfile.py'],
url='http://pypi.python.org/pypi/TowelStuff/',
license='LICENSE.txt',
description='Package for fast quality control for CLMS data.',
long_description=open('README.txt').read(),
install_requires =["HTSeq >= 0.0.0"],
)
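# Hedged usage note (not part of the original file): with setuptools available,
# `pip install .` (or `python setup.py install`) builds the CLQC package and
# places the three CLQC_*.py scripts from bin/ on the PATH.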
|
gieses/CLQC
|
setup.py
|
Python
|
mit
| 504
|
[
"HTSeq"
] |
9c51ef9b248b48aac8114c9a7b1f274e9cdb69482070681514d8901ec1bf54f7
|
# -*- coding: utf-8 -*-
"""
codegen
~~~~~~~
Extension to ast that allow ast -> python code generation.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
from ast import *
BOOLOP_SYMBOLS = {
And: 'and',
Or: 'or'
}
BINOP_SYMBOLS = {
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
}
CMPOP_SYMBOLS = {
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def to_source(node, indent_with=' ' * 4, add_line_information=False):
"""This function can convert a node tree back into python sourcecode.
This is useful for debugging purposes, especially if you're dealing with
custom asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
If `add_line_information` is set to `True` comments for the line numbers
of the nodes are added to the output. This can be used to spot wrong line
number information of statement nodes.
"""
generator = SourceGenerator(indent_with, add_line_information)
generator.visit(node)
return ''.join(generator.result)
class SourceGenerator(NodeVisitor):
"""This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with, add_line_information=False):
self.result = []
self.indent_with = indent_with
self.add_line_information = add_line_information
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, node=None, extra=0):
self.new_lines = max(self.new_lines, 1 + extra)
if node is not None and self.add_line_information:
self.write('# line: %s' % node.lineno)
self.new_lines = 1
def body(self, statements):
self.new_line = True
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline(decorator)
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline(node)
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline(node)
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline(node)
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.visit(item)
def visit_Import(self, node):
self.newline(node)
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline(node)
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(extra=1)
self.decorators(node)
self.newline(node)
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(extra=2)
self.decorators(node)
self.newline(node)
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline(node)
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline(node)
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline(node)
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline(node)
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline(node)
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline(node)
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline(node)
self.write('del ')
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline(node)
self.write('try:')
self.body(node.body)
self.newline(node)
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline(node)
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline(node)
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline(node)
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline(node)
self.write('break')
def visit_Continue(self, node):
self.newline(node)
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline(node)
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline(node)
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)
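if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): round-trip a tiny
    # snippet through the stdlib ast parser and back to source text. The module
    # targets Python 2.6/2.7, so run the sketch under Python 2.
    import ast as _ast
    _tree = _ast.parse("def add(a, b=1):\n    return a + b\n")
    print(to_source(_tree))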
|
wwright2/dcim3-angstrom1
|
sources/bitbake/lib/codegen.py
|
Python
|
mit
| 16,325
|
[
"VisIt"
] |
9e37fe08e0f42772feee2ccf36a71c7f526e24c8891664f34a9efa21a223995b
|
CALLS=[{
"method":"POST",
"path":"/accounts/",
"view_func_name":"account_create",
"access_doc":"Any admin app.",
"url_params":{
},
"query_opts":{
},
"data_fields":{
'contact_email':'A valid email at which to reach the account holder.',
'secondary_secret_p':'0 or 1: Does this account require a secondary secret?',
'primary_secret_p':'0 or 1: Does this account require a primary secret?',
'account_id':'An identifier for the new account. Must be a valid email address. **REQUIRED**',
'full_name':'The full name to associate with the account.',
},
"description":"Create a new account, and send out initialization emails.",
"return_desc":":http:statuscode:`200` with information about the new account on success, :http:statuscode:`400` if ``ACCOUNT_ID`` isn't passed or is already used.",
"return_ex":'''
<Account id="joeuser@indivo.example.org">
<fullName>Joe User</fullName>
<contactEmail>joeuser@gmail.com</contactEmail>
<lastLoginAt>2010-05-04T15:34:23Z</lastLoginAt>
<totalLoginCount>43</totalLoginCount>
<failedLoginCount>0</failedLoginCount>
<state>active</state>
<lastStateChange>2009-04-03T13:12:12Z</lastStateChange>
<authSystem name="password" username="joeuser" />
<authSystem name="hospital_sso" username="Joe_User" />
</Account>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/search",
"view_func_name":"account_search",
"access_doc":"Any admin app.",
"url_params":{
},
"query_opts":{
'fullname':'The full name of the account to search for',
'contact_email':'The contact email of the account to search for',
},
"data_fields":{
},
"description":"Search for accounts by name or email.",
"return_desc":":http:statuscode:`200` with information about matching accounts, or :http:statuscode:`400` if no search parameters are passed.",
"return_ex":'''
<Accounts>
<Account id="joeuser@indivo.example.org">
<fullName>Joe User</fullName>
<contactEmail>joeuser@gmail.com</contactEmail>
<lastLoginAt>2010-05-04T15:34:23Z</lastLoginAt>
<totalLoginCount>43</totalLoginCount>
<failedLoginCount>0</failedLoginCount>
<state>active</state>
<lastStateChange>2009-04-03T13:12:12Z</lastStateChange>
<authSystem name="password" username="joeuser" />
<authSystem name="hospital_sso" username="Joe_User" />
</Account>
...
</Accounts>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}",
"view_func_name":"account_info",
"access_doc":"Any admin app, or the Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
},
"description":"Display information about an account.",
"return_desc":":http:statuscode:`200` with information about the account",
"return_ex":'''
<Account id="joeuser@indivo.example.org">
<fullName>Joe User</fullName>
<contactEmail>joeuser@gmail.com</contactEmail>
<lastLoginAt>2010-05-04T15:34:23Z</lastLoginAt>
<totalLoginCount>43</totalLoginCount>
<failedLoginCount>0</failedLoginCount>
<state>active</state>
<lastStateChange>2009-04-03T13:12:12Z</lastStateChange>
<authSystem name="password" username="joeuser" />
<authSystem name="hospital_sso" username="Joe_User" />
</Account>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/authsystems/",
"view_func_name":"account_authsystem_add",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
'username':'The username for this account',
'password':'The password for this account',
'system':'The identifier of the desired authsystem. ``password`` indicates the internal password system.',
},
"description":"Add a new method of authentication to an account.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`403` if the indicated auth system doesn't exist, and :http:statuscode:`400` if a system and a username weren't passed, or if the account is already registered with the passed system, or if the username is already taken for the passed authsystem.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/authsystems/password/change",
"view_func_name":"account_password_change",
"access_doc":"The Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
'new':'The desired new password.',
'old':'The existing account password.',
},
"description":"Change a account's password.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`403` if the old password didn't validate, or :http:statuscode:`400` if both a new and old password weren't passed.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/authsystems/password/set",
"view_func_name":"account_password_set",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
'password':'The new password to set.',
},
"description":"Force the password of an account to a given value.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`400` if a new password wasn't passed.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/authsystems/password/set-username",
"view_func_name":"account_username_set",
"access_doc":"Any admin app, or the Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
'username':'The new username to set.',
},
"description":"Force the username of an account to a given value.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`400` if a username wasn't passed.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/check-secrets/{PRIMARY_SECRET}",
"view_func_name":"account_check_secrets",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
'PRIMARY_SECRET':'A confirmation string sent securely to the patient from Indivo',
},
"query_opts":{
'secondary_secret':'The secondary secret of the account to check.',
},
"data_fields":{
},
"description":"Validate an account's primary and secondary secrets.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`403` if validation fails.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/forgot-password",
"view_func_name":"account_forgot_password",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
},
"description":"Resets an account if the user has forgotten its password.",
"return_desc":":http:statuscode`200` with the account's new secondary secret, or :http:statuscode:`400` if the account hasn't yet been initialized.",
"return_ex":'''
<secret>123456</secret>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/inbox/",
"view_func_name":"account_inbox",
"access_doc":"The Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
'status':'The account or document status to filter by',
'limit':'See :ref:`query-operators`',
'order_by':'See :ref:`query-operators`',
'include_archive':'0 or 1: whether or not to include archived messages in the result set.',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List messages in an account's inbox.",
"return_desc":":http:statuscode:`200`, with a list of inbox messages.",
"return_ex":'''
<Messages>
<Message id="879">
<sender>doctor@example.indivo.org</sender>
<received_at>2010-09-04T14:12:12Z</received_at>
<read_at>2010-09-04T17:13:24Z</read_at>
<subject>your test results are looking good</subject>
<severity>normal</severity>
<record id="123" />
<attachment num="1" type="http://indivo.org/vocab/xml/documents#Lab" size="12546" />
</Message>
...
</Messages>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/inbox/",
"view_func_name":"account_send_message",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
'body':'The message body. Defaults to ``[no body]``.',
'subject':'The message subject. Defaults to ``[no subject]``.',
'message_id':'An external identifier for the message, for idempotency.',
'severity':'The importance of the message. Options are ``low``, ``medium``, ``high``. Defaults to ``low``.',
},
"description":"Send a message to an account.",
"return_desc":":http:statuscode:`200 Success` with XML describing the message, or http:statuscode:`400` if the passed message_id is a duplicate. Also emails account to alert them that a new message has arrived.",
"return_ex":'''
<Message id="63de173d-0dba-4cbd-92bd-5ef3b638ffd2">
<sender>test@indivo.org</sender>
<received_at>2012-07-13T15:59:25.102905Z</received_at>
<read_at></read_at>
<archived_at></archived_at>
<subject>subj</subject>
<severity>low</severity>
<record id="03302536-a00d-425a-8b87-533d0d37478e" />
</Message>
''',
"deprecated": None,
"added": None,
"changed": None,
},
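# Hedged illustration (not part of the original CALLS data): a request against
# the message-send endpoint above might be form-encoded as
#   POST /accounts/joeuser@indivo.example.org/inbox/
#   subject=lab+results&body=Great+results!&severity=low&message_id=msg-42
# the concrete values are invented for illustration only.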
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/inbox/{MESSAGE_ID}",
"view_func_name":"account_inbox_message",
"access_doc":"The Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
'MESSAGE_ID':'The unique identifier of the Indivo Message',
},
"query_opts":{
},
"data_fields":{
},
"description":"Retrieve an individual message from an account's inbox.",
"return_desc":":http:statuscode:`200`, with XML describing the message.",
"return_ex":'''
<Message id="879">
<sender>doctor@example.indivo.org</sender>
<received_at>2010-09-04T14:12:12Z</received_at>
<read_at>2010-09-04T17:13:24Z</read_at>
<archived_at>2010-09-04T17:15:24Z</archived_at>
<subject>your test results are looking good</subject>
<body>Great results!
It seems you'll live forever!</body>
<severity>normal</severity>
<record id="123" />
<attachment num="1" type="http://indivo.org/vocab/xml/documents#Lab" size="12546" />
</Message>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/inbox/{MESSAGE_ID}/archive",
"view_func_name":"account_message_archive",
"access_doc":"The Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
'MESSAGE_ID':'The unique identifier of the Indivo Message',
},
"query_opts":{
},
"data_fields":{
},
"description":"Archive a message.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/inbox/{MESSAGE_ID}/attachments/{ATTACHMENT_NUM}/accept",
"view_func_name":"account_inbox_message_attachment_accept",
"access_doc":"The Account owner.",
"url_params":{
'ATTACHMENT_NUM':'The 1-indexed number corresponding to the message attachment',
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
'MESSAGE_ID':'The unique external identifier of the Indivo Message',
},
"query_opts":{
},
"data_fields":{
},
"description":"Accept a message attachment into the record it corresponds to.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`410` if the attachment has already been saved.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/info-set",
"view_func_name":"account_info_set",
"access_doc":"Any admin app, or the Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
'contact_email':'A valid email at which to reach the account holder.',
'full_name':'The full name of the account.',
},
"description":"Set basic information about an account.",
"return_desc":":http:statuscode:`200`, or :http:statuscode:`400` if no parameters are passed in.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/initialize/{PRIMARY_SECRET}",
"view_func_name":"account_initialize",
"access_doc":"Any Indivo UI app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
'PRIMARY_SECRET':'A confirmation string sent securely to the patient from Indivo',
},
"query_opts":{
},
"data_fields":{
'secondary_secret':'',
},
"description":"Initialize an account, activating it.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`403` if the account has already been initialized or if secrets didn't validate, and :http:statuscode:`400` if a secondary secret was required but missing.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/notifications/",
"view_func_name":"account_notifications",
"access_doc":"The Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
'status':'The account or document status to filter by',
'order_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List an account's notifications.",
"return_desc":":http:statuscode:`200` with a list of the account's notifications.",
"return_ex":'''
<Notifications>
<Notification id="468">
<sender>labs@apps.indivo.org</sender>
<received_at>2010-09-03T15:12:12Z</received_at>
<content>A new lab result has been delivered to your account</content>
<record id="123" label="Joe User" />
<document id="579" label="Lab Test 2" />
</Notification>
...
</Notifications>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/permissions/",
"view_func_name":"account_permissions",
"access_doc":"The Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
},
"description":"List the carenets that an account has access to.",
"return_desc":":http:statuscode:`200` with a list of carenets.",
"return_ex":'''
<Carenets record_id="01234">
<Carenet id="456" name="family" mode="explicit" />
<Carenet id="567" name="school" mode="explicit" />
</Carenets>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/primary-secret",
"view_func_name":"account_primary_secret",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
},
"description":"Display an account's primary secret.",
"return_desc":":http:statuscode:`200`, with the primary secret.",
"return_ex":'''
<secret>123absxzyasdg13b</secret>
''',
"deprecated": ('1.0.0', 'Avoid sending primary secrets over the wire. Instead, use :http:get:`/accounts/{ACCOUNT_EMAIL}/check-secrets/{PRIMARY_SECRET}`.'),
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/records/",
"view_func_name":"record_list",
"access_doc":"The Account owner.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
'status':'The account or document status to filter by',
'limit':'See :ref:`query-operators`',
'order_by':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List all available records for an account.",
"return_desc":":http:statuscode:`200`, with a list of records owned or shared with the account.",
"return_ex":'''
<Records>
<Record id="123" label="John R. Smith" />
<Record id="234" label="John R. Smith Jr. (shared)" shared="true" role_label="Guardian" />
<Record id="345" label="Juanita R. Smith (carenet)" shared="true" carenet_id="678" carenet_name="family" />
...
</Records>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/reset",
"view_func_name":"account_reset",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
},
"description":"Reset an account to an ``uninitialized`` state.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/accounts/{ACCOUNT_EMAIL}/secret",
"view_func_name":"account_secret",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
},
"description":"Return the secondary secret of an account.",
"return_desc":":http:statuscode:`200`, with the secondary secret.",
"return_ex":'''
<secret>123456</secret>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/secret-resend",
"view_func_name":"account_resend_secret",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
},
"description":"Sends an account user their primary secret in case they lost it.",
"return_desc":":http:statuscode:`200 Success`. Also emails the account with their new secret.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/accounts/{ACCOUNT_EMAIL}/set-state",
"view_func_name":"account_set_state",
"access_doc":"Any admin app.",
"url_params":{
'ACCOUNT_EMAIL':'The email identifier of the Indivo account',
},
"query_opts":{
},
"data_fields":{
'state':'The desired state of the account. Options are ``active``, ``disabled``, ``retired``.',
},
"description":"Set the state of an account.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`403` if the account has been retired and can no longer change state.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/apps/",
"view_func_name":"all_phas",
"access_doc":"Any principal in Indivo.",
"url_params":{
},
"query_opts":{
},
"data_fields":{
},
"description":"List all available userapps.",
"return_desc":":http:statuscode:`200`, with a list of userapps.",
"return_ex":'''
<Apps>
<App id="problems@apps.indivo.org">
<startURLTemplate>http://problems.indivo.org/auth/start?record_id={record_id}&carenet_id={carenet_id}</startURLTemplate>
<name>Problem List</name>
<description>Managing your problem list</description>
<autonomous>false</autonomous>
<frameable>true</frameable>
<ui>true</ui>
</App>
...
</Apps>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/apps/{PHA_EMAIL}",
"view_func_name":"pha_delete",
"access_doc":"The user app itself.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Delete a userapp from Indivo.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/apps/{PHA_EMAIL}",
"view_func_name":"pha",
"access_doc":"Any principal in Indivo.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Return a description of a single userapp.",
"return_desc":":http:statuscode:`200`, with information about the userapp.",
"return_ex":'''
<App id="problems@apps.indivo.org">
<startURLTemplate>http://problems.indivo.org/auth/start?record_id={record_id}&carenet_id={carenet_id}</startURLTemplate>
<name>Problem List</name>
<description>Managing your problem list</description>
<autonomous>false</autonomous>
<frameable>true</frameable>
<ui>true</ui>
</App>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/apps/{PHA_EMAIL}/documents/",
"view_func_name":"app_document_list",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
'status':'The account or document status to filter by',
'type':'The Indivo document type to filter by',
'order_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List app-specific documents.",
"return_desc":":http:statuscode:`200` with A list of documents, or http:statuscode:`404` if an invalid type was passed in the querystring.",
"return_ex":'''
<Documents record_id="" total_document_count="4" pha="problems@apps.indivo.org">
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
...
</Documents>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/apps/{PHA_EMAIL}/documents/",
"view_func_name":"app_document_create",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create an app-specific Indivo document.",
"return_desc":":http:statuscode:`200` with the metadata of the created document, or :http:statuscode:`400` if the new document failed validation.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
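# Illustrative sketch (not part of the API metadata): how a user app might
# invoke the call above to create an app-specific document. The host name,
# secrets, and document content are hypothetical; OAuth 1.0 signing via the
# requests / requests_oauthlib libraries is assumed.
#
#   import requests
#   from requests_oauthlib import OAuth1
#
#   auth = OAuth1('problems@apps.indivo.org', CONSUMER_SECRET,
#                 ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
#   doc = '<DefaultProblemsPreferences record_id="123"/>'
#   resp = requests.post(
#       'https://indivo.example.org/apps/problems@apps.indivo.org/documents/',
#       data=doc, headers={'Content-Type': 'application/xml'}, auth=auth)
#   # On success (200), resp.text is the new document's metadata XML;
#   # 400 means the document failed validation.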
{
"method":"PUT",
"path":"/apps/{PHA_EMAIL}/documents/external/{EXTERNAL_ID}",
"view_func_name":"app_document_create_or_update_ext",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create an app-specific Indivo document with an associated external id.",
"return_desc":":http:statuscode:`200` with the metadata of the created or updated document, or :http:statuscode:`400` if the passed content didn't validate.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/apps/{PHA_EMAIL}/documents/external/{EXTERNAL_ID}/meta",
"view_func_name":"app_document_meta_ext",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch the metadata of an app-specific document identified by external id.",
"return_desc":":http:statuscode:`200` with metadata describing the specified document, or http:statuscode:`404` if the external_id is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="problems@apps.indivo.org" type="pha">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}",
"view_func_name":"app_document_delete",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Delete an app-specific document.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}",
"view_func_name":"app_specific_document",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Retrive an app-specific document.",
"return_desc":":http:statuscode:`200` with the raw content of the document, or :http:statuscode:`404` if the document could not be found.",
"return_ex":'''
<DefaultProblemsPreferences record_id="123">
<Preference name="hide_void" value="true" />
<Preference name="show_rels" value="false" />
</DefaultProblemsPreferences>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}",
"view_func_name":"app_document_create_or_update",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
    '':'The raw content of the document to create or update.',
    },
    "description":"Create or overwrite an app-specific Indivo document.",
"return_desc":":http:statuscode:`200` with metadata describing the created or updated document, or :http:statuscode:`400` if the passed content didn't validate.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="problems@apps.indivo.org" type="pha">
</creator>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading preferences</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}/label",
"view_func_name":"app_document_label",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
'':'The new label for the document',
},
"description":"Set the label of an app-specific document.",
"return_desc":":http:statuscode:`200` with metadata describing the re-labeled document, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>RELABELED: New HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}/meta",
"view_func_name":"app_document_meta",
"access_doc":"A user app with an id matching the app email in the URL.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch the metadata of an app-specific document.",
"return_desc":":http:statuscode:`200` with the document metadata, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/apps/{PHA_EMAIL}/records/",
"view_func_name":"app_record_list",
"access_doc":"Any autonomous user app.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Return a list of all records that have this pha enabled.",
"return_desc":":http:statuscode:`200` with a list of records on success.",
"return_ex":'''
<Records>
<Record id="123" label="John R. Smith" />
<Record id = "234" label="Frank Frankson" />
...
</Records>
''',
"deprecated": None,
"added": ('1.0.0', ''),
"changed": None,
},
{
"method":"POST",
"path":"/apps/{PHA_EMAIL}/records/{RECORD_ID}/access_token",
"view_func_name":"autonomous_access_token",
"access_doc":"An autonomous user app with a record on which the app is authorized to run.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch an access token for an autonomous app to access a record.",
"return_desc":":http:statuscode:`200` with a valid access token for the app bound to the record on success.",
"return_ex":'''
oauth_token=abcd1fw3gasdgh3&oauth_token_secret=jgrlhre4291hfjas&xoauth_indivo_record_id=123
''',
"deprecated": None,
"added": ('1.0.0', ''),
"changed": None,
},
{
"method":"DELETE",
"path":"/carenets/{CARENET_ID}",
"view_func_name":"carenet_delete",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Delete a carenet.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/accounts/",
"view_func_name":"carenet_account_list",
"access_doc":"A principal in the carenet, in full control of the carenet's record, or any admin app.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"List the accounts in a carenet.",
"return_desc":":http:statuscode:`200` with a list of accounts in the specified carenet.",
"return_ex":'''
<CarenetAccounts>
<CarenetAccount id="johndoe@indivo.org" fullName="John Doe" write="true" />
...
</CarenetAccounts>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/carenets/{CARENET_ID}/accounts/",
"view_func_name":"carenet_account_create",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
'write':'``true`` or ``false``. Whether this account can write to the carenet.',
'account_id':'An identifier for the account. Must be a valid email address.',
},
"description":"Add an account to a carenet.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`404` if the specified account or carenet don't exist, or :http:statuscode:`400` if an account_id isn't passed.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
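# Illustrative sketch (hypothetical host and ids): adding an account to a
# carenet with the call above, authenticated as a principal in full control
# of the carenet's record.
#
#   resp = requests.post('https://indivo.example.org/carenets/789/accounts/',
#                        data={'account_id': 'johndoe@indivo.org', 'write': 'true'},
#                        auth=OWNER_AUTH)   # OAuth1 credentials, as sketched earlier
#   # 400 is returned if account_id is missing; 404 if the account or carenet
#   # does not exist.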
{
"method":"DELETE",
"path":"/carenets/{CARENET_ID}/accounts/{ACCOUNT_ID}",
"view_func_name":"carenet_account_delete",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'ACCOUNT_ID':'The email identifier of the Indivo account',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Remove an account from a carenet.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if either the passed account or the passed carenet doesn't exist.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/accounts/{ACCOUNT_ID}/permissions",
"view_func_name":"carenet_account_permissions",
"access_doc":"A user app with access to the carenet and proxying the account, a principal in full control of the carenet's record, or any admin app.",
"url_params":{
'ACCOUNT_ID':'The email identifier of the Indivo account',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"List the permissions of an account within a carenet.",
"return_desc":":http:statuscode:`200` with a list of document types that the account can access within a carenet. Currently always returns all document types.",
"return_ex":'''
<Permissions>
<DocumentType type="*" write="true" />
</Permissions>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/apps/",
"view_func_name":"carenet_apps_list",
"access_doc":"A principal in the carenet, in full control of the carenet's record, or any admin app.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"List Apps within a given carenet.",
"return_desc":":http:statuscode:`200` with a list of applications in the carenet.",
"return_ex":'''
<Apps>
<App id="problems@apps.indivo.org">
<startURLTemplate>http://problems.indivo.org/auth/start?record_id={record_id}&carenet_id={carenet_id}</startURLTemplate>
<name>Problem List</name>
<description>Managing your problem list</description>
<autonomous>false</autonomous>
<frameable>true</frameable>
<ui>true</ui>
</App>
...
</Apps>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/carenets/{CARENET_ID}/apps/{PHA_EMAIL}",
"view_func_name":"carenet_apps_delete",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Remove an app from a given carenet.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/carenets/{CARENET_ID}/apps/{PHA_EMAIL}",
"view_func_name":"carenet_apps_create",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Add an app to a carenet",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`400` if the passed PHA is autonomous (autonomous apps can't be scoped to carenets).",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/apps/{PHA_EMAIL}/permissions",
"view_func_name":"carenet_app_permissions",
"access_doc":"Nobody",
"url_params":{
'PHA_EMAIL':'The email identifier of the Indivo user app',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Retrieve the permissions for an app within a carenet. NOT IMPLEMENTED.",
"return_desc":":http:statuscode:`200`. This call is unimplemented, and has no effect.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/documents/",
"view_func_name":"carenet_document_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'type':'The Indivo document type to filter by',
},
"data_fields":{
},
"description":"List documents from a given carenet.",
"return_desc":":http:statuscode:`200` with a document list on success, :http:statuscode:`404` if *type* doesn't exist.",
"return_ex":'''
<Documents record_id="123" total_document_count="3" pha="" >
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
...
</Documents>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/documents/special/{SPECIAL_DOCUMENT}",
"view_func_name":"read_special_document_carenet",
"access_doc":"A user app with access to the carenet or the entire carenet's record, an account in the carenet or in control of the record, or the admin app that created the carenet's record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
'SPECIAL_DOCUMENT':'The type of special document to access. Options are ``demographics``, ``contact``',
},
"query_opts":{
},
"data_fields":{
},
"description":"Read a special document from a carenet.",
"return_desc":":http:statuscode:`200` with the special document's raw content, or :http:statuscode:`404` if the document hasn't been created yet.",
"return_ex":'''
<Contact xmlns="http://indivo.org/vocab/xml/documents#">
<name>
<fullName>Sebastian Rockwell Cotour</fullName>
<givenName>Sebastian</givenName>
<familyName>Cotour</familyName>
</name>
<email type="personal">
scotour@hotmail.com
</email>
<email type="work">
sebastian.cotour@childrens.harvard.edu
</email>
<address type="home">
<streetAddress>15 Waterhill Ct.</streetAddress>
<postalCode>53326</postalCode>
<locality>New Brinswick</locality>
<region>Montana</region>
<country>US</country>
<timeZone>-7GMT</timeZone>
</address>
<location type="home">
<latitude>47N</latitude>
<longitude>110W</longitude>
</location>
<phoneNumber type="home">5212532532</phoneNumber>
<phoneNumber type="work">6217233734</phoneNumber>
<instantMessengerName protocol="aim">scotour</instantMessengerName>
</Contact>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/documents/{DOCUMENT_ID}",
"view_func_name":"carenet_document",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Return a document from a carenet.",
"return_desc":":http:statuscode:`200` with the document content on success, :http:statuscode:`404` if document_id is invalid or if the document is not shared in the carenet.",
"return_ex":'''
<ExampleDocument>
<content>That's my content</content>
<otherField attr="val" />
</ExampleDocument>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/documents/{DOCUMENT_ID}/meta",
"view_func_name":"carenet_document_meta",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch the metadata of a record-specific document via a carenet.",
"return_desc":":http:statuscode:`200` with the document's metadata, or :http:statuscode:`404` if ``document_id`` doesn't identify an existing document in the carenet.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/record",
"view_func_name":"carenet_record",
"access_doc":"Nobody",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Get basic information about the record to which a carenet belongs.",
"return_desc":":http:statuscode:`200` with XML describing the record.",
"return_ex":'''
<Record id="123" label="Joe User">
<contact document_id="790" />
<demographics document_id="467" />
<created at="2010-10-23T10:23:34Z" by="indivoconnector@apps.indivo.org" />
</Record>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/carenets/{CARENET_ID}/rename",
"view_func_name":"carenet_rename",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
'name':'The new name for the carenet.',
},
"description":"Change a carenet's name.",
"return_desc":":http:statuscode:`200` with XML describing the renamed carenet on success, :http:statuscode:`400` if ``name`` wasn't passed or if a carenet named ``name`` already exists on this record.",
"return_ex":'''
<Carenets record_id="123">
<Carenet id="789" name="Work/School" mode="explicit" />
</Carenets>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/allergies/",
"view_func_name":"carenet_allergy_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the allergy data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of allergies, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="allergen_name" value="penicillin"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Allergy xmlns="http://indivo.org/vocab/xml/documents#">
<dateDiagnosed>2009-05-16</dateDiagnosed>
<diagnosedBy>Children's Hospital Boston</diagnosedBy>
<allergen>
<type type="http://codes.indivo.org/codes/allergentypes/" value="drugs">Drugs</type>
<name type="http://codes.indivo.org/codes/allergens/" value="penicillin">Penicillin</name>
</allergen>
<reaction>blue rash</reaction>
<specifics>this only happens on weekends</specifics>
</Allergy>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
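# Illustrative sketch (hypothetical values): combining the documented query
# operators when listing a minimal allergy report for a carenet. Only
# parameters named in "query_opts" above are used; the date_range format
# mirrors the <DateRange> value shown in the example response.
#
#   params = {
#       'allergen_name': 'penicillin',                         # {FIELD} filter
#       'date_range': 'date_diagnosed*2009-01-01T00:00:00Z*',  # open-ended range
#       'order_by': 'date_diagnosed',
#       'limit': 20,
#       'offset': 0,
#   }
#   resp = requests.get(
#       'https://indivo.example.org/carenets/789/reports/minimal/allergies/',
#       params=params, auth=APP_AUTH)
#   # 400 indicates an invalid query parameter.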
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/equipment/",
"view_func_name":"carenet_equipment_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the equipment data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of equipment, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="allergen_name" value="penicillin"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Equipment xmlns="http://indivo.org/vocab/xml/documents#">
<dateStarted>2009-02-05</dateStarted>
<dateStopped>2010-06-12</dateStopped>
<type>cardiac</type>
<name>Pacemaker</name>
<vendor>Acme Medical Devices</vendor>
<id>167-ABC-23</id>
<description>it works</description>
<specification>blah blah blah</specification>
</Equipment>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/immunizations/",
"view_func_name":"carenet_immunization_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the immunization data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of immunizations, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="allergen_name" value="penicillin"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Immunization xmlns="http://indivo.org/vocab/xml/documents#">
<dateAdministered>2009-05-16T12:00:00</dateAdministered>
<administeredBy>Children's Hospital Boston</administeredBy>
<vaccine>
<type type="http://codes.indivo.org/vaccines#" value="hep-B">Hepatitis B</type>
<manufacturer>Oolong Pharmaceuticals</manufacturer>
<lot>AZ1234567</lot>
<expiration>2009-06-01</expiration>
</vaccine>
<sequence>2</sequence>
<anatomicSurface type="http://codes.indivo.org/anatomy/surfaces#" value="shoulder">Shoulder</anatomicSurface>
<adverseEvent>pain and rash</adverseEvent>
</Immunization>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/labs/",
"view_func_name":"carenet_lab_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the lab data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of labs, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="lab_type" value="hematology"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<LabReport xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>1998-07-16T12:00:00Z</dateMeasured>
<labType>hematology</labType>
<laboratory>
<name>Quest</name>
<address>300 Longwood Ave, Boston MA 02215</address>
</laboratory>
<comments>was looking pretty sick</comments>
<firstPanelName>CBC</firstPanelName>
</LabReport>
</Item>
</Report>
<Report>
<Meta>
<Document id="1b7270a6-5925-450c-9273-5a74386cef63" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="c1be22813ab83f6b3858878a802f372eef754fcdd285e44a5fdb7387d6ee3667" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="1b7270a6-5925-450c-9273-5a74386cef63"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<LabReport xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>2009-07-16T12:00:00Z</dateMeasured>
<labType>hematology</labType>
<laboratory>
<name>Quest</name>
<address>300 Longwood Ave, Boston MA 02215</address>
</laboratory>
<comments>was looking pretty sick</comments>
<firstPanelName>CBC</firstPanelName>
</LabReport>
</Item>
</Report>
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/measurements/{LAB_CODE}/",
"view_func_name":"carenet_measurement_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
'LAB_CODE':'The identifier corresponding to the measurement being made.',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the measurement data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of measurements, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="lab_type" value="hematology"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Measurement" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Measurement id="1234" value="120" type="blood pressure systolic" datetime="2011-03-02T00:00:00Z" unit="mmHg" source_doc="3456" />
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/medications/",
"view_func_name":"carenet_medication_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the medication data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of medications, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Medication" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Medication xmlns="http://indivo.org/vocab/xml/documents#">
<dateStarted>2009-02-05</dateStarted>
<name type="http://indivo.org/codes/meds#" abbrev="c2i" value="COX2 Inhibitor" />
<brandName type="http://indivo.org/codes/meds#" abbrev="vioxx" value="Vioxx" />
<dose>
<value>3</value>
<unit type="http://indivo.org/codes/units#" value="pills" abbrev="p" />
</dose>
<route type="http://indivo.org/codes/routes#" value="PO">By Mouth</route>
<strength>
<value>100</value>
<unit type="http://indivo.org/codes/units#" value="mg" abbrev="mg">Milligrams</unit>
</strength>
<frequency type="http://indivo.org/codes/frequency#" value="daily">daily</frequency>
<prescription>
<by>
<name>Dr. Ken Mandl</name>
<institution>Children's Hospital Boston</institution>
</by>
<on>2009-02-01</on>
<stopOn>2010-01-31</stopOn>
<dispenseAsWritten>true</dispenseAsWritten>
<!-- this duration means 2 months -->
<duration>P2M</duration>
<!-- does this need more structure? -->
<refillInfo>once a month for 3 months</refillInfo>
<instructions>don't take them all at once!</instructions>
</prescription>
</Medication>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/problems/",
"view_func_name":"carenet_problem_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the problem data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of problems, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Problem" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Problem xmlns="http://indivo.org/vocab/xml/documents#">
<dateOnset>2009-05-16T12:00:00</dateOnset>
<dateResolution>2009-05-16T16:00:00</dateResolution>
<name type="http://codes.indivo.org/problems/" value="123" abbrev="MI">Myocardial Infarction</name>
<comments>mild heart attack</comments>
<diagnosedBy>Dr. Mandl</diagnosedBy>
</Problem>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/procedures/",
"view_func_name":"carenet_procedure_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the procedure data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of procedures, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Procedure" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Procedure xmlns="http://indivo.org/vocab/xml/documents#">
<datePerformed>2009-05-16T12:00:00</datePerformed>
<name type="http://codes.indivo.org/procedures#" value="85" abbrev="append">Appendectomy</name>
<provider>
<name>Kenneth Mandl</name>
<institution>Children's Hospital Boston</institution>
</provider>
</Procedure>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/simple-clinical-notes/",
"view_func_name":"carenet_simple_clinical_notes_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the simple_clinical_notes data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of notes, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#SimpleClinicalNote" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<SimpleClinicalNote xmlns="http://indivo.org/vocab/xml/documents#">
<dateOfVisit>2010-02-02T12:00:00Z</dateOfVisit>
<finalizedAt>2010-02-03T13:12:00Z</finalizedAt>
<visitType type="http://codes.indivo.org/visit-types#" value="acute">Acute Care</visitType>
<visitLocation>Longfellow Medical</visitLocation>
<specialty type="http://codes.indivo.org/specialties#" value="hem-onc">Hematology/Oncology</specialty>
<signature>
<at>2010-02-03T13:12:00Z</at>
<provider>
<name>Kenneth Mandl</name>
<institution>Children's Hospital Boston</institution>
</provider>
</signature>
<signature>
<provider>
<name>Isaac Kohane</name>
<institution>Children's Hospital Boston</institution>
</provider>
</signature>
<chiefComplaint>stomach ache</chiefComplaint>
<content>Patient presents with ... </content>
</SimpleClinicalNote>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/vitals/",
"view_func_name":"carenet_vitals_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the vitals data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of notes, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#VitalSign" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<VitalSign xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>2009-05-16T15:23:21</dateMeasured>
<name type="http://codes.indivo.org/vitalsigns/" value="123" abbrev="BPsys">Blood Pressure Systolic</name>
<value>145</value>
<unit type="http://codes.indivo.org/units/" value="31" abbrev="mmHg">millimeters of mercury</unit>
<site>left arm</site>
<position>sitting down</position>
</VitalSign>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/carenets/{CARENET_ID}/reports/minimal/vitals/{CATEGORY}",
"view_func_name":"carenet_vitals_list",
"access_doc":"A user app with access to the carenet or the entire carenet's record, or an account in the carenet or in control of the record.",
"url_params":{
    'CATEGORY':'The category of vital sign, e.g. ``weight``, ``Blood_Pressure_Systolic``',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the vitals data for a given carenet.",
"return_desc":":http:statuscode:`200` with a list of notes, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#VitalSign" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<VitalSign xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>2009-05-16T15:23:21</dateMeasured>
<name type="http://codes.indivo.org/vitalsigns/" value="123" abbrev="BPsys">Blood Pressure Systolic</name>
<value>145</value>
<unit type="http://codes.indivo.org/units/" value="31" abbrev="mmHg">millimeters of mercury</unit>
<site>left arm</site>
<position>sitting down</position>
</VitalSign>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/codes/systems/",
"view_func_name":"coding_systems_list",
"access_doc":"Anybody",
"url_params":{
},
"query_opts":{
},
"data_fields":{
},
"description":"List available codingsystems. NOT IMPLEMENTED.",
"return_desc":":http:statuscode:`500`, as the system cannot process the call.",
"return_ex":'''
[{"short_name": "umls-snomed", "name": "UMLS SNOMED", "description" : "..."},
{..},
{..}]
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/codes/systems/{SYSTEM_SHORT_NAME}/query",
"view_func_name":"coding_system_query",
"access_doc":"Anybody",
"url_params":{
'SYSTEM_SHORT_NAME':'',
},
"query_opts":{
'q':'The query string to search for',
},
"data_fields":{
},
"description":"Query a codingsystem for a value.",
"return_desc":":http:statuscode:`200` with JSON describing codingsystems entries that matched *q*, or :http:statuscode:`404` if ``SYSTEM_SHORT_NAME`` is invalid.",
"return_ex":'''
[{"abbreviation": null, "code": "38341003", "consumer_value": null,
"umls_code": "C0020538",
"full_value": "Hypertensive disorder, systemic arterial (disorder)"},
{"abbreviation": null, "code": "55822004", "consumer_value": null,
"umls_code": "C0020473", "full_value": "Hyperlipidemia (disorder)"}]
''',
"deprecated": None,
"added": None,
"changed": None,
},
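# Illustrative sketch (hypothetical host): querying a coding system for
# matching entries. No OAuth credentials are needed ("Anybody").
#
#   resp = requests.get(
#       'https://indivo.example.org/codes/systems/umls-snomed/query',
#       params={'q': 'hypertension'})
#   matches = resp.json()   # list of dicts shaped like the example above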
{
"method":"POST",
"path":"/oauth/access_token",
"view_func_name":"exchange_token",
"access_doc":"A request signed by a RequestToken.",
"url_params":{
},
"query_opts":{
},
"data_fields":{
},
"description":"Exchange a request token for a valid access token.",
"return_desc":":http:statuscode:`200` with an access token, or :http:statuscode:`403` if the request token didn't validate.",
"return_ex":'''
oauth_token=abcd1fw3gasdgh3&oauth_token_secret=jgrlhre4291hfjas&xoauth_indivo_record_id=123
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/oauth/internal/request_tokens/{REQTOKEN_ID}/approve",
"view_func_name":"request_token_approve",
"access_doc":"A principal in the carenet to which the request token is restricted (if the token is restricted), or a principal with full control over the record (if the token is not restricted).",
"url_params":{
'REQTOKEN_ID':'',
},
"query_opts":{
},
"data_fields":{
'record_id':'The record to bind to. Either *record_id* or *carenet_id* is required.',
'carenet_id':'The carenet to bind to. Either *record_id* or *carenet_id* is required.',
},
"description":"Indicate a user's consent to bind an app to a record or carenet.",
"return_desc":":http:statuscode:`200` with a redirect url to the app on success, :http:statuscode:`403` if *record_id*/*carenet_id* don't match *reqtoken*.",
"return_ex":'''
location=http%3A%2F%2Fapps.indivo.org%2Fproblems%2Fafter_auth%3Foauth_token%3Dabc123%26oauth_verifier%3Dabc123
(which is the urlencoded form of:
http://apps.indivo.org/problems/after_auth?oauth_token=abc123&oauth_verifier=abc123 )
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/oauth/internal/request_tokens/{REQTOKEN_ID}/claim",
"view_func_name":"request_token_claim",
"access_doc":"Any Account.",
"url_params":{
'REQTOKEN_ID':'',
},
"query_opts":{
},
"data_fields":{
},
"description":"Claim a request token on behalf of an account.",
"return_desc":":http:statuscode:`200` with the email of the claiming principal, or :http:statuscode:`403` if the token has already been claimed.",
"return_ex":'''
joeuser@indivo.org
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/oauth/internal/request_tokens/{REQTOKEN_ID}/info",
"view_func_name":"request_token_info",
"access_doc":"Any Account.",
"url_params":{
'REQTOKEN_ID':'',
},
"query_opts":{
},
"data_fields":{
},
"description":"Get information about a request token.",
"return_desc":":http:statuscode:`200` with information about the token.",
"return_ex":'''
<RequestToken token="XYZ">
<record id="123" />
<carenet />
<kind>new</kind>
<App id="problems@apps.indivo.org">
<name>Problem List</name>
<description>Managing your list of problems</description>
<autonomous>false</autonomous>
<frameable>true</frameable>
<ui>true</ui>
</App>
</RequestToken>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/oauth/internal/session_create",
"view_func_name":"session_create",
"access_doc":"Any Indivo UI app.",
"url_params":{
},
"query_opts":{
},
"data_fields":{
'username':'The username of the user to authenticate.',
'password':'The password to use with *username* against the internal password auth system. EITHER *password* or *system* is **Required**.',
'system':'An external auth system to authenticate the user with. EITHER *password* or *system* is **Required**.',
},
"description":"Authenticate a user and register a web session for them.",
"return_desc":":http:statuscode:`200` with a valid session token, or :http:statuscode:`403` if the passed credentials were invalid.",
"return_ex":'''
oauth_token=XYZ&oauth_token_secret=ABC&account_id=joeuser%40indivo.org
''',
"deprecated": None,
"added": None,
"changed": None,
},
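# Illustrative sketch (hypothetical values): a UI app establishing a web
# session for a user with the call above.
#
#   resp = requests.post(
#       'https://indivo.example.org/oauth/internal/session_create',
#       data={'username': 'joeuser', 'password': 'secret'}, auth=UI_APP_AUTH)
#   # On success the body is a querystring carrying oauth_token,
#   # oauth_token_secret and account_id for the session.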
{
"method":"GET",
"path":"/oauth/internal/surl-verify",
"view_func_name":"surl_verify",
"access_doc":"Any Account.",
"url_params":{
},
"query_opts":{
'surl_sig':'The computed signature (base-64 encoded sha1) of the url.',
'surl_timestamp':'when the url was generated. Must be within the past hour.',
'surl_token':'The access token used to sign the url.',
},
"data_fields":{
},
"description":"Verify a signed URL.",
"return_desc":":http:statuscode:`200` with XML describing whether the surl validated.",
"return_ex":'''
If the surl validated:
<result>ok</result>
If the surl was too old:
<result>old</result>
If the surl's signature was invalid:
<result>mismatch</result>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/oauth/request_token",
"view_func_name":"request_token",
"access_doc":"Any user app.",
"url_params":{
},
"query_opts":{
},
"data_fields":{
'indivo_record_id':'The record to which to bind the request token. EITHER *indivo_record_id* or *indivo_carenet_id* is **REQUIRED**.',
'indivo_carenet_id':'The carenet to which to bind the request token. EITHER *indivo_record_id* or *indivo_carenet_id* is **REQUIRED**.',
},
"description":"Get a new request token, bound to a record or carenet if desired.",
"return_desc":":http:statuscode:`200` with the request token on success, :http:statuscode:`403` if the oauth signature on the request of missing or faulty.",
"return_ex":'''
oauth_token=abcd1fw3gasdgh3&oauth_token_secret=jgrlhre4291hfjas&xoauth_indivo_record_id=123
''',
"deprecated": None,
"added": None,
"changed": None,
},
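# Illustrative sketch (not part of the API metadata): fetching a request token bound
# to a record. The choice of requests-oauthlib for OAuth signing and the consumer
# key/secret names are assumptions; any OAuth 1.0a-capable client would do.
#
#   import requests
#   from requests_oauthlib import OAuth1
#
#   resp = requests.post(INDIVO_SERVER + '/oauth/request_token',
#                        data={'indivo_record_id': '123'},
#                        auth=OAuth1(CONSUMER_KEY, CONSUMER_SECRET))
#   # 200 -> oauth_token=...&oauth_token_secret=...&xoauth_indivo_record_id=123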
{
"method":"POST",
"path":"/records/",
"view_func_name":"record_create",
"access_doc":"Any admin app.",
"url_params":{
},
"query_opts":{
},
"data_fields":{
'':'A valid Indivo Contact Document (see :doc:`/schemas/contact-schema`).',
},
"description":"Create a new record.",
"return_desc":":http:statuscode:`200` with information about the record on success, :http:statuscode:`400` if the contact XML was empty or invalid.",
"return_ex":'''
<Record id="123" label="Joe Smith">
<contact document_id="234" />
<demographics document_id="" />
</Record>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/external/{PRINCIPAL_EMAIL}/{EXTERNAL_ID}",
"view_func_name":"record_create_ext",
"access_doc":"An admin app with an id matching the principal_email in the URL.",
"url_params":{
'PRINCIPAL_EMAIL':'The email with which to scope an external id.',
'EXTERNAL_ID':'The external identifier of the desired resource',
},
"query_opts":{
},
"data_fields":{
'':'A valid Indivo Contact Document (see :doc:`/schemas/contact-schema`).',
},
"description":"Create a new record with an associated external id.",
"return_desc":":http:statuscode:`200` with information about the record on success, :http:statuscode:`400` if the contact XML was empty or invalid.",
"return_ex":'''
<Record id="123" label="Joe Smith">
<contact document_id="234" />
<demographics document_id="" />
</Record>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}",
"view_func_name":"record",
"access_doc":"A principal in full control of the record, the admin app that created the record, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
},
"description":"Get information about an individual record.",
"return_desc":":http:statuscode:`200` with information about the record.",
"return_ex":'''
<Record id="123" label="Joe Smith">
<contact document_id="234" />
<demographics document_id="346" />
</Record>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/apps/",
"view_func_name":"record_phas",
"access_doc":"A principal in full control of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'type':'A namespaced document type. If specified, only apps which explicitly declare themselves as supporting that document type will be returned.',
},
"data_fields":{
},
"description":"List userapps bound to a given record.",
"return_desc":":http:statuscode:`200` with a list of userapps.",
"return_ex":'''
<Apps>
<App id="problems@apps.indivo.org">
<startURLTemplate>http://problems.indivo.org/auth/start?record_id={record_id}&carenet_id={carenet_id}</startURLTemplate>
<name>Problem List</name>
<description>Managing your problem list</description>
<autonomous>false</autonomous>
<frameable>true</frameable>
<ui>true</ui>
</App>
...
</Apps>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}",
"view_func_name":"pha_record_delete",
"access_doc":"Any admin app, or a principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Remove a userapp from a record.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}",
"view_func_name":"record_pha",
"access_doc":"A principal in full control of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Get information about a given userapp bound to a record.",
"return_desc":":http:statuscode:`200` with information about the app, or :http:statuscode:`404` if the app isn't bound to the record.",
"return_ex":'''
<App id="problems@apps.indivo.org">
<startURLTemplate>http://problems.indivo.org/auth/start?record_id={record_id}&carenet_id={carenet_id}</startURLTemplate>
<name>Problem List</name>
<description>Managing your problem list</description>
<autonomous>false</autonomous>
<frameable>true</frameable>
<ui>true</ui>
</App>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}",
"view_func_name":"record_pha_enable",
"access_doc":"Any admin app, or a principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Enable a userapp for a record.",
"return_desc":":http:statuscode:`200` on success, :http:statuscode:`404` if either the specified record or the specified app doesn't exist.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": ('1.0.0', ''),
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/",
"view_func_name":"record_app_document_list",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
'status':'The account or document status to filter by',
'type':'The Indivo document type to filter by',
'order_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List record-app-specific documents.",
"return_desc":":http:statuscode:`200` with a list of documents, or :http:statuscode:`404` if an invalid type was passed in the querystring.",
"return_ex":'''
<Documents record_id="123" total_document_count="4" pha="problems@apps.indivo.org">
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading Preferences</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
...
</Documents>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/",
"view_func_name":"record_app_document_create",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a record-app-specific Indivo document.",
"return_desc":":http:statuscode:`200` with the metadata of the created document, or :http:statuscode:`400` if the new document failed validation.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading Preferences</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/external/{EXTERNAL_ID}",
"view_func_name":"record_app_document_create_or_update_ext",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create/update.',
},
"description":"Create or Overwrite a record-app-specific Indivo document with an associated external id.",
"return_desc":":http:statuscode:`200` with metadata describing the created or updated document, or :http:statuscode:`400` if the passed content didn't validate.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="problems@apps.indivo.org" type="pha">
</creator>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading preferences</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/external/{EXTERNAL_ID}",
"view_func_name":"record_app_document_create_or_update_ext",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create/update.',
},
"description":"Create or Overwrite a record-app-specific Indivo document with an associated external id.",
"return_desc":":http:statuscode:`200` with metadata describing the created or updated document, or :http:statuscode:`400` if the passed content didn't validate.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="problems@apps.indivo.org" type="pha">
</creator>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading preferences</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
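# Illustrative note (not part of the API metadata): the POST and PUT forms above are
# interchangeable create-or-update calls keyed by an app-chosen external id, so
# retrying the same call is safe. A minimal sketch, assuming an OAuth-signed
# `session` (e.g. a requests.Session with an OAuth1 auth handler) and a hypothetical
# external id 'prefs-001':
#
#   url = (INDIVO_SERVER + '/records/123/apps/problems@apps.indivo.org'
#          '/documents/external/prefs-001')
#   resp = session.put(url, data=preferences_xml,
#                      headers={'Content-Type': 'application/xml'})
#   # 200 -> document metadata; repeating the call overwrites the same document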
{
"method":"GET",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/external/{EXTERNAL_ID}/meta",
"view_func_name":"record_app_document_meta_ext",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch the metadata of a record-app-specific document identified by external id.",
"return_desc":":http:statuscode:`200` with metadata describing the specified document, or http:statuscode:`404` if the external_id is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="problems@apps.indivo.org" type="pha">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading Preferences</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}",
"view_func_name":"record_app_document_delete",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Delete a record-app-specific document.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}",
"view_func_name":"record_app_specific_document",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Retrieve a record-app-specific document.",
"return_desc":":http:statuscode:`200` with the raw content of the document, or :http:statuscode:`404` if the document could not be found.",
"return_ex":'''
<ProblemsPreferences record_id="123">
<Preference name="hide_void" value="true" />
<Preference name="show_rels" value="false" />
</ProblemsPreferences>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}/label",
"view_func_name":"record_app_document_label",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
'':'The new label for the document',
},
"description":"Set the label of a record-app-specific document.",
"return_desc":":http:statuscode:`200` with metadata describing the re-labeled document, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>RELABELED: New HBA1C reading Preferences</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/documents/{DOCUMENT_ID}/meta",
"view_func_name":"record_app_document_meta",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch the metadata of a record-app-specific document.",
"return_desc":":http:statuscode:`200` with the document metadata, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading Preferences</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/apps/{PHA_EMAIL}/setup",
"view_func_name":"record_pha_setup",
"access_doc":"Any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'Raw content that will be used as a setup document for the record. **OPTIONAL**.',
},
"description":"Bind an app to a record without user authorization.",
"return_desc":":http:statuscode:`200` with a valid access token for the newly set up app.",
"return_ex":'''
oauth_token=abcd1fw3gasdgh3&oauth_token_secret=jgrlhre4291hfjas&xoauth_indivo_record_id=123
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/audits/",
"view_func_name":"audit_record_view",
"access_doc":"A principal in full control of the record, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'order_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"Return audits of calls touching *record*.",
"return_desc":":http:statuscode:`200`, with a list of Audit Reports.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
</Meta>
<Item>
<AuditEntry>
<BasicInfo datetime="2011-04-27T17:32:23Z" view_func="get_document" request_successful="true" />
<PrincipalInfo effective_principal="myapp@apps.indivohealth.org" proxied_principal="me@indivohealth.org" />
<Resources carenet_id="" record_id="123" pha_id="" document_id="234" external_id="" message_id="" />
<RequestInfo req_url="/records/123/documents/acd/" req_ip_address="127.0.0.1" req_domain="localhost" req_method="GET" />
<ResponseInfo resp_code="200" />
</AuditEntry>
</Item>
</Report>
...
</Reports>
''',
"deprecated": ('0.9.3', 'Use :http:get:`/records/{RECORD_ID}/audits/query/` instead.'),
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/audits/documents/{DOCUMENT_ID}/",
"view_func_name":"audit_document_view",
"access_doc":"A principal in full control of the record, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
'order_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"Return audits of calls touching *record* and *document_id*.",
"return_desc":":http:statuscode:`200`, with a list of Audit Reports.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<Filters>
<Filter name="document_id" value="234"/>
</Filters>
</QueryParams>
<Report>
<Meta>
</Meta>
<Item>
<AuditEntry>
<BasicInfo datetime="2011-04-27T17:32:23Z" view_func="get_document" request_successful="true" />
<PrincipalInfo effective_principal="myapp@apps.indivohealth.org" proxied_principal="me@indivohealth.org" />
<Resources carenet_id="" record_id="123" pha_id="" document_id="234" external_id="" message_id="" />
<RequestInfo req_url="/records/123/documents/acd/" req_ip_address="127.0.0.1" req_domain="localhost" req_method="GET" />
<ResponseInfo resp_code="200" />
</AuditEntry>
</Item>
</Report>
...
</Reports>
''',
"deprecated": ('0.9.3', 'Use :http:get:`/records/{RECORD_ID}/audits/query/` instead.'),
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/audits/documents/{DOCUMENT_ID}/functions/{FUNCTION_NAME}/",
"view_func_name":"audit_function_view",
"access_doc":"A principal in full control of the record, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'FUNCTION_NAME':'The internal Indivo function name called by the API request',
},
"query_opts":{
'order_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"Return audits of calls to *function_name* touching *record* and *document_id*.",
"return_desc":":http:statuscode:`200`, with a list of Audit Reports.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<Filters>
<Filter name="document_id" value="234"/>
<Filter name="req_view_func" value="record_specific_document"/>
</Filters>
</QueryParams>
<Report>
<Meta>
</Meta>
<Item>
<AuditEntry>
<BasicInfo datetime="2011-04-27T17:32:23Z" view_func="get_document" request_successful="true" />
<PrincipalInfo effective_principal="myapp@apps.indivohealth.org" proxied_principal="me@indivohealth.org" />
<Resources carenet_id="" record_id="123" pha_id="" document_id="234" external_id="" message_id="" />
<RequestInfo req_url="/records/123/documents/acd/" req_ip_address="127.0.0.1" req_domain="localhost" req_method="GET" />
<ResponseInfo resp_code="200" />
</AuditEntry>
</Item>
</Report>
...
</Reports>
''',
"deprecated": ('0.9.3', 'Use :http:get:`/records/{RECORD_ID}/audits/query/` instead.'),
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/audits/query/",
"view_func_name":"audit_query",
"access_doc":"A principal in full control of the record, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`audit-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"Select Audit Objects via the Query API Interface.",
"return_desc":":http:statuscode:`200` with a list of audit records, or :http:statuscode:`400` if any of the arguments to the query interface are invalid.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="created_at*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="document_id" value="234"/>
</Filters>
</QueryParams>
<Report>
<Meta>
</Meta>
<Item>
<AuditEntry>
<BasicInfo datetime="2011-04-27T17:32:23Z" view_func="get_document" request_successful="true" />
<PrincipalInfo effective_principal="myapp@apps.indivohealth.org" proxied_principal="me@indivohealth.org" />
<Resources carenet_id="" record_id="123" pha_id="" document_id="234" external_id="" message_id="" />
<RequestInfo req_url="/records/123/documents/acd/" req_ip_address="127.0.0.1" req_domain="localhost" req_method="GET" />
<ResponseInfo resp_code="200" />
</AuditEntry>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": ('0.9.3', ''),
"changed": None,
},
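# Illustrative sketch (not part of the API metadata): an audit query using the
# generic query-interface options listed in the entry above. The date_range value
# format is taken from the example response; the remaining parameter values and the
# `requests` call are assumptions.
#
#   resp = requests.get(INDIVO_SERVER + '/records/123/audits/query/',
#                       params={'document_id': '234',
#                               'date_range': 'created_at*1995-03-10T00:00:00Z*',
#                               'order_by': 'created_at',
#                               'limit': '100'},
#                       auth=oauth_credentials)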
{
"method":"GET",
"path":"/records/{RECORD_ID}/autoshare/bytype/",
"view_func_name":"autoshare_list",
"access_doc":"A principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'type':'The document schema type to check autoshares for. **REQUIRED**.',
},
"data_fields":{
},
"description":"For a single record, list all carenets that a given doctype is autoshared with.",
"return_desc":":http:statuscode:`200` with a list of carenets, or :http:statuscode:`404` if the passed document type is invalid.",
"return_ex":'''
<Carenets record_id="123">
<Carenet id="789" name="Work/School" mode="explicit" />
...
</Carenets>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/autoshare/bytype/all",
"view_func_name":"autoshare_list_bytype_all",
"access_doc":"A principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
},
"description":"For a single record, list all doctypes autoshared into carenets.",
"return_desc":":http:statuscode:`200` with a list of doctypes and their shared carenets.",
"return_ex":'''
<DocumentSchemas>
<DocumentSchema type="http://indivo.org/vocab/xml/documents#Medication">
<Carenet id="123" name="Family" mode="explicit" />
...
</DocumentSchema>
...
</DocumentSchemas>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/autoshare/carenets/{CARENET_ID}/bytype/set",
"view_func_name":"autoshare_create",
"access_doc":"A principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
'type':'The document schema type to create an autoshare for',
},
"description":"Automatically share all documents of a certain type into a carenet.",
"return_desc":":http:statuscode:`200`, or :http:statuscode:`404` if the passed document type doesn't exist.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/autoshare/carenets/{CARENET_ID}/bytype/unset",
"view_func_name":"autoshare_delete",
"access_doc":"A principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
'type':'The document schema type to remove an autoshare for',
},
"description":"Remove an autoshare from a carenet.",
"return_desc":":http:statuscode:`200`, or :http:statuscode:`404` if the passed document type doesn't exist.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/carenets/",
"view_func_name":"carenet_list",
"access_doc":"A principal in full control of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
},
"description":"List all carenets for a record.",
"return_desc":":http:statuscode:`200`, with a list of carenets.",
"return_ex":'''
<Carenets record_id="123">
<Carenet id="789" name="Work/School" mode="explicit" />
...
</Carenets>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/carenets/",
"view_func_name":"carenet_create",
"access_doc":"A principal in full control of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
'name':'The label for the new carenet.',
},
"description":"Create a new carenet for a record.",
"return_desc":":http:statuscode:`200` with a description of the new carenet, or :http:statuscode:`400` if the name of the carenet wasn't passed or already exists.",
"return_ex":'''
<Carenets record_id="123">
<Carenet id="789" name="Work/School" mode="explicit" />
</Carenets>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/records/{RECORD_ID}/documents/",
"view_func_name":"documents_delete",
"access_doc":"Nobody",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
},
"description":"Delete all documents associated with a record.",
"return_desc":":http:statuscode:`200 Success`",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/",
"view_func_name":"record_document_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'type':'The Indivo document type to filter by',
'order_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List record-specific documents.",
"return_desc":":http:statuscode:`200` with a list of documents, or :http:statuscode:`404` if an invalid type was passed in the querystring.",
"return_ex":'''
<Documents record_id="123" total_document_count="4">
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
...
</Documents>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/documents/",
"view_func_name":"document_create",
"access_doc":"A user app with access to the record, a principal in full control of the record, or the admin app that created the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a record-specific Indivo Document.",
"return_desc":":http:statuscode:`200` with the metadata of the created document, or :http:statuscode:`400` if the new document failed validation.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/external/{PHA_EMAIL}/{EXTERNAL_ID}",
"view_func_name":"document_create_by_ext_id",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a record-specific Indivo Document with an associated external id.",
"return_desc":":http:statuscode:`200` with the metadata of the created document, or :http:statuscode:`400` if the new document failed validation, or if the external id was taken.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/external/{PHA_EMAIL}/{EXTERNAL_ID}/label",
"view_func_name":"record_document_label_ext",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
'':'The new label for the document',
},
"description":"Set the label of a record-specific document, specified by external id.",
"return_desc":":http:statuscode:`200` with metadata describing the re-labeled document, or :http:statuscode:`404` if ``EXTERNAL_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>RELABELED: New HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/external/{PHA_EMAIL}/{EXTERNAL_ID}/meta",
"view_func_name":"record_document_meta_ext",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch the metadata of a record-specific document identified by external id.",
"return_desc":":http:statuscode:`200` with the document metadata, or :http:statuscode:`404` if ``EXTERNAL_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/special/{SPECIAL_DOCUMENT}",
"view_func_name":"read_special_document",
"access_doc":"A user app with access to the record, a principal in full control of the record, or the admin app that created the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'SPECIAL_DOCUMENT':'The type of special document to access. Options are ``demographics``, ``contact``',
},
"query_opts":{
},
"data_fields":{
},
"description":"Read a special document from a record.",
"return_desc":":http:statuscode:`200` with the special document's raw content, or :http:statuscode:`404` if the document hasn't been created yet.",
"return_ex":'''
<Contact xmlns="http://indivo.org/vocab/xml/documents#">
<name>
<fullName>Sebastian Rockwell Cotour</fullName>
<givenName>Sebastian</givenName>
<familyName>Cotour</familyName>
</name>
<email type="personal">
scotour@hotmail.com
</email>
<email type="work">
sebastian.cotour@childrens.harvard.edu
</email>
<address type="home">
<streetAddress>15 Waterhill Ct.</streetAddress>
<postalCode>53326</postalCode>
<locality>New Brinswick</locality>
<region>Montana</region>
<country>US</country>
<timeZone>-7GMT</timeZone>
</address>
<location type="home">
<latitude>47N</latitude>
<longitude>110W</longitude>
</location>
<phoneNumber type="home">5212532532</phoneNumber>
<phoneNumber type="work">6217233734</phoneNumber>
<instantMessengerName protocol="aim">scotour</instantMessengerName>
</Contact>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/documents/special/{SPECIAL_DOCUMENT}",
"view_func_name":"save_special_document",
"access_doc":"A user app with access to the record, a principal in full control of the record, or the admin app that created the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'SPECIAL_DOCUMENT':'The type of special document to access. Options are ``demographics``, ``contact``',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create or update a special document on a record.",
"return_desc":":http:statuscode:`200` with metadata on the updated document, or :http:statuscode:`400` if the new content didn't validate.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="http://indivo.org/vocab/xml/documents#Contact" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>Contacts</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/special/{SPECIAL_DOCUMENT}",
"view_func_name":"save_special_document",
"access_doc":"A user app with access to the record, a principal in full control of the record, or the admin app that created the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'SPECIAL_DOCUMENT':'The type of special document to access. Options are ``demographics``, ``contact``',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create or update a special document on a record.",
"return_desc":":http:statuscode:`200` with metadata on the updated document, or :http:statuscode:`400` if the new content didn't validate.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="http://indivo.org/vocab/xml/documents#Contact" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>Contacts</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID_0}/rels/{REL}/{DOCUMENT_ID_1}",
"view_func_name":"document_rels",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID_1':'The id of the document that is the subject of the relationship, i.e. DOCUMENT_ID_1 *annotates* DOCUMENT_ID_0',
'DOCUMENT_ID_0':'The id of the document that is the object of the relationship, i.e. DOCUMENT_ID_0 *is annotated by* DOCUMENT_ID_1',
'REL':'The type of relationship between the documents, i.e. ``annotation``, ``interpretation``',
},
"query_opts":{
},
"data_fields":{
},
"description":"Create a new relationship between two existing documents.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``DOCUMENT_ID_0``, ``DOCUMENT_ID_1``, or ``REL`` don't exist.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
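# Illustrative sketch (not part of the API metadata): relating two existing
# documents, where document 567 annotates document 234 in record 123. The ids and
# the OAuth-signed `session` object are assumptions for the example.
#
#   resp = session.put(INDIVO_SERVER +
#                      '/records/123/documents/234/rels/annotation/567')
#   # 200 -> <ok/>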
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}",
"view_func_name":"record_specific_document",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Retrieve a record-specific document.",
"return_desc":":http:statuscode:`200` with the raw content of the document, or :http:statuscode:`404` if the document could not be found.",
"return_ex":'''
<HBA1C xmlns="http://indivo.org/vocab#" value="5.3" unit="percent" datetime="2011-01-15T17:00:00.000Z" />
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/carenets/",
"view_func_name":"document_carenets",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"List all the carenets into which a document has been shared.",
"return_desc":":http:statuscode:`200` with a list of carenets.",
"return_ex":'''
<Carenets record_id="123">
<Carenet id="789" name="Work/School" mode="explicit" />
...
</Carenets>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/carenets/{CARENET_ID}",
"view_func_name":"carenet_document_delete",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Unshare a document from a given carenet.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid or if either the passed carenet or document do not belong to the passed record.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/carenets/{CARENET_ID}",
"view_func_name":"carenet_document_placement",
"access_doc":"A principal in full control of the carenet's record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Place a document into a given carenet.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid or nevershared.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/carenets/{CARENET_ID}/autoshare-revert",
"view_func_name":"autoshare_revert",
"access_doc":"A principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'CARENET_ID':'The id string associated with the Indivo carenet',
},
"query_opts":{
},
"data_fields":{
},
"description":"Revert the document-sharing of a document in a carent to whatever rules are specified by autoshares. NOT IMPLEMENTED.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/label",
"view_func_name":"record_document_label",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
'':'The new label for the document',
},
"description":"Set the label of a record-specific document.",
"return_desc":":http:statuscode:`200` with metadata describing the re-labeled document, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>RELABELED: New HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/meta",
"view_func_name":"record_document_meta",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Fetch the metadata of a record-specific document.",
"return_desc":":http:statuscode:`200` with the document metadata, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/meta",
"view_func_name":"update_document_meta",
"access_doc":"Nobody",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Set metadata fields on a document. NOT IMPLEMENTED.",
"return_desc":":http:statuscode:`200 Success`.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/nevershare",
"view_func_name":"document_remove_nevershare",
"access_doc":"A principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Remove the nevershare flag from a document.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/nevershare",
"view_func_name":"document_set_nevershare",
"access_doc":"A principal in full control of the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"Flag a document to never be shared, anywhere.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/rels/{REL}/",
"view_func_name":"get_documents_by_rel",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'REL':'The type of relationship between the documents, i.e. ``annotation``, ``interpretation``',
},
"query_opts":{
'status':'The account or document status to filter by.',
'order_by':'See :ref:`query-operators`. **CURRENTLY UNIMPLEMENTED**.',
'limit':'See :ref:`query-operators`. **CURRENTLY UNIMPLEMENTED**.',
'offset':'See :ref:`query-operators`. **CURRENTLY UNIMPLEMENTED**.',
},
"data_fields":{
},
"description":"Get all documents related to the passed document_id by a relation of the passed relation-type.",
"return_desc":":http:statuscode:`200` with a list of related documents, or :http:statuscode:`400` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Documents record_id="123" total_document_count="4">
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
...
</Documents>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/rels/{REL}/",
"view_func_name":"document_create_by_rel",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'REL':'The type of relationship between the documents, i.e. ``annotation``, ``interpretation``',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a document and relate it to an existing document.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`400` if the new content was invalid, or :http:statuscode:`404` if ``DOCUMENT_ID`` or ``REL`` are invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/rels/{REL}/external/{PHA_EMAIL}/{EXTERNAL_ID}",
"view_func_name":"document_create_by_rel_with_ext_id",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
'REL':'The type of relationship between the documents, e.g. ``annotation``, ``interpretation``',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a document, assign it an external id, and relate it to an existing document.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`400` if the new content was invalid, or :http:statuscode:`404` if ``DOCUMENT_ID`` or ``REL`` are invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/rels/{REL}/external/{PHA_EMAIL}/{EXTERNAL_ID}",
"view_func_name":"document_create_by_rel_with_ext_id",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
'REL':'The type of relationship between the documents, e.g. ``annotation``, ``interpretation``',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a document, assign it an external id, and relate it to an existing document.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`400` if the new content was invalid, or :http:statuscode:`404` if ``DOCUMENT_ID`` or ``REL`` are invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/replace",
"view_func_name":"document_version",
"access_doc":"A user app with access to the record, a principal in full control of the record, or the admin app that created the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a new version of a record-specific document.",
"return_desc":":http:statuscode:`200` with metadata on the new document, :http:statuscode:`400` if the old document has already been replaced by a newer version, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid or if the new content is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<replaces id="abe8130e2-ba54-1234-eeef-45a3b6cd9a8e" />
<original id="abe8130e2-ba54-1234-eeef-45a3b6cd9a8e" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/replace/external/{PHA_EMAIL}/{EXTERNAL_ID}",
"view_func_name":"document_version_by_ext_id",
"access_doc":"A user app with access to the record, with an id matching the app email in the URL.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'EXTERNAL_ID':'The external identifier of the desired resource',
'PHA_EMAIL':'The email identifier of the Indivo user app',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
'':'The raw content of the document to create.',
},
"description":"Create a new version of a record-specific document and assign it an external id.",
"return_desc":":http:statuscode:`200` with metadata on the new document, :http:statuscode:`400` if the old document has already been replaced by a newer version, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid or if the new content is invalid.",
"return_ex":'''
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<replaces id="abe8130e2-ba54-1234-eeef-45a3b6cd9a8e" />
<original id="abe8130e2-ba54-1234-eeef-45a3b6cd9a8e" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/set-status",
"view_func_name":"document_set_status",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
'status':'The new status for the document. Options are ``active``, ``void``, ``archived``.',
'reason':'The reason for the status change.',
},
"description":"Set the status of a record-specific document.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`400` if *status* or *reason* are missing, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/status-history",
"view_func_name":"document_status_history",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
},
"data_fields":{
},
"description":"List all changes to a document's status over time.",
"return_desc":":http:statuscode:`200` with a the document's status history, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<DocumentStatusHistory document_id="456">
<DocumentStatus by="joeuser@indivo.example.org" at="2010-09-03T12:45:12Z" status="archived">
<reason>no longer relevant</reason>
</DocumentStatus>
...
</DocumentStatusHistory>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/documents/{DOCUMENT_ID}/versions/",
"view_func_name":"document_versions",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'DOCUMENT_ID':'The unique identifier of the Indivo document',
},
"query_opts":{
'status':'The account or document status to filter by.',
'order_by':'See :ref:`query-operators`.',
'limit':'See :ref:`query-operators`.',
'offset':'See :ref:`query-operators`.',
},
"data_fields":{
},
"description":"Retrieve the versions of a document.",
"return_desc":":http:statuscode:`200` with a list of document versions, or :http:statuscode:`404` if ``DOCUMENT_ID`` is invalid.",
"return_ex":'''
<Documents record_id="123" total_document_count="4">
<Document id="14c81023-c84f-496d-8b8e-9438280441d3" type="" digest="7e9bc09276e0829374fd810f96ed98d544649703db3a9bc231550a0b0e5bcb1c" size="77">
<createdAt>2009-05-04T17:05:33</createdAt>
<creator id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</creator>
<suppressedAt>2009-05-06T17:05:33</suppressedAt>
<suppressor id="steve@indivo.org" type="account">
<fullname>Steve Zabak</fullname>
</suppressor>
<original id="14c81023-c84f-496d-8b8e-9438280441d3" />
<latest id="14c81023-c84f-496d-8b8e-9438280441d3" createdAt="2009-05-05T17:05:33" createdBy="steve@indivo.org" />
<label>HBA1C reading</label>
<status>active</status>
<nevershare>false</nevershare>
<relatesTo>
<relation type="http://indivo.org/vocab/documentrels#attachment" count="1" />
<relation type="http://indivo.org/vocab/documentrels#annotation" count="5" />
</relatesTo>
<isRelatedFrom>
<relation type="http://indivo.org/vocab/documentrels#interpretation" count="1" />
</isRelatedFrom>
</Document>
...
</Documents>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/inbox/{MESSAGE_ID}",
"view_func_name":"record_send_message",
"access_doc":"Any admin app, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'MESSAGE_ID':'The unique external identifier of the Indivo Message, for idempotency.',
},
"query_opts":{
},
"data_fields":{
'body':'The message body. Defaults to ``[no body]``.',
'body_type':'The formatting for the message body. Options are ``plaintext``, ``markdown``. Defaults to ``plaintext``.',
'num_attachments':'The number of attachments this message requires. Attachments are uploaded with calls to :http:post:`/records/{RECORD_ID}/inbox/{MESSAGE_ID}/attachments/{ATTACHMENT_NUM}`. Defaults to 0.',
'severity':'The importance of the message. Options are ``low``, ``medium``, ``high``. Defaults to ``low``.',
'subject':'The message subject. Defaults to ``[no subject]``.',
},
"description":"Send a message to a record.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`400` if ``MESSAGE_ID`` was a duplicate. Also triggers notification emails to accounts authorized to view messages for the passed record.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/inbox/{MESSAGE_ID}/attachments/{ATTACHMENT_NUM}",
"view_func_name":"record_message_attach",
"access_doc":"Any admin app, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'ATTACHMENT_NUM':'The 1-indexed number corresponding to the message attachment',
'MESSAGE_ID':'The unique identifier of the Indivo Message',
},
"query_opts":{
},
"data_fields":{
'':'The raw XML attachment data.',
},
"description":"Attach a document to an Indivo message.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`400` if ``ATTACHMENT_NUM`` has already been uploaded.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
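# --- Illustrative note (added commentary, not part of the original call definitions) ---
# Sending a message with an attachment is a two-step flow, sketched here with
# hypothetical ids. First declare how many attachments the message will carry,
# then upload each one by its 1-indexed number:
#
#   POST /records/123/inbox/msg-001
#       subject=Lab+results&body=See+attached&num_attachments=1
#   POST /records/123/inbox/msg-001/attachments/1
#       (raw XML attachment data in the request body)
#
# The message id also acts as an idempotency key, so re-posting the same
# MESSAGE_ID returns a 400 rather than creating a duplicate message.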
{
"method":"POST",
"path":"/records/{RECORD_ID}/notifications/",
"view_func_name":"record_notify",
"access_doc":"Any admin app, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
'content':'The plaintext content of the notification.',
'app_url':'A callback url to the app for more information. **OPTIONAL**.',
'document_id':'The id of the document to which this notification pertains. **OPTIONAL**.',
},
"description":"Send a notification about a record to all accounts authorized to be notified.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`400` if *content* wasn't passed.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/notify",
"view_func_name":"record_notify",
"access_doc":"Any admin app, or a user app with access to the record.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
'content':'The plaintext content of the notification.',
'app_url':'A callback url to the app for more information. **OPTIONAL**.',
'document_id':'The id of the document to which this notification pertains. **OPTIONAL**.',
},
"description":"Send a notification about a record to all accounts authorized to be notified.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`400` if *content* wasn't passed.",
"return_ex":'''
<ok/>
''',
"deprecated": ('1.0', 'Use :http:post:`/records/{RECORD_ID}/notifications/` instead.'),
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/owner",
"view_func_name":"record_get_owner",
"access_doc":"A principal in full control of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
},
"description":"Get the owner of a record.",
"return_desc":":http:statuscode:`200 Success.`",
"return_ex":'''
<Account id='joeuser@example.com' />
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/owner",
"view_func_name":"record_set_owner",
"access_doc":"Any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
'':'The email address of the new account owner.',
},
"description":"Set the owner of a record.",
"return_desc":":http:statuscode:`200` with information about the account, or :http:statuscode:`400` if the passed email address is invalid.",
"return_ex":'''
<Account id="joeuser@indivo.example.org">
<fullName>Joe User</fullName>
<contactEmail>joeuser@gmail.com</contactEmail>
<lastLoginAt>2010-05-04T15:34:23Z</lastLoginAt>
<totalLoginCount>43</totalLoginCount>
<failedLoginCount>0</failedLoginCount>
<state>active</state>
<lastStateChange>2009-04-03T13:12:12Z</lastStateChange>
<authSystem name="password" username="joeuser" />
<authSystem name="hospital_sso" username="Joe_User" />
</Account>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"PUT",
"path":"/records/{RECORD_ID}/owner",
"view_func_name":"record_set_owner",
"access_doc":"Any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
'':'The email address of the new account owner.',
},
"description":"Set the owner of a record.",
"return_desc":":http:statuscode:`200` with information about the account, or :http:statuscode:`400` if the passed email address is invalid.",
"return_ex":'''
<Account id="joeuser@indivo.example.org">
<fullName>Joe User</fullName>
<contactEmail>joeuser@gmail.com</contactEmail>
<lastLoginAt>2010-05-04T15:34:23Z</lastLoginAt>
<totalLoginCount>43</totalLoginCount>
<failedLoginCount>0</failedLoginCount>
<state>active</state>
<lastStateChange>2009-04-03T13:12:12Z</lastStateChange>
<authSystem name="password" username="joeuser" />
<authSystem name="hospital_sso" username="Joe_User" />
</Account>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/experimental/ccr",
"view_func_name":"report_ccr",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
},
"description":"Export patient data as a Continuity of Care Record (CCR) document.",
"return_desc":":http:statuscode:`200` with an **EXPERIMENTAL** CCR document.",
"return_ex":'''
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>0</CCRDocumentObjectID>
<Language>
<Text>ENGLISH</Text>
</Language>
<Version>V1.0</Version>
<DateTime>
<Type>
<Text>Create</Text>
<ObjectAttribute>
<Attribute>DisplayDate</Attribute>
<AttributeValue>
<Value>09/30/10</Value>
</AttributeValue>
</ObjectAttribute>
</Type>
<ExactDateTime>2010-05-04T15:34:23Z</ExactDateTime>
</DateTime>
<Patient>
<ActorID>123</ActorID>
</Patient>
<From>
<ActorLink/>
</From>
<Body>
<Medications>
<Medication>
<CCRDataObjectID>789</CCRDataObjectID>
<DateTime>
<Type>
<Text>Dispense date</Text>
</Type>
<ExactDateTime>2010-05-04T15:34:23Z</ExactDateTime>
</DateTime>
<Status>
<Text>Active</Text>
</Status>
<Product>
<ProductName>
<Text>Vioxx</Text>
<Code>
<Value>C1234</Value>
<CodingSystem>RxNorm</CodingSystem>
</Code>
</ProductName>
<Strength>
<Value>20</Value>
<Units>
<Unit>mg</Unit>
</Units>
</Strength>
</Product>
<Directions>
<Direction>
<Dose>
<Value>1</Value>
<Units>
<Unit>Pills</Unit>
</Units>
</Dose>
<Route>
<Text>Oral</Text>
</Route>
<Frequency>
<Value>1QR</Value>
</Frequency>
</Direction>
</Directions>
</Medication>
...
</Medications>
<Immunizations>
<Immunization>
<CCRDataObjectID>567</CCRDataObjectID>
<DateTime>
<Type>
<Text>Start date</Text>
</Type>
<ExactDateTime>2010-05-04T15:34:23Z</ExactDateTime>
</DateTime>
<Product>
<ProductName>
<Text>Rubella</Text>
<Code>
<Value>C1345</Value>
<CodingSystem>HL7 Vaccines</CodingSystem>
</Code>
</ProductName>
</Product>
</Immunization>
...
</Immunizations>
<VitalSigns>
...
</VitalSigns>
...
</Body>
<Actors>
</Actors>
</ContinuityOfCareRecord>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/allergies/",
"view_func_name":"allergy_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the allergy data for a given record.",
"return_desc":":http:statuscode:`200` with a list of allergies, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="allergen_name" value="penicillin"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Allergy xmlns="http://indivo.org/vocab/xml/documents#">
<dateDiagnosed>2009-05-16</dateDiagnosed>
<diagnosedBy>Children's Hospital Boston</diagnosedBy>
<allergen>
<type type="http://codes.indivo.org/codes/allergentypes/" value="drugs">Drugs</type>
<name type="http://codes.indivo.org/codes/allergens/" value="penicillin">Penicillin</name>
</allergen>
<reaction>blue rash</reaction>
<specifics>this only happens on weekends</specifics>
</Allergy>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
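# --- Illustrative note (added commentary, not part of the original call definitions) ---
# The minimal reporting calls accept a shared set of query operators. As a
# hypothetical example, the QueryParams echoed in the allergy response above
# would correspond to a request along these lines (``123`` is a placeholder
# record id):
#
#   GET /records/123/reports/minimal/allergies/?allergen_name=penicillin
#       &date_range=date_measured*1995-03-10T00:00:00Z*&limit=100&offset=0
#
# See :ref:`query-operators` and :ref:`valid-query-fields` for the full set of
# accepted parameters.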
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/equipment/",
"view_func_name":"equipment_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the equipment data for a given record.",
"return_desc":":http:statuscode:`200` with a list of equipment, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="allergen_name" value="penicillin"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Equipment xmlns="http://indivo.org/vocab/xml/documents#">
<dateStarted>2009-02-05</dateStarted>
<dateStopped>2010-06-12</dateStopped>
<type>cardiac</type>
<name>Pacemaker</name>
<vendor>Acme Medical Devices</vendor>
<id>167-ABC-23</id>
<description>it works</description>
<specification>blah blah blah</specification>
</Equipment>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/immunizations/",
"view_func_name":"immunization_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the immunization data for a given record.",
"return_desc":":http:statuscode:`200` with a list of immunizations, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="allergen_name" value="penicillin"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Immunization xmlns="http://indivo.org/vocab/xml/documents#">
<dateAdministered>2009-05-16T12:00:00</dateAdministered>
<administeredBy>Children's Hospital Boston</administeredBy>
<vaccine>
<type type="http://codes.indivo.org/vaccines#" value="hep-B">Hepatitis B</type>
<manufacturer>Oolong Pharmaceuticals</manufacturer>
<lot>AZ1234567</lot>
<expiration>2009-06-01</expiration>
</vaccine>
<sequence>2</sequence>
<anatomicSurface type="http://codes.indivo.org/anatomy/surfaces#" value="shoulder">Shoulder</anatomicSurface>
<adverseEvent>pain and rash</adverseEvent>
</Immunization>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/labs/",
"view_func_name":"lab_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the lab data for a given record.",
"return_desc":":http:statuscode:`200` with a list of labs, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="lab_type" value="hematology"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<LabReport xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>1998-07-16T12:00:00Z</dateMeasured>
<labType>hematology</labType>
<laboratory>
<name>Quest</name>
<address>300 Longwood Ave, Boston MA 02215</address>
</laboratory>
<comments>was looking pretty sick</comments>
<firstPanelName>CBC</firstPanelName>
</LabReport>
</Item>
</Report>
<Report>
<Meta>
<Document id="1b7270a6-5925-450c-9273-5a74386cef63" type="http://indivo.org/vocab/xml/documents#Lab" size="1653" digest="c1be22813ab83f6b3858878a802f372eef754fcdd285e44a5fdb7387d6ee3667" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="1b7270a6-5925-450c-9273-5a74386cef63"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<LabReport xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>2009-07-16T12:00:00Z</dateMeasured>
<labType>hematology</labType>
<laboratory>
<name>Quest</name>
<address>300 Longwood Ave, Boston MA 02215</address>
</laboratory>
<comments>was looking pretty sick</comments>
<firstPanelName>CBC</firstPanelName>
</LabReport>
</Item>
</Report>
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/measurements/{LAB_CODE}/",
"view_func_name":"measurement_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'LAB_CODE':'The identifier corresponding to the measurement being made.',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the measurement data for a given record.",
"return_desc":":http:statuscode:`200` with a list of measurements, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
<Filter name="lab_type" value="hematology"/>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Measurement" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Measurement id="1234" value="120" type="blood pressure systolic" datetime="2011-03-02T00:00:00Z" unit="mmHg" source_doc="3456" />
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/medications/",
"view_func_name":"medication_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the medication data for a given record.",
"return_desc":":http:statuscode:`200` with a list of medications, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Medication" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Medication xmlns="http://indivo.org/vocab/xml/documents#">
<dateStarted>2009-02-05</dateStarted>
<name type="http://indivo.org/codes/meds#" abbrev="c2i" value="COX2 Inhibitor" />
<brandName type="http://indivo.org/codes/meds#" abbrev="vioxx" value="Vioxx" />
<dose>
<value>3</value>
<unit type="http://indivo.org/codes/units#" value="pills" abbrev="p" />
</dose>
<route type="http://indivo.org/codes/routes#" value="PO">By Mouth</route>
<strength>
<value>100</value>
<unit type="http://indivo.org/codes/units#" value="mg" abbrev="mg">Milligrams</unit>
</strength>
<frequency type="http://indivo.org/codes/frequency#" value="daily">daily</frequency>
<prescription>
<by>
<name>Dr. Ken Mandl</name>
<institution>Children's Hospital Boston</institution>
</by>
<on>2009-02-01</on>
<stopOn>2010-01-31</stopOn>
<dispenseAsWritten>true</dispenseAsWritten>
<!-- this duration means 2 months -->
<duration>P2M</duration>
<!-- does this need more structure? -->
<refillInfo>once a month for 3 months</refillInfo>
<instructions>don't take them all at once!</instructions>
</prescription>
</Medication>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/problems/",
"view_func_name":"problem_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the problem data for a given record.",
"return_desc":":http:statuscode:`200` with a list of problems, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Problem" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Problem xmlns="http://indivo.org/vocab/xml/documents#">
<dateOnset>2009-05-16T12:00:00</dateOnset>
<dateResolution>2009-05-16T16:00:00</dateResolution>
<name type="http://codes.indivo.org/problems/" value="123" abbrev="MI">Myocardial Infarction</name>
<comments>mild heart attack</comments>
<diagnosedBy>Dr. Mandl</diagnosedBy>
</Problem>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/procedures/",
"view_func_name":"procedure_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the procedure data for a given record.",
"return_desc":":http:statuscode:`200` with a list of procedures, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#Procedure" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<Procedure xmlns="http://indivo.org/vocab/xml/documents#">
<datePerformed>2009-05-16T12:00:00</datePerformed>
<name type="http://codes.indivo.org/procedures#" value="85" abbrev="append">Appendectomy</name>
<provider>
<name>Kenneth Mandl</name>
<institution>Children's Hospital Boston</institution>
</provider>
</Procedure>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/simple-clinical-notes/",
"view_func_name":"simple_clinical_notes_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the simple_clinical_notes data for a given record.",
"return_desc":":http:statuscode:`200` with a list of notes, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#SimpleClinicalNote" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<SimpleClinicalNote xmlns="http://indivo.org/vocab/xml/documents#">
<dateOfVisit>2010-02-02T12:00:00Z</dateOfVisit>
<finalizedAt>2010-02-03T13:12:00Z</finalizedAt>
<visitType type="http://codes.indivo.org/visit-types#" value="acute">Acute Care</visitType>
<visitLocation>Longfellow Medical</visitLocation>
<specialty type="http://codes.indivo.org/specialties#" value="hem-onc">Hematology/Oncology</specialty>
<signature>
<at>2010-02-03T13:12:00Z</at>
<provider>
<name>Kenneth Mandl</name>
<institution>Children's Hospital Boston</institution>
</provider>
</signature>
<signature>
<provider>
<name>Isaac Kohane</name>
<institution>Children's Hospital Boston</institution>
</provider>
</signature>
<chiefComplaint>stomach ache</chiefComplaint>
<content>Patient presents with ... </content>
</SimpleClinicalNote>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/vitals/",
"view_func_name":"vitals_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the vitals data for a given record.",
"return_desc":":http:statuscode:`200` with a list of notes, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#VitalSign" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<VitalSign xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>2009-05-16T15:23:21</dateMeasured>
<name type="http://codes.indivo.org/vitalsigns/" value="123" abbrev="BPsys">Blood Pressure Systolic</name>
<value>145</value>
<unit type="http://codes.indivo.org/units/" value="31" abbrev="mmHg">millimeters of mercury</unit>
<site>left arm</site>
<position>sitting down</position>
</VitalSign>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/reports/minimal/vitals/{CATEGORY}/",
"view_func_name":"vitals_list",
"access_doc":"A user app with access to the record, or a principal in full control of the record",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'CATEGORY':'The category of vital sign, e.g. ``weight``, ``Blood_Pressure_Systolic``',
},
"query_opts":{
'status':'The account or document status to filter by',
'{FIELD}':'See :ref:`query-operators`, :ref:`valid-query-fields`',
'order_by':'See :ref:`query-operators`',
'aggregate_by':'See :ref:`query-operators`',
'date_range':'See :ref:`query-operators`',
'date_group':'See :ref:`query-operators`',
'group_by':'See :ref:`query-operators`',
'limit':'See :ref:`query-operators`',
'offset':'See :ref:`query-operators`',
},
"data_fields":{
},
"description":"List the vitals data for a given record.",
"return_desc":":http:statuscode:`200` with a list of notes, or :http:statuscode:`400` if any invalid query parameters were passed.",
"return_ex":'''
<Reports xmlns="http://indivo.org/vocab/xml/documents#">
<Summary total_document_count="2" limit="100" offset="0" order_by="date_measured" />
<QueryParams>
<DateRange value="date_measured*1995-03-10T00:00:00Z*" />
<Filters>
</Filters>
</QueryParams>
<Report>
<Meta>
<Document id="261ca370-927f-41af-b001-7b615c7a468e" type="http://indivo.org/vocab/xml/documents#VitalSign" size="1653" digest="0799971784e5a2d199cd6585415a8cd57f7bf9e4f8c8f74ef67a1009a1481cd6" record_id="">
<createdAt>2011-05-02T17:48:13Z</createdAt>
<creator id="mymail@mail.ma" type="Account">
<fullname>full name</fullname>
</creator>
<original id="261ca370-927f-41af-b001-7b615c7a468e"/>
<label>testing</label>
<status>active</status>
<nevershare>false</nevershare>
</Document>
</Meta>
<Item>
<VitalSign xmlns="http://indivo.org/vocab/xml/documents#">
<dateMeasured>2009-05-16T15:23:21</dateMeasured>
<name type="http://codes.indivo.org/vitalsigns/" value="123" abbrev="BPsys">Blood Pressure Systolic</name>
<value>145</value>
<unit type="http://codes.indivo.org/units/" value="31" abbrev="mmHg">millimeters of mercury</unit>
<site>left arm</site>
<position>sitting down</position>
</VitalSign>
</Item>
</Report>
...
</Reports>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/records/{RECORD_ID}/shares/",
"view_func_name":"record_shares",
"access_doc":"The owner of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
},
"description":"List the shares of a record.",
"return_desc":":http:statuscode:`200` with a list of shares.",
"return_ex":'''
<Shares record="123">
<Share id="678" account="joeuser@example.com" />
<Share id="789" pha="problems@apps.indivo.org" />
...
</Shares>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/shares/",
"view_func_name":"record_share_add",
"access_doc":"The owner of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
},
"query_opts":{
},
"data_fields":{
'account_id':'The email address of the recipient account. **REQUIRED**.',
'role_label':'A label for the share, usually the relationship between the owner and the recipient (i.e. ``Guardian``). **OPTIONAL**.',
},
"description":"Fully share a record with another account.",
"return_desc":":http:statuscode:`200 Success`, :http:statuscode:`400` if *account_id* was not passed, or :http:statuscode:`404` if the passed *account_id* was invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"DELETE",
"path":"/records/{RECORD_ID}/shares/{OTHER_ACCOUNT_ID}",
"view_func_name":"record_share_delete",
"access_doc":"The owner of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'OTHER_ACCOUNT_ID':'The email identifier of the Indivo account to share with',
},
"query_opts":{
},
"data_fields":{
},
"description":"Undo a full record share with an account.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``OTHER_ACCOUNT_ID`` is invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": None,
"added": None,
"changed": None,
},
{
"method":"POST",
"path":"/records/{RECORD_ID}/shares/{OTHER_ACCOUNT_ID}/delete",
"view_func_name":"record_share_delete",
"access_doc":"The owner of the record, or any admin app.",
"url_params":{
'RECORD_ID':'The id string associated with the Indivo record',
'OTHER_ACCOUNT_ID':'The email identifier of the Indivo account to share with',
},
"query_opts":{
},
"data_fields":{
},
"description":"Undo a full record share with an account.",
"return_desc":":http:statuscode:`200 Success`, or :http:statuscode:`404` if ``OTHER_ACCOUNT_ID`` is invalid.",
"return_ex":'''
<ok/>
''',
"deprecated": ('1.0', 'Use :http:delete:`/records/{RECORD_ID}/shares/{OTHER_ACCOUNT_ID}` instead.'),
"added": None,
"changed": None,
},
{
"method":"GET",
"path":"/version",
"view_func_name":"get_version",
"access_doc":"Any principal in Indivo.",
"url_params":{
},
"query_opts":{
},
"data_fields":{
},
"description":"Return the current version of Indivo.",
"return_desc":":http:statuscode:`200` with the current version of Indivo.",
"return_ex":'''
1.0.0.0
''',
"deprecated": None,
"added": None,
"changed": None,
}]
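# --- Illustrative sketch (added commentary, not part of the original api-skeleton source) ---
# Each entry above is a plain dict describing one REST call. A documentation
# generator could walk such a list and emit Sphinx ``http`` directives from it.
# The helper below is a minimal, hypothetical example of that idea; the actual
# generator shipped with Indivo may differ.
def _render_call_stub(call):
    """Return a minimal reStructuredText stub for one API-call dict."""
    lines = [".. http:%s:: %s" % (call["method"].lower(), call["path"]), ""]
    lines.append("   %s" % call["description"])
    for name, desc in sorted(call.get("url_params", {}).items()):
        lines.append("   :param %s: %s" % (name, desc))
    for name, desc in sorted(call.get("query_opts", {}).items()):
        lines.append("   :query %s: %s" % (name, desc))
    lines.append("   %s" % call["return_desc"])
    return "\n".join(lines)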
|
newmediamedicine/indivo_server_1_0
|
doc/sphinx/autogen/api-skeleton.py
|
Python
|
gpl-3.0
| 210,189
|
[
"VisIt"
] |
acd8c2761eb5111dd3b71883341755ebf6269384dd96879816681fb4b2116d61
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
# for root finding for continuous distribution ppf, and max likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have caused import problems
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains fraction alpha [0, 1] of the
distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
# Frozen RV class
class rv_frozen:
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.a, self.b = self.dist._get_support(*shapes)
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def support(self):
return self.dist.support(*self.args, **self.kwds)
def argsreduce(cond, *args):
"""Clean arguments to:
    1. Ensure all arguments are iterable (arrays of dimension at least one).
2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is
True, in 1D.
Return list of processed arguments.
Examples
--------
>>> rng = np.random.default_rng()
>>> A = rng.random((4, 5))
>>> B = 2
>>> C = rng.random((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(4, 5)
>>> B1.shape
(1,)
>>> C1.shape
(1, 5)
>>> cond[2,:] = 0
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(15,)
>>> B1.shape
(1,)
>>> C1.shape
(15,)
"""
# some distributions assume arguments are iterable.
newargs = np.atleast_1d(*args)
# np.atleast_1d returns an array if only one argument, or a list of arrays
# if more than one argument.
if not isinstance(newargs, list):
newargs = [newargs, ]
if np.all(cond):
# broadcast arrays with cond
*newargs, cond = np.broadcast_arrays(*newargs, cond)
return [arg.ravel() for arg in newargs]
s = cond.shape
# np.extract returns flattened arrays, which are not broadcastable together
# unless they are either the same size or size == 1.
return [(arg if np.size(arg) == 1
else np.extract(cond, np.broadcast_to(arg, s)))
for arg in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return _lazywhere(
corr > 0,
(res, corr),
f=lambda r, c: r + np.log(c),
fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
# Copy of _ncx2_log_pdf avoiding np.log(0) when corr = 0
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
class rv_generic:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super().__init__()
# figure out if _stats signature has 'moments' keyword
sig = _getfullargspec(self._stats)
self._stats_has_moments = ((sig.varkw is not None) or
('moments' in sig.args) or
('moments' in sig.kwonlyargs))
self._random_state = check_random_state(seed)
        # For historical reasons, `size` was made an attribute that was read
        # inside _rvs(). The code is being changed so that 'size' is an
        # argument to self._rvs(). However, some external (non-SciPy)
        # distributions have not been updated. Maintain backwards
        # compatibility by checking if the self._rvs() signature has the
        # 'size' keyword, or a **kwarg, and if not set self._size inside
        # self.rvs() before calling self._rvs().
argspec = inspect.getfullargspec(self._rvs)
self._rvs_uses_size_attribute = (argspec.varkw is None and
'size' not in argspec.args and
'size' not in argspec.kwonlyargs)
# Warn on first use only
self._rvs_size_warned = False
@property
def random_state(self):
"""Get or set the generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
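        Examples
        --------
        A small sketch for illustration (note this mutates the shared
        ``scipy.stats.norm`` instance's generator):
        >>> from scipy.stats import norm
        >>> norm.random_state = 1234
        >>> type(norm.random_state).__name__
        'RandomState'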
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __setstate__(self, state):
try:
self.__dict__.update(state)
# attaches the dynamically created methods on each instance.
# if a subclass overrides rv_generic.__setstate__, or implements
# it's own _attach_methods, then it must make sure that
# _attach_argparser_methods is called.
self._attach_methods()
except ValueError:
# reconstitute an old pickle scipy<1.6, that contains
# (_ctor_param, random_state) as state
self._ctor_param = state[0]
self._random_state = state[1]
self.__init__()
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_* instance.
This method must be overridden by subclasses, and must itself call
_attach_argparser_methods. This method is called in __init__ in
subclasses, and in __setstate__
"""
raise NotImplementedError
def _attach_argparser_methods(self):
"""
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Should be called from `_attach_methods`, typically in __init__ and
during unpickling (__setstate__)
"""
ns = {}
exec(self._parse_arg_template, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name, types.MethodType(ns[name], self))
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser string for the shape arguments.
This method should be called in __init__ of a class for each
distribution. It creates the `_parse_arg_template` attribute that is
then used by `_attach_argparser_methods` to dynamically create and
attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
methods to the instance.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, str):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getfullargspec(meth) # NB does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.varkw is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.kwonlyargs:
raise TypeError(
'kwonly args are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
# this string is used by _attach_argparser_methods
self._parse_arg_template = parse_arg_template % dct
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # needed because %(shapes)s appears both with and without ", "
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
try:
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
except TypeError as e:
raise Exception("Unable to construct docstring for "
"distribution \"%s\": %s" %
(self.name, repr(e))) from e
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
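        Examples
        --------
        A minimal illustrative sketch, using ``scipy.stats.gamma``:
        >>> from scipy.stats import gamma
        >>> frozen = gamma(2.0, loc=0.0, scale=3.0)
        >>> float(frozen.mean()), float(frozen.var())
        (6.0, 18.0)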
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = self.generic_moment(n, *args)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters. %s, %s, %s" % (size, size_,
bcast_shape))
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _get_support(self, *args, **kwargs):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
def _rvs(self, *args, size=None, random_state=None):
# This method must handle size being a tuple, and it must
# properly broadcast *args and size. size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
# Use basic inverse cdf algorithm for RV generation as default.
U = random_state.uniform(size=size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
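        Examples
        --------
        A short sketch of typical usage (the drawn values depend on the seed):
        >>> from scipy.stats import norm
        >>> sample = norm.rvs(loc=10, scale=2, size=5, random_state=12345)
        >>> sample.shape
        (5,)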
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
random_state = check_random_state(rndm)
else:
random_state = self._random_state
# Maintain backwards compatibility by setting self._size
# for distributions that still need it.
if self._rvs_uses_size_attribute:
if not self._rvs_size_warned:
warnings.warn(
f'The signature of {self._rvs} does not contain '
f'a "size" keyword. Such signatures are deprecated.',
np.VisibleDeprecationWarning)
self._rvs_size_warned = True
self._size = size
self._random_state = random_state
vals = self._rvs(*args)
else:
vals = self._rvs(*args, size=size, random_state=random_state)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
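        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> from scipy.stats import norm
        >>> mean, var = norm.stats(loc=3, scale=2, moments='mv')
        >>> float(mean), float(var)
        (3.0, 4.0)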
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = np.full(shape(cond), fill_value=self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
# if mean is inf then var is also inf
with np.errstate(invalid='ignore'):
mu2 = np.where(np.isfinite(mu), mu2p - mu**2, np.inf)
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
with np.errstate(invalid='ignore'):
mu3 = (-mu*mu - 3*mu2)*mu + mu3p
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
with np.errstate(invalid='ignore'):
mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
with np.errstate(invalid='ignore'):
mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = [default.copy() for _ in moments]
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, scale, *args)
goodscale = goodargs[0]
goodargs = goodargs[1:]
place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
return output
def moment(self, n, *args, **kwds):
"""n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
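        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration
        (the second raw moment of a zero-mean normal is its variance):
        >>> from scipy.stats import norm
        >>> float(norm.moment(2, loc=0, scale=2))
        4.0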
"""
shapes, loc, scale = self._parse_args(*args, **kwds)
args = np.broadcast_arrays(*(*shapes, loc, scale))
*shapes, loc, scale = args
i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
i1 = np.logical_and(i0, loc == 0)
i2 = np.logical_and(i0, loc != 0)
args = argsreduce(i0, *shapes, loc, scale)
*shapes, loc, scale = args
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
val = np.empty(loc.shape) # val needs to be indexed by loc
val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
result = zeros(i0.shape)
place(result, ~i0, self.badvalue)
if i1.any():
res1 = scale[loc == 0]**n * val[loc == 0]
place(result, i1, res1)
if i2.any():
mom = [mu, mu2, g1, g2]
arrs = [i for i in mom if i is not None]
idx = [i for i in range(4) if mom[i] is not None]
            if idx:
arrs = argsreduce(loc != 0, *arrs)
j = 0
for i in idx:
mom[i] = arrs[j]
j += 1
mu, mu2, g1, g2 = mom
args = argsreduce(loc != 0, *shapes, loc, scale, val)
*shapes, loc, scale, val = args
res2 = zeros(loc.shape, dtype='d')
fac = scale / loc
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
shapes)
res2 += comb(n, k, exact=True)*fac**k * valk
res2 += fac**n * val
res2 *= loc**n
place(result, i2, res2)
if result.ndim == 0:
return result.item()
return result
def median(self, *args, **kwds):
"""Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
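        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> from scipy.stats import norm
        >>> float(norm.median(loc=5, scale=2))
        5.0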
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
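        Examples
        --------
        A minimal sketch, using ``scipy.stats.expon`` for illustration:
        >>> from scipy.stats import expon
        >>> float(expon.mean(loc=1, scale=3))
        4.0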
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
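        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> from scipy.stats import norm
        >>> float(norm.var(loc=0, scale=3))
        9.0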
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
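        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> from scipy.stats import norm
        >>> float(norm.std(loc=0, scale=4))
        4.0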
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
            end-points of the range that contains ``100 * alpha %`` of the rv's
possible values.
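        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> from scipy.stats import norm
        >>> lo, hi = norm.interval(0.95, loc=0, scale=1)
        >>> round(float(lo), 2), round(float(hi), 2)
        (-1.96, 1.96)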
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
def support(self, *args, **kwargs):
"""Support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : array_like
end-points of the distribution's support.
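        Examples
        --------
        A minimal sketch, using ``scipy.stats.uniform`` for illustration:
        >>> from scipy.stats import uniform
        >>> a, b = uniform.support(loc=2, scale=3)
        >>> float(a), float(b)
        (2.0, 5.0)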
"""
args, loc, scale = self._parse_args(*args, **kwargs)
arrs = np.broadcast_arrays(*args, loc, scale)
args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
cond = self._argcheck(*args) & (scale > 0)
_a, _b = self._get_support(*args)
if cond.all():
return _a * scale + loc, _b * scale + loc
elif cond.ndim == 0:
return self.badvalue, self.badvalue
# promote bounds to at least float to fill in the badvalue
_a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
out_a, out_b = _a * scale + loc, _b * scale + loc
place(out_a, 1-cond, self.badvalue)
place(out_b, 1-cond, self.badvalue)
return out_a, out_b
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
    at most one non-None value in `kwds` associated with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
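    Examples
    --------
    A small sketch of the intended behaviour, with a hypothetical ``kwds``:
    >>> kwds = {'fa': 1.5, 'floc': 0}
    >>> _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
    1.5
    >>> kwds
    {'floc': 0}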
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
        The value in result arrays that indicates a value for which some
        argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
    the support does depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
    if the inverse cdf can be expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in all
cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
    scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
      Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self.extradoc = extradoc
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""
Attaches dynamically created methods to the rv_continuous instance.
"""
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
if self.moment_type == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
factor = 10.
left, right = self._get_support(*args)
if np.isinf(left):
left = min(-factor, right)
while self._ppf_to_solve(left, q, *args) > 0.:
left, right = left * factor, left
# left is now such that cdf(left) <= q
# if right has changed, then cdf(right) > q
if np.isinf(right):
right = max(factor, left)
while self._ppf_to_solve(right, q, *args) < 0.:
left, right = right, right * factor
# right is now such that cdf(right) >= q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
def pdf(self, x, *args, **kwds):
"""Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
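        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> from scipy.stats import norm
        >>> round(float(norm.pdf(0.0)), 6)
        0.398942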
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
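        Examples
        --------
        A minimal sketch, using ``scipy.stats.uniform`` for illustration:
        >>> from scipy.stats import uniform
        >>> float(uniform.cdf(0.25, loc=0, scale=1))
        0.25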
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= np.asarray(_b)) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
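        Examples
        --------
        A minimal sketch, using ``scipy.stats.uniform`` for illustration:
        >>> from scipy.stats import uniform
        >>> float(uniform.sf(0.25, loc=0, scale=1))
        0.75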
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
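        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration
        (the 0.5 quantile of a symmetric distribution equals ``loc``):
        >>> from scipy.stats import norm
        >>> float(norm.ppf(0.5, loc=7, scale=2))
        7.0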
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
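        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> from scipy.stats import norm
        >>> float(norm.isf(0.5, loc=3, scale=1))
        3.0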
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def nnlf(self, theta, x):
"""Negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
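        Examples
        --------
        A minimal sketch, using ``scipy.stats.norm`` for illustration:
        >>> import numpy as np
        >>> from scipy.stats import norm
        >>> x = np.array([-1.0, 0.0, 1.0])
        >>> nll = norm.nnlf((0.0, 1.0), x)   # theta = (loc, scale)
        >>> bool(np.isclose(nll, -np.sum(norm.logpdf(x))))
        True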
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
def _fitstart(self, data, args=None):
"""Starting point for fit (shape arguments + loc + scale)."""
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds, data=None):
"""
Return the (possibly reduced) function to optimize in order to find MLE
estimates for the .fit method.
"""
# Convert fixed shape parameters to the standard numeric form: e.g. for
# stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
# for `f0`, `fa` or 'fix_a'. The following converts the latter two
# into the first (numeric) form.
shapes = []
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
if val is not None:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
methods = {"mle", "mm"}
method = kwds.pop('method', "mle").lower()
if method == "mm":
n_params = len(shapes) + 2 - len(fixedn)
exponents = (np.arange(1, n_params+1))[:, np.newaxis]
data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
def objective(theta, x):
return self._moment_error(theta, x, data_moments)
elif method == "mle":
objective = self._penalized_nnlf
else:
raise ValueError("Method '{0}' not available; must be one of {1}"
.format(method, methods))
if len(fixedn) == 0:
func = objective
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return objective(newtheta, x)
return x0, func, restore, args
def _moment_error(self, theta, x, data_moments):
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
for i in range(len(data_moments))])
if np.any(np.isnan(dist_moments)):
raise ValueError("Method of moments encountered a non-finite "
"distribution moment and cannot continue. "
"Consider trying method='MLE'.")
return (((data_moments - dist_moments) /
np.maximum(np.abs(data_moments), 1e-8))**2).sum()
def fit(self, data, *args, **kwds):
"""
Return estimates of shape (if applicable), location, and scale
parameters from data. The default estimation method is Maximum
Likelihood Estimation (MLE), but Method of Moments (MM)
is also available.
Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in estimating the distribution parameters.
arg1, arg2, arg3,... : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
- `loc`: initial guess of the distribution's location parameter.
- `scale`: initial guess of the distribution's scale parameter.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use.
The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
- method : The method to use. The default is "MLE" (Maximum
Likelihood Estimate); "MM" (Method of Moments)
is also available.
Returns
-------
parameter_tuple : tuple of floats
Estimates for any shape parameters (if applicable),
followed by those for location and scale.
For most random variables, shape statistics
will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
With ``method="MLE"`` (default), the fit is computed by minimizing
the negative log-likelihood function. A large, finite penalty
(rather than infinite negative log-likelihood) is applied for
observations beyond the support of the distribution.
With ``method="MM"``, the fit is computed by minimizing the L2 norm
of the relative errors between the first *k* raw (about zero) data
moments and the corresponding distribution moments, where *k* is the
number of non-fixed parameters.
More precisely, the objective function is::
(((data_moments - dist_moments)
/ np.maximum(np.abs(data_moments), 1e-8))**2).sum()
where the constant ``1e-8`` avoids division by zero in case of
vanishing data moments. Typically, this error norm can be reduced to
zero.
Note that the standard method of moments can produce parameters for
which some data are outside the support of the fitted distribution;
this implementation does nothing to prevent this.
For either method,
the returned answer is not guaranteed to be globally optimal; it
may only be locally optimal, or the optimization may fail altogether.
If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
the `fit` method will raise a ``RuntimeError``.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc``
and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
data = np.asarray(data)
method = kwds.get('method', "mle").lower()
# remember the requested method; it is checked again below for method of moments
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds, data=data)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
optimizer = _fit_determine_optimizer(optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# In some cases, method of moments can be done with fsolve/root
# instead of an optimizer, but sometimes no solution exists,
# especially when the user fixes parameters. Minimizing the sum
# of squares of the error generalizes to these cases.
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
obj = func(vals, data)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
loc, scale, shapes = self._unpack_loc_scale(vals)
if not (np.all(self._argcheck(*shapes)) and scale > 0):
raise Exception("Optimization converged to parameters that are "
"outside the range allowed by the distribution.")
if method == 'mm':
if not np.isfinite(obj):
raise Exception("Optimization failed: either a data moment "
"or fitted distribution moment is "
"non-finite.")
return vals
def _fit_loc_scale_support(self, data, *args):
"""Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
The function is not vectorized.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
return vals
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= _a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
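# --- Illustrative sketch (not part of scipy): the same bisection idea used by
# _drv2_ppfsingle above, applied to a standalone discrete CDF.  The helper
# name `_bisect_discrete_ppf_demo` and the geometric CDF are hypothetical and
# serve only to show how the bracket [a, b] shrinks until cdf(k) >= q.
def _bisect_discrete_ppf_demo(cdf, q, a, b):
    """Smallest integer k in [a, b] with cdf(k) >= q (assumes cdf(b) >= q)."""
    while b > a:
        c = (a + b) // 2
        if cdf(c) < q:
            a = c + 1      # the answer lies strictly above c
        else:
            b = c          # c still satisfies cdf(c) >= q
    return a

# Example: a geometric distribution with success probability p = 0.3 has
# cdf(k) = 1 - (1 - p)**(k + 1) for k = 0, 1, 2, ...; its 95% quantile is 8:
# _bisect_discrete_ppf_demo(lambda k: 1.0 - 0.7**(k + 1), 0.95, 0, 1000) == 8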
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
and ``pk`` must have the same shape.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in the result arrays that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
support
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive.)
The main differences are:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_discrete instance."""
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = types.MethodType(_vec_generic_moment, self)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = types.MethodType(_vppf, self)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super().rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, cond2*(cond0 == cond0), 1.0)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _a-1 + loc)
place(output, cond2, _b + loc)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond3 = (q == 0) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
# output type 'd' to handle nan and inf
lower_bound = _a - 1 + loc
upper_bound = _b + loc
place(output, cond2*(cond == cond), lower_bound)
place(output, cond3*(cond == cond), upper_bound)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return stats.entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution by numerical summation.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of a distributions in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
For heavy-tailed distributions, the expected value may or may not exist,
depending on the function, `func`. If it does exist, the sum may converge
slowly, and the accuracy of the result may be rather low. For instance, for
``zipf(4)``, the accuracy of the mean and variance computed this way is only
about 1e-5. Increasing `maxcount` and/or `chunksize` may improve the result,
but may also make the evaluation for ``zipf`` very slow.
The function is not vectorized.
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
_a, _b = self._get_support(*args)
if lb is None:
lb = _a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = _b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
if isinstance(self, rv_sample):
res = self._expect(fun, lb, ub)
return res / invfac
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
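# --- Hypothetical illustration (not part of scipy): use the chunked summation
# helper `_expect` above to approximate the mean of a Poisson distribution
# with rate mu = 3.  The pmf is written with numpy/scipy.special so the
# snippet is self-contained; the exact answer is mu itself.
def _poisson_mean_demo(mu=3.0):
    import numpy as np
    from scipy.special import gammaln

    def fun(k):
        # k * pmf(k) for the Poisson distribution, vectorized over integer k
        logpmf = -mu + k * np.log(mu) - gammaln(k + 1)
        return k * np.exp(logpmf)

    # Sum k * pmf(k) over k = 0, 1, 2, ..., starting near the mode; the
    # returned value should be close to 3.0.
    return _expect(fun, lb=0, ub=np.inf, x0=int(mu), inc=1)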
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
The constructor ignores most of the arguments; only the `values` argument is needed.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in rv_generic.__setstate__,
# which calls rv_generic._attach_methods
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created argparser methods."""
self._attach_argparser_methods()
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self, size=None, random_state=None):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = random_state.uniform(size=size)
if size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return stats.entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _expect(self, fun, lb, ub, *args, **kwds):
# ignore all args, just do a brute force summation
supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
vals = fun(supp)
return np.sum(vals)
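# --- Hypothetical illustration (not part of scipy): the `values` branch of
# rv_discrete dispatches to rv_sample above.  A three-point toy variable makes
# the cdf/ppf bookkeeping concrete; the numbers are made up.
def _rv_sample_demo():
    xk = np.array([1, 2, 4])
    pk = np.array([0.2, 0.5, 0.3])
    die = rv_discrete(name='demo_die', values=(xk, pk))
    # mean = 1*0.2 + 2*0.5 + 4*0.3 = 2.4; ppf(0.7) is the smallest xk whose
    # cumulative probability reaches 0.7, i.e. 2.
    return die.mean(), die.ppf(0.7)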
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
bc is a tuple of the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
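# --- Hypothetical illustration (not part of scipy's public API): what
# _check_shape above returns for one broadcastable combination of argument
# shape and requested size.  The demo function name is made up.
def _check_shape_demo():
    # Shape parameters broadcast to (3, 1); the requested rvs size is
    # (2, 3, 5).  Each of the 3 slots along the middle axis then needs its
    # own _rvs_scalar() call, which must fill a block of shape (2, 5).
    scalar_shape, bc = _check_shape((3, 1), (2, 3, 5))
    assert scalar_shape == (2, 5)
    assert bc == (True, False, True)
    return scalar_shape, bc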
def get_distribution_names(namespace_pairs, rv_base_class):
"""Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
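# --- Hypothetical usage sketch (not part of scipy): how get_distribution_names
# separates distribution instances from their generator classes.  The tiny
# `demo_gen` class and `demo` instance are made up for illustration and rely
# on `rv_continuous` and `np` defined earlier in this module.
def _get_distribution_names_demo():
    class demo_gen(rv_continuous):
        "A toy exponential distribution used only for this illustration."
        def _pdf(self, x):
            return np.exp(-x)

    demo = demo_gen(a=0.0, name='demo')
    pairs = [('demo_gen', demo_gen), ('demo', demo), ('_private', None)]
    names, gen_names = get_distribution_names(pairs, rv_continuous)
    # names == ['demo'] and gen_names == ['demo_gen']
    return names, gen_names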
|
matthew-brett/scipy
|
scipy/stats/_distn_infrastructure.py
|
Python
|
bsd-3-clause
| 137,114
|
[
"Gaussian"
] |
28422a670a1c88e7aa7b3e9e15b1d77a7f24551e78b8297f4656aa4135d71664
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
from pyscf.nao.m_xjl import xjl
#
#
#
class sbt_c():
'''
Spherical Bessel Transform by James Talman. Functions are given on a logarithmic mesh.
See m_log_mesh.
Args:
nr : integer, number of points on radial mesh
rr : array of points in coordinate space
kk : array of points in momentum space
lmax : integer, maximal angular momentum necessary
with_sqrt_pi_2 : if True, the transforms include the sqrt(pi/2) normalization factor
fft_flags : ??
Returns:
a class preinitialized to perform the spherical Bessel Transform
Examples:
label = 'siesta'
sv = system_vars_c(label)
sbt = sbt_c(sv.ao_log.rr, sv.ao_log.pp)
print(sbt.exe(sv.ao_log.psi_log[0,0,:], 0))
'''
def __init__(self, rr, kk, lmax=12, with_sqrt_pi_2=True, fft_flags=None):
assert(type(rr)==np.ndarray)
assert(rr[0]>0.0)
assert(type(kk)==np.ndarray)
assert(kk[0]>0.0)
self.nr = len(rr)
n = self.nr
assert(self.nr>1)
assert(lmax>-1)
self.rr,self.kk = rr,kk
nr2, self.rr3, self.kk3 = self.nr*2, rr**3, kk**3
self.rmin,self.kmin = rr[0],kk[0]
self.rhomin,self.kapmin= np.log(self.rmin),np.log(self.kmin)
self.dr_jt = np.log(rr[1]/rr[0])
dr = self.dr_jt
dt = 2.0*np.pi/(nr2*dr)
self._smallr = self.rmin*np.array([np.exp(-dr*(n-i)) for i in range(n)], dtype='float64')
self._premult = np.array([np.exp(1.5*dr*(i-n)) for i in range(2*n)], dtype='float64')
coeff = 1.0/np.sqrt(np.pi/2.0) if with_sqrt_pi_2 else 1.0
self._postdiv = np.array([coeff*np.exp(-1.5*dr*i) for i in range(n)], dtype='float64')
temp1 = np.zeros((nr2), dtype='complex128')
temp2 = np.zeros((nr2), dtype='complex128')
temp1[0] = 1.0
temp2 = np.fft.fft(temp1)
xx = sum(np.real(temp2))
if abs(nr2-xx)>1e-10 :
print(__name__, 'abs(nr2-xx)', nr2, xx)
raise SystemError('err: sbt_plan: problem with fftw sum(temp2):')
self._mult_table1 = np.zeros((lmax+1, self.nr), dtype='complex128')
for it in range(n):
tt = it*dt # Define a t value
phi3 = (self.kapmin+self.rhomin)*tt # See Eq. (33)
rad,phi = np.sqrt(10.5**2+tt**2),np.arctan((2.0*tt)/21.0)
phi1 = -10.0*phi-np.log(rad)*tt+tt+np.sin(phi)/(12.0*rad) \
-np.sin(3.0*phi)/(360.0*rad**3)+np.sin(5.0*phi)/(1260.0*rad**5) \
-np.sin(7.0*phi)/(1680.0*rad**7)
for ix in range(1,11): phi1=phi1+np.arctan((2.0*tt)/(2.0*ix-1)) # see Eqs. (27) and (28)
phi2 = -np.arctan(1.0) if tt>200.0 else -np.arctan(np.sinh(np.pi*tt/2)/np.cosh(np.pi*tt/2)) # see Eq. (20)
phi = phi1+phi2+phi3
self._mult_table1[0,it] = np.sqrt(np.pi/2)*np.exp(1j*phi)/n # Eq. (18)
if it==0 : self._mult_table1[0,it] = 0.5*self._mult_table1[0,it]
phi = -phi2 - np.arctan(2.0*tt)
if lmax>0 : self._mult_table1[1,it] = np.exp(2.0*1j*phi)*self._mult_table1[0,it] # See Eq. (21)
# Apply Eq. (24)
for lk in range(1,lmax):
phi = -np.arctan(2*tt/(2*lk+1))
self._mult_table1[lk+1,it] = np.exp(2.0*1j*phi)*self._mult_table1[lk-1,it]
# END of it in range(n):
# make the initialization for the calculation at small k values for 2N mesh values
self._mult_table2 = np.zeros((lmax+1, self.nr+1), dtype='complex128')
j_ltable = np.zeros((lmax+1,nr2), dtype='float64')
for i in range(nr2): j_ltable[0:lmax+1,i] = xjl( np.exp(self.rhomin+self.kapmin+i*dr), lmax )
for ll in range(lmax+1):
self._mult_table2[ll,:] = np.fft.rfft(j_ltable[ll,:]) /nr2
if with_sqrt_pi_2 : self._mult_table2 = self._mult_table2/np.sqrt(np.pi/2)
#
# The calculation of the Spherical Bessel Transform for given data...
#
def sbt(self, ff, am, direction=1, npow=0) :
"""
Args:
ff : numpy array containing radial orbital (values of radial orbital on logarithmic grid) to be transformed. The data must be on self.rr grid or self.kk grid provided during initialization.
am : angular momentum of the radial orbital ff[:]
direction : 1 -- real-space --> momentum space transform; -1 -- momentum space --> real-space transform.
npow : additional power for the shape of the orbital
f(xyz) = rr[i]**npow * ff[i] * Y_lm( xyz )
Result:
gg : numpy array containing the result of the Spherical Bessel Transform
gg(k) = int_0^infty ff(r) j_{am}(k*r) r**2 dr ( direction == 1 )
gg(r) = int_0^infty ff(k) j_{am}(k*r) k**2 dk ( direction == -1 )
"""
assert(type(ff)==np.ndarray)
assert(len(ff)==self.nr)
assert(am > -1)
assert(am < self._mult_table1.shape[0])
if direction==1 :
rmin, kmin, ptr_rr3 = self.rmin, self.kmin, self.rr3
dr = np.log(self.rr[1]/self.rr[0])
C = ff[0]/self.rr[0]**(npow+am)
elif direction==-1 :
rmin, kmin, ptr_rr3 = self.kmin, self.rmin, self.kk3
dr = np.log(self.kk[1]/self.kk[0])
C = ff[0]/self.kk[0]**(npow+am)
else:
raise SystemError('!direction=+/-1')
gg = np.zeros((self.nr), dtype='float64') # Allocate the result
# make the calculation for LARGE k values: extend the input to the doubled mesh, extrapolating the input as C * r**(npow+am)
nr2 = self.nr*2
r2c_in = np.zeros((nr2), dtype='float64')
r2c_in[0:self.nr] = C*self._premult[0:self.nr]*self._smallr[0:self.nr]**(npow+am)
r2c_in[self.nr:nr2] = self._premult[self.nr:nr2]*ff[0:self.nr]
r2c_out = np.fft.rfft(r2c_in)
temp1 = np.zeros((nr2), dtype='complex128')
temp1[0:self.nr] = np.conj(r2c_out[0:self.nr])*self._mult_table1[am,0:self.nr]
temp2 = np.fft.ifft(temp1)*nr2
gg[0:self.nr] = (rmin/kmin)**1.5 * (temp2[self.nr:nr2]).real * self._postdiv[0:self.nr]
# obtain the SMALL k results in the array c2r_out
r2c_in[0:self.nr] = ptr_rr3[0:self.nr] * ff[0:self.nr]
r2c_in[self.nr:nr2] = 0.0
r2c_out = np.fft.rfft(r2c_in)
c2r_in = np.conj(r2c_out[0:self.nr+1]) * self._mult_table2[am,0:self.nr+1]
c2r_out = np.fft.irfft(c2r_in)*dr*nr2
r2c_in[0:self.nr] = abs(gg[0:self.nr]-c2r_out[0:self.nr])
kdiv = np.argmin(r2c_in[0:self.nr])
gg[0:kdiv] = c2r_out[0:kdiv]
return gg
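# --- Hypothetical usage sketch (not part of PySCF's test suite): transform a
# Gaussian f(r) = exp(-r**2), whose l = 0 spherical Bessel transform is known
# analytically, g(k) = (sqrt(pi)/4) * exp(-k**2/4), for the plain convention
# g(k) = int f(r) j_0(kr) r**2 dr.  The logarithmic meshes below are made up,
# and the extra sqrt(pi/2) factor depends on the `with_sqrt_pi_2` flag.
def _sbt_gaussian_demo(nr=256):
    rr = np.exp(np.linspace(np.log(1e-4), np.log(20.0), nr))  # log mesh in r
    kk = np.exp(np.linspace(np.log(1e-4), np.log(20.0), nr))  # log mesh in k
    sbt_obj = sbt_c(rr, kk, lmax=2, with_sqrt_pi_2=False)
    gg = sbt_obj.sbt(np.exp(-rr**2), am=0, direction=1)
    reference = np.sqrt(np.pi) / 4.0 * np.exp(-kk**2 / 4.0)
    # gg and reference should agree closely over the well-resolved part of kk.
    return kk, gg, reference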
|
gkc1000/pyscf
|
pyscf/nao/m_sbt.py
|
Python
|
apache-2.0
| 6,825
|
[
"PySCF",
"SIESTA"
] |
cb4bbaf0182862d9c47bcd40080d4e4d6021708a727fbef249416dc4de89a195
|
import logging
import os
from django.shortcuts import render
from django.views.generic import View
from django.http import HttpResponse
from django.conf import settings
class FrontendAppView(View):
"""
Serves the compiled frontend entry point (only works if you have run `make
build-frontend`).
"""
def get(self, request):
try:
with open(
os.path.join(settings.FRONTEND_DIR, 'build', 'index.html'),
) as frontend:
return HttpResponse(frontend.read())
except FileNotFoundError:
logging.exception('Production build of app not found')
return HttpResponse(
"""
This URL is only used when you have built the production
version of the app. Visit http://localhost:3000/ instead, or
run `make build-frontend` to test the production version.
""",
status=501,
)
def index(request):
context = {
'component': 'App',
'props': {
'env': 'Django',
'user': {
'username': request.user.username,
},
},
}
return render(request, 'index.html', context)
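# --- Hypothetical wiring sketch (not part of this project): one way the two
# views above could be routed.  In a real project these lines would live in a
# urls.py; the route strings are assumptions, and django.urls.path/re_path
# require Django 2.0 or newer.
from django.urls import path, re_path
urlpatterns_sketch = [
    path('django-rendered/', index, name='index'),
    # catch-all so client-side routes fall through to the compiled frontend
    re_path(r'^.*$', FrontendAppView.as_view(), name='frontend'),
]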
|
nadege/food-organizer
|
backend/apps/project/views.py
|
Python
|
gpl-3.0
| 1,258
|
[
"VisIt"
] |
d865bad499cf5fb96e412c923fd08d1f7e166e4e422e8c4d93ec5759c7d58dee
|
# -*- coding: utf-8 -*-
import psycopg2
import ast
import json
import numpy as np
import distance
import math
import helpers
from us_state_abbrevation import *
with open('api_key_list.config') as key_file:
api_key_list = json.load(key_file)
api_key = api_key_list["distance_api_key_list"]
conn_str = api_key_list["conn_str"]
def ajax_available_events(county, state):
county = county.upper()
state = state.title()
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
cur.execute("SELECT index, name FROM poi_detail_table WHERE county='%s' AND state='%s';" % (county, state))
poi_lst = [item for item in cur.fetchall()]
conn.close()
return poi_lst
def add_event(trip_locations_id, event_day, new_event_id=None, event_name=None, full_day=True, unseen_event=False):
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
cur.execute("SELECT * FROM day_trip_table WHERE trip_locations_id='%s'" % (trip_locations_id))
(index, trip_locations_id, full_day, regular, county, state, detail, event_type, event_ids) = cur.fetchone()
if unseen_event:
index += 1
trip_locations_id = '-'.join([str(eval(i)['id']) for i in eval(detail)]) + '-' + event_name.replace(' ', '-') + '-' + event_day
cur.execute("SELECT details FROM day_trip_locations WHERE trip_locations_id='%s';" % (trip_locations_id))
a = cur.fetchone()
if bool(a):
conn.close()
return trip_locations_id, a[0]
else:
cur.execute("SELECT max(index) FROM day_trip_locations;")
index = cur.fetchone()[0] + 1
detail = list(eval(detail))
#need to make sure the type is correct for detail!
new_event = "{'address': 'None', 'id': 'None', 'day': %s, 'name': u'%s'}" % (event_day, event_name)
detail.append(new_event)
# get the right format of detail: change from list to string, remove the brackets, and convert the quote type
new_detail = str(detail).replace('"', '').replace('[', '').replace(']', '').replace("'", '"')
cur.execute("INSERT INTO day_trip_locations VALUES (%i, '%s',%s,%s,'%s','%s','%s');" % (index, trip_locations_id, full_day, False, county, state, new_detail))
conn.commit()
conn.close()
return trip_locations_id, detail
else:
event_ids = helpers.db_event_cloest_distance(trip_locations_id, new_event_id)
event_ids, google_ids, name_list, driving_time_list, walking_time_list = helpers.db_google_driving_walking_time(event_ids, event_type='add')
trip_locations_id = '-'.join(event_ids) + '-' + event_day
cur.execute("SELECT details FROM day_trip_locations WHERE trip_locations_id='%s';" % (trip_locations_id))
a = cur.fetchone()
if not a:
details = []
helpers.db_address(event_ids)
for item in event_ids:
cur.execute("SELECT index, name, address FROM poi_detail_table WHERE index = '%s';" % (item))
a = cur.fetchone()
detail = {'id': a[0],'name': a[1], 'address': a[2], 'day': event_day}
details.append(detail)
#need to make sure event detail can append to table!
cur.execute("insert into day_trip_table (trip_locations_id,full_day, regular, county, state, details, event_type, event_ids) VALUES ( '%s', %s, %s, '%s', '%s', '%s', '%s', '%s');" % (trip_locations_id, full_day, False, county, state, details, event_type, event_ids))
conn.commit()
conn.close()
return trip_locations_id, details
else:
conn.close()
#need to make sure type is correct.
return trip_locations_id, a[0]
def remove_event(trip_locations_id, remove_event_id, remove_event_name=None, event_day=None, full_day=True):
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
cur.execute("SELECT * FROM day_trip_table WHERE trip_locations_id='%s';" % (trip_locations_id))
(index, trip_locations_id, full_day, regular, county, state, detail, event_type, event_ids) = cur.fetchone()
new_event_ids = ast.literal_eval(event_ids)
new_event_ids.remove(remove_event_id)
new_trip_locations_id = '-'.join(str(event_id) for event_id in new_event_ids)
cur.execute("SELECT * FROM day_trip_table WHERE trip_locations_id='%s';" % (new_trip_locations_id))
check_id = cur.fetchone()
if check_id:
return new_trip_locations_id, check_id[-3]
detail = ast.literal_eval(detail[1:-1])
for index, trip_detail in enumerate(detail):
if ast.literal_eval(trip_detail)['id'] == remove_event_id:
remove_index = index
break
new_detail = list(detail)
new_detail.pop(remove_index)
new_detail = str(new_detail).replace("'", "''")
regular = False
cur.execute("SELECT max(index) FROM day_trip_table WHERE trip_locations_id='%s';" % (trip_locations_id))
new_index = cur.fetchone()[0]
new_index += 1
cur.execute("INSERT INTO day_trip_table VALUES (%i, '%s', %s, %s, '%s', '%s', '%s', '%s','%s');" % (new_index, new_trip_locations_id, full_day, regular, county, state, new_detail, event_type, new_event_ids))
conn.commit()
conn.close()
return new_trip_locations_id, new_detail
def event_type_time_spent(adjusted_normal_time_spent):
if adjusted_normal_time_spent > 180:
return 'big'
elif adjusted_normal_time_spent >= 120:
return 'med'
else:
return 'small'
def switch_event_list(full_trip_id, trip_locations_id, switch_event_id, switch_event_name=None, event_day=None, full_day=True):
# new_trip_locations_id, new_detail = remove_event(trip_locations_id, switch_event_id)
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
cur.execute("SELECT name, city, county, state, coord_lat, coord_long,ranking, adjusted_visit_length FROM poi_detail_table WHERE index=%s;" % (switch_event_id))
name, city, county, state,coord_lat, coord_long,poi_rank, adjusted_normal_time_spent = cur.fetchone()
event_type = event_type_time_spent(adjusted_normal_time_spent)
available_lst = ajax_available_events(county, state)
cur.execute("SELECT trip_location_ids, details FROM full_trip_table WHERE full_trip_id=%s;" % (full_trip_id))
full_trip_detail = cur.fetchone()
full_trip_detail = ast.literal_eval(full_trip_detail[1])  # parse the 'details' column of the fetched row
full_trip_ids = [ast.literal_eval(item)['id'] for item in full_trip_detail]
switch_lst = []
for item in available_lst:
index = item[0]
if index not in full_trip_ids:
event_ids = [switch_event_id, index]
event_ids, google_ids, name_list, driving_time_list, walking_time_list = helpers.db_google_driving_walking_time(event_ids, event_type='switch')
if min(driving_time_list[0], walking_time_list[0]) <= 60:
cur.execute("SELECT ranking, review_score, adjusted_visit_length FROM poi_detail_table WHERE index=%s;" % (index))
target_poi_rank, target_rating, target_adjusted_normal_time_spent = cur.fetchone()
target_event_type = event_type_time_spent(target_adjusted_normal_time_spent)
switch_lst.append([target_poi_rank, target_rating, target_event_type == event_type])
#need to sort target_event_type, target_poi_rank and target_rating
return {switch_event_id: switch_lst}
def switch_event(trip_locations_id, switch_event_id, final_event_id, event_day):
new_trip_locations_id, new_detail = remove_event(trip_locations_id, switch_event_id)
new_trip_locations_id, new_detail = add_event(new_trip_locations_id, event_day, final_event_id, full_day=True, unseen_event=False)
return new_trip_locations_id, new_detail
def angle_between(p1, p2):
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return np.rad2deg((ang1 - ang2) % (2 * np.pi))
def calculate_initial_compass_bearing(pointA, pointB):
"""
Calculates the bearing between two points.
The formulae used is the following:
theta = atan2(sin(delta(long)).cos(lat2),
cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(delta(long)))
:Parameters:
- `pointA: The tuple representing the latitude/longitude for the
first point. Latitude AND longitude must be in decimal degrees
- `pointB: The tuple representing the latitude/longitude for the
second point. Latitude AND longitude must be in decimal degrees
:Returns:
The bearing in degrees
:Returns Type:
float
"""
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(diffLong))
initial_bearing = math.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180° to +180°, which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
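# Illustrative usage with made-up coordinates (roughly San Francisco to Los Angeles):
# calculate_initial_compass_bearing((37.7749, -122.4194), (34.0522, -118.2437))
# returns a bearing of roughly 136 degrees, i.e. a south-easterly heading.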
# def direction_from_orgin(start_coord_long, start_coord_lat, target_coord_long, target_coord_lat):
# angle = calculate_initial_compass_bearing((start_coord_lat, start_coord_long), (target_coord_lat, target_coord_long))
# if (angle > 45) and (angle < 135):
# return 'E'
# elif (angle > 135) and (angle < 215):
# return 'S'
# elif (angle > 215) and (angle < 305):
# return 'W'
# else:
# return 'N'
def check_direction(start_lat, start_long, outside_lat, outside_long, target_direction):
angle_dict={"E": range(45, 135), "S": range(135, 215), "W": range(215, 305), "N": range(0, 45) + range(305, 360)}
angle = calculate_initial_compass_bearing((start_lat, start_long), (outside_lat, outside_long))
if int(angle) in angle_dict[target_direction]:
return True
else:
return False
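# Example built on the same coordinates: the ~136 degree bearing from San Francisco to
# Los Angeles falls in range(135, 215), so
# check_direction(37.7749, -122.4194, 34.0522, -118.2437, 'S') would return True.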
def travel_outside_coords(current_city, current_state, direction=None, n_days=1):
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
#coord_long, coord_lat
cur.execute("SELECT index, coord_lat, coord_long FROM all_cities_coords_table WHERE city ='%s' AND state = '%s';" % (current_city, current_state))
id_, coord_lat, coord_long = cur.fetchone()
#city, coord_lat, coord_long
cur.execute("SELECT distinct city, coord_lat, coord_long FROM all_cities_coords_table WHERE city !='%s' AND state = '%s';" % (current_city, current_state))
coords = cur.fetchall()
conn.close()
return id_, coords, coord_lat, coord_long
def travel_outside_with_direction(origin_city, origin_state, target_direction, furthest_len, n_days=1):
poi_info = []
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
#coord_long, coord_lat
cur.execute("SELECT index, coord_lat, coord_long FROM all_cities_coords_table WHERE city ='%s' AND state = '%s';" % (origin_city, origin_state))
id_, start_lat, start_long = cur.fetchone()
cur.execute("SELECT index, coord_lat, coord_long, adjusted_visit_length, ranking, review_score, num_reviews FROM poi_detail_table WHERE city != '%s' AND interesting = True AND ST_Distance_Sphere(geom, ST_MakePoint(%s,%s)) <= %s * 1609.34;" % (origin_city, start_long, start_lat, furthest_len))
item = cur.fetchall()
conn.close()
for coords in item:
if check_direction(start_lat, start_long, coords[1], coords[2], target_direction):
poi_info.append(coords)
## add those into table
return id_, start_lat, start_long, np.array(poi_info)
def check_outside_trip_id(outside_trip_id, debug):
'''
Check whether the outside trip id exists or not.
'''
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
cur.execute("SELECT outside_trip_id FROM outside_trip_table WHERE outside_trip_id = '%s';" % (outside_trip_id))
a = cur.fetchone()
# print 'outside stuff id', a, bool(a)
conn.close()
if bool(a):
if not debug:
return a[0]
else:
return True
else:
return False
def db_outside_route_trip_details(event_ids, route_i):
conn=psycopg2.connect(conn_str)
cur = conn.cursor()
details = []
#details dict includes: id, name,address, day
for event_id in event_ids:
cur.execute("SELECT index, name, address, coord_lat, coord_long, poi_type, adjusted_visit_length, num_reviews, ranking, review_score, icon_url, check_full_address, city, state , img_url FROM poi_detail_table WHERE index = %s;" %(event_id))
a = cur.fetchone()
details.append({'id': a[0], 'name': a[1], 'address': a[2], 'coord_lat': a[3], 'coord_long':a[4], 'route': route_i, 'poi_type': a[5], 'adjusted_visit_length': a[6], 'num_reviews': a[7], 'ranking': a[8], 'review_score': a[9], 'icon_url': a[10], 'check_full_address': a[11], 'city': a[12], 'state': a[13], 'img_url': a[14]})
conn.close()
return details
def db_outside_google_driving_walking_time(city_id, start_coord_lat, start_coord_long, event_ids, event_type, origin_city, origin_state):
'''
Get estimated travel time from the Google API.
Limit 1000 calls per day.
'''
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
google_ids = []
driving_time_list = []
walking_time_list = []
name_list = []
api_i = 0
city_to_poi_id = str(int(city_id)) + '0000' + str(int(event_ids[0]))
if not check_city_to_poi(city_to_poi_id):
cur.execute("SELECT name, coord_lat, coord_long FROM poi_detail_table WHERE index = %s;" % (event_ids[0]))
dest_name, dest_coord_lat, dest_coord_long = cur.fetchone()
orig_coords = str(start_coord_lat) + ',' + str(start_coord_long)
dest_coords = str(dest_coord_lat) + ',' + str(dest_coord_long)
orig_name = origin_city
google_result = helpers.find_google_result(orig_coords, dest_coords, orig_name, dest_name, api_i)
while google_result == False:
api_i += 1
if api_i > len(api_key)-1:
print "all api_key are used"
else:
google_result = helpers.find_google_result(orig_coords, dest_coords, orig_name, dest_name, api_i)
driving_result, walking_result, google_driving_url, google_walking_url = google_result
if (driving_result['rows'][0]['elements'][0]['status'] == 'NOT_FOUND') and (walking_result['rows'][0]['elements'][0]['status'] == 'NOT_FOUND'):
new_event_ids = list(event_ids)
new_event_ids.pop(0)
new_event_ids, _ = db_outside_event_cloest_distance(start_coord_lat, start_coord_long, event_ids=new_event_ids, event_type=event_type)
return db_outside_google_driving_walking_time(city_id, start_coord_lat, start_coord_long, new_event_ids, event_type, origin_city, origin_state)
try:
city_to_poi_driving_time = driving_result['rows'][0]['elements'][0]['duration']['value'] / 60
except:
print origin_city, origin_state, dest_name, driving_result #need to debug for this
try:
city_to_poi_walking_time = walking_result['rows'][0]['elements'][0]['duration']['value'] / 60
except:
city_to_poi_walking_time = 9999
'''
Need to work on rest of it!
'''
cur.execute("SELECT max(index) FROM google_city_to_poi_table")
index = cur.fetchone()[0]+1
driving_result = str(driving_result).replace("'", '"')
walking_result = str(walking_result).replace("'", '"')
orig_name = orig_name.replace("'", "''")
dest_name = dest_name.replace("'", "''")
cur.execute("INSERT INTO google_city_to_poi_table VALUES (%i, %s, %i, '%s','%s', '%s','%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', %s, %s);" % (index, city_to_poi_id, city_id, origin_city.replace("'", "''"), origin_state, orig_name, dest_name, event_ids[0], start_coord_lat, start_coord_long, dest_coord_lat, dest_coord_long, orig_coords, dest_coords, google_driving_url, google_walking_url, str(driving_result), str(walking_result), city_to_poi_driving_time,city_to_poi_walking_time))
conn.commit()
name_list.extend([orig_name + " to " + dest_name,dest_name + " to " + orig_name])
google_ids.extend([city_to_poi_id] * 2)
driving_time_list.extend([city_to_poi_driving_time] * 2)
walking_time_list.extend([city_to_poi_walking_time] * 2)
else:
cur.execute("SELECT orig_name, dest_name, city_to_poi_driving_time, city_to_poi_walking_time FROM google_city_to_poi_table WHERE city_to_poi_id = %s;" %(city_to_poi_id))
orig_name, dest_name, city_to_poi_driving_time, city_to_poi_walking_time = cur.fetchone()
name_list.append(orig_name + " to " + dest_name)
google_ids.extend([city_to_poi_id] * 2)
driving_time_list.extend([city_to_poi_driving_time] * 2)
walking_time_list.extend([city_to_poi_walking_time] * 2)
for i,v in enumerate(event_ids[:-1]):
id_ = str(int(v)) + '0000' + str(int(event_ids[i+1]))
result_check_travel_time_id = helpers.check_travel_time_id(id_)
if not result_check_travel_time_id:
cur.execute("SELECT name, coord_lat, coord_long FROM poi_detail_table WHERE index = %s;" % (v))
orig_name, orig_coord_lat, orig_coord_long = cur.fetchone()
orig_idx = v
cur.execute("SELECT name, coord_lat, coord_long FROM poi_detail_table WHERE index = %s;" % (event_ids[i + 1]))
dest_name, dest_coord_lat, dest_coord_long = cur.fetchone()
dest_idx = event_ids[i+1]
orig_coords = str(orig_coord_lat) + ',' + str(orig_coord_long)
dest_coords = str(dest_coord_lat) + ',' + str(dest_coord_long)
google_result = helpers.find_google_result(orig_coords, dest_coords, orig_name, dest_name, api_i)
while google_result == False:
api_i += 1
if api_i > len(api_key)-1:
print "all api_key are used"
else:
google_result = helpers.find_google_result(orig_coords, dest_coords, orig_name, dest_name, api_i)
driving_result, walking_result, google_driving_url, google_walking_url = google_result
if (driving_result['rows'][0]['elements'][0]['status'] == 'NOT_FOUND') and (walking_result['rows'][0]['elements'][0]['status'] == 'NOT_FOUND'):
new_event_ids = list(event_ids)
new_event_ids.pop(i+1)
new_event_ids = helpers.db_event_cloest_distance(event_ids=new_event_ids, event_type=event_type)
return helpers.db_google_driving_walking_time(new_event_ids, event_type)
try:
google_driving_time = driving_result['rows'][0]['elements'][0]['duration']['value']/60
except:
print v, id_, driving_result #need to debug for this
try:
google_walking_time = walking_result['rows'][0]['elements'][0]['duration']['value']/60
except:
google_walking_time = 9999
cur.execute("SELECT max(index) FROM google_travel_time_table")
index = cur.fetchone()[0] + 1
driving_result = str(driving_result).replace("'", '"')
walking_result = str(walking_result).replace("'", '"')
orig_name = orig_name.replace("'","''")
dest_name = dest_name.replace("'","''")
cur.execute("INSERT INTO google_travel_time_table VALUES (%i, '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', %s, %s);" % (index, id_, orig_name, orig_idx, dest_name, dest_idx, orig_coord_lat, orig_coord_long, dest_coord_long, dest_coord_long, orig_coords, dest_coords, google_driving_url, google_walking_url, str(driving_result), str(walking_result), google_driving_time, google_walking_time))
conn.commit()
name_list.append(orig_name + " to " + dest_name)
google_ids.append(id_)
driving_time_list.append(google_driving_time)
walking_time_list.append(google_walking_time)
else:
cur.execute("SELECT orig_name, dest_name, google_driving_time, google_walking_time FROM google_travel_time_table WHERE id_field = '%s';" % (id_))
orig_name, dest_name, google_driving_time, google_walking_time = cur.fetchone()
name_list.append(orig_name + " to " + dest_name)
google_ids.append(id_)
driving_time_list.append(google_driving_time)
walking_time_list.append(google_walking_time)
conn.close()
return event_ids, google_ids, name_list, driving_time_list, walking_time_list
def db_outside_event_cloest_distance(coord_lat, coord_long, trip_locations_id=None,event_ids=None, event_type = 'add',new_event_id = None):
'''
Get an approximate closest-distance ordering of the events from the distance matrix.
'''
if new_event_id or not event_ids:
event_ids, event_type = helpers.get_event_ids_list(trip_locations_id)
if new_event_id:
event_ids.append(new_event_id)
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
points = np.zeros((len(event_ids), 3))
for i, v in enumerate(event_ids):
cur.execute("SELECT index, coord_lat, coord_long FROM poi_detail_table WHERE index = %i;" % (float(v)))
points[i] = cur.fetchone()
conn.close()
points = np.vstack((np.array([0, coord_lat, coord_long]), points))
n, D = distance.mk_matrix(points[:, 1:], distance.geopy_dist)
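# distance.mk_matrix builds the pairwise distance matrix D over the origin (row 0)
# plus the events; nearest_neighbor followed by localsearch then produces an
# approximate shortest visiting order, which is mapped back onto event_ids below.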
if len(points) >= 3:
if event_type == 'add':
tour = distance.nearest_neighbor(n, 0, D)
# create a greedy tour, visiting city 'i' first
z = distance.length(tour, D)
z = distance.localsearch(tour, z, D)
tour = np.array(tour[1:]) - 1
event_ids = np.array(event_ids)
return np.array(event_ids)[tour[1:]], event_type
#need to figure out other cases
else:
tour = distance.nearest_neighbor(n, 0, D)
# create a greedy tour, visiting city 'i' first
z = distance.length(tour, D)
z = distance.localsearch(tour, z, D)
tour = np.array(tour[1:]) - 1
event_ids = np.array(event_ids)
return event_ids[tour], event_type
else:
return np.array(event_ids), event_type
def check_city_to_poi(city_to_poi_id):
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
cur.execute("SELECT index FROM google_city_to_poi_table WHERE city_to_poi_id = %s;" % (city_to_poi_id))
a = cur.fetchone()
conn.close()
if bool(a):
return True
else:
return False
def db_remove_outside_extra_events(event_ids, driving_time_list, walking_time_list, max_time_spent=600):
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
if len(event_ids) == 1:
cur.execute("SELECT DISTINCT SUM(adjusted_visit_length) FROM poi_detail_table WHERE index = %s;" % (event_ids[0]))
else:
cur.execute("SELECT DISTINCT SUM(adjusted_visit_length) FROM poi_detail_table WHERE index IN %s;" % (tuple(event_ids),))
total_travel_time = sum(np.minimum(np.array(driving_time_list),np.array(walking_time_list)))
time_spent = float(cur.fetchone()[0]) + float(total_travel_time)
conn.close()
if len(event_ids) == 1:
return event_ids, driving_time_list, walking_time_list, time_spent
if time_spent > max_time_spent:
update_event_ids = event_ids[:-1]
update_driving_time_list = driving_time_list[:-1]
update_walking_time_list = walking_time_list[:-1]
return db_remove_outside_extra_events(update_event_ids, update_driving_time_list, update_walking_time_list)
else:
return event_ids, driving_time_list, walking_time_list, time_spent
def check_outside_route_id(outside_route_id, debug = True):
'''
Check whether the outside route id exists or not.
'''
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
cur.execute("SELECT details FROM outside_route_table WHERE outside_route_id = '%s';" % (outside_route_id))
a = cur.fetchone()
conn.close()
if bool(a):
if not debug:
return a[0]
else:
return True
else:
return False
def sorted_outside_events(info, ix):
'''
select the event_id, ranking, review_score and num_reviews columns and
sort by num_reviews (descending), then ranking, then review_score (descending);
return the sorted list
'''
event_ = info[ix][:, [0, 4, 5, 6]]
return np.array(sorted(event_, key=lambda x: (-x[3], x[1], -x[2],)))
#num_reviews, ranking, review_score
def create_outside_event_id_list(big_, medium_, small_):
# print big_,medium_,small_
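# Rough selection policy implemented by the branches below: when at least one 'big'
# event exists and its ranking column passes the comparison (>=) against the best
# 'med' event (or fewer than two 'med' events exist), take that 'big' event plus up
# to six 'small' ones; otherwise take the top two 'med' events plus up to eight
# 'small' ones; with no 'big'/'med' candidates, fall back to up to ten 'small'
# events. Rows are [event_id, ranking, review_score, num_reviews] as produced by
# sorted_outside_events.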
event_type = ''
if big_.shape[0] >= 1:
if (medium_.shape[0] < 2) or (big_[0,1] >= medium_[0, 1]):
if small_.shape[0] >= 6:
event_ids = list(np.concatenate((big_[:1, 0], small_[0:6, 0]), axis=0))
elif small_.shape[0] > 0:
event_ids = list(np.concatenate((big_[:1, 0], small_[:, 0]), axis=0))
else:
event_ids = list(np.array(sorted(big_[0:, :], key=lambda x: (-x[1], x[2])))[:, 0])
event_type = 'big'
else:
if small_.shape[0] >= 8:
event_ids = list(np.concatenate((medium_[0:2, 0], small_[0:8,0]), axis=0))
elif small_.shape[0] > 0:
event_ids = list(np.concatenate((medium_[0:2, 0], small_[:,0]), axis=0))
else:
event_ids = list(np.array(sorted(medium_[0:, :], key=lambda x: (-x[1], x[2])))[:, 0])
event_type = 'med'
elif medium_.shape[0] >= 2:
if small_.shape[0] >= 8:
event_ids = list(np.concatenate((medium_[0:2, 0], small_[0:8, 0]), axis=0))
elif small_.shape[0] > 0:
event_ids = list(np.concatenate((medium_[0:2, 0], small_[:, 0]), axis=0))
else:
event_ids = list(np.array(sorted(medium_[0:, :], key=lambda x: (-x[1], x[2])))[:, 0])
event_type = 'med'
else:
if small_.shape[0] >= 10:
if medium_.shape[0] == 0:
event_ids = list(np.array(sorted(small_[0:10, :], key=lambda x: (-x[1], x[2])))[:, 0])
else:
event_ids = list(np.array(sorted(np.vstack((medium_[:1, :], small_[0:10, :])), key=lambda x: (-x[1], x[2])))[:, 0])
elif small_.shape[0] > 0:
if medium_.shape[0] == 0:
event_ids = list(np.array(sorted(small_[0:, :], key=lambda x: (-x[1], x[2])))[:, 0])
else:
event_ids = list(np.array(sorted(np.vstack((medium_, small_)), key=lambda x: (-x[1], x[2])))[:, 0])
else:
event_ids = list(np.array(sorted(medium_[0:, :], key=lambda x: (x[1], -x[2])))[:, 0])
event_type = 'small'
return event_ids, event_type
def assign_theme(details):
theme_list_dict = {
"family": ["Park", "Zoo", "Game"],
"lifestyle": ["Nightlife", "Shopping", "Theater", "Food", "Spa", "Casino", "Show", "ShoppingMall"],
"nature": ["StatePark", "NationalWildlifeRefuge", "NationalHistoricalPark", "NationalForest", "NationalMonument", "NationalMemorial"],
"cultural": ["Landmark", "Museum", "OutdoorActivities", "Library", "Stadium"],
"theme_park": ["ThemePark"],
"national_park": ["NationalPark"],
"other_list": ["Other", "VisotorCenter", "Transportation", "Tour", "Unuse_theater", "Unuse_transportation"]
}
assign_dict = {"family" : 0,"lifestyle": 0,"nature": 0,"cultural": 0,"theme_park": 0,"national_park": 0,"other_list": 0}
assign_dict2 = {"family" : 0, "lifestyle": 0, "nature": 0, "cultural": 0, "theme_park": 0, "national_park": 0, "other_list": 0}
assign_dict3 = {"family" : -1, "lifestyle": -1, "nature": -1, "cultural": -1, "theme_park": -1, "national_park": -1, "other_list": -1}
assign_dict4 = {"family" : [], "lifestyle": [], "nature": [], "cultural": [], "theme_park": [], "national_park": [], "other_list": []}
#create a list for each poi
all_type = []
for i in details:
all_type.append([i["poi_type"], i["adjusted_visit_length"], i["num_reviews"], i["ranking"], i["review_score"]])
for i in all_type:
for key, value in theme_list_dict.items():
if i[0] in value: #locate the theme
assign_dict[key] += int(i[1]) #total time of theme
assign_dict2[key] += int(i[2]) #total # of review of theme
if assign_dict3[key] < 0:
assign_dict3[key] = int(i[3])
else:
assign_dict3[key] = min(assign_dict3[key], int(i[3]))
assign_dict4[key].append(float(i[4]))
assign_dict = sort_dict(assign_dict) #order descending by time
# theme1 = assign_dict[0][1]
# theme2 = assign_dict[1][1]
# num_reviews = assign_dict2
# ranking = assign_dict3
if assign_dict[0][0] == assign_dict[1][0]: #check if the total time is same
if assign_dict2[assign_dict[0][1]] > assign_dict2[assign_dict[1][1]]: #check number of review
return [assign_dict[0][1], assign_dict2[assign_dict[0][1]], assign_dict3[assign_dict[0][1]], avg_list(assign_dict4[assign_dict[0][1]])]
elif assign_dict2[assign_dict[0][1]] < assign_dict2[assign_dict[1][1]]:
return [assign_dict[1][1], assign_dict2[assign_dict[1][1]], assign_dict3[assign_dict[1][1]], avg_list(assign_dict4[assign_dict[1][1]])]
elif assign_dict3[assign_dict[0][1]] < assign_dict3[assign_dict[1][1]]: #check for ranking
return [assign_dict[0][1], assign_dict2[assign_dict[0][1]], assign_dict3[assign_dict[0][1]], avg_list(assign_dict4[assign_dict[0][1]])]
elif assign_dict3[assign_dict[0][1]] > assign_dict3[assign_dict[1][1]]:
return [assign_dict[1][1], assign_dict2[assign_dict[1][1]], assign_dict3[assign_dict[1][1]], avg_list(assign_dict4[assign_dict[1][1]])]
#return [theme, num of review, ranking, review_score]
return [assign_dict[0][1], assign_dict2[assign_dict[0][1]], assign_dict3[assign_dict[0][1]], avg_list(assign_dict4[assign_dict[0][1]])]
def sort_dict(input_dict):
temp_dict = [(input_dict[key], key) for key in input_dict]
temp_dict.sort(reverse=True)
return temp_dict
def avg_list(l):
#for finding avg of review score
if len(l) != 0:
return sum(l) / len(l)
else:
return 0
def clean_details(details_theme):
details_array= np.array(details_theme)
final = []
used =[]
for count, i, in enumerate(details_array):
if (i[0] == "national_park") or (i[0] == "theme_park"):
final.append(i[4:])
used.append(count)
details_array = np.delete(details_array, used, axis=0)
# a = np.array(sorted(details_array, key=lambda x: (x[2].astype(np.int), -x[1].astype(np.float), x[3].astype(np.float))))
a = np.array(sorted(details_array, key=lambda x: (x[2], -x[1], x[3])))
theme_select_dict = {}
backup = []
for count, i in enumerate(a):
if i[0] not in theme_select_dict:
theme_select_dict[i[0]] = 1
final.append(i[4:])
else:
backup.append(i[4:])
# while len(final) < 6: #if less then 6 route, pick route from backup
# final.append(backup.pop(0))
return final
def check_state(origin_state):
if not helpers.check_valid_state(origin_state):
try:
origin_state = abb2state[str(origin_state).upper()]
except:
return False
return origin_state
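# Hedged example, assuming helpers.check_valid_state only accepts full state names and
# abb2state maps upper-cased abbreviations to them: check_state('ca') would return
# 'California', while an unrecognised value such as 'zz' would return False.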
|
kennethcc2005/travel_with_friends
|
outside_helpers.py
|
Python
|
mit
| 31,962
|
[
"CASINO"
] |
9de97ff9c59b9476c19c3692ce3b8cf390f2c456972638cc42dc3d96a6b9b336
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.db.models import F
from django.http import HttpResponsePermanentRedirect, HttpResponseServerError
from django.shortcuts import get_object_or_404
from django.template import loader, RequestContext
from lib.render import render
from forms import ShortenForm
from models import Link
def forward(request, slug):
"""The actual forwarder magic"""
link = get_object_or_404(Link, slug=slug)
Link.objects.filter(pk=link.pk).update(visited=F('visited')+1) # count visit
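# The F() expression makes the database perform "visited = visited + 1" in a single
# UPDATE statement, avoiding the read-modify-write race of link.visited += 1; link.save().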
return HttpResponsePermanentRedirect(link.url)
@login_required
def index(request):
current_site = Site.objects.get_current()
data = {}
if request.method == 'POST':
form = ShortenForm(request.POST)
if form.is_valid():
try:
link = Link.objects.filter(url=form.cleaned_data['url'],
is_autoslug=True)[0]
except IndexError:
link = form.save()
link.users.add(request.user)
link.save()
# success data
data.update({
'success': True,
'long_url': form.cleaned_data['url'],
'short_url': 'http://%s/%s' % (current_site.domain, link.slug)
})
else:
# allow pre-populating url with GET (from bookmarklet)
initial_url = request.GET.get('url', '')
# no circular bookmarking...
if initial_url.startswith('http://%s' % current_site.domain):
initial_url = ''
form = ShortenForm(initial={'url': initial_url})
data.update({'form': form})
return render(request, 'shortener/index.html', data)
def server_error(request, template_name='500.html'):
"""Include MEDIA_URL in 500 error pages."""
t = loader.get_template(template_name)
c = RequestContext(request)
return HttpResponseServerError(t.render(c))
|
fwenzel/millimeter
|
apps/shortener/views.py
|
Python
|
bsd-3-clause
| 2,030
|
[
"VisIt"
] |
2c041145b151a245f5ad56fe81e7d04321e17da0f004167ed00932ce6db8d449
|
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from mock import patch, call, Mock
from cctrl.error import InputErrorException
from cctrl.app import AppController
from cctrl.settings import Settings
from pycclib.cclib import GoneError
class AppControllerTestCase(unittest.TestCase):
def test_get_size_from_memory_1gb(self):
app = AppController(None, Settings())
self.assertEqual(8, app._get_size_from_memory('1GB'))
@patch('cctrl.app.sys')
def test_get_size_from_memory_mb_rounded(self, _sys):
app = AppController(None, Settings())
self.assertEqual(6, app._get_size_from_memory('666MB'))
self.assertEqual([
call.stderr.write(
'Memory size has to be a multiple of 128MB and has been ' +
'rounded up to 768MB.'),
call.stderr.write('\n')], _sys.mock_calls)
def test_get_size_from_memory_nop_match(self):
with self.assertRaises(InputErrorException) as ctx:
AppController(None, Settings())._get_size_from_memory('0.7')
self.assertEqual(
'[ERROR] Memory size should be an integer between 128 and 1024 MB.',
str(ctx.exception))
def test_get_size_from_memory_unrecognized_unit(self):
with self.assertRaises(InputErrorException) as ctx:
AppController(None, Settings())._get_size_from_memory('4kb')
self.assertEqual(
'[ERROR] Memory size should be an integer between 128 and 1024 MB.',
str(ctx.exception))
@patch('cctrl.app.check_call')
def test_push_with_ship(self, check_call):
app = AppController(None, Settings())
app.redeploy = Mock()
app.log_from_now = Mock()
app._get_or_create_deployment = Mock(
return_value=({'branch': 'default', 'name': 'dep'}, 'name'))
args = Mock()
args.name = 'app/dep'
args.deploy = False
app.push(args)
self.assertTrue(check_call.called)
self.assertTrue(app.redeploy.called)
self.assertTrue(app.log_from_now.called)
@patch('cctrl.app.check_call')
def test_push_with_deploy(self, check_call):
app = AppController(None, Settings())
app.redeploy = Mock()
app._get_or_create_deployment = Mock(
return_value=({'branch': 'default'}, 'name'))
args = Mock()
args.name = 'app/dep'
args.ship = False
app.push(args)
self.assertTrue(check_call.called)
self.assertTrue(app.redeploy.called)
@patch('cctrl.app.check_call')
def test_push_with_ship_and_deploy_error(self, check_call):
app = AppController(None, Settings())
app._get_or_create_deployment = Mock(
return_value=({'branch': 'default', 'name': 'dep'}, 'name'))
args = Mock()
args.name = 'app/dep'
with self.assertRaises(InputErrorException) as sd:
app.push(args)
self.assertEqual(
'[ERROR] --ship and --push options cannot be used simultaneously.',
str(sd.exception))
def test_restart_worker_with_wrk(self):
app = AppController(None, Settings())
app._restartWorker = Mock()
app._restartWorkers = Mock()
app.api = Mock()
app.api.read_worker.return_value = {'wrk_id': 'wrk1', 'command': 'command', 'params': 'params', 'size': 'size'}
args = Mock()
args.name = 'app/dep'
args.wrk_id = 'wrk1'
args.all = False
app.restartWorker(args)
self.assertTrue(app._restartWorker.called)
self.assertFalse(app._restartWorkers.called)
def test_restart_worker_with_all(self):
app = AppController(None, Settings())
app._restartWorker = Mock()
app._restartWorkers = Mock()
app.api = Mock()
app.api.read_worker.return_value = {'wrk_id': 'wrk1', 'command': 'command', 'params': 'params', 'size': 'size'}
args = Mock()
args.name = 'app/dep'
args.wrk_id = False
args.all = True
app.restartWorker(args)
self.assertTrue(app._restartWorkers.called)
def test_restart_worker_gone_error(self):
app = AppController(None, Settings())
app._restartWorker = Mock()
app._restartWorkers = Mock()
app.api = Mock()
app.api.read_worker.side_effect = GoneError
args = Mock()
args.name = 'app/dep'
args.wrk_id = 'wrkgone'
args.all = False
self.assertRaises(InputErrorException, app.restartWorker, args)
self.assertFalse(app._restartWorker.called)
self.assertFalse(app._restartWorkers.called)
def test__restart_workers(self):
app = AppController(None, Settings())
app.api = Mock()
app.api.read_workers.return_value = [{'wrk_id': 'wrk1'}]
app.api.read_worker.return_value = {'wrk_id': 'wrk1', 'command': 'command', 'params': 'params', 'size': 8}
app._restartWorkers('app', 'dep')
self.assertTrue(app.api.delete_worker.called)
self.assertTrue(app.api.create_worker.called)
def test__restart_worker(self):
app = AppController(None, Settings())
app.api = Mock()
app._restartWorker('app_name', 'deployment_name', 'wrk_id', 'command', 'params', 'size')
self.assertTrue(app.api.delete_worker.called)
self.assertTrue(app.api.create_worker.called)
@patch('cctrl.app.time')
def test_deploy_restart_workers(self, time):
app = AppController(None, Settings())
app.api = Mock()
app.api.read_deployment.return_value = {'state': 'deployed'}
app._restartWorkers = Mock()
args = Mock()
args.name = 'app/dep'
args.memory = False
app.deploy(args)
self.assertTrue(app._restartWorkers.called)
self.assertEqual(call('app', 'dep'), app.api.read_deployment.call_args_list[0])
@patch('cctrl.app.time')
def test_deploy_restart_workers_no_dep_name(self, time):
app = AppController(None, Settings())
app.api = Mock()
app.api.read_deployment.return_value = {'state': 'deployed'}
app._restartWorkers = Mock()
args = Mock()
args.name = 'app'
args.memory = False
app.deploy(args)
self.assertTrue(app._restartWorkers.called)
self.assertEqual(call('app', 'default'), app.api.read_deployment.call_args_list[0])
|
cloudControl/cctrl
|
tests/app_test.py
|
Python
|
apache-2.0
| 6,467
|
[
"cclib"
] |
482200ae10ee7de800dd21d9b51bcd7819149dd07701fb215675ddd075969ca4
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Commonly-required utility methods needed by -- and potentially
customized by -- application and toolkit scripts. They have
been pulled out from the scripts because certain scripts had
gotten way too large as a result of including these methods."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__ = "LGPL"
import pyatspi
import orca.script_utilities as script_utilities
#############################################################################
# #
# Utilities #
# #
#############################################################################
class Utilities(script_utilities.Utilities):
def __init__(self, script):
"""Creates an instance of the Utilities class.
Arguments:
- script: the script with which this instance is associated.
"""
script_utilities.Utilities.__init__(self, script)
#########################################################################
# #
# Utilities for finding, identifying, and comparing accessibles #
# #
#########################################################################
def isSameObject(self, obj1, obj2, comparePaths=False, ignoreNames=False):
"""Compares two objects to determine if they are functionally
the same object. This is needed because some applications and
toolkits kill and replace accessibles."""
if (obj1 == obj2):
return True
elif (not obj1) or (not obj2):
return False
elif (obj1.name != obj2.name) or (obj1.childCount != obj2.childCount):
return False
# This is to handle labels in trees. In some cases the default
# script's method gives us false positives; other times false
# negatives.
#
if obj1.getRole() == obj2.getRole() == pyatspi.ROLE_LABEL:
try:
ext1 = obj1.queryComponent().getExtents(0)
ext2 = obj2.queryComponent().getExtents(0)
except:
pass
else:
if ext1.x == ext2.x and ext1.y == ext2.y \
and ext1.width == ext2.width and ext1.height == ext2.height:
return True
# In java applications, TRANSIENT state is missing for tree items
# (fix for bug #352250)
#
try:
parent1 = obj1
parent2 = obj2
while parent1 and parent2 and \
parent1.getRole() == pyatspi.ROLE_LABEL and \
parent2.getRole() == pyatspi.ROLE_LABEL:
if parent1.getIndexInParent() != parent2.getIndexInParent():
return False
parent1 = parent1.parent
parent2 = parent2.parent
if parent1 and parent2 and parent1 == parent2:
return True
except:
pass
return script_utilities.Utilities.isSameObject(self, obj1, obj2, comparePaths, ignoreNames)
def nodeLevel(self, obj):
"""Determines the node level of this object if it is in a tree
relation, with 0 being the top level node. If this object is
not in a tree relation, then -1 will be returned.
Arguments:
-obj: the Accessible object
"""
newObj = obj
if newObj and self.isZombie(newObj):
newObj = self.findReplicant(self._script.lastDescendantChangedSource, obj)
if not newObj:
return script_utilities.Utilities.nodeLevel(self, obj)
count = 0
while newObj:
state = newObj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE) \
or state.contains(pyatspi.STATE_COLLAPSED):
if state.contains(pyatspi.STATE_VISIBLE):
count += 1
newObj = newObj.parent
else:
break
return count - 1
|
GNOME/orca
|
src/orca/scripts/toolkits/J2SE-access-bridge/script_utilities.py
|
Python
|
lgpl-2.1
| 5,118
|
[
"ORCA"
] |
15e98be0420fa70e1d4afa4a3d7d56561f413c55422f0c776d91c00fcfbd8ace
|
#!/usr/bin/env python2.7
'''
RSD: The reciprocal smallest distance algorithm.
Wall, D.P., Fraser, H.B. and Hirsh, A.E. (2003) Detecting putative orthologs, Bioinformatics, 19, 1710-1711.
Original author: Dennis P. Wall, Department of Biological Sciences, Stanford University.
Contributors: I-Hsien Wu, Computational Biology Initiative, Harvard Medical School
Maintainer: Todd F. DeLuca, Center for Biomedical Informatics, Harvard Medical School
This program is written to run on linux. It has not been tested on Windows.
To run this program you need to have installed on your system:
Python 2.7
NCBI BLAST 2.2.24
paml 4.4
Kalign 2.04 (recommended) or clustalw 2.0.9 (deprecated)
See README for full details.
'''
# python package version
# should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py
__version__ = '1.1.7'
import cStringIO
import glob
import logging
import os
import re
import shutil
import subprocess
import time
import fasta
import nested
import util
PAML_ERROR_MSG = 'paml_error'
FORWARD_DIRECTION = 0
REVERSE_DIRECTION = 1
DASHLEN_RE = re.compile('^(-*)(.*?)(-*)$')
MAX_HITS = 3
MATRIX_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'jones.dat')
CODEML_CONTROL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'codeml.ctl')
# Constants used when aligning seqs with clustalw. Kalign does not need these.
USE_CLUSTALW = util.getBoolFromEnv('RSD_USE_CLUSTALW', False)
CLUSTAL_INPUT_FILENAME = 'clustal_fasta.faa'
CLUSTAL_ALIGNMENT_FILENAME = 'clustal_fasta.aln'
#################
# BLAST FUNCTIONS
#################
#
# Used to compute blast hits between two genomes, parse the results, and save the best hits to a file
#
def formatForBlast(fastaPath):
# os.chdir(os.path.dirname(fastaPath))
# cmd = 'formatdb -p -o -i'+os.path.basename(fastaPath)
# cmd = 'formatdb -p -o -i'+fastaPath
# redirect stdout to /dev/null to make the command quieter.
cmd = ['makeblastdb', '-in', fastaPath, '-dbtype', 'prot', '-parse_seqids']
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout=devnull)
def getHitId(hit):
return hit[0]
def getHitEvalue(hit):
'''
returns evalue as a float
'''
return hit[1]
def loadBlastHits(path):
'''
path: location of stored blast hits computed by computeBlastHits()
returns: mapping object from query id to hits. used to be a bsddb, now is a dict.
'''
return util.loadObject(path)
def getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
'''
queryFastaPath: location of fasta file of query sequences
subjectIndexPath: location and name of blast-formatted indexes.
evalue: a string or float representing the maximum evalue threshold of hits to get.
workingDir: creates, uses, and removes a directory under workingDir.
copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
can improve performance if the working directory is on local disk and the files are on a slow network.
blasts every sequence in query against subject, adding hits that are better than evalue to a list stored in a dict keyed on the query id.
'''
# work in a nested tmp dir to avoid junking up the working dir.
with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
if copyToWorking:
localFastaPath = os.path.join(tmpDir, 'query.fa')
shutil.copyfile(queryFastaPath, localFastaPath)
localIndexDir = os.path.join(tmpDir, 'local_blast')
os.makedirs(localIndexDir, 0770)
localIndexPath = os.path.join(localIndexDir, os.path.basename(subjectIndexPath))
for path in glob.glob(subjectIndexPath+'*'):
if os.path.isfile(path):
shutil.copy(path, localIndexDir)
queryFastaPath = localFastaPath
subjectIndexPath = localIndexPath
blastResultsPath = os.path.join(tmpDir, 'blast_results')
# blast query vs subject, using /opt/blast-2.2.22/bin/blastp
cmd = ['blastp', '-outfmt', '6', '-evalue', str(evalue),
'-query', queryFastaPath, '-db', subjectIndexPath,
'-out', blastResultsPath]
subprocess.check_call(cmd)
# parse results
hitsMap = parseResults(blastResultsPath, limitHits)
return hitsMap
def computeBlastHits(queryFastaPath, subjectIndexPath, outPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
'''
queryFastaPath: location of fasta file of query sequences
subjectIndexPath: location and name of blast-formatted indexes.
evalue: a string or float representing the maximum evalue threshold of hits to get.
outPath: location of file where blast hits are saved.
workingDir: creates, uses, and removes a directory under workingDir.
copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
can improve performance if the working directory is on local disk and the files are on a slow network.
Runs getBlastHits() and persists the hits to outPath.
'''
hitsMap = getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits, workingDir, copyToWorking)
util.dumpObject(hitsMap, outPath)
def parseResults(blastResultsPath, limitHits=MAX_HITS):
'''
returns: a map from query seq id to a list of tuples of (subject seq id, evalue) for the top hits of the query sequence in the subject genome
'''
# parse tabular results into hits. thank you, ncbi, for creating results this easy to parse.
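# Default -outfmt 6 columns (tab-separated) are:
# qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore
# so splits[0] is the query id, splits[1] the subject id and splits[10] the evalue.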
hitsMap = {}
hitsCountMap = {}
prevSeqId = None
prevHitId = None
fh = open(blastResultsPath)
for line in fh:
splits = line.split()
try:
seqId = fasta.idFromName(splits[0]) # remove namespace prefix, e.g. 'gi|'
hitId = fasta.idFromName(splits[1])
hitEvalue = float(splits[10])
except Exception as e:
logging.exception('parseResults(): prevSeqId: {}, prevHitId: {}, line: {}'.format(prevSeqId, prevHitId, line))
continue # skip malformed lines instead of falling through with stale or undefined ids
# results table reports multiple "alignments" per "hit" in ascending order by evalue
# we only store the top hits.
if prevSeqId != seqId or prevHitId != hitId:
prevSeqId = seqId
prevHitId = hitId
if seqId not in hitsCountMap:
hitsCountMap[seqId] = 0
hitsMap[seqId] = []
if not limitHits or hitsCountMap[seqId] < limitHits:
hitsCountMap[seqId] += 1
hitsMap[seqId].append((hitId, hitEvalue))
fh.close()
return hitsMap
###############
# RSD FUNCTIONS
###############
def pamlGetDistance(path):
filename = '%s/2AA.t'%path
# adding a pause on the off-chance that the filesystem might be lagging a bit, causing the open() to fail below.
# I think it is more likely that codeml in runPaml_all() is failing before writing the file.
if not os.path.isfile(filename):
time.sleep(0.5)
with open(filename) as rst:
get_rst = rst.readlines()
os.unlink(filename)
if not get_rst:
raise Exception(PAML_ERROR_MSG, path)
str = ''
for line in get_rst[1:]:
cd1 = line.split()
if not len(cd1) > 1:
str += "%s "%(line.split('\n')[0])
continue
if len(cd1) > 1:
str+="%s %s"%(cd1[0], cd1[1])
dist = float(str.split()[2])
return dist
def alignFastaKalign(input):
'''
input: string containing fasta formatted sequences to be aligned.
runs alignment program kalign
Returns: fasta-formatted aligned sequences
'''
alignedFasta = util.run(['kalign', '-f', 'fasta'], input) # output clustalw format
return alignedFasta.replace('\n\n', '\n') # replace fixes a bug in Kalign version 2.04, where if a seq is exactly 60 chars long, an extra newline is output.
def alignFastaClustalw(input, path):
'''
input: string containing fasta formatted sequences to be aligned.
path: working directory where fasta will be written and clustal will write output files.
runs alignment program clustalw
Returns: fasta-formatted aligned sequences
'''
clustalFastaPath = os.path.join(path, CLUSTAL_INPUT_FILENAME)
clustalAlignmentPath = os.path.join(path, CLUSTAL_ALIGNMENT_FILENAME)
util.writeToFile(input, clustalFastaPath)
try:
cmd = ['clustalw', '-output', 'fasta', '-infile', clustalFastaPath, '-outfile', clustalAlignmentPath]
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
logging.exception('runClustal Error: clustalFastaPath data = %s'%open(clustalFastaPath).read())
raise
alignedFasta = util.readFromFile(clustalAlignmentPath)
return alignedFasta
def dashlen_check(seq):
'''
Objective: calculate the density of gaps in a sequence at 5' and 3' ends -- caused by poor alignment or by diff length seqs
Arguments: sequence
Result: the number of bases to be cut from the subject's 5' and 3' ends, and the divergence of the trimmed seq.
'''
seq = seq.strip()
# trim the dashes from the front and end
(frontDashes, trimmedSeq, endDashes) = DASHLEN_RE.search(seq).groups()
# logging.debug('dashlen_check: seq=%s'%seq)
# all dashes -- do not trim anything (return a 3-tuple so callers can always unpack trim, trim, divergence)
if not trimmedSeq:
return (0, 0, 0.0)
# ignore trims < 10.
frontTrim = len(frontDashes)
if frontTrim < 10:
frontTrim = 0
endTrim = len(endDashes)
if endTrim < 10:
endTrim = 0
trimmedSeqDivergence = (trimmedSeq.count('-') / float(len(trimmedSeq)))
return (frontTrim, endTrim, trimmedSeqDivergence)
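# Minimal illustration with a made-up aligned sequence: for
# '----------MKTAYI----------' the regex splits off ten leading and ten trailing
# dashes (both >= 10, so both are trimmed) and the trimmed core contains no gaps,
# so the function returns (10, 10, 0.0).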
def makeGetSeqForId(genomeFastaPath):
'''
genomeFastaPath: location of fasta file. also location/name of blast formatted indexes of the fasta file.
'''
# suck fasta file into memory, converting it into a map from id to sequence
# in memory dict performs much better than on-disk retrieval with xdget or fastacmd.
# and genome fasta files do not take much space (on a modern computer).
fastaMap = {}
for (seqNameline, seq) in fasta.readFasta(genomeFastaPath):
seqId = fasta.idFromName(seqNameline)
fastaMap[seqId] = seq
def getSeqForIdInMemory(seqId):
return fastaMap[seqId]
return getSeqForIdInMemory
def makeGetHitsOnTheFly(genomeIndexPath, evalue, workingDir='.'):
'''
genomeIndexPath: location of blast formatted indexes. usually same directory/name as genome fasta path
evalue: float or string. Hits with evalues >= evalue will not be included in the returned blast hits.
workingDir: a directory in which to create, use, and delete temporary files and dirs.
returns: a function that takes as input a sequence id and sequence and returns the blast hits
'''
def getHitsOnTheFly(seqid, seq):
with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
queryFastaPath = os.path.join(tmpDir, 'query.faa')
# add 'lcl|' to make ncbi blast happy.
util.writeToFile('{0}\n{1}\n'.format('>lcl|'+seqid, seq), queryFastaPath)
hitsDb = getBlastHits(queryFastaPath, genomeIndexPath, evalue, workingDir=workingDir)
return hitsDb.get(seqid)
return getHitsOnTheFly
def makeGetSavedHits(filename):
'''
returns a function which can be used to get the hits
from a file containing pre-computed blast results
'''
# in memory retrieval is faster than on-disk retrieval with bsddb, but this has a minor impact on overall roundup performance.
hitsDb = loadBlastHits(filename)
def getHitsInMemory(seqid, seq):
return hitsDb.get(seqid)
return getHitsInMemory
def getGoodEvalueHits(seqId, seq, getHitsFunc, getSeqFunc, evalue):
'''
evalue: a float.
returns: a list of pairs of (hitSeqId, hitSequence, hitEvalue) that have a hitEvalue below evalue. hitEvalue is a float.
'''
goodhits = []
hits = getHitsFunc(seqId, seq)
# check for 3 or fewer blast hits below evalue threshold
if hits:
hitCount = 0
for hit in hits:
if hitCount >= MAX_HITS:
break
hitSeqId = getHitId(hit)
hitEvalue = getHitEvalue(hit)
if hitEvalue < evalue:
hitCount += 1
hitSeq = getSeqFunc(hitSeqId)
goodhits.append((hitSeqId, hitSeq, hitEvalue))
return goodhits
def getDistanceForAlignedSeqPair(seqId, alignedSeq, hitSeqId, alignedHitSeq, workPath):
# paranoid check: aligned and trimmed seqs need to be the same length.
# if len(alignedSeq) != len(alignedHitSeq):
# raise Exception('getDistanceForAlignedSeqPairs: different lengths for seqs: '+str(((seqId, alignedSeq), (hitSeqId, alignedHitSeq))))
dataFileName = 'datafile.seq'
treeFileName = 'treefile.seq'
outFileName = 'outfile.seq'
dataFilePath = os.path.join(workPath, dataFileName)
treeFilePath = os.path.join(workPath, treeFileName)
outFilePath = os.path.join(workPath, outFileName)
# heading is number of seqs and length of each seq (which all need to be the same len).
heading = '2 %s\n'%len(alignedSeq)
pamlData = heading + '%s\n%s\n'%(seqId, alignedSeq) + '%s\n%s\n'%(hitSeqId, alignedHitSeq)
# logging.debug('pamlData=%s'%pamlData)
util.writeToFile(pamlData, dataFilePath)
# workPath is simply your folder that will contain codeml (Yang 2000), codeml.ctl (the codeml control file), and the jones.dat (Jones et. al, 1998)
# write the codeml control file that will run codeml
# run the codeml
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(['codeml'], cwd=workPath, stdout=devnull)
distance = pamlGetDistance(workPath)
return distance
finally:
for filePath in [dataFilePath, treeFilePath, outFilePath]:
if os.path.exists(filePath):
os.remove(filePath)
def getGoodDivergenceAlignedTrimmedSeqPair(seqId, seq, hitSeqId, hitSeq, workPath):
'''
aligns seq to hit. trims aligned seq and hit seq.
returns: a pair of (id, aligned trimmed sequence) tuples, one for the query sequence and one for the hit,
and a predicate function that, given a divergence threshold, says if the divergence of the sequences exceeds the threshold.
e.g. ((seqId, alignedTrimmedSeq), (hitSeqId, alignedTrimmedHitSeq), divergencePredicateFunc)
'''
# ALIGN SEQ and HIT
# need to align the sequences so we can study the rate of evolution per site
inputFasta = '>%s\n%s\n>%s\n%s\n'%(seqId, seq, hitSeqId, hitSeq)
if USE_CLUSTALW:
alignedFasta = alignFastaClustalw(inputFasta, workPath)
else:
alignedFasta = alignFastaKalign(inputFasta)
# try to recover from rare, intermittent failure of fasta alignment
if not alignedFasta:
logging.error('fasta alignment failed.\ninputFasta=%s\n' +
'alignedFasta=%s\nSleep and retry alignment.',
inputFasta, alignedFasta)
time.sleep(0.1)
alignedFasta = alignFastaKalign(inputFasta)
try:
# parse the aligned fasta into sequence ids and sequences
namelinesAndSeqs = list(fasta.readFasta(cStringIO.StringIO(alignedFasta)))
idAndSeqs = [(fasta.idFromName(seqNameline), seq) for seqNameline, seq in namelinesAndSeqs]
alignedIdAndSeq, alignedHitIdAndSeq = idAndSeqs
except Exception as e:
e.args += (inputFasta, alignedFasta)
raise
# CHECK FOR EXCESSIVE DIVERGENCE AND TRIMMING
# find most diverged sequence
# sort sequences by dash count. why?
divIdSeqs = []
for id, seq in (alignedIdAndSeq, alignedHitIdAndSeq):
dashCount = seq.count('-')
div = dashCount / float(len(seq))
g = (dashCount, div, id, seq)
divIdSeqs.append(g)
divIdSeqs.sort()
# check for excessive divergence
leastDivergedDashCount, leastDivergedDiv, leastDivergedId, leastDivergedSeq = divIdSeqs[0]
# check for excessive divergence and generate dashtrim.
mostDivergedDashCount, mostDivergedDiv, mostDivergedId, mostDivergedSeq = divIdSeqs[1]
# dashtrim = dashlen_check(mostDivergedSeq, divergence)
startTrim, endTrim, trimDivergence = dashlen_check(mostDivergedSeq)
# logging.debug('dashtrim='+str(dashtrim))
# trim and add seqs to output
def divergencePredicate(divergenceThreshold):
'''Why this logic? Ask Dennis. Function closed over local variables that returns whether or not the alignment of the sequences is too diverged.'''
if leastDivergedSeq and leastDivergedDiv > divergenceThreshold:
return True
if (startTrim or endTrim) and trimDivergence >= divergenceThreshold:
return True
return False
alignedTrimmedIdAndSeq, alignedTrimmedHitIdAndSeq = [(id, seq[startTrim:(len(seq)-endTrim)]) for id, seq in (alignedIdAndSeq, alignedHitIdAndSeq)]
return alignedTrimmedIdAndSeq, alignedTrimmedHitIdAndSeq, divergencePredicate
def minimumDicts(dicts, key):
'''
dicts: list of dictionaries.
key: a key present in every dict in dicts.
returns: list of d in dicts, s.t. d[key] <= e[key] for every d, e in dicts.
e.g.: [{'a':4, 'b':1}, {'a':5, 'b':0}, {'b': 0, 'a': 3}], 'b' -> [{'a':5, 'b':0} and {'b': 0, 'a': 3}] (not necessarily in that order)
'''
if not dicts:
return []
sortedDicts = sorted(dicts, key=lambda x: x[key])
minValue = sortedDicts[0][key]
return [d for d in sortedDicts if d[key] == minValue]
def computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, getForwardHits, getReverseHits, querySeqIds=None, workingDir='.'):
'''
queryFastaPath: fasta file path for query genome.
subjectFastaPath: fasta file path for subject genome.
divEvalues: list of (div, evalue) tuples. orthologs are computed using the given div and evalue thresholds. div and evalue can be a float or string.
getForwardHits: a function mapping a query seq id to a list of subject genome blast hits. see makeGetSavedHits() and makeGetHitsOnTheFly().
getReverseHits: a function mapping a subject seq id to a list of query genome blast hits. see makeGetSavedHits() and makeGetHitsOnTheFly().
querySeqIds: a list of sequence ids for the query genome. orthologs are only computed for those sequences.
If False, orthologs are computed for every sequence in the query genome.
workingDir: under workingDir, a temp directory is created, worked in (files and dirs created and deleted), and removed.
returns: a mapping from (div, evalue) tuples to lists of orthologs.
'''
# optimization: internally swap query and subject if subject has fewer sequences than query and no querySeqIds were given.
# compute orthologs and unswap results.
# roundup time complexity is roughly linear in the number of sequences in the query genome.
genomeSwapOptimization = True
if not querySeqIds and genomeSwapOptimization and fasta.numSeqsInFastaDb(subjectFastaPath) < fasta.numSeqsInFastaDb(queryFastaPath):
# print 'roundup(): subject genome has fewer sequences than query genome. internally swapping query and subject to improve speed.'
isSwapped = True
# swap query and subject, forward and reverse
queryFastaPath, subjectFastaPath = subjectFastaPath, queryFastaPath
getForwardHits, getReverseHits = getReverseHits, getForwardHits
else:
isSwapped = False
# make functions to look up a sequence from a sequence id.
getQuerySeqFunc = makeGetSeqForId(queryFastaPath)
getSubjectSeqFunc = makeGetSeqForId(subjectFastaPath)
# if no querySeqIds were specified, get orthologs for every query sequence
if not querySeqIds:
querySeqIds = list(fasta.readIds(queryFastaPath))
# get orthologs for every (div, evalue) combination
with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
divEvalueToOrthologs = _computeOrthologsSub(querySeqIds, getQuerySeqFunc, getSubjectSeqFunc, divEvalues, getForwardHits, getReverseHits, workingDir)
# if swapped query and subject genome, need to swap back the ids in orthologs before returning them.
if isSwapped:
swappedDivEvalueToOrthologs = divEvalueToOrthologs
divEvalueToOrthologs = {}
for divEvalue, swappedOrthologs in swappedDivEvalueToOrthologs.items():
orthologs = [(query, subject, distance) for subject, query, distance in swappedOrthologs]
divEvalueToOrthologs[divEvalue] = orthologs
return divEvalueToOrthologs
def _computeOrthologsSub(querySeqIds, getQuerySeqFunc, getSubjectSeqFunc, divEvalues, getForwardHits, getReverseHits, workingDir):
'''
querySeqIds: a list of sequence ids from query genome. Only orthologs for these ids are searched for.
getQuerySeqFunc: a function that takes a seq id and returns the matching sequence from the query genome.
getSubjectSeqFunc: a function that takes a seq id and returns the matching sequence from the subject genome.
divEvalues: a list of (div, evalue) pairs which are thresholds for finding orthologs. All pairs are searched simultaneously.
div can be a float or string. So can evalue.
getForwardHits: a function that takes a query seq id and a query seq and returns the blast hits in the subject genome.
getReverseHits: a function that takes a subject seq id and a subject seq and returns the blast hits in the query genome.
find orthologs for every sequence in querySeqIds and every (div, evalue) combination.
return: a mapping from (div, evalue) pairs to lists of orthologs.
'''
# Note: the divs and evalues in divEvalues are strings which need to be converted to floats at the appropriate times below.
# copy config files to working dir
shutil.copy(MATRIX_PATH, workingDir)
shutil.copy(CODEML_CONTROL_PATH, workingDir)
divEvalueToOrthologs = dict(((div, evalue), list()) for div, evalue in divEvalues)
maxEvalue = max(float(evalue) for div, evalue in divEvalues)
maxDiv = max(float(div) for div, evalue in divEvalues)
# get ortholog(s) for each query sequence
for queryId in querySeqIds:
querySeq = getQuerySeqFunc(queryId)
# get forward hits, evalues, alignments, divergences, and distances that meet the loosest standards of all the divs and evalues.
# get forward hits and evalues, filtered by max evalue
idSeqEvalueOfForwardHits = getGoodEvalueHits(queryId, querySeq, getForwardHits, getSubjectSeqFunc, maxEvalue)
hitDataList = [{'hitId': hitId, 'hitSeq': hitSeq, 'hitEvalue': hitEvalue} for hitId, hitSeq, hitEvalue in idSeqEvalueOfForwardHits]
# get alignments and divergences
for hitData in hitDataList:
(queryId, alignedQuerySeq), (hitId, alignedHitSeq), tooDivergedPred = getGoodDivergenceAlignedTrimmedSeqPair(queryId, querySeq, hitData['hitId'], hitData['hitSeq'], workingDir)
hitData['alignedQuerySeq'] = alignedQuerySeq
hitData['alignedHitSeq'] = alignedHitSeq
hitData['tooDivergedPred'] = tooDivergedPred
# filter by max divergence.
hitDataList = [hitData for hitData in hitDataList if not hitData['tooDivergedPred'](maxDiv)]
# get distances of remaining hits, discarding hits for which paml generates no rst data.
distancesHitDataList = []
for hitData in hitDataList:
try:
hitData['distance'] = getDistanceForAlignedSeqPair(queryId, hitData['alignedQuerySeq'], hitData['hitId'], hitData['alignedHitSeq'], workingDir)
distancesHitDataList.append(hitData)
except Exception as e:
if e.args and e.args[0] == PAML_ERROR_MSG:
continue
else:
raise
# filter hits by specific div and evalue combinations.
divEvalueToMinimumDistanceHitDatas = {}
minimumHitIdToDivEvalues = {}
minimumHitIdToHitData = {}
for divEvalue in divEvalues:
div, evalue = divEvalue
# collect hit datas that pass thresholds.
goodHitDatas = []
for hitData in distancesHitDataList:
if hitData['hitEvalue'] < float(evalue) and not hitData['tooDivergedPred'](float(div)):
goodHitDatas.append(hitData)
# get the minimum hit or hits.
minimumHitDatas = minimumDicts(goodHitDatas, 'distance')
divEvalueToMinimumDistanceHitDatas[divEvalue] = minimumHitDatas
for hitData in minimumHitDatas:
minimumHitIdToDivEvalues.setdefault(hitData['hitId'], []).append(divEvalue)
minimumHitIdToHitData[hitData['hitId']] = hitData # possibly redundant, since if two divEvalues have same minimum hit, it gets inserted into dict twice.
# get reverse hits that meet the loosest standards of the divs and evalues associated with that minimum distance hit.
# performance note: wasteful or necessary to realign and compute distance between minimum hit and query seq?
for hitId in minimumHitIdToHitData:
hitData = minimumHitIdToHitData[hitId]
hitSeq = hitData['hitSeq']
# since minimum hit might not be associated with all divs and evalues, need to find the loosest div and evalue associated with this minimum hit.
maxHitEvalue = max(float(evalue) for div, evalue in minimumHitIdToDivEvalues[hitId])
maxHitDiv = max(float(div) for div, evalue in minimumHitIdToDivEvalues[hitId])
# get reverse hits and evalues, filtered by max evalue
idSeqEvalueOfReverseHits = getGoodEvalueHits(hitId, hitSeq, getReverseHits, getQuerySeqFunc, maxHitEvalue)
revHitDataList = [{'revHitId': revHitId, 'revHitSeq': revHitSeq, 'revHitEvalue': revHitEvalue} for revHitId, revHitSeq, revHitEvalue in idSeqEvalueOfReverseHits]
# if the query is not in the reverse hits, there is no way we can find an ortholog
if queryId not in [revHitData['revHitId'] for revHitData in revHitDataList]:
continue
for revHitData in revHitDataList:
values = getGoodDivergenceAlignedTrimmedSeqPair(hitId, hitSeq, revHitData['revHitId'], revHitData['revHitSeq'], workingDir)
(hitId, alignedHitSeq), (revHitId, alignedRevHitSeq), tooDivergedPred = values
revHitData['alignedHitSeq'] = alignedHitSeq
revHitData['alignedRevHitSeq'] = alignedRevHitSeq
revHitData['tooDivergedPred'] = tooDivergedPred
# filter by max divergence.
revHitDataList = [revHitData for revHitData in revHitDataList if not revHitData['tooDivergedPred'](maxHitDiv)]
            # if the query is not in the reverse hits, there is no way we can find an ortholog
if queryId not in [revHitData['revHitId'] for revHitData in revHitDataList]:
continue
# get distances of remaining reverse hits, discarding reverse hits for which paml generates no rst data.
distancesRevHitDataList = []
for revHitData in revHitDataList:
try:
revHitData['distance'] = getDistanceForAlignedSeqPair(hitId, revHitData['alignedHitSeq'], revHitData['revHitId'], revHitData['alignedRevHitSeq'], workingDir)
distancesRevHitDataList.append(revHitData)
except Exception as e:
if e.args and e.args[0] == PAML_ERROR_MSG:
continue
else:
raise
# if passes div and evalue thresholds of the minimum hit and minimum reverse hit == query, write ortholog.
# filter hits by specific div and evalue combinations.
for divEvalue in minimumHitIdToDivEvalues[hitId]:
div, evalue = divEvalue
# collect hit datas that pass thresholds.
goodRevHitDatas = []
for revHitData in distancesRevHitDataList:
if revHitData['revHitEvalue'] < float(evalue) and not revHitData['tooDivergedPred'](float(div)):
goodRevHitDatas.append(revHitData)
# get the minimum hit or hits.
minimumRevHitDatas = minimumDicts(goodRevHitDatas, 'distance')
if queryId in [revHitData['revHitId'] for revHitData in minimumRevHitDatas]:
divEvalueToOrthologs[divEvalue].append((queryId, hitId, hitData['distance']))
return divEvalueToOrthologs
def computeOrthologsUsingOnTheFlyHits(queryFastaPath, subjectFastaPath, divEvalues, querySeqIds=None, workingDir='.'):
'''
Convenience function around computeOrthologs()
querySeqIds: a list of sequence ids from query genome to find orthologs for. If empty/falsy, will compute orthologs for every sequence in query genome.
    queryFastaPath: location and name of the fasta file and blast indexes of the query genome. e.g. /groups/rodeo/roundup/genomes/current/Homo_sapiens.aa/Homo_sapiens.aa
    subjectFastaPath: location and name of the fasta file and blast indexes of the subject genome.
    workingDir: a directory in which to create, use, and delete temporary files and dirs.
    This computes blast hits on-the-fly, so it is slower than rounduPrecompute() for computing orthologs for full genomes.
'''
# get blast hits using the least stringent evalue from among all the evalues in divEvalues.
maxEvalue = str(max(float(evalue) for div, evalue in divEvalues))
getForwardHits = makeGetHitsOnTheFly(subjectFastaPath, maxEvalue, workingDir)
getReverseHits = makeGetHitsOnTheFly(queryFastaPath, maxEvalue, workingDir)
divEvalueToOrthologs = computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, getForwardHits, getReverseHits, querySeqIds, workingDir)
return divEvalueToOrthologs
def computeOrthologsUsingSavedHits(queryFastaPath, subjectFastaPath, divEvalues, forwardHitsPath, reverseHitsPath, querySeqIds=None, workingDir='.'):
'''
Convenience function around computeOrthologs()
returns: a mapping from (div, evalue) pairs to lists of orthologs.
'''
getForwardHits = makeGetSavedHits(forwardHitsPath)
getReverseHits = makeGetSavedHits(reverseHitsPath)
divEvalueToOrthologs = computeOrthologs(queryFastaPath, subjectFastaPath, divEvalues, getForwardHits, getReverseHits, querySeqIds, workingDir)
return divEvalueToOrthologs
def writeToOutfile(orthologs, outfile):
'''
orthologs: a list of tuples of (queryid, subjectid, distance).
outfile: where to write the orthologs
    write the orthologs to the outfile in the canonical format: one ortholog per line, where each line is the tab-separated query id, subject id, and distance.
'''
data = ''.join(['%s\t%s\t%s\n'%(query, subject, distance) for query, subject, distance in orthologs])
with open(outfile, 'w') as fh:
fh.write(data)
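# Illustrative usage sketch (added for clarity; not part of the original module). The
# genome paths, hit-file paths, and (div, evalue) thresholds below are hypothetical
# placeholders only.
def _exampleComputeAndWriteOrthologs():
    divEvalues = [('0.2', '1e-20'), ('0.8', '1e-5')]
    divEvalueToOrthologs = computeOrthologsUsingSavedHits(
        'query_genome.aa', 'subject_genome.aa', divEvalues,
        'forward_hits.pickle', 'reverse_hits.pickle', workingDir='.')
    for (div, evalue), orthologs in divEvalueToOrthologs.items():
        # one output file per (divergence, evalue) threshold combination
        writeToOutfile(orthologs, 'orthologs_%s_%s.txt' % (div, evalue))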
###################################
# COMMAND-LINE PROCESSING FUNCTIONS
###################################
def copyFastaArg(srcFile, destDir):
'''
srcFile: FASTA format genome file.
    destDir: where to copy the fasta file.
Copy the source file to the destination dir. If the source file is already in the destination dir, it will not be copied.
return: path of the copied fasta file.
'''
# use absolute paths
srcFile = os.path.abspath(os.path.expanduser(srcFile))
destDir = os.path.abspath(os.path.expanduser(destDir))
destFile = os.path.join(destDir, os.path.basename(srcFile))
# copy GENOME to DIR if necessary
if srcFile != destFile:
shutil.copyfile(srcFile, destFile)
return destFile
def formatFastaArg(fastaFile):
'''
formatting puts blast indexes in the same dir as fastaFile.
returns: fastaFile
'''
fastaFile = os.path.abspath(os.path.expanduser(fastaFile))
formatForBlast(fastaFile)
return fastaFile
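# Illustrative sketch (added for clarity; not part of the original script): stage a genome
# fasta file into a working directory and build BLAST indexes next to it. The paths below
# are hypothetical placeholders only.
def _exampleStageGenome():
    stagedFasta = copyFastaArg('~/genomes/Homo_sapiens.aa', '/tmp/rsd_work')
    return formatFastaArg(stagedFasta)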
if __name__ == '__main__':
pass
# last line
|
todddeluca/reciprocal_smallest_distance
|
rsd/rsd.py
|
Python
|
mit
| 32,578
|
[
"BLAST"
] |
e724e2367d1fa058fef95c6d946ee5c0081e578fae596fa1a391fb4cbb87277a
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create a cyberware source
#
cyber = vtk.vtkPolyDataReader()
cyber.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fran_cut.vtk")
normals = vtk.vtkPolyDataNormals()
#enable this for cool effect
normals.SetInputConnection(cyber.GetOutputPort())
normals.FlipNormalsOn()
stripper = vtk.vtkStripper()
stripper.SetInputConnection(cyber.GetOutputPort())
mask = vtk.vtkMaskPolyData()
mask.SetInputConnection(stripper.GetOutputPort())
mask.SetOnRatio(2)
cyberMapper = vtk.vtkPolyDataMapper()
cyberMapper.SetInputConnection(mask.GetOutputPort())
cyberActor = vtk.vtkActor()
cyberActor.SetMapper(cyberMapper)
cyberActor.GetProperty().SetColor(1.0,0.49,0.25)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(cyberActor)
ren1.SetBackground(1,1,1)
renWin.SetSize(300,300)
#ren1 SetBackground 0.1 0.2 0.4
ren1.SetBackground(1,1,1)
# render the image
#
cam1 = vtk.vtkCamera()
cam1.SetFocalPoint(0.0520703,-0.128547,-0.0581083)
cam1.SetPosition(0.419653,-0.120916,-0.321626)
cam1.SetViewAngle(21.4286)
cam1.SetViewUp(-0.0136986,0.999858,0.00984497)
ren1.SetActiveCamera(cam1)
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Core/Testing/Python/stripF.py
|
Python
|
gpl-3.0
| 1,534
|
[
"VTK"
] |
37e0ee8b9567abbd47b4dbb0a11879e11b672332b4d1f9899e114083266c7d64
|
arenaRankName = {
"01": "Novice",
"02": "Contender",
"03": "Duelist",
"04": "Berserker",
"05": "Sage",
"06": "Guardian",
"07": "Titan",
"08": "General",
"09": "Master",
"10": "Archfiend",
"11": "Ascended",
"12": "Celestial",
"13": "Apollo",
"14": "Phanes",
"15": "Eurynomos",
"16": "Emperor",
"17": "Suppressor",
"18": "Beam",
"19": "Barrager",
"20": "Heavenly Judge",
"21": "Gleaming God",
"22": "Slumber God",
"23": "Original God",
"24": "Darkstalker",
"25": "Ouranos",
"26": "Phoebus",
"27": "Athena"
}
mapName = {
"1": {
"dungeon": {
"0": "Adventurer's Prairie",
"1": "Cave of Flames",
"2": "Egor Snowfield",
"3": "Forest of Beasts",
"4": "Magutagal Wetlands",
"5": "Remains of Mirza",
"6": "Monster's Nest",
"7": "Tower of Mistral"
},
"special": "Egor Mountains",
"name": "Mistral",
"order": 1
},
"10": {
"dungeon": {
"1": "Agni Empire",
"2": "Agniria Coast",
"3": "Granasta Plateau",
"4": "Thunder Summit Navara",
"5": "King's Tomb",
"6": "Lognea Cove",
"7": "Ocean Shrine Albina",
"8": ""
},
"special": "Agni Dungeon",
"name": "Agni",
"order": 10
},
"11": {
"dungeon": {
"1": "Cave of Darkness",
"2": "Arvest Snowfield",
"3": "Marleg Ocean of Trees",
"4": "Bog of Eternal Rain",
"5": "Cape Narasta",
"6": "Mirvana Ruins",
"7": "Tower of the Gods",
"8": ""
},
"special": "",
"name": "Mirvana",
"order": 11
},
"12": {
"dungeon": {
"1": "Ulga Lava Cave",
"2": "Sea Capital Almeria",
"3": "Forest of Old",
"4": "Danrard Fortress",
"5": "Lanara's Light Ruins",
"6": "Cave of Dark Crystals",
"7": "Alnakeid Castle",
"8": ""
},
"special": "",
"name": "Lanara",
"order": 12
},
"13": {
"dungeon": {
"1": "Olmond Volcano",
"2": "Rownor Beach",
"3": "Principality of Vriksha",
"4": "The Red Lake",
"5": "Sky Palace Talahr",
"6": "Dark Castle Damrigia",
"7": "Adan's Tower",
"8": ""
},
"special": "",
"name": "Vriksha",
"order": 13
},
"14": {
"dungeon": {
"1": "Ogurono Desert",
"2": "Great Temple Eveneze",
"3": "Unarva Forest",
"4": "Aldahlia of the Rain",
"5": "Pilgrimage Path",
"6": "La Grenna Deadlands",
"7": "Wolf's Lair",
"8": ""
},
"special": "Elnada Sea Temple",
"name": "Aldahlia",
"order": 14
},
"15": {
"dungeon": {
"1": "Ghost Town Ornea",
"2": "Great Argant Desert",
"3": "Ekmet River",
"4": "Great Forest Nalmea",
"5": "Mt. Granakia",
"6": "Lowe Grimm Garden",
"7": "Amuris Cemetery",
"8": "Crystal Palace"
},
"special": "Garnes Research Cave",
"name": "Atharva",
"order": 15
},
"16": {
"dungeon": {
"1": "Ancient Dragon's Wood",
"2": "Boruk Blazing Dunes",
"3": "Scarlet River Laorkia",
"4": "Deserted Town Yuufim",
"5": "Tower of the Oracle",
"6": "Imperial Capital",
"7": "Sacred Mt. Bari",
"8": "Grand Gaia Crater"
},
"special": "Subterranean Cave",
"name": "Bariura",
"order": 16
},
"17": {
"dungeon": {
"1": "Forgotten Ruins",
"2": "Lava Bridges",
"3": "Hogun Acid Cascades",
"4": "Farv Miasma Forest",
"5": "Thunder Grotto Yuuram",
"6": "Piercing Light Tower",
"7": "Poisonous Bug Forest",
"8": "Soaring Tree Tower"
},
"special": "Cave of Sealed Might",
"name": "Celgrad",
"order": 17
},
"18": {
"dungeon": {
"1": "Jimug Lava Caverns",
"2": "Baraku Wetlands",
"3": "Vordo Primeval Forest",
"4": "Fossilized Dragon",
"5": "Lem Shrine Ruins",
"6": "Dark Sierra Oritira",
"7": "Bridge of Despair",
"8": ""
},
"special": "Cape of Sealed Ploys",
"name": "Lem",
"order": 18
},
"19": {
"dungeon": {
"1": "Moldia Lava Road",
"2": "Crimson Beach Shurin",
"3": "Reis's Lab Ruins",
"4": "Windmill Garden",
"5": "Ancient City Maldoor",
"6": "Endless Staircase",
"7": "Beiorg Castle",
"8": ""
},
"special": "Sealed Forest",
"name": "Beiorg",
"order": 19
},
"2": {
"dungeon": {
"1": "Kagan Desert",
"2": "Breeze Beach",
"3": "Nocturnal Forest",
"4": "Asekutt Wastelands",
"5": "Shrine of Lystia",
"6": "Destroyed Cathedral",
"7": "Tower of Morgan",
"8": ""
},
"special": "",
"name": "Morgan",
"order": 2
},
"20": {
"dungeon": {
"1": "Burning Capital Garm",
"2": "Waste Disposal Lake",
"3": "Wulgee Flower Bed",
"4": "Avor Lighting Bridge",
"5": "Forsaken Disciple's Lair",
"6": "Demon Cavern Feig",
"7": "Forsaken Castle Biorad",
"8": ""
},
"special": "Sealed Ship Graveyard",
"name": "Wulgee",
"order": 20
},
"21": {
"dungeon": {
"1": "Gadoum Furnace",
"2": "Luminous River Falun",
"3": "Botanical Garden",
"4": "Gimo Junkyard",
"5": "Shining Capital",
"6": "Rakshult Underground",
"7": "Weather Tower",
"8": ""
},
"special": "Sealed Empty Garden",
"name": "Rakshult",
"order": 21
},
"22": {
"dungeon": {
"1": "Bivorg Lava Cave",
"2": "Red Ore Beach",
"3": "Twilight Forest Ruins",
"4": "Thunder Canyon Rahda",
"5": "Despair Castle Kayohn",
"6": "Great Giant's Remains",
"7": "Tower of Amu Yunos",
"8": ""
},
"special": "Sealed Crystal Cave",
"name": "Zamburg",
"order": 22
},
"23": {
"dungeon": {
"1": "Fire Dragon's Roar",
"2": "Water Dragon Town Treve",
"3": "Ilesia Plains",
"4": "Fort Maharada",
"5": "Light Dragon Town Sinn",
"6": "God-Eater's Web",
"7": "Mt. Nerga",
"8": ""
},
"special": "Sealed Dragon's Deep",
"name": "Fal Nerga",
"order": 23
},
"24": {
"dungeon": {
"1": "Oil Platform Adel",
"2": "Zig Snowfield",
"3": "Ice Forest Sulsoria",
"4": "Gate of Abdication",
"5": "Shining Cave Navoa",
"6": "Olaim Ice Ridge",
"7": "Demon Castle Estria",
"8": ""
},
"special": "Sealed Altar",
"name": "Estria",
"order": 24
},
"25": {
"dungeon": {
"1": "Sealed God's Gate",
"2": "Hydra Geyser",
"3": "Remains of Raza",
"4": "Thunder Hill Padam",
"5": "Hope's Light Castle",
"6": "Cursed Town Ygmeon",
"7": "Empty Seal Menon",
"8": ""
},
"special": "Ark of the Sealed God",
"name": "Menon",
"order": 25
},
"26": {
"dungeon": {
"1": "Ruined Capital Zera",
"2": "Ruined Capital Fuve",
"3": "Flying Castle Mildran",
"4": "Eternal Temple",
"5": "",
"6": "",
"7": "",
"8": ""
},
"special": "Hermitage",
"name": "Mildran",
"order": 26
},
"101": {
"dungeon": {
"1": "Iron District",
"2": "Selza Arsenal",
"3": "Demolished Site",
"4": "Tech City Mersas",
"5": "Information Tower",
"6": "Zoldas Mountains",
"7": "Elevated Station",
"8": ""
},
"special1": "EX1",
"special2": "EX2",
"special": "Michele's Sepulcher",
"name": "Bectas",
"order": 101
},
"102": {
"dungeon": {
"1": "Bariura Lake Castle",
"2": "Castle Road",
"3": "Snowstorm Woods",
"4": "Raujis Lake",
"5": "Mirage Snowfields",
"6": "Subzero Road",
"7": "Gallum Ice Ridge",
"8": "Mystical Wolf's Lair"
},
"special1": "Divine Beast's Prison",
"special2": "Merciful Glory ",
"special": "EX",
"name": "Vilanciel",
"order": 102
},
"103": {
"dungeon": {
"1": "Bluesun Grove Path",
"2": "Prosperity Fields",
"3": "Calamity Plains",
"4": "Defensive Checkpoint",
"5": "Grand Bridge",
"6": "Skyblock Walls",
"7": "Thousand-Mat Hall",
"8": "Phoenix Tower"
},
"special1": "Fields of Despair",
"special2": "Abandoned Temple",
"special": "EX",
"name": "Ohbanahara",
"order": 103
},
"104": {
"dungeon": {
"1": "Vista Plateau",
"2": "Ancient Cave",
"3": "Ankash Ruins",
"4": "Flowing Waterfall",
"5": "Hollow Ravine",
"6": "Divine Helm",
"7": "Armor Hall",
"8": "Area 8"
},
"special1": "EX1",
"special2": "EX2",
"special": "Demolished Tomb",
"name": "Rokkzalm",
"order": 104
},
"105": {
"dungeon": {
"1": "Hill of Falling Stars",
"2": "Dragon's Flame Tomb",
"3": "Thunder Gem Mountain",
"4": "Moonlit Mirror Lake",
"5": "Deep Divine Forest",
"6": "Kahral Temple Ruins",
"7": "Guiding Star Cave",
"8": "Dragon's Chambers"
},
"special1": "EX1",
"special2": "EX2",
"special": "EX",
"name": "Valdroar",
"order": 105
},
"106": {
"dungeon": {
"1": "Area 1",
"2": "Area 2",
"3": "Area 3",
"4": "Area 4",
"5": "Area 5",
"6": "Area 6",
"7": "Area 7",
"8": "Area 8"
},
"special1": "EX1",
"special2": "EX2",
"special": "EX",
"name": "FINALE - St. Creek",
"order": 106
},
"3": {
"dungeon": {
"1": "Volcano Eldent",
"2": "Sacred Mt. Craylia",
"3": "Blood Forest",
"4": "Mt. Wistorea",
"5": "Secluded Sanctuary",
"6": "Cave of Malice",
"7": "St. Lamia Palace",
"8": ""
},
"special": "",
"name": "St. Lamia",
"order": 3
},
"4": {
"dungeon": {
"1": "Greskya Caves",
"2": "Ignia Falls",
"3": "Lomass Forest",
"4": "Valtan Fortress",
"5": "Tower of Light",
"6": "Cordelica Mine",
"7": "Giant's Ruins",
"8": ""
},
"special": "Ignia Cavern",
"name": "Cordelica",
"order": 4
},
"5": {
"dungeon": {
"1": "Julep Village",
"2": "Ghost Ship Legnia",
"3": "Elios Plains",
"4": "Saji Mines",
"5": "Lamellia Temple",
"6": "Ghost Town Edila",
"7": "Amdahl Castle",
"8": ""
},
"special": "",
"name": "Amdahl",
"order": 5
},
"6": {
"dungeon": {
"1": "Elsta Crater",
"2": "Lake Aldela",
"3": "Castle Avenia",
"4": "Gadillian Ravine",
"5": "Alman Mausoleum",
"6": "Land of the Dead",
"7": "Sky Fort Solaris",
"8": ""
},
"special": "Valmodora's Nest",
"name": "Encervis",
"order": 6
},
"7": {
"dungeon": {
"1": "Dejour Ruins",
"2": "Obselion Castle",
"3": "Emerald Path",
"4": "Grandelt Ruins",
"5": "Arlind Seminary",
"6": "Noera Battleground",
"7": "Spirit World Palmyna",
"8": ""
},
"special": "",
"name": "Palmyna",
"order": 7
},
"8": {
"dungeon": {
"1": "Erinecht Plains",
"2": "Golzo Mountains",
"3": "Lodan Frozen Lake",
"4": "The Impassable Marshlands",
"5": "Lightning Forest Zaljiba",
"6": "Lost City La Veda",
"7": "The Black Bridge",
"8": "Tower of Destruction"
},
"special": "Golzo Underground Lake",
"name": "Lizeria",
"order": 8
},
"9": {
"dungeon": {
"1": "Glomore Hollow",
"2": "Snow Area Khelne",
"3": "Stokhelna Forest",
"4": "Azura Ruins",
"5": "Ryvern Mountains",
"6": "The Abyss Cave",
"7": "The White Tower",
"8": ""
},
"special": "",
"name": "Ryvern",
"order": 9
}
}
grandQuestName = {
"1": {
"name": "Demonic Agitation"
},
"2": {
"name": "Proud Soldier's Feast"
},
"3": {
"name": "Cobalt Spirit's Waking"
},
"4": {
"name": "The Crimson God's Cry"
},
"5": {
"name": "The Old Dawn General"
},
"6": {
"name": "Lin's Long Day"
},
"7": {
"name": "Virtual Garden"
},
"8": {
"name": "Warped Reflection"
},
"9": {
"name": "Gathering Hope"
},
"10": {
"name": "Four Heroes of Palmyna"
},
"11": {
"name": "Ten-Winged Tormentor"
},
"12": {
"name": "Collaboration GQ with Chain Chronicles"
},
"13": {
"name": "Hopes and Regrets"
},
"14": {
"name": "The Correct Path"
},
"15": {
"name": "Collaboration GQ with Thousand Memories"
},
"16": {
"name": "Collaboration GQ with Shin Megami Tensei"
},
"17": {
"name": "Entrusted Will"
},
"18": {
"name": "Collaboration GQ with Eagle Talon/Takanotsume"
},
"19": {
"name": "Sera's Grand Quest"
},
"20": {
"name": "Those Who Lead"
},
"21": {
"name": "The End of an Empire"
},
"X1": {
"name": "The Tinkerer's Deceit"
},
"X2": {
"name": "Rih'alnase -- Genesis"
},
}
full_ills = {
#======================
# BRAVE FRONTIER JAPAN
#======================
"navi_chara1": "http://i.imgur.com/CXKkw3X.png",
"navi_chara2": "http://i.imgur.com/h4yd8uW.png",
"navi_chara3": "http://i.imgur.com/SjueK8c.png",
"navi_chara4": "http://i.imgur.com/OVgKa0l.png",
"navi_chara5": "http://i.imgur.com/PZWh1tF.png",
"navi_chara6": "http://i.imgur.com/zsLacF6.png",
"navi_chara7": "http://i.imgur.com/SleuAHd.png",
"navi_chara8": "http://i.imgur.com/lZXxURz.png",
"navi_chara9": "http://i.imgur.com/GGYLlQ1.png",
"navi_chara10": "http://i.imgur.com/finJi4d.png",
"navi_chara11": "http://i.imgur.com/m5pCMgJ.png",
"navi_chara12": "http://i.imgur.com/qt2TqxA.png",
"navi_chara13": "http://i.imgur.com/8fowV0h.png",
"navi_chara14": "http://i.imgur.com/GbFQykt.png",
"navi_chara15": "http://i.imgur.com/Bc1c1qB.png",
"navi_chara16": "http://i.imgur.com/Ui3lhIr.png",
"navi_chara18": "http://i.imgur.com/Eue5HpB.png",
"navi_chara19": "http://i.imgur.com/fePmOnx.png",
"navi_chara20": "http://i.imgur.com/vEFHO89.png",
"navi_chara21": "http://i.imgur.com/myzBDkZ.png",
"navi_chara22": "http://i.imgur.com/8UJ3xI2.png",
"navi_chara23": "http://i.imgur.com/vKOpxJ4.png",
"navi_chara24": "http://i.imgur.com/ODe8xzK.png",
"navi_chara26": "http://i.imgur.com/dvcia8Y.png",
"navi_chara27": "http://i.imgur.com/QEwhDCb.png",
"navi_chara28": "http://i.imgur.com/YTUavY0.png",
"navi_chara29": "http://i.imgur.com/0TwgoWJ.png",
"navi_chara30": "http://i.imgur.com/3iXVCZU.png",
"navi_chara32": "http://i.imgur.com/MnHOIYr.png",
"navi_chara33": "http://i.imgur.com/Ccdhw6O.png",
"navi_chara34": "http://i.imgur.com/kg5fWlL.png",
"navi_chara35": "http://i.imgur.com/O2c4BKw.png",
"navi_chara36": "http://i.imgur.com/ZRLG1mS.png",
"navi_chara37": "http://i.imgur.com/ynEfJPK.png",
"navi_chara38": "http://i.imgur.com/E7dwgLC.png",
"navi_chara39": "http://i.imgur.com/ijt4dmH.png",
"navi_chara40": "http://i.imgur.com/oJuzJMw.png",
"navi_chara41": "http://i.imgur.com/ZCG3Eqd.png",
"navi_chara42": "http://i.imgur.com/hqMPpmp.png",
"navi_chara43": "http://i.imgur.com/iuJ31LX.png",
"navi_chara44": "http://i.imgur.com/hWHolPc.png",
"navi_chara45": "http://i.imgur.com/4J949uh.png",
"navi_chara46": "http://i.imgur.com/e5zv4hc.png",
"navi_chara47": "http://i.imgur.com/zOdDW2c.png",
"navi_chara48": "http://i.imgur.com/SMeXcKY.png",
"navi_chara50": "http://i.imgur.com/kBZfBP7.png",
"navi_chara51": "http://i.imgur.com/L9KARyy.png",
"navi_chara52": "http://i.imgur.com/OOjc9af.png",
"navi_chara53": "http://i.imgur.com/oud5IW3.png",
"navi_chara54": "http://i.imgur.com/rI8Y37X.png",
"navi_chara55": "http://i.imgur.com/IaON2es.png",
"navi_chara56": "http://i.imgur.com/jk8ekPr.png",
"navi_chara57": "http://i.imgur.com/kDHOGqs.png",
"navi_chara58": "http://i.imgur.com/nIrPmAc.png",
"navi_chara59": "http://i.imgur.com/U8JsJws.png",
"navi_chara60": "http://i.imgur.com/qIbREwG.png",
"navi_chara61": "http://i.imgur.com/C9jp4vx.png",
"navi_chara62": "http://i.imgur.com/itYJ6fj.png",
"navi_chara63": "http://i.imgur.com/BgWhwYc.png",
"navi_chara64": "http://i.imgur.com/vL8Dsga.png",
"navi_chara65": "http://i.imgur.com/6YOb8XU.png",
"navi_chara66": "http://i.imgur.com/LbXT56Q.png",
"navi_chara67": "http://i.imgur.com/oA9D5dJ.png",
"navi_chara68": "http://i.imgur.com/BCtLv0k.png",
"navi_chara69": "http://i.imgur.com/O6vlM9e.png",
"navi_chara70": "http://i.imgur.com/viPFq3O.png",
"navi_chara71": "http://i.imgur.com/ZkExuYV.png",
"navi_chara73": "http://i.imgur.com/qds7fgW.png",
"navi_chara74": "http://i.imgur.com/1RbHCse.png",
"navi_chara75": "http://i.imgur.com/x8fo5nu.png",
"navi_chara76": "http://i.imgur.com/SP2DGDe.png",
"navi_chara77": "http://i.imgur.com/0TEwyPJ.png",
"navi_chara78": "http://i.imgur.com/f9eZbMZ.png",
"navi_chara79": "http://i.imgur.com/iCmkgX9.png",
"navi_chara80": "http://i.imgur.com/5G3z2Ca.png",
"navi_chara81": "http://i.imgur.com/LtqrCpV.png",
"navi_chara82": "http://i.imgur.com/MNJ6onV.png",
"navi_chara83": "http://i.imgur.com/cY2V1a9.png",
"navi_chara84": "http://i.imgur.com/yBwwFMW.png",
"navi_chara85": "http://i.imgur.com/icHbdWD.png",
"navi_chara87": "http://i.imgur.com/lpNc8u2.png",
"navi_chara89": "http://i.imgur.com/K1aAaDu.png",
"navi_chara90": "http://i.imgur.com/UGpvlMj.png",
"navi_chara91": "http://i.imgur.com/SgYSVSz.png",
"navi_chara92": "http://i.imgur.com/Rqc7cV3.png",
"navi_chara93": "http://i.imgur.com/3i9FkKa.png",
"navi_chara94": "http://i.imgur.com/BR66Vtb.png",
"navi_chara95": "http://i.imgur.com/7DC2TRK.png",
"navi_chara96": "http://i.imgur.com/qCflqmZ.png",
"navi_chara97": "http://i.imgur.com/0ETuoqY.png",
"navi_chara98": "http://i.imgur.com/v0zDdhS.png",
"navi_chara99": "http://i.imgur.com/U8KPN1A.png",
"navi_chara100": "http://i.imgur.com/52aFCdJ.png",
"navi_chara101": "http://i.imgur.com/n4pWkPV.png",
"navi_chara102": "http://i.imgur.com/74lX1pW.png",
"navi_chara103": "http://i.imgur.com/ZmLZnmu.png",
"navi_chara104": "http://i.imgur.com/xJPeKpR.png",
"navi_chara105": "http://i.imgur.com/bE1ouRw.png",
"navi_chara106": "http://i.imgur.com/pstdRvJ.png",
"navi_chara107": "http://i.imgur.com/vTteKcl.png",
"navi_chara108": "http://i.imgur.com/lvrwqT9.png",
"navi_chara109": "http://i.imgur.com/oikmX9W.png",
"navi_chara110": "http://i.imgur.com/4zJqN7F.png",
"navi_chara111": "http://i.imgur.com/66x9v0I.png",
"navi_chara112": "http://i.imgur.com/HTgpr2E.png",
"navi_chara113": "http://i.imgur.com/oROJqA5.png",
"navi_chara114": "http://i.imgur.com/KdxgRwB.png",
"navi_chara115": "http://i.imgur.com/BRzd8hE.png",
"navi_chara116": "http://i.imgur.com/pDCgvC9.png",
"navi_chara117": "http://i.imgur.com/hoaZRus.png",
"navi_chara118": "http://i.imgur.com/mH1dT8m.png",
"navi_chara119": "http://i.imgur.com/2rKwdqU.png",
"navi_chara120": "http://i.imgur.com/dJ3HmvV.png",
"navi_chara121": "http://i.imgur.com/5DaFbm4.png",
"navi_chara122": "http://i.imgur.com/rqjtwf5.png",
"navi_chara123": "http://i.imgur.com/gBQNDpg.png",
"navi_chara124": "http://i.imgur.com/781DNor.png",
"navi_chara127": "http://i.imgur.com/9U6NRpx.png",
"navi_chara129": "http://i.imgur.com/bDMGx35.png",
"navi_chara130": "http://i.imgur.com/Mhv9xBS.png",
"navi_chara131": "http://i.imgur.com/MUUBbcv.png",
"navi_chara132": "http://i.imgur.com/gCuQT3Z.png",
"navi_chara134": "http://i.imgur.com/RyIcYLt.png",
"navi_chara135": "http://i.imgur.com/x9uDaNe.png",
"navi_chara136": "http://i.imgur.com/zk8rtRR.png",
"navi_chara137": "http://i.imgur.com/F3gyA14.jpg",
"navi_chara138": "http://i.imgur.com/xHPz8A0.png",
"navi_chara140": "http://i.imgur.com/n8xwQoe.png",
"navi_chara142": "http://i.imgur.com/1LlVKdQ.png",
#=======================
# BRAVE FRONTIER GLOBAL
#=======================
"navi_chara80016": "http://i.imgur.com/Yxn3pn3.png",
"navi_chara80025": "http://i.imgur.com/8iQ29zE.png",
"navi_chara80032": "http://i.imgur.com/mLQbLwX.png",
"navi_chara80033": "http://i.imgur.com/X8CwkqD.png",
"navi_chara80034": "http://i.imgur.com/5MOoHGo.png",
"navi_chara80035": "http://i.imgur.com/x35aR1z.png",
"navi_chara80036": "http://i.imgur.com/Vq7WojX.png",
"navi_chara80037": "http://i.imgur.com/vbFlxUG.png",
"navi_chara80038": "http://i.imgur.com/ZLwSPY7.png",
"navi_chara80039": "http://i.imgur.com/BxvlKWa.png",
"navi_chara80040": "http://i.imgur.com/WyJStne.png",
"navi_chara80041": "http://i.imgur.com/4uoeaL0.png",
"navi_chara80042": "http://i.imgur.com/Czl78Co.png",
"navi_chara80043": "http://i.imgur.com/JmQCk6L.png",
"navi_chara80046": "http://i.imgur.com/KWME5yf.png",
"navi_chara80047": "http://i.imgur.com/pm6hOA7.png",
"navi_chara80048": "http://i.imgur.com/prhxMuS.png",
"navi_chara80049": "http://i.imgur.com/EeVM9vL.png",
"navi_chara80050": "http://i.imgur.com/KX54a6c.png",
"navi_chara80051": "http://i.imgur.com/tsb940i.png",
"navi_chara80052": "http://i.imgur.com/YS9rVWE.png",
"navi_chara80053": "http://i.imgur.com/xWaszjH.png",
"navi_chara80054": "http://i.imgur.com/OC1bgm1.png",
"navi_chara80055": "http://i.imgur.com/J3drt4x.png",
"navi_chara80056": "http://i.imgur.com/XrszfWa.png",
"navi_chara80057": "http://i.imgur.com/pm5D6rA.png",
"navi_chara80058": "http://i.imgur.com/LDyK3I8.png",
"navi_chara80059": "http://i.imgur.com/VWf5dmR.png",
"navi_chara80060": "http://i.imgur.com/2muX1zc.png",
"navi_chara80061": "http://i.imgur.com/kiDrEhA.png",
"navi_chara80062": "http://i.imgur.com/7YzOeqC.png",
"navi_chara80063": "http://i.imgur.com/vT3yiDy.png",
"navi_chara80064": "http://i.imgur.com/NahdD7W.png",
"navi_chara80065": "http://i.imgur.com/yR3UDqf.png",
"navi_chara80066": "http://i.imgur.com/xMYAI0v.png",
#===========================
# COLLABORATIONS AND OTHERS
#===========================
"navi_chara_tm01": "http://i.imgur.com/C6Rk4DY.png",
"navi_chara_tm02": "http://i.imgur.com/1FpSckh.png",
"navi_chara_tm03": "http://i.imgur.com/wVwclpW.png",
"navi_chara_cc01": "http://i.imgur.com/Vjya869.png",
"navi_chara_cc02": "http://i.imgur.com/B2skyWn.png",
"navi_chara_cc06": "http://i.imgur.com/1UmNNpx.png",
"navi_chara_cc07": "http://i.imgur.com/S3YVKzH.png",
"navi_chara_cc08": "http://i.imgur.com/bM6NkFq.png",
"navi_chara_mt01": "http://i.imgur.com/KH9QOWU.png",
"navi_chara_mt02": "http://i.imgur.com/BnclFHK.png",
"navi_chara_mt03": "http://i.imgur.com/bnCTVKZ.png",
"navi_chara_mt04": "http://i.imgur.com/RkdMbaI.png",
"navi_chara_mt05": "http://i.imgur.com/rZhliPw.png",
"navi_chara_mt06": "http://i.imgur.com/Tf2HoL6.png"
}
if __name__ == '__main__':
print("navi_chara80047" in full_ills)
'''
import json
resultingdict = []
for i in range(1, 26):
resultingdict.append((str(i), {"name":mapName[str(i)],"order":i,"dungeon":dict([(str(j),"") for j in range(1, 9)])}))
resultingdict = dict(resultingdict)
print json.dumps(resultingdict, indent=4, sort_keys=True)
print json.dumps(resultingdict["25"]["dungeon"], indent=4, sort_keys=True)
#print json.dump(, indent=4)
'''
|
Blackrobe/blackrobe.github.io
|
BFJPStoryArchive/storyutil.py
|
Python
|
gpl-2.0
| 26,218
|
[
"CRYSTAL"
] |
38e3b3703508e3061bd025d952f3d6794bb197105058f75bb0b518cae5508f81
|
#!/usr/bin/python
"""Test of sayAll."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Add"))
sequence.append(utils.AssertPresentationAction(
"1. KP_Add to do a SayAll",
["SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'News'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Projects'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Art'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Support'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Development'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Community'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'leaving list.'",
"SPEECH OUTPUT: 'live.gnome.org'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'form'",
"SPEECH OUTPUT: 'entry'",
"SPEECH OUTPUT: 'Search'",
"SPEECH OUTPUT: 'Titles'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'grayed'",
"SPEECH OUTPUT: 'Text'",
"SPEECH OUTPUT: 'push button'",
"SPEECH OUTPUT: 'grayed'",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'RecentChanges'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'FindPage'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'HelpContents'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'en Español'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Home'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'Mailing List'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ') |'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '|'",
"SPEECH OUTPUT: 'DocIndex'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Welcome to Orca!'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Orca Logo'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'HOT HOT HOT: Notes on'",
"SPEECH OUTPUT: 'access to Firefox 3.0'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Contents'",
"SPEECH OUTPUT: 'List with 8 items'",
"SPEECH OUTPUT: '1.'",
"SPEECH OUTPUT: 'Welcome to Orca!'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '2.'",
"SPEECH OUTPUT: 'About'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '3.'",
"SPEECH OUTPUT: 'Audio Guides'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '4.'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '5.'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '6.'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '7.'",
"SPEECH OUTPUT: 'How Can I Help?'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '8.'",
"SPEECH OUTPUT: 'More Information'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'leaving list.'",
"SPEECH OUTPUT: 'About'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Orca is a free, open source, flexible, extensible, and powerful assistive technology for people with visual impairments.'",
"SPEECH OUTPUT: 'Using various combinations of speech synthesis, braille, and magnification, Orca helps provide access to applications and toolkits that support the AT-SPI \\(e.g.,'",
"SPEECH OUTPUT: 'the GNOME desktop\\).'",
"SPEECH OUTPUT: 'The development of Orca has been led by the'",
"SPEECH OUTPUT: 'Accessibility Program Office of Sun Microsystems, Inc.'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'with'",
"SPEECH OUTPUT: 'contributions from many community members'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'The complete list of work to do, including bugs and feature requests, along with known problems in other components, is maintained in'",
"SPEECH OUTPUT: 'Bugzilla'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '(please see our'",
"SPEECH OUTPUT: 'notes on how we use Bugzilla'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ').'",
"SPEECH OUTPUT: 'Please join and participate on the'",
"SPEECH OUTPUT: 'Orca mailing list'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'archives'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '): it's a helpful, kind, and productive environment composed of users and developers.'",
"SPEECH OUTPUT: 'Audio Guides'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Darragh \xd3 H\xe9iligh'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'has created several audio guides for Orca.'",
"SPEECH OUTPUT: 'This is a fantastic contribution (THANKS!)!!!'",
"SPEECH OUTPUT: 'The audio guides can be found at'",
"SPEECH OUTPUT: 'http://www.digitaldarragh.com/linuxat.asp'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'and include the following:'",
"SPEECH OUTPUT: 'List with 3 items'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Walk through of the installation of Ubuntu 7.4. Very helpful tutorial'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Review of Fedora 7 and the Orca screen reader for the Gnome graphical desktop'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Guide to installing the latest versions of Firefox and Orca'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'leaving list.'",
"SPEECH OUTPUT: 'Download/Installation'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'As of GNOME 2.16,'",
"SPEECH OUTPUT: 'Orca is a part of the GNOME platform.'",
"SPEECH OUTPUT: 'As a result, Orca is already provided by default on a number of operating system distributions, including'",
"SPEECH OUTPUT: 'Open Solaris'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'and'",
"SPEECH OUTPUT: 'Ubuntu'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Download/Installation page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information on various distributions as well as installing Orca directly from source.'",
"SPEECH OUTPUT: 'Configuration/Use'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'The command to run orca is orca.'",
"SPEECH OUTPUT: 'You can enter this command by pressing Alt+F2 when logged in, waiting for a second or so, then typing orca and pressing return.'",
"SPEECH OUTPUT: 'Orca is designed to present information as you navigate the desktop using the'",
"SPEECH OUTPUT: 'built-in navigation mechanisms of GNOME'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'These navigation mechanisms are consistent across most desktop applications.'",
"SPEECH OUTPUT: 'You may sometimes wish to control Orca itself, such as bringing up the'",
"SPEECH OUTPUT: 'Orca Configuration GUI'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '(accessed by pressing Insert+Space when Orca is running) and for using flat review mode to examine a window.'",
"SPEECH OUTPUT: 'Refer to'",
"SPEECH OUTPUT: 'Orca Keyboard Commands'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '(Laptop Layout)'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for more information on Orca-specific keyboard commands.'",
"SPEECH OUTPUT: 'The'",
"SPEECH OUTPUT: 'Orca Configuration GUI'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'also includes a \"Key Bindings\" tab that allows you to get a complete list of Orca key bindings.'",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Configuration/Use page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information.'",
"SPEECH OUTPUT: 'Accessible Applications'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'Orca is designed to work with applications and toolkits that support the assistive technology service provider interface (AT-SPI).'",
"SPEECH OUTPUT: 'This includes the GNOME desktop and its applications,'",
"SPEECH OUTPUT: 'OpenOffice'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', Firefox, and the Java platform.'",
"SPEECH OUTPUT: 'Some applications work better than others, however, and the Orca community continually works to provide compelling access to more and more applications.'",
"SPEECH OUTPUT: 'On the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ', you will find a growing list of information regarding various applications that can be accessed with Orca as well as tips and tricks for using them.'",
"SPEECH OUTPUT: 'The list is not to be a conclusive list of all applications.'",
"SPEECH OUTPUT: 'Rather, the goal is to provide a repository within which users can share experiences regarding applications they have tested.'",
"SPEECH OUTPUT: 'See also the'",
"SPEECH OUTPUT: 'Application Specific Settings'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'page for how to configure settings specific to an application.'",
"SPEECH OUTPUT: 'Please also refer to the'",
"SPEECH OUTPUT: 'Accessible Applications page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information.'",
"SPEECH OUTPUT: 'How Can I Help?'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'There's a bunch you can do!'",
"SPEECH OUTPUT: 'Please refer to the'",
"SPEECH OUTPUT: 'How Can I Help page'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'for detailed information.'",
"SPEECH OUTPUT: 'More Information'",
"SPEECH OUTPUT: 'heading level 1'",
"SPEECH OUTPUT: 'List with 7 items'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Frequently Asked Questions:'",
"SPEECH OUTPUT: 'FAQ'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Mailing list:'",
"SPEECH OUTPUT: 'orca-list@gnome.org'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'Archives'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ')'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Bug database:'",
"SPEECH OUTPUT: 'GNOME Bug Tracking System (Bugzilla)'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '('",
"SPEECH OUTPUT: 'current bug list'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ')'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Design documents:'",
"SPEECH OUTPUT: 'Orca Documentation Series'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Dive Into Python, Mark Pilgrim'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Python in a Nutshell, Alex Martelli'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '•'",
"SPEECH OUTPUT: 'Python Pocket Reference, Mark Lutz'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'leaving list.'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'The information on this page and the other Orca-related pages on this site are distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'",
"SPEECH OUTPUT: 'separator'",
"SPEECH OUTPUT: 'CategoryAccessibility'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Orca (last edited 2007-12-07 22:09:22 by'",
"SPEECH OUTPUT: 'WillieWalker'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: ')'",
"SPEECH OUTPUT: 'User'",
"SPEECH OUTPUT: 'heading level 3'",
"SPEECH OUTPUT: 'Login'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'leaving list.'",
"SPEECH OUTPUT: 'Page'",
"SPEECH OUTPUT: 'heading level 3'",
"SPEECH OUTPUT: 'List with 4 items'",
"SPEECH OUTPUT: 'Immutable Page'",
"SPEECH OUTPUT: 'Info'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Attachments'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'form'",
"SPEECH OUTPUT: 'More Actions:'",
"SPEECH OUTPUT: 'combo box'",
"SPEECH OUTPUT: 'leaving list.'",
"SPEECH OUTPUT: 'GNOME World Wide'",
"SPEECH OUTPUT: 'heading level 3'",
"SPEECH OUTPUT: 'GnomeWorldWide'",
"SPEECH OUTPUT: 'image'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: 'Copyright \xa9 2005, 2006, 2007'",
"SPEECH OUTPUT: 'The GNOME Project'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'",
"SPEECH OUTPUT: 'Hosted by'",
"SPEECH OUTPUT: 'Red Hat'",
"SPEECH OUTPUT: 'link'",
"SPEECH OUTPUT: '.'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
GNOME/orca
|
test/keystrokes/firefox/say_all_wiki.py
|
Python
|
lgpl-2.1
| 13,353
|
[
"ORCA"
] |
0c2ed160d22b4e09160e5e99ebb6600ef7e9476fad03f81e42e5f357b89e4430
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#
#
#####################################################################################
"""
程序功能说明:
1.读取annovar的txt文件
2.通过关键字获取统计信息
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import numpy
import HTSeq
import multiprocessing
import pysam
from matplotlib import pyplot
from ablib.utils.tools import *
from ablib.utils.distribution import *
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option(
'-g', '--gff_file', dest='gff_file', action='store',
type='string', help='gff file')
p.add_option(
'-f','--fasta_file',dest='fasta_file',action = 'store',
type = 'string',help = 'fasta file')
p.add_option(
'-r','--region',dest='region',action='store',
type = 'string',help ='the region needed for extract')
p.add_option(
'-o', '--outfile', dest='outfile', action='store',
type='string', help='insection_distribution.txt')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option(
'-O', '--outDir', dest='outDir', default='./', action='store',
type='string', help='output directory', metavar="DIR")
group.add_option(
'-L', '--logDir', dest='logDir', default='', action='store',
type='string', help='log dir ,default is same as outDir')
group.add_option(
'-P', '--logPrefix', dest='logPrefix', default='', action='store',
type='string', help='log file prefix')
group.add_option(
'-E', '--email', dest='email', default='none', action='store',
type='string',
help='email address, if you want get a email when this job is finished,default is no email',
metavar="EMAIL")
group.add_option(
'-Q', '--quiet', dest='quiet', default=False, action='store_true',
help='do not print messages to stdout')
group.add_option(
'-K', '--keepTemp', dest='keepTemp', default=False, action='store_true',
help='keep temp dir')
group.add_option(
'-T', '--test', dest='isTest', default=False, action='store_true',
help='run this program for test')
p.add_option_group(group)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
# sample = ""
if opt.samplename != "":
sample = opt.samplename
# if opt.outfile == 'distance2tss_peaks.txt':
# opt.outfile = sample + '_distance2tss_peaks.txt'
#
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M',
filename=logFilename,
filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
def get_gff(chr,Region):
db = gffutils.FeatureDB(opt.db)
for region in db.features_of_type(opt.region, seqid=chr, order_by='start'):
Region[chr].append(str(region.start) + '\t' + str(region.end))
def get_fasta(infile, chr):
    # Note: the original function body was truncated after the condition below; this
    # minimal completion accumulates the sequence lines for `chr` until the next header.
    flag = 0
    seq = ''
    for eachLine in open(infile):
        if eachLine.startswith(chr):
            flag = 1
            continue
        if flag == 1 and not eachLine.startswith(">"):
            seq += eachLine.strip()
        elif flag == 1:
            break
    return seq
def Filter_ts_tv(outfile,data):
"""
    Count transitions (ts) and transversions (tv) among the SNVs, split into known and novel (absent from dbSNP) variants.
"""
ts = 0
tv = 0
novel_ts = 0
novel_tv = 0
for s in data:
line = s.split("\t")
if(s.startswith("Chr")):
index_dbsnp = line.index(opt.dbsnp)
Cytosine = ["C","T"]
Guanie = ["G","A"]
if (line[3] in Cytosine) and (line[4] in Cytosine):
ts += 1
if line[index_dbsnp]==".":
novel_ts += 1
# if line[10] == ".":
# novel_ts += 1
elif (line[3] in Guanie) and (line[4] in Guanie):
ts += 1
if line[index_dbsnp] == ".":
novel_ts += 1
# if line[10] == ".":
# novel_ts +=1
else:
tv += 1
if line[index_dbsnp] == ".":
novel_tv += 1
# if line[10] == ".":
# novel_tv += 1
with open(outfile,'w') as OUT:
OUT.writelines("Sample\tnovel_ts\tnovel_ts/tv\tnovel_tv\tts\tts/tv\ttv\n")
novel_pro = '{:.2f}'.format(novel_ts/float(novel_tv))
pro = '{:.2f}'.format(ts/float(tv))
OUT.writelines(sample + "\t" + str(novel_ts) + "\t" + str(novel_pro) + "\t" + str(novel_tv) + "\t" + str(ts) + "\t" + str(pro) + "\t" + str(tv) + "\n")
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
INDEL_data = []
SNV_data = []
get_Data(opt.file,INDEL_data,SNV_data)
snp_region_file1 = sample + '_SNP_Region.txt'
Filter_Region(snp_region_file1,SNV_data)
indel_region_file1 = sample + '_InDel_Region.txt'
Filter_Region(indel_region_file1,INDEL_data)
snp_type_file1 = sample + '_SNP_Type.txt'
Filter_Type(snp_type_file1,"SNP",SNV_data)
indel_type_file1 = sample + '_InDel_Type.txt'
Filter_Type(indel_type_file1,"InDel",INDEL_data)
snp_genotype_file = sample + '_SNP_GenoType.txt'
Filter_Genotype(snp_genotype_file,SNV_data)
indel_genotype_file = sample + '_InDel_GenoType.txt'
Filter_Genotype(indel_genotype_file,SNV_data)
snp_ts_tv_file = sample + '_SNP_TS_TV.txt'
Filter_ts_tv(snp_ts_tv_file,SNV_data)
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if not opt.keepTemp:
os.system('rm -rf ' + tempPath)
logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (
runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def countProgram(programName, startT, runT, isTest):
countProgramFile = open('/users/ablife/ablifepy/countProgram.txt', 'a')
countProgramFile.write(
programName + '\t' + str(os.getlogin()) + '\t' + str(startT) + '\t' + str(
runT) + 's\t' + isTest + '\n')
countProgramFile.close()
testStr = 'P'
if opt.isTest:
testStr = 'T'
countProgram(sys.argv[0], startTime, runningTime, testStr)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
|
ablifedev/ABLIRC
|
ABLIRC/bin/public/Extract_Region_sequence.py
|
Python
|
mit
| 12,799
|
[
"HTSeq",
"pysam"
] |
ce374db4a91b2e3f42494950ee6d64ae81037e4a92435c7e2198745e6a470117
|
#!/usr/bin/env python
#
# Wrapper script for starting the biopet-seqstat JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'seqstat-assembly-0.1.jar'
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
    where the first three elements are lists of strings.
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=')[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
"""
PeptideShaker updates files relative to the path of the jar file.
In a multiuser setting, the option --exec_dir="exec_dir"
can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
java = java_executable()
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
joachimwolff/bioconda-recipes
|
recipes/biopet-seqstat/0.1/biopet-seqstat.py
|
Python
|
mit
| 3,365
|
[
"Bioconda"
] |
5dd33d554e12d3e8689357716f0ecefaff6d3f6a907275f7626b437ce05fb534
|
#!/usr/bin/env python2.7
"""
relion_it.py
============
Script for automated, on-the-fly single-particle analysis in RELION 3
Authors: Sjors H.W. Scheres, Takanori Nakane & Colin M. Palmer
Usage:
relion_it.py [extra_options.py [extra_options2.py ....] ] [--gui] [--continue]
To get started, go to the intended location of your RELION project directory and make sure your micrographs are
accessible from within it (e.g. in a subdirectory called `Movies/' - use a symlink if necessary). Then run this
script, providing the names of files containing options if needed. (To call the script, you'll need to enter the full
path to it, put the directory containing it on your PATH environment variable, or put a copy of the script in the
current directory.)
Run with the `--gui' option to launch a simple GUI which will set up a run from a few basic options. (The GUI can
also be used to save a complete options file that you can then edit as required.)
Once the script is running, open a normal RELION GUI to see what's happening and visualise the results.
See below for full instructions including how to handle errors. If you have any problems, please edit the script as
needed, call on your local Python expert or email the CCP-EM mailing list (https://www.jiscmail.ac.uk/ccpem).
Overview
--------
relion_it.py creates a number of RELION jobs and then runs one or more `relion_pipeliner' processes to schedule them
(exactly like using the "Schedule" button in the RELION GUI). Instructions and information are printed to the terminal
by relion_it.py as it runs.
relion_it.py uses a large number of options to control how the jobs are run. It's designed to be very flexible and so
these options can be changed in a number of ways:
- The easiest way is to use the simple GUI (enabled by passing the `--gui' argument), which allows you to set a few
simple options. These are then used to calculate appropriate values for the complete set of options. (See "Using the
GUI" below for more information on this.)
- For more control, options can be put into one or more Python files (with a simple "option_name = value" format or
with more complicated calculations - see "Options files" below for more information). The names of these options
files can be passed as command line arguments to relion_it.py.
- For maximum control, you can make your own copy of this script and change the option values and the code itself
however you want.
Before running relion_it.py, you need to make sure you're in your intended RELION project directory, and that your
movie files are accessible by relative paths within that directory (as usual for a RELION project). You could do this
by moving the files from the microscope straight into the project directory, using a symlink from your project
directory to the real location of the data, or running a script to create a new symlink to each micrograph as it is
collected.
Options files
-------------
relion_it.py uses a large number of options for controlling both the flow of the script and the parameters for
individual jobs. These options can be read from Python script files when relion_it.py is started.
The options are all listed in the body of the script below, with a comment to explain each option. One way to use this
script is to copy it in its entirety into your project directory, edit the options directly in the script and then
run it (with no command line arguments). However, it's often better to keep the script in the RELION source directory
(where it can be updated easily) and use options files to configure it.
An example of a simple options file is:
angpix = 1.06
This would override the default pixel size value, but leave all other options at their defaults.
The options files are read and interpreted as Python scripts. A simple list of "option_name = value" lines is all
that is needed, though you can also use any Python commands you like to do more complex calculations. To generate
an example file containing all of the options, run "relion_it.py --gui" and then click the "Save options" button,
which will save all the current options to a file called `relion_it_options.py' in the working directory.
The options are named descriptively so you can probably understand what most of them do quite easily. For more help on
any particular option, look at the comment above its definition in this script, or search the script's code to see
how it is used.
Options files can be useful as templates. As an example, at Diamond Light Source's eBIC facility, we have a template
file called `dls_cluster_options.py' that contains the necessary settings to make relion_it.py submit most of its jobs
to run on the DLS GPU cluster. You could also set up standard templates for a particular microscope (say, voltage and
Cs settings) or for a particular project or computer configuration.
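As a purely illustrative sketch (these option names are real options defined later in this script, but the values
shown are made up and should be replaced with ones appropriate for your own set-up), such a template file might
contain:
angpix = 1.06
voltage = 300
Cs = 2.7
motioncor_gainreference = 'Movies/gain.mrc'
queue_submit_command = 'qsub'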
When relion_it.py starts, it reads all options files in the order they are given on the command line. Subsequent files
will override earlier ones, so the last value given for any particular option will be the value that is used.
If you start relion_it.py with the `--continue' argument, it will automatically add `relion_it_options.py' to the end
of the list of options files. This means that if you are in a project directory where the relion_it.py GUI has
previously been used, all options will be defined in the relion_it_options.py file and they will override any other
options files given on the command line. (This is very useful for restarting the script after a problem, but it would
be pointless to combine `--continue' with any options template files.)
Note that if relion_it.py finds option names that it doesn't recognise while it's reading an options file, it will
print a warning (but continue anyway). If you've been editing options files by hand, you should check the output from
relion_it.py when it starts to make sure there are no typos in the options you wanted to set. (If you're using local
variables for intermediate Python calculations in an options file, it's a good idea to use names starting with a
leading underscore so you can immediately tell them apart from warnings about genuine spelling mistakes.)
Using the GUI
-------------
The GUI provides a simple way to start new projects with relion_it.py. If you want to use it, prepare your project
directory as described above, then start the GUI with "relion_it.py --gui". (If you're using any template options
files, you can give those too, for example "relion_it.py /path/to/site/options.py --gui".)
The window that appears should be self-explanatory. Fill in the options as needed for your project, and use the check
boxes on the right to control what processing steps will be done. When you're ready, click either "Save options" or
"Save & run". The program will check the values you've entered and then use them to calculate a few extra options for
relion_it.py. The options will then be saved to a file called `relion_it_options.py', and if you clicked "Save & run"
the processing run will start immediately.
If any of the entered values are invalid (for example, if there are letters in a field which should be a number), the
GUI will display a message box with an error when you click one of the buttons. It will also display a warning if any
values appear to be incorrect (but you can choose to ignore the warning by clicking "OK").
The GUI will try to calculate some extra options from the values you enter using the following rules:
1. If a 3D reference is given, use a single pass with reference-based autopicking, minimum distance between particles
of 0.7 times the particle size, and a batch size of 100,000 particles.
2. If no 3D reference is given, run a first pass with reference-free LoG autopicking and a batch size of 10,000, and
then a second pass with reference-based autopicking and a batch size of 100,000.
These options should be sensible in many cases, but if you'd like to change them, save the options from the GUI using
the "Save options" button, close the GUI, and edit the `relion_it_options.py' file to change the option values as
needed. You can then start the processing run with "relion_it.py --continue".
Running the pipelines
---------------------
relion_it.py uses several different scheduling pipelines to run its jobs. While each one is running, a file is
created in the project directory called `RUNNING_PIPELINER_<name>'. A log of the jobs run by that pipeline is stored
in `pipeline_<name>.log'.
If you want to stop one of the pipelines for any reason, delete its `RUNNING_' file and within a minute or two the
pipeliner will notice that the file has been removed and stop.
relion_it.py itself uses a similar file called `RUNNING_RELION_IT', and you can delete this to stop the script (which
will not affect any pipelines that are already running). It keeps a list of all of the jobs it has submitted in a
file called `RELION_IT_SUBMITTED_JOBS'. This file can be edited manually if necessary (but not while the script is
running!)
Most of the jobs are run by the `preprocessing' pipeline. This will do the following:
1. Import movies
2. Motion correction
3. CTF estimation
4. Particle auto-picking
5. Particle extraction
6. Batch selection
After a number of particles have been extracted (1,000 by default), a 2D classification job will be run to provide
feedback on the quality of the data collection and particle picking.
Particles are split into batches of a fixed size (default 10,000 for the first pass with no reference, or 100,000
otherwise). The first batch is special: as it grows, the 2D classification job is re-run repeatedly to provide early
feedback on the quality of the data. For subsequent batches, the script waits for each batch to be complete before
running 2D classification on it.
You can provide reference structures for auto-picking and 3D classification. (If you provide a 3D reference in the
GUI it will automatically be used for both tasks.)
If you do not provide a reference for auto-picking, reference-free LoG picking will be used. If you do not provide a
reference for classification, relion_it.py will run the preprocessing pipeline twice. In the first pass, an initial
model will be generated, and then a second pass of preprocessing will be done using the initial model as a reference
for auto-picking and classification.
relion_it.py makes an effort to try to identify a suitable reference to use from the classes produced by the
InitialModel job, but if it selects an inappropriate reference, you can change it by stopping the pipelines and
script ("rm RUNNING_*"), updating the reference filename stored in the file named `RELION_IT_2NDPASS_3DREF', deleting
the relevant jobs (`autopick2_job' and those following) from the `RELION_IT_SUBMITTED_JOBS' file, then restarting the
pipeline with "relion_it.py --continue".
Fixing problems
---------------
One-off job failure
```````````````````
Occasionally, a single job can fail with an isolated error, for example if there are temporary network problems while
working on a remote filesystem. If this happens, RELION will wait forever for the files to appear that would indicate
the job has finished. In the meantime, no new jobs will be run, which can cause a backlog of micrographs to build up.
To fix this (for a preprocessing job), you can just try to re-run the job from the RELION GUI. Select the job in the
"Running jobs" list, then click "Job actions" -> "Mark as finished". Select the job again in the "Finished jobs"
list, then click "Continue!" to re-start the job.
That approach should work for preprocessing jobs, but probably won't work for classification or initial model
generation jobs, since those cannot be continued and must instead be restarted from the beginning. The best way to do
that is to restart the job manually, outside the RELION GUI, and then when the job finishes RELION should continue as
if the job had never failed.
For example, with a failed local job:
ps -e | grep relion # to check if the job is still active
kill <process_id> # to stop the job
# now re-run the commands from the job's `note.txt' file
or with a job that was submitted to an SGE cluster queue:
qstat # to check if the job is still active in the queue
qdel <job_id> # to remove the job from the queue
qsub job_type/job_directory/run_submit.script # to re-submit the job
The other option is to just run a new job from the RELION GUI in the normal way (select the job you want to "copy" in
the jobs list, make a "new" job by clicking on the job type in the list in the top-left of the GUI, then click
"Run!"). However, if you do this, relion_it.py will not know about the new job and will not run any further
downstream processing based on it. In this situation, you can either continue to process your data manually in RELION,
or you could edit the `RELION_IT_SUBMITTED_JOBS' file to replace the failed job with the manual one, and delete the
jobs that followed the original one. After that, if you re-run the script it should continue as normal from that
job onwards.
Repeated job failure
````````````````````
If a job fails repeatedly, it usually indicates that there is some problem with the job parameters or the files that
the job needs to access.
In favourable cases, it's possible you could fix the problem by selecting the job in the RELION GUI, changing one of
the parameters that is not greyed out, then clicking "Continue!". Often, though, the problem will be with one of the
parameters that can't be changed for a job that already exists, so the job will need to be deleted and recreated with
a different set of parameters.
To handle this situation, stop all of the pipelines and the relion_it.py script ("rm RUNNING_*"), then identify and
fix the problem. Often, the problem will be an error in one of the job parameters, which can usually be fixed by
changing one of the script options (for example by changing the settings in `relion_it_options.py', if you originally
used the GUI to start the run).
If the problem is caused by missing files from an upstream job, you might need to check the output of previous jobs
and look in the job directories to figure out what the problem is. Again, if it's an error in the parameters for a
job, you can probably fix it by editing `relion_it_options.py'.
After changing any script options, you'll need to use the RELION GUI to delete the affected job and all jobs
downstream of it, and also remove them from the list in the `RELION_IT_SUBMITTED_JOBS' file. Then you should be able
to restart the pipelines by running "relion_it.py --continue".
If you still can't get a particular job to run without errors, you can at least continue to run the upstream jobs
that are working properly. You can do this either by changing the options for relion_it.py (there are options to
switch off 2D or 3D classification, or to stop after CTF estimation), or by manually scheduling the jobs you want
using the RELION GUI. Remember that after running relion_it.py, you have a normal RELION project, so if the script
can't do what you want, you can simply stop it and then use all of RELION's normal job management and scheduling
abilities.
Advanced usage
--------------
It's possible to customise many aspects of the way relion_it.py works, but the details go beyond the scope of this
introduction. Simple customisation can be done by setting appropriate option values (see "Options files" above). For
more substantial changes, you might need to edit the script's Python code to get the behaviour you want. Most of the
important logic is in the `run_pipeline()' function so that's a good place to start. Good luck!
"""
from __future__ import division # always use float division
import argparse
import glob
import inspect
import math
import os
import runpy
import time
import traceback
try:
import Tkinter as tk
import tkMessageBox
import tkFileDialog
except ImportError:
# The GUI is optional. If the user requests it, it will fail when it tries
# to open so we can ignore the error for now.
pass
# Constants
PIPELINE_STAR = 'default_pipeline.star'
RUNNING_FILE = 'RUNNING_RELION_IT'
SECONDPASS_REF3D_FILE = 'RELION_IT_2NDPASS_3DREF'
SETUP_CHECK_FILE = 'RELION_IT_SUBMITTED_JOBS'
PREPROCESS_SCHEDULE_PASS1 = 'PREPROCESS'
PREPROCESS_SCHEDULE_PASS2 = 'PREPROCESS_PASS2'
OPTIONS_FILE = 'relion_it_options.py'
class RelionItOptions(object):
"""
Options for the relion_it pipeline setup script.
When initialised, this contains default values for all options. Call
``update_from()`` to override the defaults with a dictionary of new values.
"""
#############################################################################
# Change the parameters below to reflect your experiment #
# Current defaults reflect cryo-ARM betagal data set of RELION-3.0 tutorial #
#############################################################################
### General parameters
# Pixel size in Angstroms in the input movies
angpix = 0.885
# Acceleration voltage (in kV)
voltage = 200
# Polara = 2.0; Talos/Krios = 2.7; some Cryo-ARM = 1.4
Cs = 1.4
### Import images (Linux wild card; movies as *.mrc, *.mrcs, *.tiff or *.tif; single-frame micrographs as *.mrc)
import_images = 'Movies/*.tiff'
# Are these multi-frame movies? Set to False for single-frame micrographs (and motion-correction will be skipped)
images_are_movies = True
### MotionCorrection parameters
# Dose in electrons per squared Angstrom per frame
motioncor_doseperframe = 1.277
# Gain-reference image in MRC format (only necessary if input movies are not yet gain-corrected, e.g. compressed TIFFs from K2)
motioncor_gainreference = 'Movies/gain.mrc'
### CTF estimation parameters
# Most cases won't need changes here...
### Autopick parameters
# Use reference-free Laplacian-of-Gaussian picking (otherwise use reference-based template matching instead)
autopick_do_LoG = True
# Minimum and maximum diameter in Angstrom for the LoG filter
autopick_LoG_diam_min = 150
autopick_LoG_diam_max = 180
# Use positive values (0 to 1) to pick fewer particles; use negative values (-1 to 0) to pick more particles
autopick_LoG_adjust_threshold = 0.0
#
# OR:
#
# References for reference-based picking (when autopick_do_LoG = False)
autopick_2dreferences = ''
# OR: provide a 3D reference for reference-based picking (when autopick_do_LoG = False)
autopick_3dreference = ''
# Threshold for reference-based autopicking (threshold 0 will pick too many particles. Default of 0.4 is hopefully better. Ultimately, just hope classification will sort it all out...)
autopick_refs_threshold = 0.4
# Minimum inter-particle distance for reference-based picking (~70% of particle diameter often works well)
autopick_refs_min_distance = 120
#
# For both LoG and refs:
#
# Use this to remove false positives from carbon edges (useful range: 1.0-1.2, -1 to switch off)
autopick_stddev_noise = -1
# Use this to remove false positives from carbon edges (useful range: -0.5 to 0.0; -999 to switch off)
autopick_avg_noise = -999
### Extract parameters
# Box size of particles in the averaged micrographs (in pixels)
extract_boxsize = 256
# Down-scale the particles upon extraction?
extract_downscale = False
# Box size of the down-scaled particles (in pixels)
extract_small_boxsize = 64
# In second pass, down-scale the particles upon extraction?
extract2_downscale = False
# In second pass, box size of the down-scaled particles (in pixels)
extract2_small_boxsize = 128
### Now perform 2D and/or 3D classification with the extracted particles?
do_class2d = True
# And/or perform 3D classification?
do_class3d = True
# Repeat 2D and/or 3D-classification for batches of this many particles
batch_size = 10000
# Number of 2D classes to use
class2d_nr_classes = 50
# Diameter of the mask used for 2D/3D classification (in Angstrom)
mask_diameter = 190
# Symmetry group (when using SGD for initial model generation, C1 may work best)
symmetry = 'C1'
#
### 3D-classification parameters
# Number of 3D classes to use
class3d_nr_classes = 4
# Have initial 3D model? If not, calculate one using SGD initial model generation
have_3d_reference = False
# Initial reference model
class3d_reference = ''
# Is reference on correct greyscale?
class3d_ref_is_correct_greyscale = False
# Has the initial reference been CTF-corrected?
class3d_ref_is_ctf_corrected = True
# Initial lowpass filter on reference
class3d_ini_lowpass = 40
### Use the largest 3D class from the first batch as a 3D reference for a second pass of autopicking? (only when do_class3d is True)
do_second_pass = True
# Only move on to template-based autopicking if the 3D reference achieves this resolution (in A)
minimum_resolution_3dref_2ndpass = 20
# In the second pass, perform 2D classification?
do_class2d_pass2 = True
# In the second pass, perform 3D classification?
do_class3d_pass2 = False
# Batch size in the second pass
batch_size_pass2 = 100000
###################################################################################
############ Often the parameters below can be kept the same for a given set-up
###################################################################################
### Repeat settings for entire pipeline
# Repeat the pre-processing runs this many times (or until RUNNING_PIPELINER_default_PREPROCESS file is deleted)
preprocess_repeat_times = 999
# Wait at least this many minutes between each repeat cycle
preprocess_repeat_wait = 1
### Stop after CTF estimation? I.e., skip autopicking, extraction, 2D/3D classification, etc?
stop_after_ctf_estimation = False
# Check every this many minutes if enough particles have been extracted for a new batch of 2D-classification
batch_repeat_time = 1
### MotionCorrection parameters
# Use RELION's own implementation of motion-correction (CPU-only) instead of the UCSF implementation?
motioncor_do_own = False
# The number of threads (only for RELION's own implementation) is optimal when nr_movie_frames/nr_threads is an integer
motioncor_threads = 12
# Executable of UCSF MotionCor2
motioncor_exe = '/public/EM/MOTIONCOR2/MotionCor2'
# On which GPU(s) to execute UCSF MotionCor2
motioncor_gpu = '0'
# How many MPI processes to use for running motion correction?
motioncor_mpi = 1
# Local motion-estimation patches for MotionCor2
motioncor_patches_x = 5
motioncor_patches_y = 5
# B-factor in A^2 for downweighting of high-spatial frequencies
motioncor_bfactor = 150
# Use binning=2 for super-resolution K2 movies
motioncor_binning = 1
# Provide a defect file for your camera if you have one
motioncor_defectfile = ''
# orientation of the gain-reference w.r.t your movies (if input movies are not yet gain-corrected, e.g. TIFFs)
motioncor_gainflip = 'No flipping (0)'
motioncor_gainrot = 'No rotation (0)'
# Other arguments for MotionCor2
motioncor_other_args = ''
# Submit motion correction job to the cluster?
motioncor_submit_to_queue = False
### CTF estimation parameters
# Amplitude contrast (Q0)
ampl_contrast = 0.1
# CTFFIND-defined parameters
ctffind_boxsize = 512
ctffind_astigmatism = 100
ctffind_maxres = 5
ctffind_minres = 30
ctffind_defocus_max = 50000
ctffind_defocus_min = 5000
ctffind_defocus_step = 500
# For Gctf: ignore parameters on the 'Searches' tab?
ctffind_do_ignore_search_params = True
# For Gctf: perform equi-phase averaging?
ctffind_do_EPA = True
# Also estimate phase shifts (for VPP data)
ctffind_do_phaseshift = False
# Executable for Kai Zhang's Gctf
gctf_exe = '/public/EM/Gctf/bin/Gctf'
# On which GPU(s) to execute Gctf
gctf_gpu = '0'
# Use Alexis Rohou's CTFFIND4 (CPU-only) instead?
use_ctffind_instead = False
# Executable for Alexis Rohou's CTFFIND4
ctffind4_exe = '/public/EM/ctffind/ctffind.exe'
# How many MPI processes to use for running CTF estimation?
ctffind_mpi = 1
# Submit CTF estimation job to the cluster?
ctffind_submit_to_queue = False
### Autopick parameters
# Use GPU-acceleration for autopicking?
autopick_do_gpu = True
# Which GPU(s) to use for autopicking
autopick_gpu = '0'
# Low-pass filter for auto-picking the micrographs
autopick_lowpass = 20
# Shrink factor for faster picking (0 = fastest; 1 = slowest)
autopick_shrink_factor = 0
# How many MPI processes to use for running auto-picking?
autopick_mpi = 1
# Additional arguments for autopicking
autopick_other_args = ''
# Submit Autopick job to the cluster?
autopick_submit_to_queue = False
# Are the references CTF-corrected?
autopick_refs_are_ctf_corrected = True
# Do the references have inverted contrast wrt the micrographs?
autopick_refs_have_inverted_contrast = True
# Ignore CTFs until the first peak
autopick_refs_ignore_ctf1stpeak = False
# Diameter of mask for the references (in A; negative value for automated detection of mask diameter)
autopick_refs_mask_diam = -1
# In-plane angular sampling interval
autopick_inplane_sampling = 10
# Symmetry of the 3D reference for autopicking
autopick_3dref_symmetry = 'C1'
# 3D angular sampling for generating projections of the 3D reference for autopicking (30 degrees is usually enough)
autopick_3dref_sampling = '30 degrees'
# Pixel size in the provided 2D/3D references (negative for same as in motion-corrected movies)
autopick_ref_angpix = -1
### Extract parameters
# Diameter for background normalisation (in pixels; negative value: default is 75% box size)
extract_bg_diameter = -1
# How many MPI processes to use for running particle extraction?
extract_mpi = 1
# Submit Extract job to the cluster?
extract_submit_to_queue = False
## Discard particles based on average/stddev values? (this may be important for SGD initial model generation)
do_discard_on_image_statistics = False
# Discard images that have average/stddev values that are more than this many sigma away from the ensemble average
discard_sigma = 4
# Submit discard job to the cluster?
discard_submit_to_queue = False
#### Common relion_refine parameters used for 2D/3D classification and initial model generation
# Read all particles in one batch into memory?
refine_preread_images = False
# Or copy particles to scratch disk?
refine_scratch_disk = ''
# Number of pooled particles?
refine_nr_pool = 10
# Use GPU-acceleration?
refine_do_gpu = True
# Which GPU to use (different from GPU used for pre-processing?)
refine_gpu = '1'
# How many MPI processes to use
refine_mpi = 1
# How many threads to use
refine_threads = 6
# Skip padding?
refine_skip_padding = False
# Submit jobs to the cluster?
refine_submit_to_queue = False
# Use fast subsets in 2D/3D classification when batch_size is bigger than this
refine_batchsize_for_fast_subsets = 100000
### 2D classification parameters
# Wait with the first 2D classification batch until at least this many particles are extracted
minimum_batch_size = 1000
# Number of iterations to perform in 2D classification
class2d_nr_iter = 20
# Rotational search step (in degrees)
class2d_angle_step = 6
# Offset search range (in pixels)
class2d_offset_range = 5
# Offset search step (in pixels)
class2d_offset_step = 1
# Option to ignore the CTFs until their first peak (try this if all particles go into very few classes)
class2d_ctf_ign1stpeak = False
# Additional arguments to pass to relion-refine
class2d_other_args = ''
### 3D classification parameters
# Number of iterations to perform in 3D classification
class3d_nr_iter = 20
# Reference mask
class3d_reference_mask = ''
# Option to ignore the CTFs until their first peak (try this if all particles go into very few classes)
class3d_ctf_ign1stpeak = False
# Regularisation parameter (T)
class3d_T_value = 4
# Angular sampling step
class3d_angle_step = '7.5 degrees'
# Offset search range (in pixels)
class3d_offset_range = 5
# Offset search step (in pixels)
class3d_offset_step = 1
# Additional arguments to pass to relion-refine
class3d_other_args = ''
## SGD initial model generation
# Number of models to generate simultaneously (K>1 may be useful for getting rid of outliers in the particle images)
inimodel_nr_classes = 4
# Ignore CTFs until first peak?
inimodel_ctf_ign1stpeak = False
# Enforce non-negative solvent?
inimodel_solvent_flatten = True
# Initial angular sampling
inimodel_angle_step = '15 degrees'
# Initial search range (in pixels)
inimodel_offset_range = 6
# Initial offset search step (in pixels)
inimodel_offset_step = 2
# Number of initial iterations
inimodel_nr_iter_initial = 50
# Number of in-between iterations
inimodel_nr_iter_inbetween = 200
# Number of final iterations
inimodel_nr_iter_final = 50
# Frequency to write out information
inimodel_freq_writeout = 10
# Initial resolution (in A)
inimodel_resol_ini = 35
# Final resolution (in A)
inimodel_resol_final = 15
# Initial mini-batch size
inimodel_batchsize_ini = 100
# Final mini-batch size
inimodel_batchsize_final = 500
# Increased noise variance half-life (off, i.e. -1, by default; values of ~1000 have been observed to be useful in difficult cases)
inimodel_sigmafudge_halflife = -1
# Additional arguments to pass to relion_refine (skip annealing to get rid of outlier particles)
inimodel_other_args = ' --sgd_skip_anneal '
### Cluster submission settings
# Name of the queue to which to submit the job
queue_name = 'openmpi'
# Name of the command used to submit scripts to the queue
queue_submit_command = 'qsub'
# The template for your standard queue job submission script
queue_submission_template = '/public/EM/RELION/relion/bin/qsub.csh'
# Minimum number of dedicated cores that need to be requested on each node
queue_minimum_dedicated = 1
### End of options
#######################################################################
############ typically no need to change anything below this line
#######################################################################
def update_from(self, other):
"""
Update this RelionItOptions object from a dictionary.
Special values (with names like '__xxx__') are removed, allowing this
method to be given a dictionary containing the namespace from a script
run with ``runpy``.
"""
while len(other) > 0:
key, value = other.popitem()
if not (key.startswith('__') and key.endswith('__')): # exclude __name__, __builtins__ etc.
if hasattr(self, key):
setattr(self, key, value)
else:
print " RELION_IT: Unrecognised option '{}'".format(key)
def print_options(self, out_file=None):
"""
Print the current options.
This method prints the options in the same format as they are read,
allowing options to be written to a file and re-used.
Args:
out_file: A file object (optional). If supplied, options will be
written to this file, otherwise they will be printed to
sys.stdout.
Raises:
ValueError: If there is a problem printing the options.
"""
print >>out_file, "# Options file for relion_it.py"
print >>out_file
seen_start = False
option_names = [key for key in dir(self) if (not (key.startswith('__') and key.endswith('__'))
and not callable(getattr(self, key)))]
# Parse the source code for this class, and write out all comments along with option lines containing new values
for line in inspect.getsourcelines(RelionItOptions)[0]:
line = line.strip()
if not seen_start:
if line != "### General parameters":
# Ignore lines until this one
continue
seen_start = True
if line == "### End of options":
# Stop here
break
if line.startswith('#') or len(line) == 0:
# Print comments or blank lines as-is
print >>out_file, line
else:
# Assume all other lines define an option name and value. Replace with new value.
equals_index = line.find('=')
if equals_index > 0:
option_name = line[:equals_index].strip()
if option_name in option_names:
print >>out_file, '{} = {}'.format(option_name, repr(getattr(self, option_name)))
option_names.remove(option_name)
else:
# This error should not occur. If it does, there is probably a programming error.
raise ValueError("Unrecognised option name '{}'".format(option_name))
if len(option_names) > 0:
# This error should not occur. If it does, there is probably a programming error.
raise ValueError("Some options were not written to the output file: {}".format(option_names))
class RelionItGui(object):
def __init__(self, main_window, options):
self.main_window = main_window
self.options = options
# Convenience function for making file browser buttons
def new_browse_button(master, var_to_set, filetypes=(('MRC file', '*.mrc'), ('All files', '*'))):
def browse_command():
chosen_file = tkFileDialog.askopenfilename(filetypes=filetypes)
if chosen_file is not None:
# Make path relative if it's in the current directory
if chosen_file.startswith(os.getcwd()):
chosen_file = os.path.relpath(chosen_file)
var_to_set.set(chosen_file)
return tk.Button(master, text="Browse...", command=browse_command)
### Create GUI
main_frame = tk.Frame(main_window)
main_frame.pack(fill=tk.BOTH, expand=1)
left_frame = tk.Frame(main_frame)
left_frame.pack(side=tk.LEFT, anchor=tk.N, fill=tk.X, expand=1)
right_frame = tk.Frame(main_frame)
right_frame.pack(side=tk.LEFT, anchor=tk.N, fill=tk.X, expand=1)
###
expt_frame = tk.LabelFrame(left_frame, text="Experimental details", padx=5, pady=5)
expt_frame.pack(padx=5, pady=5, fill=tk.X, expand=1)
tk.Grid.columnconfigure(expt_frame, 1, weight=1)
row = 0
tk.Label(expt_frame, text="Voltage (kV):").grid(row=row, sticky=tk.W)
self.voltage_entry = tk.Entry(expt_frame)
self.voltage_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.voltage_entry.insert(0, str(options.voltage))
row += 1
tk.Label(expt_frame, text="Cs (mm):").grid(row=row, sticky=tk.W)
self.cs_entry = tk.Entry(expt_frame)
self.cs_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.cs_entry.insert(0, str(options.Cs))
row += 1
tk.Label(expt_frame, text="Phase plate?").grid(row=row, sticky=tk.W)
self.phaseplate_var = tk.IntVar()
phaseplate_button = tk.Checkbutton(expt_frame, var=self.phaseplate_var)
phaseplate_button.grid(row=row, column=1, sticky=tk.W)
if options.ctffind_do_phaseshift:
phaseplate_button.select()
row += 1
tk.Label(expt_frame, text=u"Pixel size (\u212B):").grid(row=row, sticky=tk.W)
self.angpix_var = tk.StringVar() # for data binding
self.angpix_entry = tk.Entry(expt_frame, textvariable=self.angpix_var)
self.angpix_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.angpix_entry.insert(0, str(options.angpix))
row += 1
tk.Label(expt_frame, text=u"Exposure rate (e\u207B / \u212B\u00B2 / frame):").grid(row=row, sticky=tk.W)
self.exposure_entry = tk.Entry(expt_frame)
self.exposure_entry.grid(row=row, column=1, sticky=tk.W + tk.E)
self.exposure_entry.insert(0, str(options.motioncor_doseperframe))
###
particle_frame = tk.LabelFrame(left_frame, text="Particle details", padx=5, pady=5)
particle_frame.pack(padx=5, pady=5, fill=tk.X, expand=1)
tk.Grid.columnconfigure(particle_frame, 1, weight=1)
row = 0
tk.Label(particle_frame, text=u"Longest diameter (\u212B):").grid(row=row, sticky=tk.W)
self.particle_max_diam_var = tk.StringVar() # for data binding
self.particle_max_diam_entry = tk.Entry(particle_frame, textvariable=self.particle_max_diam_var)
self.particle_max_diam_entry.grid(row=row, column=1, sticky=tk.W+tk.E, columnspan=2)
self.particle_max_diam_entry.insert(0, str(options.autopick_LoG_diam_max))
row += 1
tk.Label(particle_frame, text=u"Shortest diameter (\u212B):").grid(row=row, sticky=tk.W)
self.particle_min_diam_entry = tk.Entry(particle_frame)
self.particle_min_diam_entry.grid(row=row, column=1, sticky=tk.W+tk.E, columnspan=2)
self.particle_min_diam_entry.insert(0, str(options.autopick_LoG_diam_min))
row += 1
tk.Label(particle_frame, text="3D reference (optional):").grid(row=row, sticky=tk.W)
self.ref_3d_var = tk.StringVar() # for data binding
self.ref_3d_entry = tk.Entry(particle_frame, textvariable=self.ref_3d_var)
self.ref_3d_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.ref_3d_entry.insert(0, str(options.autopick_3dreference))
new_browse_button(particle_frame, self.ref_3d_var).grid(row=row, column=2)
row += 1
tk.Label(particle_frame, text=u"Mask diameter (\u212B):").grid(row=row, sticky=tk.W)
self.mask_diameter_var = tk.StringVar() # for data binding
self.mask_diameter_entry = tk.Entry(particle_frame, textvariable=self.mask_diameter_var)
self.mask_diameter_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.mask_diameter_entry.insert(0, str(options.mask_diameter))
self.mask_diameter_px = tk.Label(particle_frame, text="= NNN px")
self.mask_diameter_px.grid(row=row, column=2,sticky=tk.W)
row += 1
tk.Label(particle_frame, text="Box size (px):").grid(row=row, sticky=tk.W)
self.box_size_var = tk.StringVar() # for data binding
self.box_size_entry = tk.Entry(particle_frame, textvariable=self.box_size_var)
self.box_size_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.box_size_entry.insert(0, str(options.extract_boxsize))
self.box_size_in_angstrom = tk.Label(particle_frame, text=u"= NNN \u212B")
self.box_size_in_angstrom.grid(row=row, column=2,sticky=tk.W)
row += 1
tk.Label(particle_frame, text="Down-sample to (px):").grid(row=row, sticky=tk.W)
self.extract_small_boxsize_var = tk.StringVar() # for data binding
self.extract_small_boxsize_entry = tk.Entry(particle_frame, textvariable=self.extract_small_boxsize_var)
self.extract_small_boxsize_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.extract_small_boxsize_entry.insert(0, str(options.extract_small_boxsize))
self.extract_angpix = tk.Label(particle_frame, text=u"= NNN \u212B/px")
self.extract_angpix.grid(row=row, column=2,sticky=tk.W)
row += 1
tk.Label(particle_frame, text="Calculate for me:").grid(row=row, sticky=tk.W)
self.auto_boxsize_var = tk.IntVar()
auto_boxsize_button = tk.Checkbutton(particle_frame, var=self.auto_boxsize_var)
auto_boxsize_button.grid(row=row, column=1, sticky=tk.W)
auto_boxsize_button.select()
###
project_frame = tk.LabelFrame(right_frame, text="Project details", padx=5, pady=5)
project_frame.pack(padx=5, pady=5, fill=tk.X, expand=1)
tk.Grid.columnconfigure(project_frame, 1, weight=1)
row = 0
tk.Label(project_frame, text="Project directory:").grid(row=row, sticky=tk.W)
tk.Label(project_frame, text=os.getcwd(), anchor=tk.W).grid(row=row, column=1, sticky=tk.W, columnspan=2)
row += 1
tk.Label(project_frame, text="Pattern for movies:").grid(row=row, sticky=tk.W)
self.import_images_var = tk.StringVar() # for data binding
self.import_images_entry = tk.Entry(project_frame, textvariable=self.import_images_var)
self.import_images_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.import_images_entry.insert(0, self.options.import_images)
import_button = new_browse_button(project_frame, self.import_images_var,
filetypes=(('Image file', '{*.mrc, *.mrcs, *.tif, *.tiff}'), ('All files', '*')))
import_button.grid(row=row, column=2)
row += 1
tk.Label(project_frame, text="Gain reference (optional):").grid(row=row, sticky=tk.W)
self.gainref_var = tk.StringVar() # for data binding
self.gainref_entry = tk.Entry(project_frame, textvariable=self.gainref_var)
self.gainref_entry.grid(row=row, column=1, sticky=tk.W+tk.E)
self.gainref_entry.insert(0, self.options.motioncor_gainreference)
new_browse_button(project_frame, self.gainref_var).grid(row=row, column=2)
###
pipeline_frame = tk.LabelFrame(right_frame, text="Pipeline control", padx=5, pady=5)
pipeline_frame.pack(padx=5, pady=5, fill=tk.X, expand=1)
tk.Grid.columnconfigure(pipeline_frame, 1, weight=1)
row = 0
tk.Label(pipeline_frame, text="Stop after CTF estimation?").grid(row=row, sticky=tk.W)
self.stop_after_ctf_var = tk.IntVar()
stop_after_ctf_button = tk.Checkbutton(pipeline_frame, var=self.stop_after_ctf_var)
stop_after_ctf_button.grid(row=row, column=1, sticky=tk.W)
if options.stop_after_ctf_estimation:
stop_after_ctf_button.select()
row += 1
tk.Label(pipeline_frame, text="Do 2D classification?").grid(row=row, sticky=tk.W)
self.class2d_var = tk.IntVar()
class2d_button = tk.Checkbutton(pipeline_frame, var=self.class2d_var)
class2d_button.grid(row=row, column=1, sticky=tk.W)
if options.do_class2d:
class2d_button.select()
row += 1
tk.Label(pipeline_frame, text="Do 3D classification?").grid(row=row, sticky=tk.W)
self.class3d_var = tk.IntVar()
class3d_button = tk.Checkbutton(pipeline_frame, var=self.class3d_var)
class3d_button.grid(row=row, column=1, sticky=tk.W)
if options.do_class3d:
class3d_button.select()
row += 1
tk.Label(pipeline_frame, text="Do second pass? (only if no 3D ref)").grid(row=row, sticky=tk.W)
self.second_pass_var = tk.IntVar()
second_pass_button = tk.Checkbutton(pipeline_frame, var=self.second_pass_var)
second_pass_button.grid(row=row, column=1, sticky=tk.W)
if options.do_second_pass:
second_pass_button.select()
row += 1
tk.Label(pipeline_frame, text="Do 2D classification (2nd pass)?").grid(row=row, sticky=tk.W)
self.class2d_pass2_var = tk.IntVar()
class2d_pass2_button = tk.Checkbutton(pipeline_frame, var=self.class2d_pass2_var)
class2d_pass2_button.grid(row=row, column=1, sticky=tk.W)
if options.do_class2d_pass2:
class2d_pass2_button.select()
row += 1
tk.Label(pipeline_frame, text="Do 3D classification (2nd pass)?").grid(row=row, sticky=tk.W)
self.class3d_pass2_var = tk.IntVar()
class3d_pass2_button = tk.Checkbutton(pipeline_frame, var=self.class3d_pass2_var)
class3d_pass2_button.grid(row=row, column=1, sticky=tk.W)
if options.do_class3d_pass2:
class3d_pass2_button.select()
### Add logic to the box size boxes
def calculate_box_size(particle_size_pixels):
# Use box 20% larger than particle and ensure size is even
box_size_exact = 1.2 * particle_size_pixels
box_size_int = int(math.ceil(box_size_exact))
return box_size_int + box_size_int % 2
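# Worked example (illustrative): a 150 px particle gives a box of 180 px (1.2 * 150 = 180, already
# even); a 151 px particle gives 182 px (1.2 * 151 = 181.2, rounded up to the next even integer).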
def calculate_downscaled_box_size(box_size_pix, angpix):
for small_box_pix in (48, 64, 96, 128, 160, 192, 256, 288, 300, 320, 360,
384, 400, 420, 450, 480, 512, 640, 768, 896, 1024):
# Don't go larger than the original box
if small_box_pix > box_size_pix:
return box_size_pix
# If Nyquist freq. is better than 8.5 A, use this downscaled box, otherwise continue to next size up
small_box_angpix = angpix * box_size_pix / small_box_pix
if small_box_angpix < 4.25:
return small_box_pix
# Fall back to a warning message
return "Box size is too large!"
def update_box_size_labels(*args_ignored, **kwargs_ignored):
try:
angpix = float(self.angpix_entry.get())
except ValueError:
# Can't update any of the labels without angpix
self.mask_diameter_px.config(text="= NNN px")
self.box_size_in_angstrom.config(text=u"= NNN \u212B")
self.extract_angpix.config(text=u"= NNN \u212B/px")
return
try:
mask_diameter = float(self.mask_diameter_entry.get())
mask_diameter_px = mask_diameter / angpix
self.mask_diameter_px.config(text="= {:.1f} px".format(mask_diameter_px))
except (ValueError, ZeroDivisionError):
self.mask_diameter_px.config(text="= NNN px")
# Don't return - an error here doesn't stop us calculating the other labels
try:
box_size = float(self.box_size_entry.get())
box_angpix = angpix * box_size
self.box_size_in_angstrom.config(text=u"= {:.1f} \u212B".format(box_angpix))
except ValueError:
# Can't update these without the box size
self.box_size_in_angstrom.config(text=u"= NNN \u212B")
self.extract_angpix.config(text=u"= NNN \u212B/px")
return
try:
extract_small_boxsize = float(self.extract_small_boxsize_entry.get())
small_box_angpix = box_angpix / extract_small_boxsize
self.extract_angpix.config(text=u"= {:.3f} \u212B/px".format(small_box_angpix))
except (ValueError, ZeroDivisionError):
# Can't update the downscaled pixel size unless the downscaled box size is valid
self.extract_angpix.config(text=u"= NNN \u212B/px")
def update_box_sizes(*args_ignored, **kwargs_ignored):
# Always activate entry boxes - either we're activating them anyway, or we need to edit the text.
# For text editing we need to activate the box first then deactivate again afterwards.
self.mask_diameter_entry.config(state=tk.NORMAL)
self.box_size_entry.config(state=tk.NORMAL)
self.extract_small_boxsize_entry.config(state=tk.NORMAL)
if self.get_var_as_bool(self.auto_boxsize_var):
try:
particle_size_angstroms = float(self.particle_max_diam_entry.get())
mask_diameter = 1.1 * particle_size_angstroms
self.mask_diameter_entry.delete(0, tk.END)
self.mask_diameter_entry.insert(0, str(mask_diameter))
angpix = float(self.angpix_entry.get())
particle_size_pixels = particle_size_angstroms / angpix
box_size = calculate_box_size(particle_size_pixels)
self.box_size_entry.delete(0, tk.END)
self.box_size_entry.insert(0, str(box_size))
small_boxsize = calculate_downscaled_box_size(int(box_size), angpix)
self.extract_small_boxsize_entry.delete(0, tk.END)
self.extract_small_boxsize_entry.insert(0, str(small_boxsize))
except:
# Ignore errors - they will be picked up if the user tries to save the options
pass
self.mask_diameter_entry.config(state=tk.DISABLED)
self.box_size_entry.config(state=tk.DISABLED)
self.extract_small_boxsize_entry.config(state=tk.DISABLED)
update_box_size_labels()
self.box_size_var.trace('w', update_box_size_labels)
self.extract_small_boxsize_var.trace('w', update_box_size_labels)
self.angpix_var.trace('w', update_box_sizes)
self.particle_max_diam_var.trace('w', update_box_sizes)
auto_boxsize_button.config(command=update_box_sizes)
### Add logic to the check boxes
def update_pipeline_control_state(*args_ignored, **kwargs_ignored):
new_state = tk.DISABLED if self.stop_after_ctf_var.get() else tk.NORMAL
class2d_button.config(state=new_state)
class3d_button.config(state=new_state)
self.particle_max_diam_entry.config(state=new_state)
self.particle_min_diam_entry.config(state=new_state)
self.ref_3d_entry.config(state=new_state)
# Update the box size controls with care to avoid activating them when we shouldn't
auto_boxsize_button.config(state=new_state)
if new_state == tk.DISABLED:
self.mask_diameter_entry.config(state=new_state)
self.box_size_entry.config(state=new_state)
self.extract_small_boxsize_entry.config(state=new_state)
else:
update_box_sizes()
can_do_second_pass = (self.class3d_var.get()
and len(self.ref_3d_var.get()) == 0
and not self.stop_after_ctf_var.get())
second_pass_button.config(state=tk.NORMAL if can_do_second_pass else tk.DISABLED)
will_do_second_pass = can_do_second_pass and self.second_pass_var.get()
class2d_pass2_button.config(state=tk.NORMAL if will_do_second_pass else tk.DISABLED)
class3d_pass2_button.config(state=tk.NORMAL if will_do_second_pass else tk.DISABLED)
stop_after_ctf_button.config(command=update_pipeline_control_state)
class3d_button.config(command=update_pipeline_control_state)
second_pass_button.config(command=update_pipeline_control_state)
self.ref_3d_var.trace('w', update_pipeline_control_state)
###
button_frame = tk.Frame(right_frame)
button_frame.pack(padx=5, pady=5, fill=tk.X, expand=1)
self.run_button = tk.Button(button_frame, text="Save & run", command=self.run_pipeline)
self.run_button.pack(padx=5, pady=5, side=tk.RIGHT)
self.save_button = tk.Button(button_frame, text="Save options", command=self.save_options)
self.save_button.pack(padx=5, pady=5, side=tk.RIGHT)
# Show initial pixel sizes
update_box_sizes()
def get_var_as_bool(self, var):
"""Helper function to convert a Tk IntVar (linked to a checkbox) to a boolean value"""
return True if var.get() == 1 else False
def fetch_options_from_gui(self):
"""
Fetch the current values from the GUI widgets and store them in the options object.
Returns:
A list of warning messages about possible incorrect option values.
Raises:
ValueError: If an option value is invalid.
"""
opts = self.options
warnings = []
opts.stop_after_ctf_estimation = self.get_var_as_bool(self.stop_after_ctf_var)
opts.do_class2d = self.get_var_as_bool(self.class2d_var)
opts.do_class3d = self.get_var_as_bool(self.class3d_var)
opts.do_second_pass = self.get_var_as_bool(self.second_pass_var)
opts.do_class2d_pass2 = self.get_var_as_bool(self.class2d_pass2_var)
opts.do_class3d_pass2 = self.get_var_as_bool(self.class3d_pass2_var)
try:
opts.voltage = float(self.voltage_entry.get())
except ValueError:
raise ValueError("Voltage must be a number")
if opts.voltage <= 0.0:
warnings.append("- Voltage should be a positive number")
try:
opts.Cs = float(self.cs_entry.get())
except ValueError:
raise ValueError("Cs must be a number")
opts.ctffind_do_phaseshift = self.get_var_as_bool(self.phaseplate_var)
try:
opts.angpix = float(self.angpix_entry.get())
except ValueError:
raise ValueError("Pixel size must be a number")
if opts.angpix <= 0.0:
warnings.append("- Pixel size should be a positive number")
try:
opts.motioncor_doseperframe = float(self.exposure_entry.get())
except ValueError:
raise ValueError("Exposure rate must be a number")
if opts.motioncor_doseperframe <= 0.0:
warnings.append("- Exposure rate should be a positive number")
try:
opts.autopick_LoG_diam_max = float(self.particle_max_diam_entry.get())
except ValueError:
if len(self.particle_max_diam_entry.get()) == 0 and opts.stop_after_ctf_estimation:
# This was left blank and won't be used, set to zero to avoid errors in calculations later
opts.autopick_LoG_diam_max = 0.0
else:
raise ValueError("Particle longest diameter must be a number")
try:
opts.autopick_LoG_diam_min = float(self.particle_min_diam_entry.get())
except ValueError:
if len(self.particle_min_diam_entry.get()) == 0 and opts.stop_after_ctf_estimation:
# This was left blank and won't be used, set to zero to avoid errors in calculations later
opts.autopick_LoG_diam_min = 0.0
else:
raise ValueError("Particle shortest diameter must be a number")
opts.autopick_3dreference = self.ref_3d_entry.get()
if len(opts.autopick_3dreference) > 0 and not os.path.isfile(opts.autopick_3dreference):
warnings.append("- 3D reference file '{}' does not exist".format(opts.autopick_3dreference))
try:
opts.mask_diameter = float(self.mask_diameter_entry.get())
except ValueError:
raise ValueError("Mask diameter must be a number")
if opts.mask_diameter <= 0:
warnings.append("- Mask diameter should be a positive number")
try:
opts.extract_boxsize = int(self.box_size_entry.get())
except ValueError:
raise ValueError("Box size must be a number")
if opts.extract_boxsize <= 0:
warnings.append("- Box size should be a positive number")
try:
opts.extract_small_boxsize = int(self.extract_small_boxsize_entry.get())
opts.extract2_small_boxsize = opts.extract_small_boxsize
opts.extract_downscale = True
opts.extract2_downscale = True
except ValueError:
raise ValueError("Down-sampled box size must be a number")
if opts.extract_small_boxsize <= 0:
warnings.append("- Down-sampled box size should be a positive number")
opts.import_images = self.import_images_entry.get()
if opts.import_images.startswith(('/', '..')):
warnings.append("- Movies should be located inside the project directory")
if '*' not in opts.import_images:
warnings.append("- Pattern for input movies should normally contain a '*' to select more than one file")
opts.motioncor_gainreference = self.gainref_entry.get()
if len(opts.motioncor_gainreference) > 0 and not os.path.isfile(opts.motioncor_gainreference):
warnings.append("- Gain reference file '{}' does not exist".format(opts.motioncor_gainreference))
return warnings
def calculate_full_options(self):
"""
Update the options from the values that have been fetched from the GUI.
This method uses the values that the user has set in the GUI to calculate a number of other options for the
script.
"""
opts = self.options
# If we have a 3D reference, do a single pass with a large batch size
if len(opts.autopick_3dreference) > 0:
opts.autopick_do_LoG = False
opts.autopick_refs_min_distance = opts.autopick_LoG_diam_max * 0.7
opts.class3d_reference = opts.autopick_3dreference
opts.do_second_pass = False
else:
# No 3D reference - do LoG autopicking in the first pass
opts.autopick_do_LoG = True
opts.class3d_reference = ''
# Now set a sensible batch size (leaving batch_size_pass2 at its default 100,000)
if opts.do_second_pass:
opts.batch_size = 10000
else:
opts.batch_size = 100000
def save_options(self):
"""
Update the full set of options from the values in the GUI, and save them to a file.
Returns:
True if the options were valid and saved successfully, otherwise False.
"""
try:
warnings = self.fetch_options_from_gui()
if len(warnings) == 0 or tkMessageBox.askokcancel("Warning", "\n".join(warnings), icon='warning',
default=tkMessageBox.CANCEL):
self.calculate_full_options()
print " RELION_IT: Writing all options to {}".format(OPTIONS_FILE)
if os.path.isfile(OPTIONS_FILE):
print " RELION_IT: File {0} already exists; renaming old copy to {0}~".format(OPTIONS_FILE)
os.rename(OPTIONS_FILE, OPTIONS_FILE + '~')
with open(OPTIONS_FILE, 'w') as optfile:
self.options.print_options(optfile)
return True
except Exception as ex:
tkMessageBox.showerror("Error", ex.message)
traceback.print_exc()
return False
def run_pipeline(self):
"""
Update the full set of options from the values in the GUI, close the GUI and run the pipeline.
"""
if self.save_options():
self.main_window.destroy()
run_pipeline(self.options)
def safe_load_star(filename, max_try=5, wait=10, expected=[]):
for _ in xrange(max_try):
try:
star = load_star(filename)
entry = star
# make sure the expected key is present
for key in expected:
entry = entry[key]
return star
except:
print "safe_load_star is retrying to read: ", filename, ", expected key:", expected
import time
time.sleep(wait)
assert False, "Failed to read a star file: " + filename
def load_star(filename):
from collections import OrderedDict
datasets = OrderedDict()
current_data = None
current_colnames = None
in_loop = 0 # 0: outside 1: reading colnames 2: reading data
for line in open(filename):
line = line.strip()
# remove comments
comment_pos = line.find('#')
if comment_pos > 0:
line = line[:comment_pos]
if line == "":
continue
if line.startswith("data_"):
in_loop = 0
data_name = line[5:]
current_data = OrderedDict()
datasets[data_name] = current_data
elif line.startswith("loop_"):
current_colnames = []
in_loop = 1
elif line.startswith("_"):
if in_loop == 2:
in_loop = 0
elems = line[1:].split()
if in_loop == 1:
current_colnames.append(elems[0])
current_data[elems[0]] = []
else:
current_data[elems[0]] = elems[1]
elif in_loop > 0:
in_loop = 2
elems = line.split()
assert len(elems) == len(current_colnames), ("Error in STAR file {}, number of elements in {} does not match number of column names {}"
.format(filename, elems, current_colnames))
for idx, e in enumerate(elems):
current_data[current_colnames[idx]].append(e)
return datasets
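# Purely illustrative sketch (not executed anywhere): the value returned by load_star() is an
# OrderedDict of data blocks, each mapping column names to lists (for loop_ blocks) or to single
# values (for simple key-value blocks). For example, the pipeline STAR file can be read as:
#   pipeline = load_star(PIPELINE_STAR)
#   job_names = pipeline['pipeline_processes']['rlnPipeLineProcessName']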
# Don't get stuck in infinite while True loops....
def CheckForExit():
if not os.path.isfile(RUNNING_FILE):
print " RELION_IT:", RUNNING_FILE, "file no longer exists, exiting now ..."
exit(0)
# Allow progressing directly to the second pass
def getSecondPassReference():
if os.path.isfile(SECONDPASS_REF3D_FILE):
with open(SECONDPASS_REF3D_FILE, 'r') as myfile:
filename, angpix = myfile.readlines()
else:
filename = ''
angpix = '0'
return filename.replace('\n',''), angpix.replace('\n','')
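# For reference, RELION_IT_2NDPASS_3DREF is expected to contain two lines: the reference filename
# followed by its pixel size. The values below are purely illustrative:
#   InitialModel/job015/run_it300_class001.mrc
#   1.77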
def getJobName(name_in_script, done_file):
jobname = None
# See if we've done this job before, i.e. whether it is in the done_file
if (os.path.isfile(done_file)):
f = open(done_file,'r')
for line in f:
elems = line.split()
if len(elems) < 3: continue
if elems[0] == name_in_script:
jobname = elems[2]
break
f.close()
return jobname
def addJob(jobtype, name_in_script, done_file, options, alias=None):
jobname = getJobName(name_in_script, done_file)
# If we hadn't done it before, add it now
if (jobname is not None):
already_had_it = True
else:
already_had_it = False
optionstring = ''
for opt in options[:]:
optionstring += opt + ';'
command = 'relion_pipeliner --addJob ' + jobtype + ' --addJobOptions "' + optionstring + '"'
if alias is not None:
command += ' --setJobAlias "' + alias + '"'
os.system(command)
pipeline = safe_load_star(PIPELINE_STAR, expected=['pipeline_processes', 'rlnPipeLineProcessName'])
jobname = pipeline['pipeline_processes']['rlnPipeLineProcessName'][-1]
# Now add the jobname to the done_file
f = open(done_file,'a')
f.write(name_in_script + ' = ' + jobname + '\n')
f.close()
# return the name of the job in the RELION pipeline, e.g. 'Import/job001/'
return jobname, already_had_it
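# For reference, the SETUP_CHECK_FILE (RELION_IT_SUBMITTED_JOBS) written here is a plain text file
# with one 'name_in_script = jobname' entry per line; the entries below are purely illustrative:
#   import_job = Import/job001/
#   motioncorr_job = MotionCorr/job002/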
def RunJobs(jobs, repeat, wait, schedulename):
runjobsstring = ''
for job in jobs[:]:
runjobsstring += job + ' '
command = 'relion_pipeliner --schedule ' + schedulename + ' --repeat ' + str(repeat) + ' --min_wait ' + str(wait) + ' --RunJobs "' + runjobsstring + '" &'
os.system(command)
def WaitForJob(wait_for_this_job, seconds_wait):
time.sleep(seconds_wait)
print " RELION_IT: waiting for job to finish in", wait_for_this_job
while True:
pipeline = safe_load_star(PIPELINE_STAR, expected=['pipeline_processes', 'rlnPipeLineProcessName'])
myjobnr = -1
for jobnr in range(0,len(pipeline['pipeline_processes']['rlnPipeLineProcessName'])):
jobname = pipeline['pipeline_processes']['rlnPipeLineProcessName'][jobnr]
if jobname == wait_for_this_job:
myjobnr = jobnr
if myjobnr < 0:
print " ERROR: cannot find ", wait_for_this_job, " in ", PIPELINE_STAR
exit(1)
status = int(pipeline['pipeline_processes']['rlnPipeLineProcessStatus'][myjobnr])
if status == 2:
print " RELION_IT: job in", wait_for_this_job, "has finished now"
return
else:
CheckForExit()
time.sleep(seconds_wait)
def find_split_job_output(prefix, n, max_digits=6):
import os.path
for i in xrange(max_digits):
filename = prefix + str(n).rjust(i, '0') + '.star'
if os.path.isfile(filename):
return filename
return None
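# Purely illustrative: for prefix 'Select/job012/particles_split' and n=3, this tries
# 'Select/job012/particles_split3.star', then '...split03.star', '...split003.star', and so on,
# returning the first file that exists (or None if none of them do).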
def writeManualPickingGuiFile(my_part_diam):
if not os.path.isfile('.gui_manualpickrun.job'):
with open('.gui_manualpickrun.job', 'w') as g:
g.write("""job_type == 3
Pixel size (A) == -1
Black value: == 0
Blue value: == 0
MetaDataLabel for color: == rlnParticleSelectZScore
Scale for CTF image: == 1
Particle diameter (A): == {}
Blue<>red color particles? == No
Highpass filter (A) == -1
Lowpass filter (A) == 20
Scale for micrographs: == 0.2
Red value: == 2
Sigma contrast: == 3
White value: == 0
""".format(my_part_diam))
return
def findBestClass(model_star_file, use_resol=True):
model_star = safe_load_star(model_star_file)
best_resol = 999
best_size = 0
best_class = 0
for iclass in range(0, len(model_star['model_classes']['rlnReferenceImage'])):
mysize = float(model_star['model_classes']['rlnClassDistribution'][iclass])
myresol = float(model_star['model_classes']['rlnEstimatedResolution'][iclass])
if (not use_resol and (mysize > best_size or (mysize == best_size and myresol < best_resol))) \
or (use_resol and (myresol < best_resol or (myresol == best_resol and mysize > best_size))):
best_size = mysize
best_class = model_star['model_classes']['rlnReferenceImage'][iclass]
best_resol = myresol
print " RELION_IT: found best class:",best_class,"with class size of",best_size,"and resolution of",best_resol
return best_class, best_resol, model_star['model_general']['rlnPixelSize']
def findOutputModelStar(job_dir):
found = None
try:
job_star = safe_load_star(job_dir + "job_pipeline.star", expected=['pipeline_output_edges', 'rlnPipeLineEdgeToNode'])
for output_file in job_star["pipeline_output_edges"]['rlnPipeLineEdgeToNode']:
if output_file.endswith("_model.star"):
found = output_file
break
except:
pass
return found
def run_pipeline(opts):
"""
Configure and run the RELION 3 pipeline with the given options.
Args:
opts: options for the pipeline, as a RelionItOptions object.
"""
# Is this really necessary? Probably not...
if (os.path.isfile(PIPELINE_STAR) == False):
g = open(PIPELINE_STAR,'w')
g.write('data_pipeline_general\n')
g.write('_rlnPipeLineJobCounter 1\n')
g.close()
# Write RUNNING_RELION_IT file, when deleted, this script will stop
with open(RUNNING_FILE, 'w'):
pass
# Write mainGUI project file, so GUI won't ask to set up a project
with open('.gui_projectdir', 'w'):
pass
#### Set up GUI file for Manualpick job to allow easy viewing of autopick results
if opts.autopick_do_LoG:
my_part_diam = opts.autopick_LoG_diam_min
else:
my_part_diam = opts.autopick_refs_min_distance
writeManualPickingGuiFile(my_part_diam)
### Prepare the list of queue arguments for later use
queue_options = ['Submit to queue? == Yes',
'Queue name: == {}'.format(opts.queue_name),
'Queue submit command: == {}'.format(opts.queue_submit_command),
'Standard submission script: == {}'.format(opts.queue_submission_template),
'Minimum dedicated cores per node: == {}'.format(opts.queue_minimum_dedicated)]
# If we're only doing motioncorr and ctf estimation, then forget about the second pass and the batch processing
if opts.stop_after_ctf_estimation:
opts.do_class2d = False
opts.do_class3d = False
opts.do_second_pass = False
if opts.do_second_pass:
nr_passes = 2
else:
nr_passes = 1
# if SECONDPASS_REF3D_FILE exists, go straight into the second pass
first_pass = 0
if opts.do_second_pass:
secondpass_ref3d, secondpass_ref3d_angpix = getSecondPassReference()
if not secondpass_ref3d == '':
print ' RELION_IT: found', secondpass_ref3d,'with angpix=',secondpass_ref3d_angpix,'as a 3D reference for second pass in file',SECONDPASS_REF3D_FILE
print ' RELION_IT: if the automatic selection of the reference turned out to be unsatisfactory,'
print ' RELION_IT: you can re-run the second pass with another reference by:'
print ' RELION_IT: stopping the pipeline by deleting RUNNING_*'
print ' RELION_IT: updating the reference filename in',SECONDPASS_REF3D_FILE
print ' RELION_IT: deleting the relevant jobs (autopick2_job and those that follow it) from',SETUP_CHECK_FILE
print ' RELION_IT: and restarting the pipeline.'
first_pass = 1
opts.autopick_3dreference = secondpass_ref3d
opts.autopick_ref_angpix = secondpass_ref3d_angpix
opts.autopick_2dreferences = ''
opts.autopick_do_LoG = False
opts.class3d_reference = secondpass_ref3d
opts.have_3d_reference = True
# Allow performing two passes through the entire pipeline (PREPROCESS and CLASS2D/3D batches)
# In the second pass, a 3D reference generated in the first pass is used for template-based autopicking
for ipass in range(first_pass, nr_passes):
#### Set up the Import job
import_options = ['Input files: == {}'.format(opts.import_images)]
if opts.images_are_movies:
import_options.append('Node type: == 2D micrograph movies (*.mrcs)')
else:
import_options.append('Node type: == 2D micrographs/tomograms (*.mrc)')
import_job, already_had_it = addJob('Import','import_job', SETUP_CHECK_FILE, import_options)
if opts.images_are_movies:
#### Set up the MotionCor job
motioncorr_options = ['Input movies STAR file: == {}movies.star'.format(import_job),
'MOTIONCOR2 executable: == {}'.format(opts.motioncor_exe),
'Defect file: == {}'.format(opts.motioncor_defectfile),
'Gain-reference image: == {}'.format(opts.motioncor_gainreference),
'Gain flip: == {}'.format(opts.motioncor_gainflip),
'Gain rotation: == {}'.format(opts.motioncor_gainrot),
'Do dose-weighting? == Yes',
'Voltage (kV): == {}'.format(opts.voltage),
'Dose per frame (e/A2): == {}'.format(opts.motioncor_doseperframe),
'Pixel size (A): == {}'.format(opts.angpix),
'Number of patches X: == {}'.format(opts.motioncor_patches_x),
'Number of patches Y: == {}'.format(opts.motioncor_patches_y),
'Bfactor: == {}'.format(opts.motioncor_bfactor),
'Binning factor: == {}'.format(opts.motioncor_binning),
'Which GPUs to use: == {}'.format(opts.motioncor_gpu),
'Other MOTIONCOR2 arguments == {}'.format(opts.motioncor_other_args),
'Number of threads: == {}'.format(opts.motioncor_threads),
'Number of MPI procs: == {}'.format(opts.motioncor_mpi)]
if (opts.motioncor_do_own):
motioncorr_options.append('Use RELION\'s own implementation? == Yes')
else:
motioncorr_options.append('Use RELION\'s own implementation? == No')
if opts.motioncor_submit_to_queue:
motioncorr_options.extend(queue_options)
motioncorr_job, already_had_it = addJob('MotionCorr', 'motioncorr_job', SETUP_CHECK_FILE, motioncorr_options)
#### Set up the CtfFind job
ctffind_options = ['Voltage (kV): == {}'.format(opts.voltage),
'Spherical aberration (mm): == {}'.format(opts.Cs),
'Amplitude contrast: == {}'.format(opts.ampl_contrast),
'Amount of astigmatism (A): == {}'.format(opts.ctffind_astigmatism),
'FFT box size (pix): == {}'.format(opts.ctffind_boxsize),
'Maximum defocus value (A): == {}'.format(opts.ctffind_defocus_max),
'Minimum defocus value (A): == {}'.format(opts.ctffind_defocus_min),
'Defocus step size (A): == {}'.format(opts.ctffind_defocus_step),
'Magnified pixel size (Angstrom): == {}'.format(opts.angpix * opts.motioncor_binning),
'Maximum resolution (A): == {}'.format(opts.ctffind_maxres),
'Minimum resolution (A): == {}'.format(opts.ctffind_minres),
'Gctf executable: == {}'.format(opts.gctf_exe),
'Which GPUs to use: == {}'.format(opts.gctf_gpu),
'CTFFIND-4.1 executable: == {}'.format(opts.ctffind4_exe),
'Number of MPI procs: == {}'.format(opts.ctffind_mpi)]
if opts.images_are_movies:
ctffind_options.append('Input micrographs STAR file: == {}{}'.format(motioncorr_job, 'corrected_micrographs.star'))
else:
ctffind_options.append('Input micrographs STAR file: == {}{}'.format(import_job, 'micrographs.star'))
if opts.use_ctffind_instead:
ctffind_options.append('Use CTFFIND-4.1? == Yes')
ctffind_options.append('Use Gctf instead? == No')
else:
ctffind_options.append('Use CTFFIND-4.1? == No')
ctffind_options.append('Use Gctf instead? == Yes')
if (opts.ctffind_do_ignore_search_params):
ctffind_options.append('Ignore \'Searches\' parameters? == Yes')
else:
ctffind_options.append('Ignore \'Searches\' parameters? == No')
if (opts.ctffind_do_EPA):
ctffind_options.append('Perform equi-phase averaging? == Yes')
else:
ctffind_options.append('Perform equi-phase averaging? == No')
if opts.ctffind_do_phaseshift:
ctffind_options.append('Estimate phase shifts? == Yes')
else:
ctffind_options.append('Estimate phase shifts? == No')
if opts.ctffind_submit_to_queue:
ctffind_options.extend(queue_options)
ctffind_job, already_had_it = addJob('CtfFind', 'ctffind_job', SETUP_CHECK_FILE, ctffind_options)
runjobs = [import_job]
if opts.images_are_movies:
runjobs.append(motioncorr_job)
runjobs.append(ctffind_job)
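# runjobs collects the on-the-fly preprocessing jobs (Import, optionally MotionCorr, CtfFind, and
# below AutoPick/Extract/Select). The RunJobs() call further down schedules them as a single
# repeating pipeliner, so new movies are picked up automatically as they arrive.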
# There is an option to stop on-the-fly processing after CTF estimation
if not opts.stop_after_ctf_estimation:
autopick_options = ['Input micrographs for autopick: == {}micrographs_ctf.star'.format(ctffind_job),
'Min. diameter for LoG filter (A) == {}'.format(opts.autopick_LoG_diam_min),
'Max. diameter for LoG filter (A) == {}'.format(opts.autopick_LoG_diam_max),
'Maximum resolution to consider (A) == {}'.format(opts.autopick_lowpass),
'Adjust default threshold == {}'.format(opts.autopick_LoG_adjust_threshold),
'2D references: == {}'.format(opts.autopick_2dreferences),
'3D reference: == {}'.format(opts.autopick_3dreference),
'Symmetry: == {}'.format(opts.autopick_3dref_symmetry),
'Pixel size in references (A) == {}'.format(opts.autopick_ref_angpix),
'3D angular sampling: == {}'.format(opts.autopick_3dref_sampling),
'In-plane angular sampling (deg) == {}'.format(opts.autopick_inplane_sampling),
'Picking threshold: == {}'.format(opts.autopick_refs_threshold),
'Minimum inter-particle distance (A): == {}'.format(opts.autopick_refs_min_distance),
'Mask diameter (A) == {}'.format(opts.autopick_refs_mask_diam),
'Maximum stddev noise: == {}'.format(opts.autopick_stddev_noise),
'Minimum avg noise: == {}'.format(opts.autopick_avg_noise),
'Shrink factor: == {}'.format(opts.autopick_shrink_factor),
'Which GPUs to use: == {}'.format(opts.autopick_gpu),
'Additional arguments: == {}'.format(opts.autopick_other_args),
'Number of MPI procs: == {}'.format(opts.autopick_mpi)]
if not opts.autopick_3dreference == '':
autopick_options.append('OR: provide a 3D reference? == Yes')
else:
autopick_options.append('OR: provide a 3D reference? == No')
if opts.autopick_do_LoG:
autopick_options.append('OR: use Laplacian-of-Gaussian? == Yes')
else:
autopick_options.append('OR: use Laplacian-of-Gaussian? == No')
if opts.autopick_refs_are_ctf_corrected:
autopick_options.append('Are References CTF corrected? == Yes')
else:
autopick_options.append('Are References CTF corrected? == No')
if opts.autopick_refs_have_inverted_contrast:
autopick_options.append('References have inverted contrast? == Yes')
else:
autopick_options.append('References have inverted contrast? == No')
if opts.autopick_refs_ignore_ctf1stpeak:
autopick_options.append('Ignore CTFs until first peak? == Yes')
else:
autopick_options.append('Ignore CTFs until first peak? == No')
if opts.autopick_do_gpu and (not opts.autopick_do_LoG):
autopick_options.append('Use GPU acceleration? == Yes')
else:
autopick_options.append('Use GPU acceleration? == No')
if opts.autopick_submit_to_queue:
autopick_options.extend(queue_options)
if ipass == 0:
autopick_job_name = 'autopick_job'
autopick_alias = 'pass 1'
else:
autopick_job_name = 'autopick2_job'
autopick_alias = 'pass 2'
autopick_job, already_had_it = addJob('AutoPick', autopick_job_name, SETUP_CHECK_FILE, autopick_options, alias=autopick_alias)
runjobs.append(autopick_job)
#### Set up the Extract job
extract_options = ['Input coordinates: == {}coords_suffix_autopick.star'.format(autopick_job),
'micrograph STAR file: == {}micrographs_ctf.star'.format(ctffind_job),
'Diameter background circle (pix): == {}'.format(opts.extract_bg_diameter),
'Particle box size (pix): == {}'.format(opts.extract_boxsize),
'Number of MPI procs: == {}'.format(opts.extract_mpi)]
if ipass == 0:
if opts.extract_downscale:
extract_options.append('Rescale particles? == Yes')
extract_options.append('Re-scaled size (pixels): == {}'.format(opts.extract_small_boxsize))
else:
if opts.extract2_downscale:
extract_options.append('Rescale particles? == Yes')
extract_options.append('Re-scaled size (pixels): == {}'.format(opts.extract2_small_boxsize))
if opts.extract_submit_to_queue:
extract_options.extend(queue_options)
if ipass == 0:
extract_job_name = 'extract_job'
extract_alias = 'pass 1'
else:
extract_job_name = 'extract2_job'
extract_alias = 'pass 2'
extract_job, already_had_it = addJob('Extract', extract_job_name, SETUP_CHECK_FILE, extract_options, alias=extract_alias)
runjobs.append(extract_job)
if (ipass == 0 and (opts.do_class2d or opts.do_class3d)) or (ipass == 1 and (opts.do_class2d_pass2 or opts.do_class3d_pass2)):
#### Set up the Select job to split the particle STAR file into batches
split_options = ['OR select from particles.star: == {}particles.star'.format(extract_job),
'OR: split into subsets? == Yes',
'OR: number of subsets: == -1']
if ipass == 0:
split_job_name = 'split_job'
split_options.append('Subset size: == {}'.format(opts.batch_size))
split_alias = 'into {}'.format(opts.batch_size)
else:
split_job_name = 'split2_job'
split_options.append('Subset size: == {}'.format(opts.batch_size_pass2))
split_alias = 'into {}'.format(opts.batch_size_pass2)
split_job, already_had_it = addJob('Select', split_job_name, SETUP_CHECK_FILE, split_options, alias=split_alias)
# Add the Select/split job to the scheduled preprocessing jobs; everything is started below
runjobs.append(split_job)
# Now execute the entire preprocessing pipeliner
if ipass == 0:
preprocess_schedule_name = PREPROCESS_SCHEDULE_PASS1
else:
preprocess_schedule_name = PREPROCESS_SCHEDULE_PASS2
RunJobs(runjobs, opts.preprocess_repeat_times, opts.preprocess_repeat_wait, preprocess_schedule_name)
print ' RELION_IT: submitted',preprocess_schedule_name,'pipeliner with', opts.preprocess_repeat_times,'repeats of the preprocessing jobs'
print ' RELION_IT: this pipeliner will run in the background of your shell. You can stop it by deleting the file RUNNING_PIPELINER_'+preprocess_schedule_name
########## From now on, process extracted particles in batches for 2D or 3D classification; only perform SGD initial-model generation for the first complete batch, and only if no 3D reference is available
# There is again an option to stop here...
if (ipass == 0 and (opts.do_class2d or opts.do_class3d)) or (ipass == 1 and (opts.do_class2d_pass2 or opts.do_class3d_pass2)):
### If necessary, rescale the 3D reference in the second pass!
# TODO: rescale initial reference if different from movies?
if ipass == 1 and (opts.extract_downscale or opts.extract2_downscale):
particles_angpix = opts.angpix
if opts.images_are_movies:
particles_angpix = particles_angpix * opts.motioncor_binning
if opts.extract2_downscale:
particles_angpix = particles_angpix * opts.extract_boxsize / opts.extract2_small_boxsize
particles_boxsize = opts.extract2_small_boxsize
else:
particles_boxsize = opts.extract_boxsize
if abs(float(particles_angpix) - float(opts.autopick_ref_angpix)) > 0.01:
# Now rescale the reference for 3D classification
opts.class3d_reference = opts.autopick_3dreference.replace('.mrc','_rescaled.mrc')
print ' RELION_IT: rescaling the 3D reference from pixel size',opts.autopick_ref_angpix,'to',particles_angpix,'and saving the new reference as',opts.class3d_reference
command = 'relion_image_handler --i ' + opts.autopick_3dreference + ' --o ' + opts.class3d_reference + ' --angpix ' + str(opts.autopick_ref_angpix) + ' --rescale_angpix ' + str(particles_angpix) + ' --new_box ' + str(particles_boxsize)
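# The resulting command looks like the following (values are illustrative only):
#   relion_image_handler --i ref.mrc --o ref_rescaled.mrc --angpix 1.05 --rescale_angpix 3.54 --new_box 64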
os.system(command)
print ' RELION_IT: now entering an infinite loop for batch-processing of particles. You can stop this loop by deleting the file', RUNNING_FILE
# It could be that this is a restart, so check previous_batch1_size in the output directory.
# Also check the presence of class2d_job_batch_001 in case the first job was not submitted yet.
first_split_file = find_split_job_output(split_job + 'particles_split', 1)
if getJobName("class2d_job_batch_001", SETUP_CHECK_FILE) is not None and \
first_split_file is not None:
batch1 = safe_load_star(first_split_file, expected=['', 'rlnMicrographName'])
previous_batch1_size = len(batch1['']['rlnMicrographName'])
else:
previous_batch1_size = 0
continue_this_pass = True
while continue_this_pass:
have_new_batch = False
nr_batches = len(glob.glob(split_job + "particles_split*.star"))
for ibatch in range(0, nr_batches):
iibatch = ibatch + 1
batch_name = find_split_job_output(split_job + "particles_split", iibatch)
batch = safe_load_star(batch_name, expected=['', 'rlnMicrographName'])
batch_size = len(batch['']['rlnMicrographName'])
rerun_batch1 = False
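# Batch 1 keeps growing while data collection is ongoing: whenever it has gained particles and
# exceeds minimum_batch_size, its classification jobs are re-run with the enlarged particle set.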
if ( iibatch == 1 and batch_size > previous_batch1_size and batch_size > opts.minimum_batch_size ):
previous_batch1_size = batch_size
rerun_batch1 = True
particles_star_file = batch_name
# The first batch is special: it is (re-)classified as soon as it exceeds minimum_batch_size, and re-run (overwriting the same output directory) whenever it grows; later batches are only processed once they reach the full batch_size
if ( rerun_batch1 or batch_size == opts.batch_size):
# Discard particles with odd average/stddev values
if opts.do_discard_on_image_statistics:
#### Run a Select job to get rid of particles with outlier average/stddev values...
discard_options = ['OR select from particles.star: == {}'.format(batch_name),
'OR: select on image statistics? == Yes',
'Sigma-value for discarding images: == {}'.format(opts.discard_sigma),
'Metadata label for images: == rlnImageName']
if ipass == 0:
discard_job_name = 'discard_job'
else:
discard_job_name = 'discard2_job'
if opts.discard_submit_to_queue:
discard_options.extend(queue_options)
discard_job, already_had_it = addJob('Select', discard_job_name, SETUP_CHECK_FILE, discard_options)
if ((not already_had_it) or rerun_batch1):
have_new_batch = True
RunJobs([discard_job], 1, 1, 'DISCARD')
print " RELION_IT: submitted job to discard based on image statistics for", batch_size ,"particles in", batch_name
# Wait here until this Discard job is finished. Check every thirty seconds
WaitForJob(discard_job, 30)
particles_star_file = discard_job + 'particles.star'
# 2D classification
if (ipass == 0 and opts.do_class2d) or (ipass == 1 and opts.do_class2d_pass2):
class2d_options = ['Input images STAR file: == {}'.format(particles_star_file),
'Number of classes: == {}'.format(opts.class2d_nr_classes),
'Mask diameter (A): == {}'.format(opts.mask_diameter),
'Number of iterations: == {}'.format(opts.class2d_nr_iter),
'Angular search range - psi (deg): == {}'.format(opts.class2d_angle_step),
'Offset search range (pix): == {}'.format(opts.class2d_offset_range),
'Offset search step (pix): == {}'.format(opts.class2d_offset_step),
'Number of pooled particles: == {}'.format(opts.refine_nr_pool),
'Which GPUs to use: == {}'.format(opts.refine_gpu),
'Number of MPI procs: == {}'.format(opts.refine_mpi),
'Number of threads: == {}'.format(opts.refine_threads),
'Copy particles to scratch directory: == {}'.format(opts.refine_scratch_disk),
'Additional arguments: == {}'.format(opts.class2d_other_args)]
if batch_size > opts.refine_batchsize_for_fast_subsets:
class2d_options.append('Use fast subsets (for large data sets)? == Yes')
else:
class2d_options.append('Use fast subsets (for large data sets)? == No')
if opts.refine_do_gpu:
class2d_options.append('Use GPU acceleration? == Yes')
else:
class2d_options.append('Use GPU acceleration? == No')
if opts.class2d_ctf_ign1stpeak:
class2d_options.append('Ignore CTFs until first peak? == Yes')
else:
class2d_options.append('Ignore CTFs until first peak? == No')
if opts.refine_preread_images:
class2d_options.append('Pre-read all particles into RAM? == Yes')
else:
class2d_options.append('Pre-read all particles into RAM? == No')
if opts.refine_submit_to_queue:
class2d_options.extend(queue_options)
if ipass == 0:
jobname = 'class2d_job_batch_{:03d}'.format(iibatch)
alias = 'pass1_batch_{:03d}'.format(iibatch)
else:
jobname = 'class2d_pass2_job_batch_{:03d}'.format(iibatch)
alias = 'pass2_batch_{:03d}'.format(iibatch)
class2d_job, already_had_it = addJob('Class2D', jobname, SETUP_CHECK_FILE, class2d_options, alias=alias)
if ((not already_had_it) or rerun_batch1):
have_new_batch = True
RunJobs([class2d_job], 1, 1, 'CLASS2D')
print " RELION_IT: submitted 2D classification with", batch_size ,"particles in", class2d_job
# Wait here until this Class2D job is finished. Check every thirty seconds
WaitForJob(class2d_job, 30)
# Perform 3D classification
if (ipass == 0 and opts.do_class3d) or (ipass == 1 and opts.do_class3d_pass2):
# Do SGD initial model generation only in the first pass, when no reference is provided AND only for the first (complete) batch, for subsequent batches use that model
if (not opts.have_3d_reference) and ipass == 0 and iibatch == 1 and batch_size == opts.batch_size:
inimodel_options = ['Input images STAR file: == {}'.format(particles_star_file),
'Symmetry: == {}'.format(opts.symmetry),
'Mask diameter (A): == {}'.format(opts.mask_diameter),
'Number of classes: == {}'.format(opts.inimodel_nr_classes),
'Initial angular sampling: == {}'.format(opts.inimodel_angle_step),
'Offset search range (pix): == {}'.format(opts.inimodel_offset_range),
'Offset search step (pix): == {}'.format(opts.inimodel_offset_step),
'Number of initial iterations: == {}'.format(opts.inimodel_nr_iter_initial),
'Number of in-between iterations: == {}'.format(opts.inimodel_nr_iter_inbetween),
'Number of final iterations: == {}'.format(opts.inimodel_nr_iter_final),
'Write-out frequency (iter): == {}'.format(opts.inimodel_freq_writeout),
'Initial resolution (A): == {}'.format(opts.inimodel_resol_ini),
'Final resolution (A): == {}'.format(opts.inimodel_resol_final),
'Initial mini-batch size: == {}'.format(opts.inimodel_batchsize_ini),
'Final mini-batch size: == {}'.format(opts.inimodel_batchsize_final),
'SGD increased noise variance half-life: == {}'.format(opts.inimodel_sigmafudge_halflife),
'Number of pooled particles: == 1',
'Which GPUs to use: == {}'.format(opts.refine_gpu),
'Number of MPI procs: == {}'.format(opts.refine_mpi),
'Number of threads: == {}'.format(opts.refine_threads),
'Copy particles to scratch directory: == {}'.format(opts.refine_scratch_disk),
'Additional arguments: == {}'.format(opts.inimodel_other_args)]
if opts.inimodel_solvent_flatten:
inimodel_options.append('Flatten and enforce non-negative solvent? == Yes')
else:
inimodel_options.append('Flatten and enforce non-negative solvent? == No')
if opts.refine_skip_padding:
inimodel_options.append('Skip padding? == Yes')
else:
inimodel_options.append('Skip padding? == No')
if opts.refine_do_gpu:
inimodel_options.append('Use GPU acceleration? == Yes')
else:
inimodel_options.append('Use GPU acceleration? == No')
if opts.inimodel_ctf_ign1stpeak:
inimodel_options.append('Ignore CTFs until first peak? == Yes')
else:
inimodel_options.append('Ignore CTFs until first peak? == No')
if opts.refine_preread_images:
inimodel_options.append('Pre-read all particles into RAM? == Yes')
else:
inimodel_options.append('Pre-read all particles into RAM? == No')
if opts.refine_submit_to_queue:
inimodel_options.extend(queue_options)
inimodel_job, already_had_it = addJob('InitialModel', 'inimodel', SETUP_CHECK_FILE, inimodel_options)
if (not already_had_it):
have_new_batch = True
RunJobs([inimodel_job], 1, 1, 'INIMODEL')
print " RELION_IT: submitted initial model generation with", batch_size ,"particles in", inimodel_job
# Wait here until this inimodel job is finished. Check every thirty seconds
WaitForJob(inimodel_job, 30)
sgd_model_star = findOutputModelStar(inimodel_job)
if sgd_model_star is None:
print " RELION_IT: Initial model generation " + inimodel_job + " does not contain expected output maps."
print " RELION_IT: This job should have finished, but you may continue it from the GUI. "
raise Exception("ERROR!! quitting the pipeline.") # TODO: MAKE MORE ROBUST
# Use the model of the largest class for the 3D classification below
total_iter = opts.inimodel_nr_iter_initial + opts.inimodel_nr_iter_inbetween + opts.inimodel_nr_iter_final
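# Note: the model STAR file is taken from findOutputModelStar() above; total_iter (the expected
# final SGD iteration number) is not used to construct the file name here.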
best_inimodel_class, best_inimodel_resol, best_inimodel_angpix = findBestClass(sgd_model_star, use_resol=True)
opts.class3d_reference = best_inimodel_class
opts.class3d_ref_is_correct_greyscale = True
opts.class3d_ref_is_ctf_corrected = True
opts.have_3d_reference = True
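# From here on, the best SGD class map acts as the 3D reference (on absolute greyscale and
# CTF corrected) for this batch and for all subsequent Class3D jobs.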
if opts.have_3d_reference:
# Now perform the actual 3D classification
class3d_options = ['Input images STAR file: == {}'.format(particles_star_file),
'Reference map: == {}'.format(opts.class3d_reference),
'Initial low-pass filter (A): == {}'.format(opts.class3d_ini_lowpass),
'Symmetry: == {}'.format(opts.symmetry),
'Regularisation parameter T: == {}'.format(opts.class3d_T_value),
'Reference mask (optional): == {}'.format(opts.class3d_reference_mask),
'Number of classes: == {}'.format(opts.class3d_nr_classes),
'Mask diameter (A): == {}'.format(opts.mask_diameter),
'Number of iterations: == {}'.format(opts.class3d_nr_iter),
'Angular sampling interval: == {}'.format(opts.class3d_angle_step),
'Offset search range (pix): == {}'.format(opts.class3d_offset_range),
'Offset search step (pix): == {}'.format(opts.class3d_offset_step),
'Number of pooled particles: == {}'.format(opts.refine_nr_pool),
'Which GPUs to use: == {}'.format(opts.refine_gpu),
'Number of MPI procs: == {}'.format(opts.refine_mpi),
'Number of threads: == {}'.format(opts.refine_threads),
'Copy particles to scratch directory: == {}'.format(opts.refine_scratch_disk),
'Additional arguments: == {}'.format(opts.class3d_other_args)]
if batch_size > opts.refine_batchsize_for_fast_subsets:
class3d_options.append('Use fast subsets (for large data sets)? == Yes')
else:
class3d_options.append('Use fast subsets (for large data sets)? == No')
if opts.class3d_ref_is_correct_greyscale:
class3d_options.append('Ref. map is on absolute greyscale? == Yes')
else:
class3d_options.append('Ref. map is on absolute greyscale? == No')
if opts.class3d_ref_is_ctf_corrected:
class3d_options.append('Has reference been CTF-corrected? == Yes')
else:
class3d_options.append('Has reference been CTF-corrected? == No')
if opts.refine_skip_padding:
class3d_options.append('Skip padding? == Yes')
else:
class3d_options.append('Skip padding? == No')
if opts.refine_do_gpu:
class3d_options.append('Use GPU acceleration? == Yes')
else:
class3d_options.append('Use GPU acceleration? == No')
if opts.class3d_ctf_ign1stpeak:
class3d_options.append('Ignore CTFs until first peak? == Yes')
else:
class3d_options.append('Ignore CTFs until first peak? == No')
if opts.refine_preread_images:
class3d_options.append('Pre-read all particles into RAM? == Yes')
else:
class3d_options.append('Pre-read all particles into RAM? == No')
if opts.refine_submit_to_queue:
class3d_options.extend(queue_options)
if ipass == 0:
jobname = 'class3d_job_batch_{:03d}'.format(iibatch)
alias = 'pass1_batch_{:03d}'.format(iibatch)
else:
jobname = 'class3d2_job_batch_{:03d}'.format(iibatch)
alias = 'pass2_batch_{:03d}'.format(iibatch)
class3d_job, already_had_it = addJob('Class3D', jobname, SETUP_CHECK_FILE, class3d_options, alias=alias)
if ((not already_had_it) or rerun_batch1):
have_new_batch = True
RunJobs([class3d_job], 1, 1, 'CLASS3D')
print ' RELION_IT: submitted 3D classification with', batch_size ,'particles in', class3d_job
# Wait here until this Class3D job is finished. Check every thirty seconds
WaitForJob(class3d_job, 30)
class3d_model_star = findOutputModelStar(class3d_job)
if class3d_model_star is None:
print " RELION_IT: 3D Classification " + class3d_job + " does not contain expected output maps."
print " RELION_IT: This job should have finished, but you may continue it from the GUI."
raise Exception("ERROR!! quitting the pipeline.") # TODO: MAKE MORE ROBUST
best_class3d_class, best_class3d_resol, best_class3d_angpix = findBestClass(class3d_model_star, use_resol=True)
# Once the first batch in the first pass is completed: move on to the second pass
if (ipass == 0 and opts.do_second_pass and iibatch == 1 and best_class3d_resol < opts.minimum_resolution_3dref_2ndpass):
opts.autopick_3dreference = best_class3d_class
opts.autopick_ref_angpix = best_class3d_angpix
opts.autopick_2dreferences = ''
opts.autopick_do_LoG = False
opts.class3d_reference = best_class3d_class
opts.have_3d_reference = True
opts.autopick_3dref_symmetry = opts.symmetry
# Stop the PREPROCESS pipeliner of the first pass by removing its RUNNING file
filename_to_remove = 'RUNNING_PIPELINER_'+preprocess_schedule_name
if os.path.isfile(filename_to_remove):
print ' RELION_IT: removing file',filename_to_remove,'to stop the pipeliner from the first pass'
os.remove(filename_to_remove)
# Generate a file to indicate we're in the second pass, so that restarts of the python script will be smooth
g = open(SECONDPASS_REF3D_FILE,'w')
g.write(str(best_class3d_class)+'\n'+str(best_class3d_angpix)+'\n')
g.close()
# Finish this pass: stop looping over batches so the outer loop can move on to the second pass
ibatch = nr_batches+1
continue_this_pass = False
print ' RELION_IT: moving on to the second pass using',opts.autopick_3dreference,'for template-based autopicking'
# break out of the for-loop over the batches
break
if not have_new_batch:
CheckForExit()
# The following prevents checking the particles.star file too often
time.sleep(60*opts.batch_repeat_time)
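# The sleep above is 60 * batch_repeat_time seconds, i.e. batch_repeat_time is specified in minutes.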
def main():
"""
Run the RELION 3 pipeline.
Options files given as command line arguments will be opened in order and
used to update the default options.
"""
# Start by parsing arguments
# (If --help is given, the program will print a usage message and exit)
parser = argparse.ArgumentParser()
parser.add_argument("extra_options", nargs="*", metavar="extra_options.py",
help="Python files containing options for relion_it.py")
parser.add_argument("--gui", action="store_true", help="launch a simple GUI to set options")
parser.add_argument("--continue", action="store_true", dest="continue_",
help="continue a previous run by loading options from ./relion_it_options.py")
args = parser.parse_args()
print ' RELION_IT: -------------------------------------------------------------------------------------------------------------------'
print ' RELION_IT: script for automated, on-the-fly single-particle analysis in RELION (>= 3.0-alpha-5)'
print ' RELION_IT: authors: Sjors H.W. Scheres, Takanori Nakane & Colin M. Palmer'
print ' RELION_IT: '
print ' RELION_IT: usage: ./relion_it.py [extra_options.py [extra_options2.py ....] ] [--gui] [--continue]'
print ' RELION_IT: '
print ' RELION_IT: this script will check whether processes are still running using files with names starting with RUNNING'
print ' RELION_IT: you can restart this script after stopping previous processes by deleting all RUNNING files'
print ' RELION_IT: this script keeps track of already submitted jobs in a file called',SETUP_CHECK_FILE
print ' RELION_IT: upon a restart, jobs present in this file will be continued (for preprocessing), or ignored when already finished'
print ' RELION_IT: if you would like to re-do a specific job from scratch (e.g. because you changed its parameters)'
print ' RELION_IT: remove that job, and those that depend on it, from the',SETUP_CHECK_FILE
print ' RELION_IT: -------------------------------------------------------------------------------------------------------------------'
print ' RELION_IT: '
# Make sure no other copies of this script are running...
if os.path.isfile(RUNNING_FILE):
print " RELION_IT: ERROR:", RUNNING_FILE, "is already present: delete this file and make sure no other copy of this script is running. Exiting now ..."
exit(0)
# Also make sure the preprocessing pipeliners are stopped before re-starting this script
for checkfile in ('RUNNING_PIPELINER_'+PREPROCESS_SCHEDULE_PASS1, 'RUNNING_PIPELINER_'+PREPROCESS_SCHEDULE_PASS2):
if os.path.isfile(checkfile):
print " RELION_IT: ERROR:", checkfile, "is already present: delete this file and make sure no relion_pipeliner job is still running. Exiting now ..."
exit(0)
if args.continue_:
print ' RELION_IT: continuing a previous run. Options will be loaded from ./relion_it_options.py'
args.extra_options.append(OPTIONS_FILE)
opts = RelionItOptions()
for user_opt_file in args.extra_options:
print ' RELION_IT: reading options from {}'.format(user_opt_file)
user_opts = runpy.run_path(user_opt_file)
opts.update_from(user_opts)
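# Option files are applied in the order given, so later files override earlier ones; with --continue,
# ./relion_it_options.py is read last and therefore takes precedence.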
if args.gui:
print ' RELION_IT: launching GUI...'
tk_root = tk.Tk()
tk_root.title("relion_it.py setup")
RelionItGui(tk_root, opts)
tk_root.mainloop()
else:
run_pipeline(opts)
if __name__ == "__main__":
main()
|
bforsbe/relion
|
scripts/relion_it.py
|
Python
|
gpl-2.0
| 110,959
|
[
"Gaussian"
] |
1f8321895d6dd12cedb12f7a797124fa21b81214b7b27f7fec2805351d31625e
|