# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IPython Magics
.. function:: %%bigquery
IPython cell magic to run a query and display the result as a DataFrame
.. code-block:: python
%%bigquery [<destination_var>] [--project <project>] [--use_legacy_sql]
[--verbose] [--params <params>]
<query>
Parameters:
* ``<destination_var>`` (optional, line argument):
variable to store the query results.
* ``--project <project>`` (optional, line argument):
Project to use for running the query. Defaults to the context
:attr:`~google.cloud.bigquery.magics.Context.project`.
* ``--use_legacy_sql`` (optional, line argument):
Runs the query using Legacy SQL syntax. Defaults to Standard SQL if
this argument is not used.
* ``--verbose`` (optional, line argument):
If this flag is used, information including the query job ID and the
amount of time for the query to complete will not be cleared after the
query is finished. By default, this information will be displayed but
will be cleared after the query is finished.
* ``--params <params>`` (optional, line argument):
If present, the argument following the ``--params`` flag must be
either:
* :class:`str` - A JSON string representation of a dictionary in the
format ``{"param_name": "param_value"}`` (ex. ``{"num": 17}``). Use
of the parameter in the query should be indicated with
``@param_name``. See ``In[5]`` in the Examples section below.
* :class:`dict` reference - A reference to a ``dict`` in the format
``{"param_name": "param_value"}``, where the value types must be JSON
serializable. The variable reference is indicated by a ``$`` before
the variable name (ex. ``$my_dict_var``). See ``In[6]`` and ``In[7]``
in the Examples section below.
* ``<query>`` (required, cell argument):
SQL query to run.
Returns:
A :class:`pandas.DataFrame` with the query results.
.. note::
All queries run using this magic will run using the context
:attr:`~google.cloud.bigquery.magics.Context.credentials`.
Examples:
The following examples can be run in an IPython notebook after loading
the bigquery IPython extension (see ``In[1]``) and setting up
Application Default Credentials.
.. code-block:: none
In [1]: %load_ext google.cloud.bigquery
In [2]: %%bigquery
...: SELECT name, SUM(number) as count
...: FROM `bigquery-public-data.usa_names.usa_1910_current`
...: GROUP BY name
...: ORDER BY count DESC
...: LIMIT 3
Out[2]: name count
...: -------------------
...: 0 James 4987296
...: 1 John 4866302
...: 2 Robert 4738204
In [3]: %%bigquery df --project my-alternate-project --verbose
...: SELECT name, SUM(number) as count
...: FROM `bigquery-public-data.usa_names.usa_1910_current`
...: WHERE gender = 'F'
...: GROUP BY name
...: ORDER BY count DESC
...: LIMIT 3
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 2.61s
Query complete after 2.92s
Out[3]: name count
...: ----------------------
...: 0 Mary 3736239
...: 1 Patricia 1568495
...: 2 Elizabeth 1519946
In [4]: df
Out[4]: name count
...: ----------------------
...: 0 Mary 3736239
...: 1 Patricia 1568495
...: 2 Elizabeth 1519946
In [5]: %%bigquery df --params {"num": 17}
...: SELECT @num AS num
Out[5]: num
...: -------
...: 0 17
In [6]: params = {"num": 17}
In [7]: %%bigquery df --params $params
...: SELECT @num AS num
Out[7]: num
...: -------
...: 0 17
"""
from __future__ import print_function
import ast
import time
from concurrent import futures
try:
import IPython
from IPython import display
from IPython.core import magic_arguments
except ImportError: # pragma: NO COVER
raise ImportError('This module can only be loaded in IPython.')
import google.auth
from google.cloud import bigquery
from google.cloud.bigquery.dbapi import _helpers
class Context(object):
"""Storage for objects to be used throughout an IPython notebook session.
A Context object is initialized when the ``magics`` module is imported,
and can be found at ``google.cloud.bigquery.magics.context``.
"""
def __init__(self):
self._credentials = None
self._project = None
@property
def credentials(self):
"""google.auth.credentials.Credentials: Credentials to use for queries
performed through IPython magics
Note:
These credentials do not need to be explicitly defined if you are
using Application Default Credentials. If you are not using
Application Default Credentials, manually construct a
:class:`google.auth.credentials.Credentials` object and set it as
the context credentials as demonstrated in the example below. See
`auth docs`_ for more information on obtaining credentials.
Example:
Manually setting the context credentials:
>>> from google.cloud.bigquery import magics
>>> from google.oauth2 import service_account
>>> credentials = (service_account
... .Credentials.from_service_account_file(
... '/path/to/key.json'))
>>> magics.context.credentials = credentials
.. _auth docs: http://google-auth.readthedocs.io
/en/latest/user-guide.html#obtaining-credentials
"""
if self._credentials is None:
self._credentials, _ = google.auth.default()
return self._credentials
@credentials.setter
def credentials(self, value):
self._credentials = value
@property
def project(self):
"""str: Default project to use for queries performed through IPython
magics
Note:
The project does not need to be explicitly defined if you have an
environment default project set. If you do not have a default
project set in your environment, manually assign the project as
demonstrated in the example below.
Example:
Manually setting the context project:
>>> from google.cloud.bigquery import magics
>>> magics.context.project = 'my-project'
"""
if self._project is None:
_, self._project = google.auth.default()
return self._project
@project.setter
def project(self, value):
self._project = value
context = Context()
def _run_query(client, query, job_config=None):
"""Runs a query while printing status updates
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
query (str):
SQL query to be executed. Defaults to the standard SQL dialect.
Use the ``job_config`` parameter to change dialects.
job_config (google.cloud.bigquery.job.QueryJobConfig, optional):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.QueryJob: the query job created
Example:
>>> client = bigquery.Client()
>>> _run_query(client, "SELECT 17")
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 1.66s
Query complete after 2.07s
'bf633912-af2c-4780-b568-5d868058632b'
"""
start_time = time.time()
query_job = client.query(query, job_config=job_config)
print('Executing query with job ID: {}'.format(query_job.job_id))
while True:
print('\rQuery executing: {:0.2f}s'.format(
time.time() - start_time), end='')
try:
query_job.result(timeout=0.5)
break
except futures.TimeoutError:
continue
print('\nQuery complete after {:0.2f}s'.format(time.time() - start_time))
return query_job
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'destination_var',
nargs='?',
help=('If provided, save the output to this variable in addition '
'to displaying it.'))
@magic_arguments.argument(
'--project',
type=str,
default=None,
help=('Project to use for executing this query. Defaults to the context '
'project.'))
@magic_arguments.argument(
'--use_legacy_sql', action='store_true', default=False,
help=('Sets query to use Legacy SQL instead of Standard SQL. Defaults to '
'Standard SQL if this argument is not used.'))
@magic_arguments.argument(
'--verbose', action='store_true', default=False,
help=('If set, print verbose output, including the query job ID and the '
'amount of time for the query to finish. By default, this '
'information will be displayed as the query runs, but will be '
'cleared after the query is finished.'))
@magic_arguments.argument(
'--params',
nargs='+',
default=None,
help=('Parameters to format the query string. If present, the --params '
'flag should be followed by a string representation of a dictionary '
'in the format {\'param_name\': \'param_value\'} (ex. {"num": 17}), '
'or a reference to a dictionary in the same format. The dictionary '
'reference can be made by including a \'$\' before the variable '
'name (ex. $my_dict_var).'))
def _cell_magic(line, query):
"""Underlying function for bigquery cell magic
Note:
This function contains the underlying logic for the 'bigquery' cell
magic. This function is not meant to be called directly.
Args:
line (str): "%%bigquery" followed by arguments as required
query (str): SQL query to run
Returns:
pandas.DataFrame: the query results.
"""
args = magic_arguments.parse_argstring(_cell_magic, line)
params = []
if args.params is not None:
try:
params = _helpers.to_query_parameters(
ast.literal_eval(''.join(args.params)))
except Exception:
raise SyntaxError(
'--params is not a correctly formatted JSON string or a JSON '
'serializable dictionary')
project = args.project or context.project
client = bigquery.Client(project=project, credentials=context.credentials)
job_config = bigquery.job.QueryJobConfig()
job_config.query_parameters = params
job_config.use_legacy_sql = args.use_legacy_sql
query_job = _run_query(client, query, job_config)
if not args.verbose:
display.clear_output()
result = query_job.to_dataframe()
if args.destination_var:
IPython.get_ipython().push({args.destination_var: result})
return result
"""
report test results in JUnit-XML format,
for use with Jenkins and build integration servers.
Based on initial code from Ross Lawley.
"""
# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
import functools
import py
import os
import re
import sys
import time
import pytest
from _pytest.config import filename_arg
# Python 2.X and 3.X compatibility
if sys.version_info[0] < 3:
from codecs import open
else:
unichr = chr
unicode = str
long = int
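# Attribute access on this py.xml.Namespace subclass (e.g. Junit.testcase,
# Junit.failure) produces the corresponding XML tag builders.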
class Junit(py.xml.Namespace):
pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0d)
_legal_ranges = (
(0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
)
_legal_xml_re = [
unicode("%s-%s") % (unichr(low), unichr(high))
for (low, high) in _legal_ranges if low < sys.maxunicode
]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
_py_ext_re = re.compile(r"\.py$")
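# Escape characters that are illegal in XML 1.0 by replacing them with a
# textual '#xNN' marker so the generated report stays well-formed.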
def bin_xml_escape(arg):
def repl(matchobj):
i = ord(matchobj.group())
if i <= 0xFF:
return unicode('#x%02X') % i
else:
return unicode('#x%04X') % i
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
class _NodeReporter(object):
def __init__(self, nodeid, xml):
self.id = nodeid
self.xml = xml
self.add_stats = self.xml.add_stats
self.duration = 0
self.properties = []
self.nodes = []
self.testcase = None
self.attrs = {}
def append(self, node):
self.xml.add_stats(type(node).__name__)
self.nodes.append(node)
def add_property(self, name, value):
self.properties.append((str(name), bin_xml_escape(value)))
def make_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.properties:
return Junit.properties([
Junit.property(name=name, value=value)
for name, value in self.properties
])
return ''
def record_testreport(self, testreport):
assert not self.testcase
names = mangle_test_address(testreport.nodeid)
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
attrs = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
}
if testreport.location[1] is not None:
attrs["line"] = testreport.location[1]
self.attrs = attrs
def to_xml(self):
testcase = Junit.testcase(time=self.duration, **self.attrs)
testcase.append(self.make_properties_node())
for node in self.nodes:
testcase.append(node)
return testcase
def _add_simple(self, kind, message, data=None):
data = bin_xml_escape(data)
node = kind(data, message=message)
self.append(node)
def write_captured_output(self, report):
for capname in ('out', 'err'):
content = getattr(report, 'capstd' + capname)
if content:
tag = getattr(Junit, 'system-' + capname)
self.append(tag(bin_xml_escape(content)))
def append_pass(self, report):
self.add_stats('passed')
def append_failure(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self._add_simple(
Junit.skipped,
"xfail-marked test passes unexpectedly")
else:
if hasattr(report.longrepr, "reprcrash"):
message = report.longrepr.reprcrash.message
elif isinstance(report.longrepr, (unicode, str)):
message = report.longrepr
else:
message = str(report.longrepr)
message = bin_xml_escape(message)
fail = Junit.failure(message=message)
fail.append(bin_xml_escape(report.longrepr))
self.append(fail)
def append_collect_error(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
self.append(Junit.error(bin_xml_escape(report.longrepr),
message="collection failure"))
def append_collect_skipped(self, report):
self._add_simple(
Junit.skipped, "collection skipped", report.longrepr)
def append_error(self, report):
if getattr(report, 'when', None) == 'teardown':
msg = "test teardown failure"
else:
msg = "test setup failure"
self._add_simple(
Junit.error, msg, report.longrepr)
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
self._add_simple(
Junit.skipped, "expected test failure", report.wasxfail
)
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = bin_xml_escape(skipreason[9:])
self.append(
Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
type="pytest.skip",
message=skipreason))
self.write_captured_output(report)
def finalize(self):
data = self.to_xml().unicode(indent=0)
self.__dict__.clear()
self.to_xml = lambda: py.xml.raw(data)
@pytest.fixture
def record_xml_property(request):
"""Add extra xml properties to the tag for the calling test.
The fixture is callable with ``(name, value)``, with value being automatically
xml-encoded.
"""
request.node.warn(
code='C3',
message='record_xml_property is an experimental feature',
)
xml = getattr(request.config, "_xml", None)
if xml is not None:
node_reporter = xml.node_reporter(request.node.nodeid)
return node_reporter.add_property
else:
def add_property_noop(name, value):
pass
return add_property_noop
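# Illustrative use of the fixture inside a test function:
#
#     def test_function(record_xml_property):
#         record_xml_property("example_key", 1)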
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group.addoption(
'--junitxml', '--junit-xml',
action="store",
dest="xmlpath",
metavar="path",
type=functools.partial(filename_arg, optname="--junitxml"),
default=None,
help="create junit-xml style report file at given path.")
group.addoption(
'--junitprefix', '--junit-prefix',
action="store",
metavar="str",
default=None,
help="prepend prefix to classnames in junit-xml output")
def pytest_configure(config):
xmlpath = config.option.xmlpath
# prevent opening xmllog on slave nodes (xdist)
if xmlpath and not hasattr(config, 'slaveinput'):
config._xml = LogXML(xmlpath, config.option.junitprefix)
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, '_xml', None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
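# Convert a pytest nodeid such as 'tests/test_mod.py::TestClass::test_fn[param]'
# into its dotted classname parts plus the bare test name (parameters kept).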
def mangle_test_address(address):
path, possible_open_bracket, params = address.partition('[')
names = path.split("::")
try:
names.remove('()')
except ValueError:
pass
# convert file path to dotted path
names[0] = names[0].replace("/", '.')
names[0] = _py_ext_re.sub("", names[0])
# put any params back
names[-1] += possible_open_bracket + params
return names
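# Session-scoped plugin object: collects one _NodeReporter per test and writes
# the final junit-xml file when the test session finishes.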
class LogXML(object):
def __init__(self, logfile, prefix):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.stats = dict.fromkeys([
'error',
'passed',
'failure',
'skipped',
], 0)
self.node_reporters = {} # nodeid -> _NodeReporter
self.node_reporters_ordered = []
self.global_properties = []
def finalize(self, report):
nodeid = getattr(report, 'nodeid', report)
# local hack to handle xdist report order
slavenode = getattr(report, 'node', None)
reporter = self.node_reporters.pop((nodeid, slavenode))
if reporter is not None:
reporter.finalize()
def node_reporter(self, report):
nodeid = getattr(report, 'nodeid', report)
# local hack to handle xdist report order
slavenode = getattr(report, 'node', None)
key = nodeid, slavenode
if key in self.node_reporters:
# TODO: breaks for --dist=each
return self.node_reporters[key]
reporter = _NodeReporter(nodeid, self)
self.node_reporters[key] = reporter
self.node_reporters_ordered.append(reporter)
return reporter
def add_stats(self, key):
if key in self.stats:
self.stats[key] += 1
def _opentestcase(self, report):
reporter = self.node_reporter(report)
reporter.record_testreport(report)
return reporter
def pytest_runtest_logreport(self, report):
"""handle a setup/call/teardown report, generating the appropriate
xml tags as necessary.
note: due to plugins like xdist, this hook may be called in interlaced
order with reports from other nodes. for example:
usual call order:
-> setup node1
-> call node1
-> teardown node1
-> setup node2
-> call node2
-> teardown node2
possible call order in xdist:
-> setup node1
-> call node1
-> setup node2
-> call node2
-> teardown node2
-> teardown node1
"""
if report.passed:
if report.when == "call": # ignore setup/teardown
reporter = self._opentestcase(report)
reporter.append_pass(report)
elif report.failed:
reporter = self._opentestcase(report)
if report.when == "call":
reporter.append_failure(report)
else:
reporter.append_error(report)
elif report.skipped:
reporter = self._opentestcase(report)
reporter.append_skipped(report)
self.update_testcase_duration(report)
if report.when == "teardown":
reporter = self._opentestcase(report)
reporter.write_captured_output(report)
self.finalize(report)
def update_testcase_duration(self, report):
"""accumulates total duration for nodeid from given report and updates
the Junit.testcase with the new total if already created.
"""
reporter = self.node_reporter(report)
reporter.duration += getattr(report, 'duration', 0.0)
def pytest_collectreport(self, report):
if not report.passed:
reporter = self._opentestcase(report)
if report.failed:
reporter.append_collect_error(report)
else:
reporter.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
reporter = self.node_reporter('internal')
reporter.attrs.update(classname="pytest", name='internal')
reporter._add_simple(Junit.error, 'internal error', excrepr)
def pytest_sessionstart(self):
self.suite_start_time = time.time()
def pytest_sessionfinish(self):
dirname = os.path.dirname(os.path.abspath(self.logfile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(self.logfile, 'w', encoding='utf-8')
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error']
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(Junit.testsuite(
self._get_global_properties_node(),
[x.to_xml() for x in self.node_reporters_ordered],
name="pytest",
errors=self.stats['error'],
failures=self.stats['failure'],
skips=self.stats['skipped'],
tests=numtests,
time="%.3f" % suite_time_delta, ).unicode(indent=0))
logfile.close()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-",
"generated xml file: %s" % (self.logfile))
def add_global_property(self, name, value):
self.global_properties.append((str(name), bin_xml_escape(value)))
def _get_global_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.global_properties:
return Junit.properties(
[
Junit.property(name=name, value=value)
for name, value in self.global_properties
]
)
return ''
# -*- coding: utf-8 -*-
import logging
import random
import time
from collections import namedtuple, defaultdict
import gevent
from gevent.queue import Empty
from ethereum import slogging
from raiden.network.channelgraph import (
get_best_routes,
)
from raiden.tasks import Task
from raiden.messages import (
MediatedTransfer,
RefundTransfer,
RevealSecret,
Secret,
SecretRequest,
)
from raiden.settings import (
DEFAULT_EVENTS_POLL_TIMEOUT,
)
from raiden.utils import lpex, pex, sha3
log = slogging.get_logger(__name__) # pylint: disable=invalid-name
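# Sentinel yielded by the waiting helpers below when no valid response arrives
# before the timeout or the expiration block.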
TIMEOUT = object()
TokenSwap = namedtuple('TokenSwap', (
'identifier',
'from_token',
'from_amount',
'from_nodeaddress', # the node address of the owner of the `from_token`
'to_token',
'to_amount',
'to_nodeaddress', # the node address of the owner of the `to_token`
))
SwapKey = namedtuple('SwapKey', (
'identifier',
'from_token',
'from_amount',
))
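# Maps a hashlock to the greenlet tasks that want to receive the protocol
# messages related to it.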
class GreenletTasksDispatcher(object):
def __init__(self):
self.hashlocks_greenlets = defaultdict(list)
def register_task(self, task, hashlock):
""" Register the task to receive messages based on `hashlock`.
Registration is required, otherwise the task won't receive any messages
from the protocol; un-registering is done by the `unregister_task`
function.
Note:
Messages are dispatched solely on the hashlock value (either part of
the message, e.g. SecretRequest, or calculated from the message
content, e.g. RevealSecret); this means the sender of the received
messages still needs to be checked.
"""
if not isinstance(task, Task):
raise ValueError('task must be an instance of Task')
self.hashlocks_greenlets[hashlock].append(task)
def unregister_task(self, task, hashlock, success): # pylint: disable=unused-argument
""" Clear the task when it's finished. """
self.hashlocks_greenlets[hashlock].remove(task)
if not self.hashlocks_greenlets[hashlock]:
del self.hashlocks_greenlets[hashlock]
def dispatch_message(self, message, hashlock):
for task in self.hashlocks_greenlets[hashlock]:
task.response_queue.put(message)
def stop(self):
wait_for = list()
for greenlets in self.hashlocks_greenlets.itervalues():
for task in greenlets:
task.kill()
wait_for.extend(greenlets)
return wait_for
class BaseMediatedTransferTask(Task):
def _send_and_wait_time(self, raiden, recipient, transfer, timeout):
""" Utility to handle multiple messages for the same hashlock while
properly handling expiration timeouts.
"""
current_time = time.time()
limit_time = current_time + timeout
raiden.send_async(recipient, transfer)
while current_time <= limit_time:
# wait for a response message (not the Ack for the transfer)
try:
response = self.response_queue.get(
timeout=limit_time - current_time,
)
except Empty:
yield TIMEOUT
return
yield response
current_time = time.time()
if log.isEnabledFor(logging.DEBUG):
log.debug(
'TIMED OUT %s %s',
self.__class__,
pex(transfer),
)
def _send_and_wait_block(self, raiden, recipient, transfer, expiration_block):
""" Utility to handle multiple messages and timeout on a blocknumber. """
raiden.send_async(recipient, transfer)
current_block = raiden.get_block_number()
while current_block < expiration_block:
try:
response = self.response_queue.get(
timeout=DEFAULT_EVENTS_POLL_TIMEOUT,
)
except Empty:
pass
else:
if response:
yield response
current_block = raiden.get_block_number()
if log.isEnabledFor(logging.DEBUG):
log.debug(
'TIMED OUT ON BLOCK %s %s %s',
current_block,
self.__class__,
pex(transfer),
block_number=current_block,
)
yield TIMEOUT
def _messages_until_block(self, raiden, expiration_block):
""" Returns the received messages up to the block `expiration_block`.
"""
current_block = raiden.get_block_number()
while current_block < expiration_block:
try:
response = self.response_queue.get(
timeout=DEFAULT_EVENTS_POLL_TIMEOUT,
)
except Empty:
pass
else:
if response:
yield response
current_block = raiden.get_block_number()
def _wait_for_unlock_or_close(self, raiden, graph, channel, mediated_transfer): # noqa
""" Wait for a Secret message from our partner to update the local
state, if the Secret message is not sent within time the channel will
be closed.
Note:
Must be called only once the secret is known.
Must call `unregister_task` after this function returns.
"""
assert graph.token_address == mediated_transfer.token
if not isinstance(mediated_transfer, MediatedTransfer):
raise ValueError('MediatedTransfer expected.')
block_to_close = mediated_transfer.lock.expiration - raiden.config['reveal_timeout']
hashlock = mediated_transfer.lock.hashlock
identifier = mediated_transfer.identifier
token = mediated_transfer.token
while channel.our_state.balance_proof.is_unclaimed(hashlock):
current_block = raiden.get_block_number()
if current_block > block_to_close:
if log.isEnabledFor(logging.WARN):
log.warn(
'Closing channel (%s, %s) to prevent expiration of lock %s %s',
pex(channel.our_state.address),
pex(channel.partner_state.address),
pex(hashlock),
repr(self),
)
channel.external_state.close(
channel.our_state.balance_proof.balance_proof,
)
return
try:
response = self.response_queue.get(
timeout=DEFAULT_EVENTS_POLL_TIMEOUT
)
except Empty:
pass
else:
if isinstance(response, Secret):
secret = response.secret
hashlock = sha3(secret)
is_valid_identifier = response.identifier == identifier
is_valid_channel = response.channel == channel.channel_address
if is_valid_identifier and is_valid_channel:
raiden.handle_secret(
identifier,
graph.token_address,
secret,
response,
hashlock,
)
else:
# cannot use the message but the secret is okay
raiden.handle_secret(
identifier,
graph.token_address,
secret,
None,
hashlock,
)
if log.isEnabledFor(logging.ERROR):
log.error(
'Invalid Secret message received, expected message'
' for token=%s identifier=%s received=%s',
token,
identifier,
response,
)
elif isinstance(response, RevealSecret):
secret = response.secret
hashlock = sha3(secret)
raiden.handle_secret(
identifier,
graph.token_address,
secret,
None,
hashlock,
)
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message ignoring. %s %s',
repr(response),
repr(self),
)
def _wait_expiration(self, raiden, transfer, sleep=DEFAULT_EVENTS_POLL_TIMEOUT):
""" Utility to wait until the expiration block.
For a chain A-B-C, if an attacker controls A and C, a mediated transfer
can be done through B and C will wait for/send a timeout. For that
reason B must not unregister the hashlock until the lock has expired,
otherwise the revealed secret wouldn't be caught.
"""
# pylint: disable=no-self-use
expiration = transfer.lock.expiration + 1
while True:
current_block = raiden.get_block_number()
if current_block > expiration:
return
gevent.sleep(sleep)
# Note: the send_and_wait_valid methods only check the message type and
# sender; this could be improved by using an encrypted connection between the
# nodes, making the signature validation unnecessary
# TODO: Implement the swaps as a restartable task (issue #303)
class MakerTokenSwapTask(BaseMediatedTransferTask):
""" Initiator task, responsible to choose a random secret, initiate the
token swap by sending a mediated transfer to the counterparty and
revealing the secret once the swap is complete.
"""
def __init__(self, raiden, tokenswap, async_result):
super(MakerTokenSwapTask, self).__init__()
self.raiden = raiden
self.tokenswap = tokenswap
self.async_result = async_result
def __repr__(self):
tokenswap = self.tokenswap
return '<{} {} from_token:{} to_token:{}>'.format(
self.__class__.__name__,
pex(self.raiden.address),
pex(tokenswap.from_token),
pex(tokenswap.to_token),
)
def _run(self): # pylint: disable=method-hidden,too-many-locals
tokenswap = self.tokenswap
raiden = self.raiden
identifier = tokenswap.identifier
from_token = tokenswap.from_token
from_amount = tokenswap.from_amount
to_token = tokenswap.to_token
to_amount = tokenswap.to_amount
to_nodeaddress = tokenswap.to_nodeaddress
from_graph = raiden.token_to_channelgraph[from_token]
to_graph = raiden.token_to_channelgraph[to_token]
from_routes = get_best_routes(
from_graph,
raiden.protocol.nodeaddresses_networkstatuses,
raiden.address,
to_nodeaddress,
from_amount,
previous_address=None,
)
fee = 0
for route in from_routes:
# for each new path a new secret must be used
secret = sha3(hex(random.getrandbits(256)))
hashlock = sha3(secret)
from_channel = from_graph.get_channel_by_contract_address(route.channel_address)
raiden.greenlet_task_dispatcher.register_task(self, hashlock)
raiden.register_channel_for_hashlock(from_token, from_channel, hashlock)
block_number = raiden.get_block_number()
lock_expiration = block_number + from_channel.settle_timeout
from_mediated_transfer = from_channel.create_mediatedtransfer(
raiden.address,
to_nodeaddress,
fee,
from_amount,
identifier,
lock_expiration,
hashlock,
)
raiden.sign(from_mediated_transfer)
from_channel.register_transfer(
# must be the same block number used to compute lock_expiration
block_number,
from_mediated_transfer,
)
# wait for the SecretRequest and MediatedTransfer
to_mediated_transfer = self.send_and_wait_valid_state(
raiden,
route.node_address,
to_nodeaddress,
from_mediated_transfer,
to_token,
to_amount,
)
if to_mediated_transfer is None:
# the initiator can unregister right away since it knows the
# secret won't be revealed
raiden.greenlet_task_dispatcher.unregister_task(self, hashlock, False)
elif isinstance(to_mediated_transfer, MediatedTransfer):
to_hop = to_mediated_transfer.sender
to_channel = to_graph.partneraddress_to_channel[to_hop]
to_channel.register_transfer(
raiden.get_block_number(),
to_mediated_transfer,
)
raiden.register_channel_for_hashlock(to_token, to_channel, hashlock)
# A swap is composed of two mediated transfers and the secret must
# be revealed for both; since the maker is one of the ends, it only
# needs to send the RevealSecret directly to the taker.
reveal_secret = RevealSecret(secret)
raiden.sign(reveal_secret)
raiden.send_async(to_nodeaddress, reveal_secret)
from_channel.register_secret(secret)
# Register the secret with the to_channel and send the
# RevealSecret message to the node that is paying the to_token
# (this node might, or might not be the same as the taker),
# then wait for the withdraw.
raiden.handle_secret(
identifier,
to_token,
secret,
None,
hashlock,
)
to_channel = to_graph.partneraddress_to_channel[to_mediated_transfer.sender]
self._wait_for_unlock_or_close(
raiden,
to_graph,
to_channel,
to_mediated_transfer,
)
# unlock the from_token and optimistically reveal the secret
# forward
raiden.handle_secret(
identifier,
from_token,
secret,
None,
hashlock,
)
raiden.greenlet_task_dispatcher.unregister_task(self, hashlock, True)
self.async_result.set(True)
return
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'MAKER TOKEN SWAP FAILED',
node=pex(node_address),
to=pex(to_nodeaddress),
)
# all routes failed
self.async_result.set(False)
def send_and_wait_valid_state( # noqa
self,
raiden,
next_hop,
target_address,
from_token_transfer,
to_token,
to_amount):
""" Start the swap by sending the first mediated transfer to the
taker and wait for a mediated transfer of the exchanged token.
This method will validate the messages received, discard the invalid
ones, and wait until a valid state is reached. The valid state is
reached when a mediated transfer for `to_token` with `to_amount` tokens
and a SecretRequest from the taker are received.
Returns:
None: when the timeout was reached.
MediatedTransfer: when a valid state is reached.
RefundTransfer: when an invalid state is reached by
our partner.
"""
# pylint: disable=too-many-arguments
# a valid state must have a secret request from the maker and a valid
# mediated transfer for the new token
received_secretrequest = False
mediated_transfer = None
response_iterator = self._send_and_wait_time(
raiden,
from_token_transfer.recipient,
from_token_transfer,
raiden.config['msg_timeout'],
)
for response in response_iterator:
transfer_is_valid_mediated_transfer = (
isinstance(response, MediatedTransfer) and
response.token == to_token and
# we need a lower expiration because:
# - otherwise the previous node is not operating correctly
# - we assume that received mediated transfer has a smaller
# expiration to properly call close on edge cases
response.lock.expiration <= from_token_transfer.lock.expiration
)
if response is None:
if log.isEnabledFor(logging.DEBUG):
log.debug(
'MAKER SWAP TIMED OUT',
hashlock=pex(from_token_transfer.lock.hashlock),
)
return None
# The MediatedTransfer might be from `next_hop` or most likely from
# a different node.
if transfer_is_valid_mediated_transfer:
if response.lock.amount == to_amount:
mediated_transfer = response
elif isinstance(response, SecretRequest) and response.sender == target_address:
received_secretrequest = True
elif isinstance(response, RefundTransfer) and response.sender == next_hop:
return response
# The other participant must not use a direct transfer to finish
# the token swap, ignore it
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message ignoring. %s',
repr(response),
)
if mediated_transfer and received_secretrequest:
return mediated_transfer
return None
class TakerTokenSwapTask(BaseMediatedTransferTask):
""" Taker task, responsible to receive a MediatedTransfer for the
from_transfer and forward a to_transfer with the same hashlock.
"""
def __init__(
self,
raiden,
tokenswap,
from_mediated_transfer):
super(TakerTokenSwapTask, self).__init__()
self.raiden = raiden
self.from_mediated_transfer = from_mediated_transfer
self.tokenswap = tokenswap
def __repr__(self):
return '<{} {} from_token:{} to_token:{}>'.format(
self.__class__.__name__,
pex(self.raiden.address),
pex(self.from_mediated_transfer.token),
pex(self.tokenswap.to_token),
)
def _run(self): # pylint: disable=method-hidden,too-many-locals
fee = 0
raiden = self.raiden
tokenswap = self.tokenswap
# this is the MediatedTransfer that will pay the maker's half of the
# swap, not necessarily from him
maker_paying_transfer = self.from_mediated_transfer
# this is the address of the node that the taker actually has a channel
# with (might or might not be the maker)
maker_payer_hop = maker_paying_transfer.sender
assert tokenswap.identifier == maker_paying_transfer.identifier
assert tokenswap.from_token == maker_paying_transfer.token
assert tokenswap.from_amount == maker_paying_transfer.lock.amount
assert tokenswap.from_nodeaddress == maker_paying_transfer.initiator
maker_receiving_token = tokenswap.to_token
to_amount = tokenswap.to_amount
identifier = maker_paying_transfer.identifier
hashlock = maker_paying_transfer.lock.hashlock
maker_address = maker_paying_transfer.initiator
taker_receiving_token = maker_paying_transfer.token
taker_paying_token = maker_receiving_token
from_graph = raiden.token_to_channelgraph[taker_receiving_token]
from_channel = from_graph.partneraddress_to_channel[maker_payer_hop]
to_graph = raiden.token_to_channelgraph[maker_receiving_token]
# update the channel's distributable and merkle tree
from_channel.register_transfer(
raiden.get_block_number(),
maker_paying_transfer,
)
# register the task to receive Refund/Secret/RevealSecret messages
raiden.greenlet_task_dispatcher.register_task(self, hashlock)
raiden.register_channel_for_hashlock(taker_receiving_token, from_channel, hashlock)
# send to the maker a secret request informing how much the taker will
# be _paid_, this is used to inform the maker that his part of the
# mediated transfer is okay
secret_request = SecretRequest(
identifier,
maker_paying_transfer.lock.hashlock,
maker_paying_transfer.lock.amount,
)
raiden.sign(secret_request)
raiden.send_async(maker_address, secret_request)
lock_expiration = maker_paying_transfer.lock.expiration - raiden.config['reveal_timeout']
# Note: taker may only try different routes if a RefundTransfer is
# received, because the maker is the node controlling the secret
available_routes = get_best_routes(
to_graph,
raiden.protocol.nodeaddresses_networkstatuses,
raiden.address,
maker_address,
maker_paying_transfer.lock.amount,
previous_address=None,
)
if not available_routes:
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'TAKER TOKEN SWAP FAILED, NO ROUTES',
from_=pex(node_address),
to=pex(maker_address),
)
return
first_transfer = None
for route in available_routes:
taker_paying_channel = to_graph.get_channel_by_contract_address(
route.channel_address,
)
taker_paying_hop = route.node_address
if log.isEnabledFor(logging.DEBUG):
log.debug(
'TAKER TOKEN SWAP',
from_=pex(maker_paying_transfer.target),
to=pex(maker_address),
msghash=pex(maker_paying_transfer.hash),
hashlock=pex(hashlock),
)
# make a paying MediatedTransfer with same hashlock/identifier and the
# taker's paying token/amount
taker_paying_transfer = taker_paying_channel.create_mediatedtransfer(
raiden.address,
maker_address,
fee,
to_amount,
identifier,
lock_expiration,
hashlock,
)
raiden.sign(taker_paying_transfer)
taker_paying_channel.register_transfer(
raiden.get_block_number(),
taker_paying_transfer,
)
if not first_transfer:
first_transfer = taker_paying_transfer
if log.isEnabledFor(logging.DEBUG):
log.debug(
'EXCHANGE TRANSFER NEW PATH',
path=lpex(taker_paying_hop),
hashlock=pex(hashlock),
)
# register the task to receive Refund/Secret/RevealSecret messages
raiden.register_channel_for_hashlock(
maker_receiving_token,
taker_paying_channel,
hashlock,
)
response, secret = self.send_and_wait_valid(
raiden,
taker_paying_transfer,
maker_payer_hop,
)
# only refunds for `maker_receiving_token` must be considered
# (check send_and_wait_valid)
if isinstance(response, RefundTransfer):
if response.lock.amount != taker_paying_transfer.amount:
log.info(
'Partner %s sent an invalid refund message with an invalid amount',
pex(taker_paying_hop),
)
raiden.greenlet_task_dispatcher.unregister_task(self, hashlock, False)
return
else:
taker_paying_channel.register_transfer(
raiden.get_block_number(),
response,
)
elif isinstance(response, RevealSecret):
# the secret was registered by the message handler
# wait for the taker_paying_hop to reveal the secret prior to
# unlocking locally
if response.sender != taker_paying_hop:
response = self.wait_reveal_secret(
raiden,
taker_paying_hop,
taker_paying_transfer.lock.expiration,
)
# unlock and send the Secret message
raiden.handle_secret(
identifier,
taker_paying_token,
response.secret,
None,
hashlock,
)
# if the secret arrived early, withdraw it, otherwise send the
# RevealSecret forward in the maker-path
if secret:
raiden.handle_secret(
identifier,
taker_receiving_token,
response.secret,
secret,
hashlock,
)
# wait for the withdraw in case it did not happen yet
self._wait_for_unlock_or_close(
raiden,
from_graph,
from_channel,
maker_paying_transfer,
)
return
# the lock expired
else:
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'TAKER TOKEN SWAP FAILED',
from_=pex(node_address),
to=pex(maker_address),
)
self.async_result.set(False)
return
# no route is available, wait for the sent mediated transfer to expire
self._wait_expiration(raiden, first_transfer)
if log.isEnabledFor(logging.DEBUG):
node_address = raiden.address
log.debug(
'TAKER TOKEN SWAP FAILED',
from_=pex(node_address),
to=pex(maker_address),
)
self.async_result.set(False)
def send_and_wait_valid(self, raiden, mediated_transfer, maker_payer_hop):
""" Start the second half of the exchange and wait for the SecretReveal
for it.
This will send the taker mediated transfer with the maker as a target,
once the maker receives the transfer he is expected to send a
RevealSecret backwards.
"""
# the taker cannot discard the transfer since the secret is controlled
# by another node (the maker), so we have no option but to wait for a
# valid response until the lock expires
response_iterator = self._send_and_wait_block(
raiden,
mediated_transfer.recipient,
mediated_transfer,
mediated_transfer.lock.expiration,
)
# Usually the RevealSecret for the MediatedTransfer from this node to
# the maker should arrive first, but depending on the number of hops
# and if the maker-path is optimistically revealing the Secret, then
# the Secret message might arrive first.
secret = None
for response in response_iterator:
valid_reveal = (
isinstance(response, RevealSecret) and
response.hashlock == mediated_transfer.lock.hashlock and
response.sender == maker_payer_hop
)
valid_refund = (
isinstance(response, RefundTransfer) and
response.sender == maker_payer_hop and
response.lock.amount == mediated_transfer.lock.amount and
response.lock.expiration <= mediated_transfer.lock.expiration and
response.token == mediated_transfer.token
)
if response is None:
log.error(
'TAKER SWAP TIMED OUT',
node=pex(raiden.address),
hashlock=pex(mediated_transfer.lock.hashlock),
)
return (response, secret)
elif isinstance(response, Secret):
if sha3(response.secret) != mediated_transfer.lock.hashlock:
log.error("Secret doesn't match the hashlock, ignoring.")
continue
secret = response
elif valid_reveal:
return (response, secret)
elif valid_refund:
return (response, secret)
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message [%s] supplied to the task, ignoring.',
repr(response),
)
return (None, secret)
def wait_reveal_secret(self, raiden, taker_paying_hop, expiration_block):
for response in self._messages_until_block(raiden, expiration_block):
if isinstance(response, RevealSecret) and response.sender == taker_paying_hop:
return response
elif log.isEnabledFor(logging.ERROR):
log.error(
'Invalid message [%s] supplied to the task, ignoring.',
repr(response),
)
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Iterator, List, Optional, Union
import torch
from pytorch_lightning import loops # import as loops to avoid circular imports
from pytorch_lightning.loops.batch import TrainingBatchLoop
from pytorch_lightning.loops.utilities import _prepare_dataloader_iter
from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection
from pytorch_lightning.trainer.progress import Progress, SchedulerProgress
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.types import STEP_OUTPUT
class TrainingEpochLoop(loops.Loop):
"""
Runs over all batches in a dataloader (one epoch).
Args:
min_steps: The minimum number of steps (batches) to process
max_steps: The maximum number of steps (batches) to process
"""
def __init__(self, min_steps: int, max_steps: int):
super().__init__()
self.min_steps: int = min_steps
if max_steps and max_steps < -1:
raise MisconfigurationException(f"`max_steps` must be a positive integer or -1. You passed in {max_steps}.")
self.max_steps: int = max_steps
self.global_step: int = 0
# manually tracking which is the last batch is necessary for iterable dataset support
self.is_last_batch: Optional[bool] = None
self.batch_progress = Progress()
self.scheduler_progress = SchedulerProgress()
self.batch_loop: Optional[TrainingBatchLoop] = None
self.val_loop: Optional["loops.EvaluationLoop"] = None
self._results = ResultCollection(training=True)
self._epoch_output: Optional[List[List[STEP_OUTPUT]]] = None
@property
def total_batch_idx(self) -> int:
"""Returns the current batch index (across epochs)"""
# use `ready` instead of `completed` in case this is accessed after `completed` has been increased
# but before the next `ready` increase
return self.batch_progress.total.ready - 1
@property
def batch_idx(self) -> int:
"""Returns the current batch index (within this epoch)"""
# use `ready` instead of `completed` in case this is accessed after `completed` has been increased
# but before the next `ready` increase
return self.batch_progress.current.ready - 1
@property
def done(self) -> bool:
"""Returns whether the training should be stopped.
The criteria are that the number of steps reached the max steps,
the last batch is reached or the trainer signals to stop (e.g. by early stopping).
"""
max_steps_reached = self.max_steps is not None and self.global_step >= self.max_steps
return max_steps_reached or self.trainer.should_stop or self._num_training_batches_reached(self.is_last_batch)
def connect(
self,
batch_loop: TrainingBatchLoop = None,
val_loop: Optional["loops.EvaluationLoop"] = None,
) -> None:
"""Optionally connect a custom batch or validation loop to this training epoch loop."""
if batch_loop is not None:
self.batch_loop = batch_loop
if val_loop is not None:
self.val_loop = val_loop
def reset(self) -> None:
"""Resets the internal state of the loop for a new run"""
self.is_last_batch = False
# track epoch output
self._epoch_output = [[] for _ in range(self.batch_loop.num_active_optimizers(self.total_batch_idx))]
if not self.restarting:
self.batch_progress.current.reset()
self.scheduler_progress.current.reset()
self.batch_loop.optimizer_loop.optim_progress.reset_on_epoch()
def on_run_start(self, dataloader_iter: Iterator, **kwargs: Any) -> None:
# hook
self.trainer.logger_connector.on_epoch_start()
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
self.trainer.fit_loop.epoch_progress.increment_started()
self.dataloader_iter = _prepare_dataloader_iter(dataloader_iter, self.batch_idx + 1)
def advance(self, *args: Any, **kwargs: Any) -> None:
"""Runs a single training batch.
Args:
dataloader_iter: the iterator over the dataloader producing the new batch
Raises:
StopIteration: When the epoch is canceled by the user returning -1
"""
batch_idx, (batch, is_last) = next(self.dataloader_iter)
if not self.trainer.data_connector.train_data_fetcher.store_on_device:
with self.trainer.profiler.profile("training_batch_to_device"):
batch = self.trainer.accelerator.batch_to_device(batch)
self.batch_progress.increment_ready()
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.batch_loop.run(batch, batch_idx)
self.batch_progress.increment_processed()
self.is_last_batch = is_last
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
raise StopIteration
# update non-plateau LR schedulers
# update epoch-interval ones only when we are at the end of training epoch
self.update_lr_schedulers("step", update_plateau_schedulers=False)
if self._num_training_batches_reached(is_last):
self.update_lr_schedulers("epoch", update_plateau_schedulers=False)
batch_end_outputs = [opt_idx_out for opt_idx_out in batch_output.training_step_output if len(opt_idx_out)]
processed_batch_end_outputs = self._prepare_outputs(batch_end_outputs, batch_mode=True)
# hook
self.trainer.call_hook("on_train_batch_end", processed_batch_end_outputs, batch, self.batch_idx, 0)
self.trainer.call_hook("on_batch_end")
self.trainer.logger_connector.on_batch_end()
self.batch_progress.increment_completed()
# figure out what to track for epoch end
self._track_epoch_end_reduce_metrics(self._epoch_output, batch_end_outputs)
# -----------------------------------------
# SAVE METRICS TO LOGGERS AND PROGRESS_BAR
# -----------------------------------------
self.trainer.logger_connector.update_train_step_metrics()
def on_advance_end(self):
"""Runs validation and Checkpointing if necessary.
Raises:
StopIteration: if :attr:`done` evaluates to ``True`` to finish this epoch
"""
# -----------------------------------------
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
# -----------------------------------------
should_check_val = self._should_check_val_fx(self.batch_idx, self.is_last_batch)
if should_check_val:
self.trainer.validating = True
self._run_validation()
self.trainer.training = True
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self._save_loggers_on_train_batch_end()
# update plateau LR scheduler after metrics are logged
self.update_lr_schedulers("step", update_plateau_schedulers=True)
# progress global step according to grads progress
self._increment_accumulated_grad_global_step()
def on_run_end(self) -> List[List[STEP_OUTPUT]]:
"""Calls the on_epoch_end hook.
Returns:
The output of each training step for each optimizer
Raises:
MisconfigurationException: if ``training_epoch_end`` returns anything other than ``None``
"""
if self.batch_progress.current.ready == 0:
# dataloader/iterator did not produce a batch
return
# inform logger the batch loop has finished
self.trainer.logger_connector.epoch_end_reached()
# prepare epoch output
processed_outputs = self._prepare_outputs(self._epoch_output, batch_mode=False)
# get the model and call model.training_epoch_end
model = self.trainer.lightning_module
if is_overridden("training_epoch_end", model):
# run training_epoch_end
# refresh the result for custom logging at the epoch level
model._current_fx_name = "training_epoch_end"
# lightningmodule hook
training_epoch_end_output = model.training_epoch_end(processed_outputs)
if training_epoch_end_output is not None:
raise MisconfigurationException(
"training_epoch_end expects a return of None. "
"HINT: remove the return statement in training_epoch_end"
)
self.trainer.fit_loop.epoch_progress.increment_processed()
# call train epoch end hooks
self.trainer.call_hook("on_train_epoch_end")
self.trainer.call_hook("on_epoch_end")
self.trainer.logger_connector.on_epoch_end()
if self._num_training_batches_reached(self.is_last_batch):
self.update_lr_schedulers("epoch", update_plateau_schedulers=True)
epoch_output = self._epoch_output
# free memory
self._epoch_output = None
return epoch_output
def teardown(self) -> None:
self._results.cpu()
self.batch_loop.teardown()
self.val_loop.teardown()
def _run_validation(self):
# reload dataloaders
self.val_loop.reload_evaluation_dataloaders()
with torch.no_grad():
self.val_loop.run()
def _accumulated_batches_reached(self) -> bool:
"""Determine if accumulation will be finished by the end of the current batch."""
return self.batch_progress.current.ready % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch: bool = False) -> bool:
"""Checks if we are in the last batch or if there are more batches to follow.
Args:
is_last_batch: Whether the current batch is the last one
"""
return self.batch_progress.current.ready == self.trainer.num_training_batches or is_last_batch
def _should_accumulate(self) -> bool:
"""Checks if the optimizer step should be performed or gradients should be accumulated for the current step."""
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
def _track_epoch_end_reduce_metrics(
self, epoch_output: List[List[STEP_OUTPUT]], batch_end_outputs: STEP_OUTPUT
) -> None:
"""Adds the batch outputs to the epoch outputs and prepares reduction"""
hook_overridden = is_overridden("training_epoch_end", self.trainer.lightning_module)
if not hook_overridden:
return
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1:
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
@staticmethod
def _prepare_outputs(
outputs: List[List[List["ResultCollection"]]], batch_mode: bool
) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:
"""
Extract required information from batch or epoch end results.
Args:
outputs: A 3-dimensional list of ``ResultCollection`` objects with dimensions:
``[optimizer outs][batch outs][tbptt steps]``.
batch_mode: If True, ignore the batch output dimension.
Returns:
The cleaned outputs with ``ResultCollection`` objects converted to dictionaries.
All list dimensions of size one will be collapsed.
"""
processed_outputs = []
for opt_outputs in outputs:
# handle an edge case where an optimizer output is the empty list
if len(opt_outputs) == 0:
continue
processed_batch_outputs = []
if batch_mode:
opt_outputs = [opt_outputs]
for batch_outputs in opt_outputs:
processed_tbptt_outputs = []
if isinstance(batch_outputs, ResultCollection):
batch_outputs = [batch_outputs]
for tbptt_output in batch_outputs:
out = {}
if tbptt_output.minimize is not None:
out["loss"] = tbptt_output.minimize.detach()
out.update(tbptt_output.extra)
processed_tbptt_outputs.append(out)
# if there was only one tbptt step then we can collapse that dimension
if len(processed_tbptt_outputs) == 1:
processed_tbptt_outputs = processed_tbptt_outputs[0]
processed_batch_outputs.append(processed_tbptt_outputs)
# batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer
if batch_mode:
processed_batch_outputs = processed_batch_outputs[0]
processed_outputs.append(processed_batch_outputs)
# if there is only one optimiser then we collapse that dimension
if len(processed_outputs) == 1:
processed_outputs = processed_outputs[0]
return processed_outputs
def update_lr_schedulers(self, interval: str, update_plateau_schedulers: bool) -> None:
"""updates the lr schedulers based on the given interval"""
if interval == "step" and self._should_accumulate():
return
self.trainer.optimizer_connector.update_learning_rates(
interval=interval,
update_plateau_schedulers=update_plateau_schedulers,
opt_indices=[opt_idx for opt_idx, _ in self.batch_loop.get_active_optimizers(self.total_batch_idx)],
)
def _increment_accumulated_grad_global_step(self) -> None:
"""Increments global step according to grads progress"""
if not self._should_accumulate():
self.global_step = self.trainer.accelerator.update_global_step(
self.batch_progress.current.ready, self.trainer.global_step
)
def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool:
"""Decide if we should run validation."""
if not self.trainer.enable_validation:
return False
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
if not is_val_check_epoch:
return False
# val_check_batch is inf for iterable datasets with no length defined
is_infinite_dataset = self.trainer.val_check_batch == float("inf")
if is_last_batch and is_infinite_dataset:
return True
if self.trainer.should_stop:
return True
# TODO(@awaelchli): let training/eval loop handle logic around limit_*_batches and val_check_batch
is_val_check_batch = is_last_batch
if isinstance(self.trainer.limit_train_batches, int) and is_infinite_dataset:
is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0
elif self.trainer.val_check_batch != float("inf"):
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
return is_val_check_batch
def _save_loggers_on_train_batch_end(self) -> None:
"""Flushes loggers to disk"""
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
#!/usr/bin/env python
import numpy as np
import math
def read_ASF(filename):
'''Read an ASF file from disk. Returns a bone dict.'''
return decode_ASF(parse_ASF(open(filename,'r').readlines()))
def parse_ASF(asfLines):
'''Parse an ASF file into a dict of str:one of (str,list of dict(str:str),dict(str:str)).'''
asfDict = {':comment':''}
asfLines = map(str.strip, asfLines)
while len(asfLines):
line = asfLines.pop(0)
if line == '': continue # ignore blank lines
if line.startswith('#'): asfDict[':comment']+=line+'\n'; continue # store comments
assert(line.startswith(':'))
line = line.replace('\t', ' ')
fw,_,rw = line.partition(' ')
if rw != '': asfDict[fw] = rw; continue # inline string
content = []
while len(asfLines) and not asfLines[0].startswith(':'):
line = asfLines.pop(0)
if line != '' and not line.startswith('#'): # ignore comments and blank lines (shouldn't really be here)
content.append(line.partition(' '))
if len(content) and content[0][0] == 'begin': # a list
data,datum = [],{}
while len(content):
if content[0][0].startswith('(') and k == 'limits': # hack to support limits which are a list of (min,max) pairs which can span multiple lines
v1,_,v2 = content.pop(0)
datum[k] = datum[k]+v1+' '+v2
continue
k,_,v = content.pop(0)
if k == 'limits(': k,v = 'limits','('+v
if k == 'end' : data.append(datum)
elif k == 'begin': datum = {}
else: assert(not datum.has_key(k)); datum[k] = v
asfDict[fw] = data
else: # a dict
datum = {}
while len(content):
k,_,v = content.pop(0)
assert(not datum.has_key(k))
datum[k] = v
asfDict[fw] = datum
return asfDict
def asfR(a, angleScale):
x,y,z,order = a['axis'].lower().split()
R = np.eye(3,dtype=np.float32)
for v,c in [(z,order[2]),(y,order[1]),(x,order[0])]: # rotations multiply on the right in reverse order
cv,sv=np.cos(float(v) * angleScale),np.sin(float(v) * angleScale)
if c == 'x': R[:,1],R[:,2] = R[:,1]*cv+R[:,2]*sv, R[:,2]*cv-R[:,1]*sv
elif c == 'y': R[:,2],R[:,0] = R[:,2]*cv+R[:,0]*sv, R[:,0]*cv-R[:,2]*sv
elif c == 'z': R[:,0],R[:,1] = R[:,0]*cv+R[:,1]*sv, R[:,1]*cv-R[:,0]*sv
return R
def asfDofs(a):
if not a.has_key('dof'): return ['','']
dofs = a['dof'].lower().split()
return [''.join([t[1] for t in dofs if t.startswith('t')]),''.join([r[1] for r in dofs if r.startswith('r')])]
def asfDofNames(a):
return ['t'+x for x in a[0]]+['r'+x for x in a[1]]
def asfT(a, lengthScale):
x,y,z = map(float,a['direction'].split())
l = float(a['length'])*lengthScale
return [x*l,y*l,z*l]
def decode_ASF(asfDict, lengthScale = 25.4): # mm output, assuming inches input
'''Decode a parsed ASF dict into a dictionary of sensible arrays with known units.'''
# TODO, bones may have (min,max) limits; probably should decode this
# TODO, other versions of asf?
assert(asfDict[':version']=='1.10')
angleScale = {'deg':np.radians(1),'rad':1.0}[asfDict[':units']['angle']]
lengthScale /= float(asfDict[':units']['length'])
assert(asfDict[':units']['angle'] == 'deg')
name = asfDict[':name']
asf_boneData = list(asfDict[':bonedata']) # make a copy because we will insert a root node for simplicity
asf_root = asfDict[':root']
ori = asf_root['orientation'].split()
axi = asf_root['axis'].split()
if len(axi) == 4 and len(ori) == 3: assert(ori == ['0','0','0']) # weird case; if the orientation is put in the axis then ignore the actual orientation (presumably it's zero...)
else: axi = ori + axi
asf_boneData.insert(0,{'id':'0','name':'root','direction':asf_root['position'],'length':'1',\
'axis':' '.join(axi),'dof':asf_root['order']})
asf_hierarchy = asfDict[':hierarchy']
numBones = len(asf_boneData)
boneNames = [a['name'] for a in asf_boneData]
# is 'id' really a compulsory field? not used for anything, not even in amc where it might have reduced the file size
#boneIds = [a['id'] for a in asf_boneData]
boneTs = [asfT(a, lengthScale) for a in asf_boneData]
boneRs = [asfR(a, angleScale) for a in asf_boneData]
boneDofs = [asfDofs(a) for a in asf_boneData]
dofNames = [n+':'+x for n,a in zip(boneNames,boneDofs) for x in asfDofNames(a)]
boneParents = [-1]*numBones
boneMap = dict([(n, ni) for ni,n in enumerate(boneNames)])
for parent,v in asf_hierarchy[0].iteritems():
pid = boneMap[parent]
for child in v.split():
cid = boneMap[child]
boneParents[cid] = pid
assert(pid < cid) # we can't use this representation unless parents come before children
dofCounts = [len(d[0])+len(d[1]) for d in boneDofs]
asfDofSplits = [sum(dofCounts[:i]) for i in xrange(len(dofCounts)+1)]
dofScales = [s for ss in [[lengthScale]*len(d[0])+[angleScale]*len(d[1]) for d in boneDofs] for s in ss]
numDofs = asfDofSplits[-1]
return { 'name' : str(name),
'numBones' : int(numBones),
'boneNames' : boneNames, # list of strings
#'boneIds' : boneIds, # list of strings
'boneTs' : np.array(boneTs,dtype=np.float32), # bone translation (global axes, but relative to parent)
'boneRs' : np.array(boneRs,dtype=np.float32), # bone orientation (global axes) encoded as 3x3 matrix
'boneDofs' : boneDofs, # list of [string,string]s
'asfDofSplits' : np.array(asfDofSplits,dtype=np.int32),
'boneParents' : np.array(boneParents,dtype=np.int32),
'dofNames' : dofNames, # list of strings
'dofScales' : np.array(dofScales,dtype=np.float32),
'numDofs' : int(numDofs),
'lengthScale' : float(lengthScale),
'angleScale' : float(angleScale) }
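# Illustrative usage sketch (not part of the original module); the filename below
# is a hypothetical placeholder.
def _example_read_asf():
    '''Read and decode an ASF skeleton, assuming an ASF version 1.10 file on disk.'''
    asfDict = read_ASF('subject.asf')  # hypothetical path
    # decode_ASF has already been applied by read_ASF; the dict holds flat arrays, e.g.
    #   asfDict['numBones']    : int, bone count (a root bone is inserted at index 0)
    #   asfDict['boneTs']      : (numBones,3) float32 bone offsets in mm
    #   asfDict['boneRs']      : (numBones,3,3) float32 global bone orientations
    #   asfDict['boneParents'] : (numBones,) int32 parent index per bone (-1 for the root)
    return asfDict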
def asfDict_to_skelDict(asfDict):
name = asfDict['name']
numBones = asfDict['numBones']
boneNames = asfDict['boneNames']
boneDofs = asfDict['boneDofs']
boneParents = asfDict['boneParents']
dofNames = asfDict['dofNames']
#dofScales = asfDict['dofScales']
numDofs = asfDict['numDofs']
#lengthScale = asfDict['lengthScale']
#angleScale = asfDict['angleScale']
# convert the ASF data into our skeleton data format
jointChans = [[ord(tc)-ord('x') for tc in t]+[ord(rc)-ord('x')+3 for rc in r] for t,r in boneDofs]
jointChans = np.array([x for y in jointChans for x in y],dtype=np.int32) # flatten
jointChanSplits = [x for y in [[len(t),len(r)] for t,r in boneDofs] for x in y]
jointChanSplits = np.array([sum(jointChanSplits[:ji]) for ji in xrange(len(jointChanSplits)+1)],dtype=np.int32)
Gs, Ls, Bs = boneMatrices(asfDict)
jointIndex = {}
for ji,jn in enumerate(boneNames): jointIndex[jn] = ji
return { 'name' : str(name),
'numJoints' : int(numBones),
'jointNames' : boneNames, # list of strings
'jointIndex' : jointIndex, # dict of string:int
'jointParents' : np.array(boneParents,dtype=np.int32),
'jointChans' : np.array(jointChans,dtype=np.int32), # 0 to 5 : tx,ty,tz,rx,ry,rz
'jointChanSplits': np.array(jointChanSplits,dtype=np.int32),
'chanNames' : dofNames, # list of strings
'chanValues' : np.zeros(numDofs,dtype=np.float32),
'numChans' : int(numDofs),
'Bs' : np.array(Bs, dtype=np.float32),
'Ls' : np.array(Ls, dtype=np.float32),
'Gs' : np.array(Gs, dtype=np.float32)
}
def addTrunnions(asfDict):
'''Add some strategic no-dof bones that will reveal the orientation of joints.
These have the additional benefit of making it possible to solve dofs from only joint positions.'''
# TODO make this work on skelDict
numBones = asfDict['numBones']
boneParents = asfDict['boneParents']
boneTs = asfDict['boneTs']
boneRs = asfDict['boneRs']
boneDofs = asfDict['boneDofs']
boneNames = asfDict['boneNames']
addBoneParents, addBoneTs, addBoneRs = [],[],[]
# immutable data, should be passed in
boneChildren = [np.where(boneParents == bi)[0] for bi in xrange(numBones)]
numAdded = 0
# add some end-of-bones where they would be lost.
boneEnd = [len(bcs)==0 and len(dofs[0])+len(dofs[1]) != 0 for bcs,dofs in zip(boneChildren,boneDofs)]
# add end-of-bones where the child allows translation
for pi,dofs in zip(boneParents, boneDofs):
if pi != -1 and len(dofs[0]) != 0: boneEnd[pi] = True
for bi in range(numBones):
if boneEnd[bi]:
addBoneParents.append(bi)
addBoneTs.append([0,0,0])
addBoneRs.append(boneRs[bi])
boneDofs.append(['',''])
boneNames.append(boneNames[bi]+'_bone_end')
numAdded += 1
for bi,(dofs,bcs,bn,bT,bR) in enumerate(zip(boneDofs, boneChildren, boneNames, boneTs, boneRs)):
numRotDofs = len(dofs[1])
numChildren = len(bcs)
bRT = np.abs(np.dot(bR.T, bT)) # closer to zero is more perpendicular; closer to norm(bT) is more parallel
if numRotDofs == 0: continue # zero-rotation dofs can be resolved
if numChildren > 1: continue # if more than 1 child then assume all angles can be resolved
if numChildren == 1 and numRotDofs == 1: # 1-rot with child can be resolved, unless the rotation is on-axis
if bRT[int(ord(dofs[1])-ord('x'))] <= 1.0: continue
#if (numChildren == 1 and numRotDofs <= 2): continue # of no-children joints, only 3+rots earn a trunnion
axis = np.argmin(bRT)
boneT = bR[:,axis]*30. - bT # 30mm is good; shorter trunnions have larger errors
boneName = bn+'_trunnion_'+'xyz'[axis]
addBoneParents.append(bi)
addBoneTs.append(boneT)
addBoneRs.append(bR)
boneDofs.append(['',''])
boneNames.append(boneName)
numAdded += 1
asfDict['boneParents'] = np.concatenate((boneParents,np.array(addBoneParents,dtype=np.int32)))
asfDict['boneTs'] = np.concatenate((asfDict['boneTs'], np.array(addBoneTs,dtype=np.float32).reshape(-1,3)))
asfDict['boneRs'] = np.concatenate((asfDict['boneRs'], np.array(addBoneRs,dtype=np.float32).reshape(-1,3,3)))
asfDict['asfDofSplits'] = np.concatenate((asfDict['asfDofSplits'],np.array([asfDict['asfDofSplits'][-1]]*numAdded,dtype=np.int32)))
asfDict['numBones'] = numBones + numAdded
print 'numAdded',numAdded
return asfDict
def boneMatrices(asfDict):
'''Generate Global and Local matrices from a bone_dict. The Local matrices, together with the
dofs and parents, functionally describe the skeleton.'''
boneParents = asfDict['boneParents']
numBones = len(boneParents)
Gs = np.zeros((numBones,3,4),dtype=np.float32) # GLOBAL mats
Ls = np.zeros((numBones,3,4),dtype=np.float32) # LOCAL mats
Bs = np.zeros((numBones,3),dtype=np.float32) # BONES
# boneRs are in global coordinate system, so they form the rotations of the bone.
Gs[:,:,:3] = asfDict['boneRs']
# boneTs are in global coordinate system, being measured relative to their parent
Bs[:] = asfDict['boneTs']
Gs[:,:,3] = Bs[:]
# turn the delta translations into global translations by accumulating the parent offset from the root
for bi,pi in enumerate(boneParents):
if pi != -1: Gs[bi,:,3] += Gs[pi,:,3]
# in our joint representation, the transform of the bone translation happens on the child
for bi in xrange(numBones-1,-1,-1):
pi = boneParents[bi]
if pi != -1: Gs[bi,:,3] = Gs[pi,:,3]
# now solve for the local matrices using the equation Gs_bi = Gs_pi * Ls_bi
for bi,pi in enumerate(boneParents):
if pi == -1:
Bs[bi] = 0
Ls[bi,:,:] = Gs[bi,:,:]
else:
Gs_pi_T = Gs[pi,:,:3].T
Bs[bi] = np.dot(Gs[bi,:,:3].T, Bs[bi])
Ls[bi,:,:3] = np.dot(Gs_pi_T, Gs[bi,:,:3])
Ls[bi,:,3] = np.dot(Gs_pi_T, Gs[bi,:,3] - Gs[pi,:,3])
Bs[np.where(Bs*Bs<0.01)] = 0 # just tidy up a bit by zeroing any tiny offsets < 0.1mm
return Gs, Ls, Bs
def read_AMC(filename, asfDict):
'''Read an AMC file from disk. Returns a dictionary of extracted data from the file.'''
return parse_AMC(open(filename,'r').readlines(), asfDict)
def parse_AMC(amc, asfDict):
if asfDict.has_key('asfDofSplits'): # this is an asfDict
boneNames = asfDict['boneNames']
asfDofSplits = asfDict['asfDofSplits']
dofScales = asfDict['dofScales']
numDofs = asfDict['numDofs']
else: # this is a skelDict (our internal format)
boneNames = asfDict['jointNames']
asfDofSplits = asfDict['jointChanSplits']
lengthScale,angleScale = 25.4,np.radians(1)
dofScales = [s for ss in [[lengthScale]*(b-a)+[angleScale]*(c-b) for a,b,c in zip(asfDofSplits[0:-1:2],asfDofSplits[1::2],asfDofSplits[2::2])] for s in ss]
asfDofSplits = asfDofSplits[::2]
numDofs = asfDict['numChans']
amc.append('eof') # append a dummy single-word string to mark the end
amc = [a.split() for a in amc if len(a) and a[0] not in '#:']
frameStarts = np.where(np.array(map(len, amc)) == 1)[0] # single-word lines
numFrames = len(frameStarts)-1
frameNumbers = np.zeros(numFrames,dtype=np.int32)
dofData = np.zeros((numFrames,numDofs), dtype=np.float32)
dofMap = dict([(name, dofData[:,asfDofSplits[ni]:asfDofSplits[ni+1]]) for ni,name in enumerate(boneNames)])
for fi,(f0,f1) in enumerate(zip(frameStarts[:-1],frameStarts[1:])):
frameNumbers[fi] = int(amc[f0][0]) # preserve the original numbering of the frameNumbers
for line in amc[f0+1:f1]:
tmp = map(float,line[1:])
dofMap[line[0]][fi,:len(tmp)] = tmp
dofData *= dofScales
return { 'dofData' : dofData,
'frameNumbers' : frameNumbers }
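# Illustrative usage sketch (not part of the original module); the filenames below
# are hypothetical placeholders.
def _example_read_amc():
    '''Read a motion file against the skeleton it was captured with.'''
    asfDict = read_ASF('subject.asf')            # hypothetical path
    animDict = read_AMC('trial01.amc', asfDict)  # hypothetical path
    # animDict['dofData'] is a (numFrames, numDofs) float32 array, already scaled to
    # mm / radians; animDict['frameNumbers'] preserves the original AMC frame numbers.
    return animDict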
def pose_skeleton(Gs, Ls, boneParents, boneDofs, dofSplits, dofValues):
'''Fill in the Gs from the dofValues and skeleton definition.'''
assert(dofValues.shape == (dofSplits[-1],))
for Gs_bi,Ls_bi,pi,(tchans,rchans),di in zip(Gs, Ls, boneParents, boneDofs, dofSplits):
nt,nr = len(tchans),len(rchans)
if pi == -1: Gs_pi = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]],dtype=np.float32)
else : Gs_pi = Gs[pi]
#Gs_bi = Gs_pi * Ls[bi] * Dof0 * Dof1 * ...
np.dot(Gs_pi[:,:3], Ls_bi, out=Gs_bi)
Gs_bi[:,3] += Gs_pi[:,3]
if nt: # translation DOFs
for c,v in zip(tchans, dofValues[di:di+nt]):
Gs_bi[:,3] += Gs_pi[:,ord(c)-ord('x')] * v
di += nt
if nr: # rotation DOFs
Gs_bi[:,:3] = np.dot(Gs_bi[:,:3], composeR(dofValues[di:di+nr],axes=rchans))
return Gs
def extractSkeletonDofs(Gs, Ls, boneParents, boneDofs, dofSplits, dofValues):
'''Fill in the dofValues and global rotations, given target joint positions.'''
targets = Gs[:,:,3].copy()
# debugging ... let's make sure we don't use uninitialised data!
assert(dofValues.shape == (dofSplits[-1],))
Gs[:,:,:] = float('inf')
dofValues[:] = float('inf')
numBones = len(boneParents)
boneDofCounts = dofSplits[1:] - dofSplits[:-1] # number of dofs per bone
# this "list of bones that are directly driven only by this bone" could be precomputed
# here we remove the influence of child bones with translation dofs, since we added end-of-bones points to deal with that
# we also remove the influence of child bones at the same position, since that could cause numerical problems
boneZeroChildren = [[bi for bi in np.where(boneParents == bi)[0] if boneDofs[bi][0] == '' and not np.all(Ls[bi,:,3]==0)] for bi in xrange(numBones)]
    # for our skeletons, it is good enough to consider only grandchildren; but here all zero-dof descendants are added
for bi in xrange(numBones-1,-1,-1):
pi,bdc = boneParents[bi],boneDofCounts[bi]
if pi != -1 and bdc == 0: boneZeroChildren[pi].extend(boneZeroChildren[bi])
for bi,(tgt_bi,Gs_bi,Ls_bi,pi,(tchans,rchans),di,bzcs) in enumerate(zip(targets,Gs, Ls, boneParents, boneDofs, dofSplits, boneZeroChildren)):
nt,nr = len(tchans),len(rchans)
if pi == -1: Gs_pi = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]],dtype=np.float32)
else : Gs_pi = Gs[pi]
#Gs[bi] = Gs[pi] * Ls[bi] * Dof0 * Dof1 * ...
# assume that Gs_pi is complete for now; TODO it might have an unsolved DOF, which is the rotation around the bone axis.
# we added trunnions to fix this, but it might be possible to generate the trunnions on the fly using cross products of other joints
np.dot(Gs_pi[:,:3], Ls_bi, out=Gs_bi)
Gs_bi[:,3] += Gs_pi[:,3]
if nt: # translation DOFs
for ddi,c in enumerate(tchans,start=di):
dofValues[ddi] = v = np.dot(Gs_pi[:,ord(c)-ord('x')], tgt_bi - Gs_bi[:,3])
Gs_bi[:,3] += Gs_pi[:,ord(c)-ord('x')] * v
di += nt
if nr: # rotation DOFs
numChildren = len(bzcs)
if numChildren == 0: # no way to solve the dofs, so just zero them
dofValues[di:di+nr] = 0
else:
Lt = np.zeros((numChildren, 3),dtype=np.float32)
Rt = np.zeros((numChildren, 3),dtype=np.float32)
for ci,cbi in enumerate(bzcs):
# we need to solve the R matrix from equations like: R * Ls[ck] *...* Ls[cj] * Ls[ci,:,3] = Gs[bi,:,:3].T * (Gs[ci,:,3] - Gs[bi,:,3])
# these equations are the columns of: R Lt.T = Rt.T
Lt[ci,:] = Ls[cbi,:,3]
Rt[ci,:] = np.dot(Gs_bi[:,:3].T, targets[cbi] - Gs_bi[:,3])
pbi = boneParents[cbi]
while pbi != bi:
Lt[ci,:] = np.dot(Ls[pbi,:,:3], Lt[ci,:]) + Ls[pbi,:,3]
pbi = boneParents[pbi]
rv = fitPointsAndDecomposeR(Lt, Rt, axes=rchans)
dofValues[di:di+nr] = rv[:nr]
#if nr == 2: print 'hopefully all zero',nr,rv,rv[nr:]
Gs_bi[:,:3] = np.dot(Gs_bi[:,:3], composeR(dofValues[di:di+nr],axes=rchans))
return Gs, dofValues
def composeR(rs, axes='xyz'):
'''Compose a vector of 3 radians into a 3x3 rotation matrix.
The rotation order is traditional right-to-left 'xyz'=R(z)*R(y)*R(x).
The values should be given in the same order (ie in this example: x,y,z).'''
i = ord(axes[0])-ord('x')
if len(axes) == 1: parity = 1 # single channel
else : parity = (ord(axes[1])-ord(axes[0])+3)
j,k = (i+parity)%3,(i+2*parity)%3
if ((parity%3) == 2): rs = -rs
R = np.zeros((3,3),dtype=np.float32)
if len(rs) == 1:
ci,si = math.cos(rs[0]),math.sin(rs[0])
R[i,i],R[j,i],R[k,i],R[i,j],R[j,j],R[k,j],R[i,k],R[j,k],R[k,k] = 1,0,0,0,ci,si,0,-si,ci
elif len(rs) == 2:
ci,cj,si,sj = math.cos(rs[0]),math.cos(rs[1]),math.sin(rs[0]),math.sin(rs[1])
R[i,i],R[j,i],R[k,i],R[i,j],R[j,j],R[k,j],R[i,k],R[j,k],R[k,k] = cj,0,-sj,si*sj,ci,cj*si,ci*sj,-si,cj*ci
else:
ci,cj,ck = np.cos(rs, dtype=np.float32); si,sj,sk = np.sin(rs, dtype=np.float32)
cc,cs,sc,ss = ci*ck,ci*sk,si*ck,si*sk
R[i,i],R[j,i],R[k,i],R[i,j],R[j,j],R[k,j],R[i,k],R[j,k],R[k,k] = ck*cj,sk*cj,-sj,sc*sj-cs,ss*sj+cc,cj*si,cc*sj+ss,cs*sj-sc,cj*ci
return R
def decomposeR(R, axes='xyz'):
'''Decompose a 3x3 rotation matrix into a vector of 3 radians.
The rotation order is traditional right-to-left 'xyz'=R(z)*R(y)*R(x).
The returned values will be in the order specified.'''
i = ord(axes[0])-ord('x')
if len(axes) == 1: parity = 1 # single channel
else: parity = (ord(axes[1])-ord(axes[0])+3)
j,k = (i+parity)%3,(i+2*parity)%3
cj = math.sqrt(R[i,i]*R[i,i] + R[j,i]*R[j,i])
if cj > 1e-30: ret = np.array([math.atan2(R[k,j],R[k,k]),math.atan2(-R[k,i],cj),math.atan2(R[j,i],R[i,i])],dtype=np.float32)
else: ret = np.array([math.atan2(-R[j,k],R[j,j]),math.atan2(-R[k,i],cj),0.0],dtype=np.float32)
if ((parity%3) == 2): ret = -ret
return ret #[:len(axes)]
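# Illustrative sketch (not part of the original module): composeR and decomposeR are
# inverses for angles away from the gimbal-lock singularity.
def _example_compose_decompose():
    '''Round-trip sketch: angles -> 3x3 matrix -> angles, for a 'zxy' channel order.'''
    rs = np.array([0.1, -0.4, 0.25], dtype=np.float32)
    R = composeR(rs, axes='zxy')
    rs2 = decomposeR(R, axes='zxy')
    return np.allclose(rs, rs2, atol=1e-5)  # expected True for these angles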
def fitPointsAndDecomposeR(A, B, axes='xyz'):
'''Given Nx3 matrices A and B with coordinates of N corresponding points.
Solve R A.T = B.T for rotation matrix R having rotation order and degrees of freedom specified by axes.'''
R = np.dot(B.T, A) # NOT np.dot(B.T, np.linalg.pinv(A.T,rcond=0.0001))
if len(axes) == 1: # special case: minimise in 1D (otherwise the solve is unstable)
i = ord(axes[0])-ord('x')
R[i,:] = R[:,i] = 0
R[i,i] = 1
T = np.linalg.svd(R) # U,S,VT
R = np.dot(T[0],T[2])
if np.linalg.det(R) < 0: T[0][:,2] *= -1; R = np.dot(T[0],T[2])
if len(axes) == 2: # force a 2-parameter estimation of joint angles (potentially better than 3-parameter estimation & zeroing the third value)
# rewrite the matrix as the outer product of (1,sin,cos) vectors, and compose only the first singular value
i,j = ord(axes[0])-ord('x'),ord(axes[1])-ord('x')
k = (2*j+3-i)%3
svd = np.linalg.svd([[1.0,-R[k,i],R[i,i]],[-R[j,k],R[i,j],R[k,j]],[R[j,j],R[i,k],R[k,k]]])
[[_,R[k,i],R[i,i]],[R[j,k],R[i,j],R[k,j]],[R[j,j],R[i,k],R[k,k]]] = np.outer(svd[0][:,0],svd[2][0,:])*svd[1][0]
R[j,i] = 0 # this forces the third value to be 0
R[k,i]*=-1
R[j,k]*=-1
return decomposeR(R, axes)
def fitPoints(A,B, out=None):
'''Given Nx3 matrices A and B with coordinates of N corresponding points.
Solve RT A.T = B.T for rotation-translation matrix [R;T].
R (A - mean(A)).T = (B - mean(B)).T for rotation matrix R.'''
RT = out
if RT is None: RT = np.zeros((3,4),dtype = np.float32)
Bmean,Amean = np.mean(B,axis=0),np.mean(A,axis=0)
R = np.dot((B - Bmean).T, (A - Amean))
S0,S1,S2 = np.linalg.svd(R) # U,S,VT
np.dot(S0,S2,out=R)
if np.linalg.det(R) < 0: S0[:,2] *= -1; np.dot(S0,S2,out=R)
RT[:,:3] = R
RT[:,3] = (Bmean-np.dot(R,Amean))
return RT
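# Illustrative sketch (not part of the original module): fitPoints recovers a known rigid
# transform from noise-free correspondences (orthogonal Procrustes plus translation).
def _example_fit_points():
    '''Apply a known [R;T] to some points with applyRT and recover it with fitPoints.'''
    A = np.array([[0,0,0],[100,0,0],[0,100,0],[0,0,100],[50,50,0]], dtype=np.float32)
    RT_true = np.zeros((3,4), dtype=np.float32)
    RT_true[:,:3] = composeR(np.array([0.2, 0.1, -0.3], dtype=np.float32))
    RT_true[:,3] = [10., -5., 2.]
    B = applyRT(RT_true, A)       # B = R*A + T for every point
    RT_est = fitPoints(A, B)
    return np.allclose(RT_true, RT_est, atol=1e-3)  # expected True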
def makeTriangles(graph):
'''Given a graph of edges (lo,hi), find all the ordered triangles.'''
gdict = {}
for lo,hi in graph: gdict[lo] = []; gdict[hi] = []
for lo,hi in graph: gdict[lo].append(hi)
tris = [[lo,mid,hi] for lo,mids in gdict.iteritems() for mid in mids for hi in gdict[mid]]
return tris
def rigidTriangles(data, threshold = 100.):
'''Given data = numFrames x numVerts x 3 animation data, compute rigid triangles.'''
print data.shape
dm, dd = makeVertsDistanceMatrix(data)
print 'dmdd',dm.shape
graph = makeGraph(dm,dd, threshold)
print 'graph',len(graph), graph[:10]
tris = makeTriangles(graph)
print 'tris',len(tris), tris[:10]
filtTris = []
for t in tris:
D = data[:,t,:] # numFrames x triVerts x 3
D0 = D[0]
dx,dy = D0[1]-D0[0],D0[2]-D0[0]
if np.dot(dx,dy)**2/(np.dot(dx,dx)*np.dot(dy,dy)) > 0.9: continue # weed out too-straight triangles
filtTris.append(t)
print 'filtTris',len(filtTris)
RTs = stabilizeGroups(data, filtTris)
return filtTris, RTs
def stabilizeAssignment(data, assignment):
'''Given data = numFrames x numVerts x 3 animation data and group label per vertex,
compute stabilizing RTs = numGroups x numFrames x 3 x 4 (to the first frame).'''
groups = [np.where(assignment == gi)[0] for gi in xrange(max(assignment)+1)]
return stabilizeGroups(data, groups)
def stabilizeGroups(data, groups):
'''Given data = numFrames x numVerts x 3 animation data and list of groups of vertices,
compute stabilizing RTs = numGroups x numFrames x 3 x 4 (to the first frame).'''
numGroups = len(groups)
numFrames = data.shape[0]
RTs = np.zeros((numGroups,numFrames,3,4), dtype=np.float32)
for group,RT in zip(groups,RTs):
D = data[:,group,:]
for r,d in zip(RT,D): fitPoints(d, D[0], out=r)
return RTs
def assignmentResidual(data, RTs, thresholdDistance):
'''Given data = numFrames x numVerts x 3 animation data and stabilizing RTs = numTris x numFrames x 3 x 4
compute the reconstruction residual for assigning each vertex to each of the triangles.'''
numTris = RTs.shape[0]
numVerts = data.shape[1]
res = np.zeros((numTris,numVerts),dtype=np.float32)
for ti,RT in enumerate(RTs):
alignData = applyRT_list(RT, data)
# calculate the variance of each point
res2 = np.mean(np.sum((alignData[0] - alignData)**2,axis=2,dtype=np.float32),axis=0,dtype=np.float32)
np.clip(res2,0,thresholdDistance,out=res[ti])
return res
def bestTriangle(res, resids):
bestImprovement,bestIndex = 0,-1
for index,res2 in enumerate(resids):
replace = np.where(res > res2)[0]
improvement = np.sum(res[replace] - res2[replace])
if improvement > bestImprovement: bestImprovement,bestIndex = improvement,index
return bestImprovement/len(res),bestIndex
def assignAndStabilize(data, RTs, thresholdDistance):
'''Given data = numFrames x numVerts x 3 animation data and stabilizing RTs = numGroups x numFrames x 3 x 4
assign each data point to one of the triangles and compute the minimum reconstruction residual.
Returns the assignment, the residuals, and the stabilized data points (to the first frame).'''
numVerts = data.shape[1]
res = np.ones(numVerts,dtype=np.float32)*thresholdDistance
assignment = -np.ones(numVerts,dtype=np.int32)
stableData = np.zeros_like(data)
for gi,RT in enumerate(RTs):
alignData = applyRT_list(RT, data)
# calculate the variance of each point
res2 = np.mean(np.sum((alignData[0] - alignData)**2,axis=2,dtype=np.float32),axis=0,dtype=np.float32)
replace = np.where(res2 < res)[0]
res[replace] = res2[replace]
stableData[:,replace,:] = alignData[:,replace,:]
assignment[replace] = gi
return assignment, res, stableData
def unstabilize(stableData, RTs):
'''Given stableData = numGroups x 3 animation data and stabilizing RTs = numGroups x numFrames x 3 x 4
Returns the data = numFrames x numGroups x 3, animated (undoing the stabilizing transform).'''
numGroups = stableData.shape[0]
numFrames = RTs.shape[1]
data = np.zeros((numFrames,numGroups,3),dtype=np.float32)
for gi,(RT,sd) in enumerate(zip(RTs,stableData)):
data[:,gi,:] = unapplyRT_list(RT,sd.reshape(1,-1)).reshape(-1,3)
return data
def invert_matrix_array(RTs):
ret = np.zeros_like(RTs)
ret[:,:3,:3] = np.transpose(RTs[:,:3,:3],(0,2,1))
for rti,rto in zip(RTs,ret):
rto[:,3] = -np.dot(rto[:3,:3],rti[:,3])
return ret
def transform_pair_residual(RT1, RT2):
numFrames = RT1.shape[0]
R1T = np.transpose(RT1[:,:,:3],axes=(0,2,1))
R2T = np.transpose(RT2[:,:,:3],axes=(0,2,1))
T1 = RT1[:,:,3]
T2 = RT2[:,:,3]
A = np.zeros((numFrames*3,3),dtype=np.float32)
B = np.zeros((numFrames*3),dtype=np.float32)
A[:] = (R1T - R2T).reshape(-1,3)
for b,r1,t1,r2,t2 in zip(B.reshape(-1,3),R1T,T1,R2T,T2): b[:] = np.dot(r1,t1)-np.dot(r2,t2)
O,res,_,_ = np.linalg.lstsq(A, B, rcond=0.0001)
    res = np.mean((B-np.dot(A,O))**2) # recompute the mean squared residual directly; lstsq reports a sum of squares (or an empty array)
O = np.dot(RT1[0,:,:3],O)+RT1[0,:,3]
return res,O
def sharedStablePoints(RTs, threshold=float('inf')):
'''Given stabilizing RTs = numGroups x numFrames x 3 x 4, look for pairs of groups (g1,g2) that have a common stable point.
RTs[g1,fi,:,:3] * xi + RTs[g1,fi,:,3] = O
RTs[g2,fi,:,:3] * xi + RTs[g2,fi,:,3] = O
(RTs[g2,fi,:,:3].T - RTs[g1,fi,:,:3].T) . O = RTs[g2,fi,:,:3].T . RTs[g2,fi,:,3] - RTs[g1,fi,:,:3].T . RTs[g1,fi,:,3]
Return a list of group pairs and stable points.'''
numGroups = RTs.shape[0]
ret = []
for (g1,g2) in ((g1,g2) for g1 in xrange(numGroups) for g2 in xrange(numGroups)):
if g1 == g2: continue
res,O = transform_pair_residual(RTs[g1],RTs[g2])
if res < threshold:
ret.append((g1,g2,O,res))
return ret
def groupRepresentatives(data, stabilizedPointToGroup):
'''Given data = numFrames x numPoints x 3 and stabilizedPointToGroup = numPoints (groupIndex),
Choose a representative (central) point in each group.'''
numGroups = max(stabilizedPointToGroup)+1
ret = -np.ones(numGroups,dtype=np.int32)
for gi in xrange(numGroups):
points = np.where(stabilizedPointToGroup == gi)[0]
D = data[0,points,:]
res = np.sum((D - np.mean(D,axis=0))**2,axis=1)
try:
ret[gi] = points[np.argmin(res)]
        except ValueError:
            # empty group: leave the representative index as -1
            continue
return ret
def applyRT(RT, data):
'''RT is 3x4; data is a numVerts x 3'''
return np.dot(data, RT[:,:3].T) + RT[:,3] # numVerts x 3
def applyRT_list(RT, data):
'''RT is numFrames x 3 x 4; data is a numFrames x numVerts x 3'''
ret = np.zeros_like(data)
for o,d,r,t in zip(ret, data, RT[:,:,:3], RT[:,:,3]):
o[:] = np.dot(d,r.T) + t
return ret
def unapplyRT_list(RT, data):
'''RT is numFrames x 3 x 4; data is a numVerts x 3'''
numFrames = RT.shape[0]
numVerts = data.shape[0]
ret = np.zeros((numFrames,numVerts,3),dtype=np.float32)
for o,r,t in zip(ret, RT[:,:,:3], RT[:,:,3]):
o[:] = np.dot(data - t,r)
return ret
def greedyTriangles(data, maxNum = None, triangleThreshold = 500., thresholdDistance = 100.):
'''Given data = numFrames x numVerts x 3 animation data
1) set model M = {}
2) form T = rigidTriangles(data)
3) find the t in T - M that minimises residual(data,M + {t})
4) if the residual reduced by enough: M += {t}, goto 3
where residual(data, model) is a (robust) measure of the total variance of the data, given that each data point must be assigned
to move rigidly with one of the triangles in the model.'''
if maxNum is None: maxNum = data.shape[0]
M = []
numVerts = data.shape[1]
res = np.ones(numVerts,dtype=np.float32)*thresholdDistance
tris,RTs = rigidTriangles(data, threshold=triangleThreshold)
numTris = len(tris)
print 'numTriangles',numTris
resBest = thresholdDistance
resids = assignmentResidual(data,RTs,thresholdDistance)
triIndices = []
remainIndices = list(range(numTris))
for it in range(maxNum):
improvement,bestIndex = bestTriangle(res, resids[remainIndices])
if improvement == 0.0: print '0.0 improvement'; break
ti = remainIndices.pop(bestIndex)
triIndices.append(ti)
res = np.min((res,resids[ti]),axis=0)
print 'it %d/%d minres %2.2fmm' % (it+1,maxNum,np.sqrt(np.mean(res)))
return {'tris':tris,'RTs':RTs,'triIndices':triIndices}
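# Illustrative sketch (not part of the original module): a hypothetical end-to-end use of the
# greedy rigid-body segmentation above, given markerData of shape (numFrames, numVerts, 3):
#   model = greedyTriangles(markerData, maxNum=20)
#   chosenRTs = model['RTs'][model['triIndices']]   # stabilising transforms of the kept triangles
#   assignment, res, stable = assignAndStabilize(markerData, chosenRTs, 100.)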
def makeDistanceMatrix(boneDict, animDict):
'''Make a distance matrix for the joints. Probably we want to do this for the markers instead!'''
boneParents = boneDict['boneParents']
boneDofs = boneDict['boneDofs']
dofData = animDict['dofData']
    dofSplits = boneDict['asfDofSplits'] # per-bone dof offsets; the anim dict only carries dofData/frameNumbers
Gs, Ls, Bs = boneMatrices(boneDict)
numFrames = dofData.shape[0]
numJoints = len(boneParents)
vertices = np.zeros((numFrames,numJoints,3), dtype=np.float32)
for fi,dofValues in enumerate(dofData):
Gs = pose_skeleton(Gs, Ls, boneParents, boneDofs, dofSplits, dofValues)
vertices[fi,:,:] = Gs[:,:,3]
return makeVertsDistanceMatrix(vertices)
def makeVertsDistanceMatrix(vertices):
'''Given a numFrames x numVerts x 3 data matrix of animating vertices, compute the distance matrix.'''
numVerts = vertices.shape[1]
print 'numVerts',numVerts
dm = np.zeros((numVerts,numVerts),dtype=np.float32)
dd = np.zeros((numVerts,numVerts),dtype=np.float32)
for bi in xrange(numVerts):
for bj in xrange(bi):
d = vertices[:,bi] - vertices[:,bj]
d2 = np.sum(d*d,axis=-1)
d2_mean = np.mean(d2)
d2 -= d2_mean
d2_dev = math.sqrt(np.mean(d2*d2))
dm[bi,bj] = dm[bj,bi] = d2_mean
dd[bi,bj] = dd[bj,bi] = d2_dev
return dm, dd
def makeGraph(dm, dd, threshold = 4000):
'''Given a matrix of mean square-distance and deviation of that, choose the stiff edges.'''
graph = []
numJoints = dm.shape[0]
for bi in xrange(numJoints):
for bj in xrange(bi):
if dd[bi,bj] < threshold: graph.append([bj,bi]) # roughly s.d. of 1cm at 20cm (2x10x200)
return graph
def retargetJoints(joints, targetJoints_tm1, graph, targetInvLengths, targetDirs, positionConstraints = [], oits=7, iits = 7):
'''Implement velocity constraints by predicting the position of the joint from targetJoints_tm1.'''
numJoints = joints.shape[0]
targetJoints = np.zeros((numJoints,3), dtype=np.float32)
if targetJoints_tm1 is None: targetJoints[:] = joints
else: targetJoints[:] = targetJoints_tm1
def err(tjs, graph, tils, tds, pcs):
E = 0
for (bi,pi),til, td in zip(graph, tils, tds):
d = (tjs[bi]-tjs[pi])*til
dg = (d[0]*d[0]+d[1]*d[1]+d[2]*d[2]) - 1.0
if til == 1.0: dg += 1.0
d_td = d - td
do = (d_td[0]*d_td[0]+d_td[1]*d_td[1]+d_td[2]*d_td[2]) # orientation
E += dg*dg + do
for ti,tp,tw in pcs:
dp = (tjs[ti]-tp)*tw
E += np.dot(dp,dp)
return E
def derr2(tjs, graph, tils, tds, pcs):
size = (len(graph)*4+len(pcs)*3)
njs = len(tjs)
E = np.zeros((size),dtype=np.float32)
dE = np.zeros((size,njs,3),dtype=np.float32)
ni = 0
for ((bi,pi),til,td) in zip(graph, tils, tds):
d = (tjs[bi]-tjs[pi])*til
dg = (d[0]*d[0]+d[1]*d[1]+d[2]*d[2]) - 1.0
if til == 1.0: dg += 1.0
d_td = d - td
E[ni] = dg
dE[ni,bi] += d*(2*til)
dE[ni,pi] -= d*(2*til)
R = range(ni+1,ni+4)
E[R] = d_td
dE[R,bi,[0,1,2]] += til
dE[R,pi,[0,1,2]] -= til
ni += 4
for ti,tp,tw in pcs:
dp = (tjs[ti]-tp)*tw
E[ni:ni+3] = dp
dE[[ni,ni+1,ni+2],ti,[0,1,2]] = tw
ni += 3
assert(ni == size)
return E,dE
for oit in xrange(oits):
E,dE = derr2(targetJoints, graph, targetInvLengths, targetDirs, positionConstraints)
A = dE.reshape(-1,numJoints*3)
delta = np.linalg.lstsq(A, -E, rcond=0.0001)
delta,alpha,bestAlpha,bestE = delta[0].reshape(-1,3),0.1,0.0,err(targetJoints, graph, targetInvLengths, targetDirs, positionConstraints)
for iit in xrange(iits):
testJoints = targetJoints + (bestAlpha+alpha)*delta
testE = err(testJoints, graph, targetInvLengths, targetDirs, positionConstraints)
if testE < bestE: bestAlpha,bestE = bestAlpha+alpha,testE
else: alpha *= -0.707 # toggle around the best
if bestAlpha+alpha < 0: alpha=-alpha
targetJoints += bestAlpha*delta
print oit, bestE
E,targetJoints = bestE,targetJoints+bestAlpha*delta
return targetJoints
def decorrelate(M):
# renormalise
mx,mn = np.max(M,axis=1),np.min(M,axis=1)
scl = np.array([[mv,Mv][Mv > -mv] for Mv,mv in zip(mx,mn)]).reshape(-1,1)
M /= scl
for it in xrange(2):
last_di = -1
for ci in xrange(M.shape[1]):
di = np.argmax(np.abs(M[:,ci]))
if di > last_di and abs(M[di,ci]) > 0.1:
last_di += 1
# swap it in position
M[[di,last_di],:] = M[[last_di,di],:]
di = last_di
# now zero out other rows
if abs(M[di,ci]) > 0.5: # actually it should be really close to 1.0
for dj in xrange(M.shape[0]):
if dj != di: # anything above 0.4 gives the same result
M[dj,:] -= M[di,:] * M[dj,ci]/M[di,ci]
# renormalise
mx,mn = np.max(M,axis=1),np.min(M,axis=1)
scl = np.array([[mv,Mv][Mv > -mv] for Mv,mv in zip(mx,mn)]).reshape(-1,1)
M /= scl
return M
def decomposeDofs(animData):
'''Given an animation, find a mostly diagonal, sparse, low-dimensional, linear space that best approximates/compresses it.'''
import pylab as pl
# animData_frames,channels = animDofs_frames,dofs * d2c_dofs,channels. we want the sparse d2c coding.
# assume the first 6 dofs are root
tmp = animData[:-1,6:] - animData[1:,6:]
u,s,vt = np.linalg.svd(tmp, full_matrices=False)
#pl.plot(np.log(s/s[0]))
#pl.hold()
#pl.show()
rank = np.where(s>=s[0]*1e-4)[0][-1]+1
print 'rank',rank
vt = vt[:rank,:]
nch = vt.shape[1]
scale = np.exp(-(1.0/(nch*nch))*np.array(range(nch))).reshape(1,nch) # gently weight the channels to encourage a diagonal
vt = vt * scale
u,s,vt = np.linalg.svd(np.dot(vt.T,vt))
vt = decorrelate(vt[:rank,:] / scale)
vt[:] = np.around(vt * 100.0)
sel = np.where(np.abs(vt) >= 3)
print 'num shared dofs',len(zip(*sel))
pl.imshow((vt[:,:]))
pl.hold()
pl.show()
return [(i,i,100) for i in range(6)]+[(x+6,y+6,int(vt[x,y])) for x,y in zip(*sel)]
def convertASFAMC_to_SKELANIM(asf, amc, skelFilename, animFilename):
import IO
asfDict = read_ASF(asf)
#skelDict = addTrunnions(skelDict)
animDict = read_AMC(amc, asfDict)
skelDict = asfDict_to_skelDict(asfDict)
IO.save(skelFilename, skelDict)
IO.save(animFilename, animDict)
def convertASF_to_SKEL(asf, skelFilename):
import IO
asfDict = read_ASF(asf)
#asfDict = addTrunnions(asfDict)
skelDict = asfDict_to_skelDict(asfDict)
IO.save(skelFilename, skelDict)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rlkit.envs.wrappers import StackObservationEnv, RewardWrapperEnv
import rlkit.torch.pytorch_util as ptu
from rlkit.samplers.data_collector.step_collector import MdpStepCollector
from rlkit.samplers.data_collector.path_collector import GoalConditionedPathCollector
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.awac_trainer import AWACTrainer
from rlkit.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
TorchOnlineRLAlgorithm,
)
from rlkit.demos.source.mdp_path_loader import MDPPathLoader
import torch
from rlkit.visualization.video import save_paths, VideoSaveFunction, RIGVideoSaveFunction
from rlkit.envs.images import Renderer, InsertImageEnv, EnvRenderer
from rlkit.launchers.contextual.util import (
get_save_video_function,
get_gym_env,
)
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.gaussian_and_epislon import GaussianAndEpsilonStrategy
from rlkit.exploration_strategies.ou_strategy import OUStrategy
import os.path as osp
from rlkit.core import logger
from rlkit.misc.asset_loader import load_local_or_remote_file
from rlkit.data_management.obs_dict_replay_buffer import \
ObsDictRelabelingBuffer
from rlkit.data_management.wrappers.concat_to_obs_wrapper import \
ConcatToObsWrapper
from rlkit.envs.reward_mask_wrapper import DiscreteDistribution, RewardMaskWrapper
from functools import partial
import rlkit.samplers.rollout_functions as rf
from rlkit.envs.contextual import ContextualEnv
from rlkit.envs.contextual.goal_conditioned import (
GoalDictDistributionFromMultitaskEnv,
ContextualRewardFnFromMultitaskEnv,
AddImageDistribution,
GoalConditionedDiagnosticsToContextualDiagnostics,
IndexIntoAchievedGoal,
)
def compute_hand_sparse_reward(next_obs, reward, done, info):
return info['goal_achieved'] - 1
def resume(variant):
data = load_local_or_remote_file(variant.get("pretrained_algorithm_path"), map_location="cuda")
algo = data['algorithm']
algo.num_epochs = variant['num_epochs']
post_pretrain_hyperparams = variant["trainer_kwargs"].get("post_pretrain_hyperparams", {})
algo.trainer.set_algorithm_weights(**post_pretrain_hyperparams)
algo.train()
def process_args(variant):
if variant.get("debug", False):
variant['max_path_length'] = 50
variant['batch_size'] = 5
variant['num_epochs'] = 5
variant['num_eval_steps_per_epoch'] = 100
variant['num_expl_steps_per_train_loop'] = 100
variant['num_trains_per_train_loop'] = 10
        variant['min_num_steps_before_training'] = 100
variant['trainer_kwargs']['bc_num_pretrain_steps'] = min(10, variant['trainer_kwargs'].get('bc_num_pretrain_steps', 0))
variant['trainer_kwargs']['q_num_pretrain1_steps'] = min(10, variant['trainer_kwargs'].get('q_num_pretrain1_steps', 0))
variant['trainer_kwargs']['q_num_pretrain2_steps'] = min(10, variant['trainer_kwargs'].get('q_num_pretrain2_steps', 0))
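# Illustrative sketch (not part of the original launcher): the rough shape of the variant
# dict that experiment() below consumes. Every value here is a placeholder assumption,
# not a recommended setting.
#
# variant = dict(
#     env_class=MyGoalConditionedEnv,          # hypothetical env class
#     env_kwargs=dict(),
#     observation_key='latent_observation',
#     desired_goal_key='latent_desired_goal',
#     achieved_goal_key='latent_achieved_goal',
#     layer_size=256,
#     policy_kwargs=dict(hidden_sizes=[256, 256]),
#     trainer_kwargs=dict(),
#     replay_buffer_kwargs=dict(max_size=int(1e6)),
#     collection_mode='batch',
#     max_path_length=50,
#     batch_size=256,
#     num_epochs=100,
#     num_eval_steps_per_epoch=500,
#     num_expl_steps_per_train_loop=500,
#     num_trains_per_train_loop=500,
#     min_num_steps_before_training=500,
# )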
def experiment(variant):
render = variant.get("render", False)
debug = variant.get("debug", False)
if variant.get("pretrained_algorithm_path", False):
resume(variant)
return
env_class = variant["env_class"]
env_kwargs = variant["env_kwargs"]
expl_env = env_class(**env_kwargs)
eval_env = env_class(**env_kwargs)
env = eval_env
if variant.get('sparse_reward', False):
expl_env = RewardWrapperEnv(expl_env, compute_hand_sparse_reward)
eval_env = RewardWrapperEnv(eval_env, compute_hand_sparse_reward)
if variant.get('add_env_demos', False):
variant["path_loader_kwargs"]["demo_paths"].append(variant["env_demo_path"])
if variant.get('add_env_offpolicy_data', False):
variant["path_loader_kwargs"]["demo_paths"].append(variant["env_offpolicy_data_path"])
if variant.get("use_masks", False):
mask_wrapper_kwargs = variant.get("mask_wrapper_kwargs", dict())
expl_mask_distribution_kwargs = variant["expl_mask_distribution_kwargs"]
expl_mask_distribution = DiscreteDistribution(**expl_mask_distribution_kwargs)
expl_env = RewardMaskWrapper(env, expl_mask_distribution, **mask_wrapper_kwargs)
eval_mask_distribution_kwargs = variant["eval_mask_distribution_kwargs"]
eval_mask_distribution = DiscreteDistribution(**eval_mask_distribution_kwargs)
eval_env = RewardMaskWrapper(env, eval_mask_distribution, **mask_wrapper_kwargs)
env = eval_env
path_loader_kwargs = variant.get("path_loader_kwargs", {})
stack_obs = path_loader_kwargs.get("stack_obs", 1)
if stack_obs > 1:
expl_env = StackObservationEnv(expl_env, stack_obs=stack_obs)
eval_env = StackObservationEnv(eval_env, stack_obs=stack_obs)
observation_key = variant.get('observation_key', 'latent_observation')
desired_goal_key = variant.get('desired_goal_key', 'latent_desired_goal')
achieved_goal_key = variant.get('achieved_goal_key', 'latent_achieved_goal')
obs_dim = (
env.observation_space.spaces[observation_key].low.size
+ env.observation_space.spaces[desired_goal_key].low.size
)
action_dim = eval_env.action_space.low.size
if hasattr(expl_env, 'info_sizes'):
env_info_sizes = expl_env.info_sizes
else:
env_info_sizes = dict()
replay_buffer_kwargs=dict(
env=env,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
achieved_goal_key=achieved_goal_key,
)
replay_buffer_kwargs.update(variant.get('replay_buffer_kwargs', dict()))
replay_buffer = ConcatToObsWrapper(
ObsDictRelabelingBuffer(**replay_buffer_kwargs),
["resampled_goals", ],
)
replay_buffer_kwargs.update(variant.get('demo_replay_buffer_kwargs', dict()))
demo_train_buffer = ConcatToObsWrapper(
ObsDictRelabelingBuffer(**replay_buffer_kwargs),
["resampled_goals", ],
)
demo_test_buffer = ConcatToObsWrapper(
ObsDictRelabelingBuffer(**replay_buffer_kwargs),
["resampled_goals", ],
)
M = variant['layer_size']
qf1 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
qf2 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf1 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf2 = ConcatMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
policy_class = variant.get("policy_class", TanhGaussianPolicy)
policy_kwargs = variant['policy_kwargs']
policy_path = variant.get("policy_path", False)
if policy_path:
policy = load_local_or_remote_file(policy_path)
else:
policy = policy_class(
obs_dim=obs_dim,
action_dim=action_dim,
**policy_kwargs,
)
buffer_policy_path = variant.get("buffer_policy_path", False)
if buffer_policy_path:
buffer_policy = load_local_or_remote_file(buffer_policy_path)
else:
buffer_policy_class = variant.get("buffer_policy_class", policy_class)
buffer_policy = buffer_policy_class(
obs_dim=obs_dim,
action_dim=action_dim,
**variant.get("buffer_policy_kwargs", policy_kwargs),
)
expl_policy = policy
exploration_kwargs = variant.get('exploration_kwargs', {})
if exploration_kwargs:
if exploration_kwargs.get("deterministic_exploration", False):
expl_policy = MakeDeterministic(policy)
exploration_strategy = exploration_kwargs.get("strategy", None)
if exploration_strategy is None:
pass
elif exploration_strategy == 'ou':
es = OUStrategy(
action_space=expl_env.action_space,
max_sigma=exploration_kwargs['noise'],
min_sigma=exploration_kwargs['noise'],
)
expl_policy = PolicyWrappedWithExplorationStrategy(
exploration_strategy=es,
policy=expl_policy,
)
elif exploration_strategy == 'gauss_eps':
es = GaussianAndEpsilonStrategy(
action_space=expl_env.action_space,
max_sigma=exploration_kwargs['noise'],
min_sigma=exploration_kwargs['noise'], # constant sigma
epsilon=0,
)
expl_policy = PolicyWrappedWithExplorationStrategy(
exploration_strategy=es,
policy=expl_policy,
)
else:
            raise ValueError("Unknown exploration strategy: {}".format(exploration_strategy))
trainer = AWACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
buffer_policy=buffer_policy,
**variant['trainer_kwargs']
)
    if variant['collection_mode'] == 'online':
        # the online algorithm also needs an evaluation collector, otherwise
        # eval_path_collector below would be undefined in this branch
        eval_path_collector = GoalConditionedPathCollector(
            eval_env,
            MakeDeterministic(policy),
            observation_key=observation_key,
            desired_goal_key=desired_goal_key,
            render=render,
        )
expl_path_collector = MdpStepCollector(
expl_env,
policy,
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
max_path_length=variant['max_path_length'],
batch_size=variant['batch_size'],
num_epochs=variant['num_epochs'],
num_eval_steps_per_epoch=variant['num_eval_steps_per_epoch'],
num_expl_steps_per_train_loop=variant['num_expl_steps_per_train_loop'],
num_trains_per_train_loop=variant['num_trains_per_train_loop'],
min_num_steps_before_training=variant['min_num_steps_before_training'],
)
else:
eval_path_collector = GoalConditionedPathCollector(
eval_env,
MakeDeterministic(policy),
observation_key=observation_key,
desired_goal_key=desired_goal_key,
render=render,
)
expl_path_collector = GoalConditionedPathCollector(
expl_env,
expl_policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
render=render,
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
max_path_length=variant['max_path_length'],
batch_size=variant['batch_size'],
num_epochs=variant['num_epochs'],
num_eval_steps_per_epoch=variant['num_eval_steps_per_epoch'],
num_expl_steps_per_train_loop=variant['num_expl_steps_per_train_loop'],
num_trains_per_train_loop=variant['num_trains_per_train_loop'],
min_num_steps_before_training=variant['min_num_steps_before_training'],
)
algorithm.to(ptu.device)
if variant.get("save_video", False):
renderer_kwargs = variant.get("renderer_kwargs", {})
save_video_kwargs = variant.get("save_video_kwargs", {})
def get_video_func(
env,
policy,
tag,
):
renderer = EnvRenderer(**renderer_kwargs)
state_goal_distribution = GoalDictDistributionFromMultitaskEnv(
env,
desired_goal_keys=[desired_goal_key],
)
image_goal_distribution = AddImageDistribution(
env=env,
base_distribution=state_goal_distribution,
image_goal_key='image_desired_goal',
renderer=renderer,
)
img_env = InsertImageEnv(env, renderer=renderer)
rollout_function = partial(
rf.multitask_rollout,
max_path_length=variant['max_path_length'],
observation_key=observation_key,
desired_goal_key=desired_goal_key,
return_dict_obs=True,
)
reward_fn = ContextualRewardFnFromMultitaskEnv(
env=env,
achieved_goal_from_observation=IndexIntoAchievedGoal(observation_key),
desired_goal_key=desired_goal_key,
achieved_goal_key="state_achieved_goal",
)
contextual_env = ContextualEnv(
img_env,
context_distribution=image_goal_distribution,
reward_fn=reward_fn,
observation_key=observation_key,
)
video_func = get_save_video_function(
rollout_function,
contextual_env,
policy,
tag=tag,
imsize=renderer.width,
image_format='CWH',
**save_video_kwargs
)
return video_func
expl_video_func = get_video_func(expl_env, expl_policy, "expl")
eval_video_func = get_video_func(eval_env, MakeDeterministic(policy), "eval")
algorithm.post_train_funcs.append(eval_video_func)
algorithm.post_train_funcs.append(expl_video_func)
if variant.get('save_paths', False):
algorithm.post_train_funcs.append(save_paths)
if variant.get('load_demos', False):
path_loader_class = variant.get('path_loader_class', MDPPathLoader)
path_loader = path_loader_class(trainer,
replay_buffer=replay_buffer,
demo_train_buffer=demo_train_buffer,
demo_test_buffer=demo_test_buffer,
**path_loader_kwargs
)
path_loader.load_demos()
if variant.get('pretrain_policy', False):
trainer.pretrain_policy_with_bc(
policy,
demo_train_buffer,
demo_test_buffer,
trainer.bc_num_pretrain_steps,
)
if variant.get('pretrain_rl', False):
trainer.pretrain_q_with_bc_data()
if variant.get('save_pretrained_algorithm', False):
p_path = osp.join(logger.get_snapshot_dir(), 'pretrain_algorithm.p')
pt_path = osp.join(logger.get_snapshot_dir(), 'pretrain_algorithm.pt')
data = algorithm._get_snapshot()
data['algorithm'] = algorithm
torch.save(data, open(pt_path, "wb"))
torch.save(data, open(p_path, "wb"))
algorithm.train()
|
|
from datetime import datetime
import pytest
from unittest.mock import Mock
from channelarchiver import Archiver, codes, utils, exceptions
from channelarchiver.models import ChannelData, ArchiveProperties
from mock_archiver import MockArchiver
utc = utils.UTC()
local_tz = utils.local_tz
@pytest.fixture
def archiver():
archiver = Archiver("http://fake")
archiver.archiver = MockArchiver()
return archiver
def test_scan_archives_all(archiver):
archiver.scan_archives()
archives_for_channel = archiver.archives_for_channel
assert "EXAMPLE:DOUBLE_SCALAR" in archives_for_channel
assert "EXAMPLE:INT_WAVEFORM" in archives_for_channel
assert "EXAMPLE:ENUM_SCALAR" in archives_for_channel
expected_archives = {
"EXAMPLE:DOUBLE_SCALAR": [
ArchiveProperties(
key=1001,
start_time=datetime(2012, 7, 12, 21, 47, 23, 664000, tzinfo=utc),
end_time=datetime(2012, 7, 13, 11, 18, 55, 671259, tzinfo=utc),
)
],
"EXAMPLE:INT_WAVEFORM": [
ArchiveProperties(
key=1001,
start_time=datetime(2012, 7, 12, 23, 14, 19, 129600, tzinfo=utc),
end_time=datetime(2012, 7, 13, 8, 26, 18, 558211, tzinfo=utc),
)
],
"EXAMPLE:ENUM_SCALAR": [
ArchiveProperties(
key=1008,
start_time=datetime(2012, 7, 12, 22, 41, 10, 765676, tzinfo=utc),
end_time=datetime(2012, 7, 13, 9, 20, 23, 623789, tzinfo=utc),
)
],
}
assert archives_for_channel == expected_archives
def test_scan_archives_one(archiver):
archiver.scan_archives("EXAMPLE:DOUBLE_SCALAR")
archives_for_channel = archiver.archives_for_channel.keys()
assert "EXAMPLE:DOUBLE_SCALAR" in archives_for_channel
assert "EXAMPLE:INT_WAVEFORM" not in archives_for_channel
assert "EXAMPLE:ENUM_SCALAR" not in archives_for_channel
def test_scan_archives_list(archiver):
archiver.scan_archives(["EXAMPLE:DOUBLE_SCALAR", "EXAMPLE:ENUM_SCALAR"])
archives_for_channel = archiver.archives_for_channel.keys()
assert "EXAMPLE:DOUBLE_SCALAR" in archives_for_channel
assert "EXAMPLE:INT_WAVEFORM" not in archives_for_channel
assert "EXAMPLE:ENUM_SCALAR" in archives_for_channel
def test_get_scalar(archiver):
start = datetime(2012, 1, 1, tzinfo=utc)
end = datetime(2013, 1, 1, tzinfo=utc)
data = archiver.get(
["EXAMPLE:DOUBLE_SCALAR"], start, end, interpolation=codes.interpolation.RAW
)
assert isinstance(data, list)
channel_data = data[0]
assert channel_data.channel == "EXAMPLE:DOUBLE_SCALAR"
assert channel_data.data_type == codes.data_type.DOUBLE
assert channel_data.elements == 1
assert channel_data.values == [200.5, 199.9, 198.7, 196.1]
assert channel_data.times == [
datetime(2012, 7, 12, 21, 47, 23, 664000, utc),
datetime(2012, 7, 13, 2, 5, 1, 443589, utc),
datetime(2012, 7, 13, 7, 19, 31, 806097, utc),
datetime(2012, 7, 13, 11, 18, 55, 671259, utc),
]
assert channel_data.statuses == [0, 6, 6, 5]
assert channel_data.severities == [0, 1, 1, 2]
assert repr(channel_data.times[0].tzinfo) == "UTC()"
def test_get_interpolation_string(archiver):
start = datetime(2012, 1, 1, tzinfo=utc)
end = datetime(2013, 1, 1, tzinfo=utc)
channel_data = archiver.get(
"EXAMPLE:DOUBLE_SCALAR", start, end, interpolation="raw"
)
assert channel_data.channel == "EXAMPLE:DOUBLE_SCALAR"
assert channel_data.values == [200.5, 199.9, 198.7, 196.1]
def test_get_scalar_str(archiver):
start = datetime(2012, 1, 1, tzinfo=utc)
end = datetime(2013, 1, 1, tzinfo=utc)
channel_data = archiver.get(
"EXAMPLE:DOUBLE_SCALAR", start, end, interpolation=codes.interpolation.RAW
)
assert isinstance(channel_data, ChannelData)
assert channel_data.channel == "EXAMPLE:DOUBLE_SCALAR"
assert channel_data.data_type == codes.data_type.DOUBLE
def test_get_scalar_in_tz(archiver):
start = datetime(2012, 1, 1, tzinfo=utc)
end = datetime(2013, 1, 1, tzinfo=utc)
data = archiver.get(
"EXAMPLE:DOUBLE_SCALAR",
start,
end,
interpolation=codes.interpolation.RAW,
tz=utils.UTC(11.5),
)
assert str(data.times[0].tzinfo) == "UTC+11:30"
assert repr(data.times[0].tzinfo) == "UTC(+11.5)"
def test_get_without_scan(archiver):
start = datetime(2012, 1, 1, tzinfo=utc)
end = datetime(2013, 1, 1, tzinfo=utc)
with pytest.raises(exceptions.ChannelNotFound):
archiver.get(
["EXAMPLE:DOUBLE_SCALAR"],
start,
end,
interpolation=codes.interpolation.RAW,
scan_archives=False,
)
def test_get_with_restrictive_interval(archiver):
start = datetime(2012, 7, 13, tzinfo=utc)
end = datetime(2012, 7, 13, 10, tzinfo=utc)
channel_data = archiver.get(
"EXAMPLE:DOUBLE_SCALAR", start, end, interpolation=codes.interpolation.RAW
)
assert channel_data.values == [199.9, 198.7]
assert channel_data.times == [
datetime(2012, 7, 13, 2, 5, 1, 443589, utc),
datetime(2012, 7, 13, 7, 19, 31, 806097, utc),
]
def test_get_with_restrictive_interval_with_tzs(archiver):
start = datetime(2012, 7, 13, 10, tzinfo=utils.UTC(10))
end = datetime(2012, 7, 13, 20, tzinfo=utils.UTC(10))
channel_data = archiver.get(
"EXAMPLE:DOUBLE_SCALAR", start, end, interpolation=codes.interpolation.RAW
)
assert channel_data.values == [199.9, 198.7]
assert channel_data.times == [
datetime(2012, 7, 13, 2, 5, 1, 443589, utc),
datetime(2012, 7, 13, 7, 19, 31, 806097, utc),
]
assert repr(channel_data.times[0].tzinfo) == "UTC(+10)"
def test_get_with_str_times(archiver):
start = "2012-07-13 00:00:00Z"
end = "2012-07-13 10:00:00Z"
channel_data = archiver.get(
"EXAMPLE:DOUBLE_SCALAR", start, end, interpolation=codes.interpolation.RAW
)
assert channel_data.values == [199.9, 198.7]
assert channel_data.times == [
datetime(2012, 7, 13, 2, 5, 1, 443589, utc),
datetime(2012, 7, 13, 7, 19, 31, 806097, utc),
]
def test_get_with_str_times_incl_tz(archiver):
start = "2012-07-13 10:00:00+10:00"
end = "2012-07-13 20:00:00+10:00"
channel_data = archiver.get(
"EXAMPLE:DOUBLE_SCALAR", start, end, interpolation=codes.interpolation.RAW
)
assert channel_data.values == [199.9, 198.7]
assert channel_data.times == [
datetime(2012, 7, 13, 2, 5, 1, 443589, utc),
datetime(2012, 7, 13, 7, 19, 31, 806097, utc),
]
assert repr(channel_data.times[0].tzinfo) == "UTC(+10)"
def test_get_waveform(archiver):
start = datetime(2012, 1, 1)
end = datetime(2013, 1, 1)
channel_data = archiver.get(
"EXAMPLE:INT_WAVEFORM", start, end, interpolation=codes.interpolation.RAW
)
assert channel_data.channel == "EXAMPLE:INT_WAVEFORM"
assert channel_data.data_type == codes.data_type.INT
assert channel_data.elements == 3
assert channel_data.values == [[3, 5, 13], [2, 4, 11], [0, 7, 1]]
def test_get_enum(archiver):
start = datetime(2012, 1, 1)
end = datetime(2013, 1, 1)
channel_data = archiver.get(
"EXAMPLE:ENUM_SCALAR", start, end, interpolation=codes.interpolation.RAW
)
assert channel_data.channel == "EXAMPLE:ENUM_SCALAR"
assert channel_data.data_type == codes.data_type.ENUM
assert channel_data.values == [7, 1, 8]
def test_get_multiple(archiver):
start = datetime(2012, 1, 1)
end = datetime(2013, 1, 1)
channels = ["EXAMPLE:DOUBLE_SCALAR", "EXAMPLE:INT_WAVEFORM", "EXAMPLE:ENUM_SCALAR"]
data = archiver.get(channels, start, end, interpolation=codes.interpolation.RAW)
assert isinstance(data, list)
assert data[0].channel == "EXAMPLE:DOUBLE_SCALAR"
assert data[1].channel == "EXAMPLE:INT_WAVEFORM"
assert data[2].channel == "EXAMPLE:ENUM_SCALAR"
assert data[0].values == [200.5, 199.9, 198.7, 196.1]
assert data[1].values == [[3, 5, 13], [2, 4, 11], [0, 7, 1]]
assert data[2].values == [7, 1, 8]
def test_get_with_archive_keys(archiver):
values_mock = Mock(wraps=archiver.archiver.values)
archiver.archiver.values = values_mock
channels = ["EXAMPLE:GROUP1_A", "EXAMPLE:GROUP2_A", "EXAMPLE:GROUP1_B"]
keys = [1010, 1011, 1010]
archiver.get(
channels,
"2000-01-01",
"2000-01-02",
archive_keys=keys,
interpolation=codes.interpolation.RAW,
)
first_call, second_call = values_mock.call_args_list
assert first_call[0][:2] == (1010, ["EXAMPLE:GROUP1_A", "EXAMPLE:GROUP1_B"])
assert second_call[0][:2] == (1011, ["EXAMPLE:GROUP2_A"])
def test_get_with_wrong_number_of_keys(archiver):
start = datetime(2012, 1, 1)
end = datetime(2013, 1, 1)
with pytest.raises(exceptions.ChannelKeyMismatch):
archiver.get(
["EXAMPLE:DOUBLE_SCALAR"],
start,
end,
archive_keys=[1001, 1008],
interpolation=codes.interpolation.RAW,
)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os
import io
import gzip
import sys
import bz2
import zipfile
from contextlib import contextmanager
import subprocess
import logging
from petl.errors import ArgumentError
from petl.compat import urlopen, StringIO, BytesIO, string_types, PY2
logger = logging.getLogger(__name__)
warning = logger.warning
info = logger.info
debug = logger.debug
class FileSource(object):
def __init__(self, filename, **kwargs):
self.filename = filename
self.kwargs = kwargs
def open(self, mode='r'):
return io.open(self.filename, mode, **self.kwargs)
class GzipSource(object):
def __init__(self, filename, remote=False, **kwargs):
self.filename = filename
self.remote = remote
self.kwargs = kwargs
@contextmanager
def open(self, mode='r'):
if self.remote:
if not mode.startswith('r'):
raise ArgumentError('source is read-only')
filehandle = urlopen(self.filename)
else:
filehandle = self.filename
source = gzip.open(filehandle, mode, **self.kwargs)
try:
yield source
finally:
source.close()
class BZ2Source(object):
def __init__(self, filename, remote=False, **kwargs):
self.filename = filename
self.remote = remote
self.kwargs = kwargs
@contextmanager
def open(self, mode='r'):
if self.remote:
if not mode.startswith('r'):
raise ArgumentError('source is read-only')
filehandle = urlopen(self.filename)
else:
filehandle = self.filename
source = bz2.BZ2File(filehandle, mode, **self.kwargs)
try:
yield source
finally:
source.close()
class ZipSource(object):
def __init__(self, filename, membername, pwd=None, **kwargs):
self.filename = filename
self.membername = membername
self.pwd = pwd
self.kwargs = kwargs
@contextmanager
def open(self, mode):
if PY2:
mode = mode.translate(None, 'bU')
else:
mode = mode.translate({ord('b'): None, ord('U'): None})
zf = zipfile.ZipFile(self.filename, mode, **self.kwargs)
try:
if self.pwd is not None:
yield zf.open(self.membername, mode, self.pwd)
else:
yield zf.open(self.membername, mode)
finally:
zf.close()
class Uncloseable(object):
def __init__(self, inner):
object.__setattr__(self, '_inner', inner)
def __getattr__(self, item):
return getattr(self._inner, item)
def __setattr__(self, key, value):
setattr(self._inner, key, value)
def close(self):
debug('Uncloseable: close called (%r)' % self._inner)
pass
def _get_stdout_binary():
try:
return sys.stdout.buffer
except AttributeError:
pass
try:
fd = sys.stdout.fileno()
return os.fdopen(fd, 'ab', 0)
except Exception:
pass
try:
return sys.__stdout__.buffer
except AttributeError:
pass
try:
fd = sys.__stdout__.fileno()
return os.fdopen(fd, 'ab', 0)
except Exception:
pass
# fallback
return sys.stdout
stdout_binary = _get_stdout_binary()
def _get_stdin_binary():
try:
return sys.stdin.buffer
except AttributeError:
pass
try:
fd = sys.stdin.fileno()
return os.fdopen(fd, 'rb', 0)
except Exception:
pass
try:
return sys.__stdin__.buffer
except AttributeError:
pass
try:
fd = sys.__stdin__.fileno()
return os.fdopen(fd, 'rb', 0)
except Exception:
pass
# fallback
return sys.stdin
stdin_binary = _get_stdin_binary()
class StdoutSource(object):
@contextmanager
def open(self, mode):
if mode.startswith('r'):
raise ArgumentError('source is write-only')
if 'b' in mode:
yield Uncloseable(stdout_binary)
else:
yield Uncloseable(sys.stdout)
class StdinSource(object):
@contextmanager
def open(self, mode='r'):
if not mode.startswith('r'):
raise ArgumentError('source is read-only')
if 'b' in mode:
yield Uncloseable(stdin_binary)
else:
yield Uncloseable(sys.stdin)
class URLSource(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
@contextmanager
def open(self, mode='r'):
if not mode.startswith('r'):
raise ArgumentError('source is read-only')
f = urlopen(*self.args, **self.kwargs)
try:
yield f
finally:
f.close()
class MemorySource(object):
"""Memory data source. E.g.::
>>> import petl as etl
>>> data = b'foo,bar\\na,1\\nb,2\\nc,2\\n'
>>> source = etl.MemorySource(data)
>>> tbl = etl.fromcsv(source)
>>> tbl
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | '1' |
+-----+-----+
| 'b' | '2' |
+-----+-----+
| 'c' | '2' |
+-----+-----+
>>> sink = etl.MemorySource()
>>> tbl.tojson(sink)
>>> sink.getvalue()
b'[{"foo": "a", "bar": "1"}, {"foo": "b", "bar": "2"}, {"foo": "c", "bar": "2"}]'
Also supports appending.
"""
def __init__(self, s=None):
self.s = s
self.buffer = None
@contextmanager
def open(self, mode='rb'):
try:
if 'r' in mode:
if self.s is not None:
if 'b' in mode:
self.buffer = BytesIO(self.s)
else:
self.buffer = StringIO(self.s)
else:
raise ArgumentError('no string data supplied')
elif 'w' in mode:
if self.buffer is not None:
self.buffer.close()
if 'b' in mode:
self.buffer = BytesIO()
else:
self.buffer = StringIO()
elif 'a' in mode:
if self.buffer is None:
if 'b' in mode:
self.buffer = BytesIO()
else:
self.buffer = StringIO()
yield Uncloseable(self.buffer)
except:
raise
finally:
pass # don't close the buffer
def getvalue(self):
if self.buffer:
return self.buffer.getvalue()
# backwards compatibility
StringSource = MemorySource
class PopenSource(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
@contextmanager
def open(self, mode='r'):
if not mode.startswith('r'):
raise ArgumentError('source is read-only')
self.kwargs['stdout'] = subprocess.PIPE
proc = subprocess.Popen(*self.args, **self.kwargs)
try:
yield proc.stdout
finally:
pass
_invalid_source_msg = 'invalid source argument, expected None or a string or ' \
'an object implementing open(), found %r'
def read_source_from_arg(source):
if source is None:
return StdinSource()
elif isinstance(source, string_types):
if any(map(source.startswith, ['http://', 'https://', 'ftp://'])):
if source.endswith('.gz') or source.endswith('.bgz'):
return GzipSource(source, remote=True)
elif source.endswith('.bz2'):
return BZ2Source(source, remote=True)
else:
return URLSource(source)
elif source.endswith('.gz') or source.endswith('.bgz'):
return GzipSource(source)
elif source.endswith('.bz2'):
return BZ2Source(source)
else:
return FileSource(source)
else:
assert (hasattr(source, 'open')
and callable(getattr(source, 'open'))), \
_invalid_source_msg % source
return source
def write_source_from_arg(source):
if source is None:
return StdoutSource()
elif isinstance(source, string_types):
if source.endswith('.gz') or source.endswith('.bgz'):
return GzipSource(source)
elif source.endswith('.bz2'):
return BZ2Source(source)
else:
return FileSource(source)
else:
assert (hasattr(source, 'open')
and callable(getattr(source, 'open'))), \
_invalid_source_msg % source
return source
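# A brief illustrative sketch (argument values below are examples only) of how
# the helpers above dispatch to a source class:
#
#     read_source_from_arg(None)                    # -> StdinSource()
#     read_source_from_arg('data.csv')              # -> FileSource('data.csv')
#     read_source_from_arg('data.csv.gz')           # -> GzipSource('data.csv.gz')
#     read_source_from_arg('http://host/data.bz2')  # -> BZ2Source(..., remote=True)
#     write_source_from_arg(None)                   # -> StdoutSource()
#     write_source_from_arg('out.csv.gz')           # -> GzipSource('out.csv.gz')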
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sessions2trash.py
Run this script in a web2py environment shell e.g. python web2py.py -S app
If models are loaded (-M option) auth.settings.expiration is assumed
for sessions without an expiration. If models are not loaded, sessions older
than 60 minutes are removed. Use the --expiration option to override these
values.
Typical usage:
# Delete expired sessions every 5 minutes
nohup python web2py.py -S app -M -R scripts/sessions2trash.py &
# Delete sessions older than 60 minutes regardless of expiration,
# with verbose output, then exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 3600 -f -v
# Delete all sessions regardless of expiry and exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 0
"""
from __future__ import with_statement
from gluon.storage import Storage
from optparse import OptionParser
import cPickle
import datetime
import os
import stat
import time
EXPIRATION_MINUTES = 60
SLEEP_MINUTES = 5
VERSION = 0.3
class SessionSet(object):
"""Class representing a set of sessions"""
def __init__(self, expiration, force, verbose):
self.expiration = expiration
self.force = force
self.verbose = verbose
def get(self):
"""Get session files/records."""
raise NotImplementedError
def trash(self):
"""Trash expired sessions."""
now = datetime.datetime.now()
for item in self.get():
status = 'OK'
last_visit = item.last_visit_default()
try:
session = item.get()
if session.auth:
if session.auth.expiration and not self.force:
self.expiration = session.auth.expiration
if session.auth.last_visit:
last_visit = session.auth.last_visit
except:
pass
age = 0
if last_visit:
age = total_seconds(now - last_visit)
if age > self.expiration or not self.expiration:
item.delete()
status = 'trashed'
if self.verbose > 1:
print 'key: %s' % str(item)
print 'expiration: %s seconds' % self.expiration
print 'last visit: %s' % str(last_visit)
print 'age: %s seconds' % age
print 'status: %s' % status
print ''
elif self.verbose > 0:
print('%s %s' % (str(item), status))
class SessionSetDb(SessionSet):
"""Class representing a set of sessions stored in database"""
def __init__(self, expiration, force, verbose):
SessionSet.__init__(self, expiration, force, verbose)
def get(self):
"""Return list of SessionDb instances for existing sessions."""
sessions = []
tablename = 'web2py_session'
from gluon import current
(record_id_name, table, record_id, unique_key) = \
current.response._dbtable_and_field
for row in table._db(table.id > 0).select():
sessions.append(SessionDb(row))
return sessions
class SessionSetFiles(SessionSet):
"""Class representing a set of sessions stored in flat files"""
def __init__(self, expiration, force, verbose):
SessionSet.__init__(self, expiration, force, verbose)
def get(self):
"""Return list of SessionFile instances for existing sessions."""
path = os.path.join(request.folder, 'sessions')
return [SessionFile(os.path.join(path, x)) for x in os.listdir(path)]
class SessionDb(object):
"""Class representing a single session stored in database"""
def __init__(self, row):
self.row = row
def delete(self):
from gluon import current
(record_id_name, table, record_id, unique_key) = \
current.response._dbtable_and_field
self.row.delete_record()
table._db.commit()
def get(self):
session = Storage()
session.update(cPickle.loads(self.row.session_data))
return session
def last_visit_default(self):
if isinstance(self.row.modified_datetime, datetime.datetime):
return self.row.modified_datetime
else:
try:
return datetime.datetime.strptime(self.row.modified_datetime, '%Y-%m-%d %H:%M:%S.%f')
except:
print 'failed to retrieve last modified time (value: %s)' % self.row.modified_datetime
def __str__(self):
return self.row.unique_key
class SessionFile(object):
"""Class representing a single session stored as a flat file"""
def __init__(self, filename):
self.filename = filename
def delete(self):
os.unlink(self.filename)
def get(self):
session = Storage()
with open(self.filename, 'rb+') as f:
session.update(cPickle.load(f))
return session
def last_visit_default(self):
return datetime.datetime.fromtimestamp(
os.stat(self.filename)[stat.ST_MTIME])
def __str__(self):
return self.filename
def total_seconds(delta):
"""
Adapted from Python 2.7's timedelta.total_seconds() method.
Args:
delta: datetime.timedelta instance.
"""
return (delta.microseconds + (delta.seconds + (delta.days * 24 * 3600)) *
10 ** 6) / 10 ** 6
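# For example, total_seconds(datetime.timedelta(minutes=2)) evaluates to
# (0 + (120 + 0) * 10 ** 6) / 10 ** 6 == 120, and a one-day delta gives 86400.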
def main():
"""Main processing."""
usage = '%prog [options]' + '\nVersion: %s' % VERSION
parser = OptionParser(usage=usage)
parser.add_option('-f', '--force',
action='store_true', dest='force', default=False,
help=('Ignore session expiration. '
'Force expiry based on -x option or auth.settings.expiration.')
)
parser.add_option('-o', '--once',
action='store_true', dest='once', default=False,
help='Delete sessions, then exit.',
)
parser.add_option('-s', '--sleep',
dest='sleep', default=SLEEP_MINUTES * 60, type="int",
help='Number of seconds to sleep between executions. Default 300.',
)
parser.add_option('-v', '--verbose',
default=0, action='count',
help="print verbose output, a second -v increases verbosity")
parser.add_option('-x', '--expiration',
dest='expiration', default=None, type="int",
help='Expiration value for sessions without expiration (in seconds)',
)
(options, unused_args) = parser.parse_args()
expiration = options.expiration
if expiration is None:
try:
expiration = auth.settings.expiration
except:
expiration = EXPIRATION_MINUTES * 60
set_db = SessionSetDb(expiration, options.force, options.verbose)
set_files = SessionSetFiles(expiration, options.force, options.verbose)
while True:
set_db.trash()
set_files.trash()
if options.once:
break
else:
if options.verbose:
print 'Sleeping %s seconds' % (options.sleep)
time.sleep(options.sleep)
main()
|
|
'''
Created on Mar 18, 2013
@author: Gooch
'''
import re,os
import Pipeline.settings.BiotoolsSettings as BiotoolsSettings
from Pipeline.core.PipelineTemplate import PipelineTemplate
import Pipeline.core.PipelineUtil as PipelineUtil
from Pipeline.core.PipelineError import PipelineError
from Pipeline.core.PipelineSampleData import SampleData
import igraph
class PipelineNode:
def __init__(self,pipeline):
#TODO: fill in stub
self.pipeline=pipeline
self.template=None
self.subname=None
self.optionfile=None
def setValues(self,templatename,subname=None,optionfile=None):
        self.subname=subname.upper() if subname else None
self.template=self.pipeline.getTemplate(templatename.upper())
self.loadOptionFile(optionfile)
def loadOptionFile(self,filename):
#TODO empty method stub
self.optionfile=filename
class AnalysisPipeline:
def __init__(self):
#TODO: fill in stub
self.jobtemplates={}
self.optionfiles={}
#list of nodes in tree
self.nodes={}#info about nodes stored in PipelineNode object in this dictionary
#index with template[subname]
#only allow first specification to contain optionfile, blank != ""
#allows for easier start point of new branches.
self.templategraph= igraph.Graph(directed=True)
self.templategraph.is_dag()
self.samples=None
def loadSampleData(self,filename):
self.samples=SampleData.readBatch(filename)
def loadTemplate(self,templateName):
template=None
#TODO: fill in stub
if len(templateName) == 0:
return False
#check if step template is already loaded
if templateName.upper() in self.jobtemplates:
#if loaded, no more work to do
            return True
#if not found, check if template exists
path2Template=os.path.join(PipelineUtil.templateDir(),templateName.upper()+".sjt")
#print(path2Template)
if os.path.isfile(path2Template):
#if template exists
template=PipelineTemplate.readTemplate(templateName.upper())
self.jobtemplates[templateName.upper()]=template
return True
else:
#if template doesn't exist, signal error
return False
def TemplateIsLoaded(self,template):
return (template.upper() in self.jobtemplates)
def getTemplate(self,template):
if not self.TemplateIsLoaded(template):
self.loadTemplate(template)
if not self.TemplateIsLoaded(template):
raise PipelineError("[PipelineTemplate.AnalysisPipeline] requested template does not exist: %s\n" % template)
return self.jobtemplates.get(template.upper())
def getNode(self,template,subname,optionfile):
# print("searching for:")
# print ("\ttemplate: %s" % template)
# print ("\tsubname: %s" % subname)
# print ("\toptionfile: %s" % optionfile)
vertName=""
if(subname):
vertName="%s|%s" % (template.upper(),subname.upper())
else:
vertName="%s" % (template.upper())
# print ("using vertex name: '%s'" % vertName)
#check if node already exists (template,subname)
if self.nodes.has_key(vertName):
# print ("getting existing node")
node=self.nodes.get(vertName)
if node:
                subnames_match=(node.subname or '').upper() == (subname or '').upper()
# print ("\ttemplate: %s" % node.template)
# print ("\tsubname: %s" % node.subname)
# print ("\toptionfile: %s" % node.optionfile)
names_match=node.template.name.upper() == template.upper()
if not(subnames_match and names_match):
raise PipelineError("[PipelineTemplate.AnalysisPipeline] template in expected location did not match\n")
#if it does make sure optionfile matches or is blank or None,
if optionfile != node.optionfile:
#otherwise mismatch is an error
raise PipelineError("[PipelineTemplate.AnalysisPipeline] matched template & subname, but mismatched optionfile\n")
#if it does return it
return node
#if it doesnt, create it
# print("creating new node")
newNode=PipelineNode(self)
newNode.setValues(template,subname,optionfile)
# print ("\ttemplate: %s" % newNode.template)
# print ("\tsubname: %s" % newNode.subname)
# print ("\toptionfile: %s" % newNode.optionfile)
# newNode.template=self.getTemplate(template)
# newNode.subname=subname
# newNode.optionfile=optionfile
self.nodes[vertName]=newNode
self.templategraph.add_vertex(name=vertName,data=newNode)
return newNode
def getNodeWithDict(self,data):
if not isinstance(data,dict):
raise PipelineError("[PipelineTemplate.AnalysisPipeline.getNodeWithDict] gave wrong type: %s" % type(data))
if not(data.has_key('template') and data.has_key('subname') and data.has_key('optionfile')):
raise PipelineError("[PipelineTemplate.AnalysisPipeline.getNodeWithDict] missing an entry, has:(template:%s,subname:%s,optionfile:%s)\n" %(data.has_key('template') , data.has_key('subname') , data.has_key('optionfile')))
return self.getNode(data['template'], data['subname'], data['optionfile'])
def linkNodes(self,source_name,source_subname,sink_name,sink_subname):
#add edge linking nodes
sourceVertName=""
sinkVertName=""
if(source_subname):
sourceVertName="%s|%s" % (source_name.upper(),source_subname.upper())
else:
sourceVertName="%s" % (source_name.upper())
# print ("source vertex name: '%s'" % sourceVertName)
if(sink_subname):
sinkVertName="%s|%s" % (sink_name.upper(),sink_subname.upper())
else:
sinkVertName="%s" % (sink_name.upper())
# print ("sink vertex name: '%s'" % sinkVertName)
self.templategraph.add_edge(sourceVertName,sinkVertName)
def getSourceNodes(self):
#return list of all nodes that aren't targets of other nodes
degrees=self.templategraph.indegree()
result=[]
for i in range(0,len(degrees)):
if not degrees[i]:
result.append(self.templategraph.vs[i].attributes()['data'])
return result
def getSinkNodes(self):
#return list of all nodes that don't have targets
degrees=self.templategraph.outdegree()
result=[]
for i in range(0,len(degrees)):
if not degrees[i]:
result.append(self.templategraph.vs[i].attributes()['data'])
return result
def getParentOfNode(self,node):
source_name=node.template.name
source_subname=node.subname
sourceVertName=""
if(source_subname):
sourceVertName="%s|%s" % (source_name.upper(),source_subname.upper())
else:
sourceVertName="%s" % (source_name.upper())
sourceVert=self.templategraph.vs.find(name=sourceVertName)
parentList=sourceVert.predecessors()
if len(parentList) > 1:
raise PipelineError("[PipelineTemplate.AnalysisPipeline.getParentOfNode] graph indicates node does not only have 1 parent")
if len(parentList) == 0:
return None
return parentList[0].attributes()['data']
def getTargetsOfNode(self,node):
source_name=node.template.name
source_subname=node.subname
sourceVertName=""
result=[]
if(source_subname):
sourceVertName="%s|%s" % (source_name.upper(),source_subname.upper())
else:
sourceVertName="%s" % (source_name.upper())
sourceVert=self.templategraph.vs.find(name=sourceVertName)
for node in sourceVert.successors():
result.append(node.attributes()['data'])
return result
def toSJMStrings(self,splitOpts,baseName,grouplbl):
sjm_strings={}
#get name of string content should be added to:
nodeQueue=[]
cumsuffixQueue=[]
#set starting nodes
for item in self.getSourceNodes():
nodeQueue.append(item)
cumsuffixQueue.append("")
while len(nodeQueue) > 0:
node=nodeQueue.pop(0)
cumsuffix=cumsuffixQueue.pop(0)
#if job is comparing pairs of samples,
if node.template.isCrossJob:
raise PipelineError("[PipelineTemplate.AnalysisPipeline.toSJMStrings] CrossJob Translation not yet implemented")
#handle selected pairs
#TODO, what to do with no selection (missing optionfile)? Fail or do ALL pairwise?
else:
#otherwise translate template once per file
for sample in self.samples.keys():
Sample=self.samples[sample]
stringName=self.getFileNameForString(splitOpts,baseName, node, Sample)
if not (sjm_strings.has_key(stringName)):
sjm_strings[stringName]=""
parentNode=self.getParentOfNode(node)
#TODO get template string & append it to sjm_strings[stringName]
sjm_strings[stringName]+=node.template.toString(grouplbl,cumsuffix,Sample.ID)
#TODO add any extra link-related job dependencies manually
if not (splitOpts['step']):
#TODO link across templates, link back to parents
derp=""
if parentNode is not None:
print("%s <<< %s | %s <- %s | % s : %s" % (stringName, node.template.name,node.subname,parentNode.template.name,parentNode.subname, Sample.ID))
else:
print("%s <<< %s | %s : %s" % (stringName, node.template.name,node.subname,Sample.ID))
for item in self.getTargetsOfNode(node):
nodeQueue.append(item)
if node.template.clearsuffixes:
cumsuffixQueue.append("")
else:
cumsuffixQueue.append(cumsuffix+node.template.suffix)
#TODO add log_dir line to strings
logdir=BiotoolsSettings.getValue("CURDIR")+os.sep+"sjm_logs"
if os.path.exists(logdir):
if not (os.path.isdir(logdir)):
raise PipelineError("[PipelineTemplate.AnalysisPipeline.toSJMStrings] log directory path: %s already exists and is not a directory" % logdir)
else:
os.mkdir(logdir)
if not (os.path.isdir(logdir)):
raise PipelineError("[PipelineTemplate.AnalysisPipeline.toSJMStrings] failed to create log directory")
for sjm in sjm_strings.keys():
sjm_strings[sjm]=sjm_strings.get(sjm)+"log_dir %s" % logdir
return sjm_strings
#produce strings in fully split form
#get source nodes
#starting with each source node, and tracing the tree parent-first, then children:
#use templates to get SJM content,
#track cumulative suffixes
#join strings as appropriate:
# if splitOpts['sample'] and splitOpts['step']:
# #split between samples AND between
# return dict()
# elif splitOpts['sample']:
# #split between samples
#
# return dict()
# elif splitOpts['step']:
# #split only between templates
# return dict()
# else:
# #one giant file
# #add any extra link-related job dependencies manually
# return dict()
# #add sjm logfile location to end of each file
# return sjm_strings
def getFileNameForString(self,splitOpts,baseName,node=None,sample=None):
if splitOpts['sample'] and splitOpts['step']:
#split between samples AND between
            if node.subname:
                return "%s.%s.%s.%s.sjm" % (baseName,node.template.name,node.subname,sample.ID)
            else:
                return "%s.%s.%s.sjm" % (baseName,node.template.name,sample.ID)
elif splitOpts['sample']:
#split between samples
#add any extra link-related job dependencies manually
return "%s.%s.sjm" % (baseName,sample.ID)
elif splitOpts['step']:
#split only between templates
if node.subname:
return "%s.%s.%s.sjm" % (baseName,node.template.name,node.subname)
else:
return "%s.%s.sjm" % (baseName,node.template.name)
else:
#one giant file
#add any extra link-related job dependencies manually
return "%s.sjm" % baseName
    def recursiveVertToString(self,strings,splitOpts,baseName,grouplbl,node,cumsuffix):
        #split on templates but keep samples together, join pairs
        if node.template.isCrossJob:
            derp=""
        else:
            #get one string for each file
            for sample in self.samples.keys():
                Sample=self.samples[sample]
                mystring=node.template.toString(grouplbl,cumsuffix,Sample.source1,Sample.source2)
                #recurse into downstream templates, threading the cumulative suffix
                for nextnode in self.getTargetsOfNode(node):
                    if node.template.clearsuffixes:
                        mystring+=self.recursiveVertToString(strings,splitOpts,baseName,grouplbl,nextnode,node.template.suffix)
                    else:
                        mystring+=self.recursiveVertToString(strings,splitOpts,baseName,grouplbl,nextnode,cumsuffix+node.template.suffix)
#if templates are joined, append extra order_after strings
if(splitOpts['step']):
derp=""
#apl=AnalysisPipeline()
#worked=apl.loadTemplate("BWA_ALIGN_PAIRED")
#print (apl.TemplateIsLoaded("BWA_ALIGN_PAIRED"))
#print (apl.getTemplate("BWA_ALIGN_PAIRED").toTemplateString())
#print (apl.getTemplate("BWA_ALIGN_PAIRED").toString('grouplbl','.cumsuffix','prefix'))
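#A rough usage sketch of the graph plumbing above (template and file names here
#are made up for illustration):
#apl = AnalysisPipeline()
#apl.loadSampleData('samples.txt')
#first = apl.getNode('BWA_ALIGN_PAIRED', None, None)
#second = apl.getNode('SORT_BAM', None, None)
#apl.linkNodes('BWA_ALIGN_PAIRED', None, 'SORT_BAM', None)
#apl.getSourceNodes() # nodes with no incoming edges -> [first]
#apl.getSinkNodes() # nodes with no outgoing edges -> [second]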
|
|
"""
Sandbox Panel Estimators
References
-----------
Baltagi, Badi H. `Econometric Analysis of Panel Data.` 4th ed. Wiley, 2008.
"""
from scikits.statsmodels.tools.tools import categorical
from scikits.statsmodels.regression.linear_model import GLS, WLS
import numpy as np
__all__ = ["PanelModel"]
try:
from pandas import LongPanel, __version__
__version__ >= .1
except:
raise ImportError("While in the sandbox this code depends on the pandas \
package. http://code.google.com/p/pandas/")
def group(X):
"""
Returns unique numeric values for groups without sorting.
Examples
--------
>>> X = np.array(['a','a','b','c','b','c'])
    >>> g = group(X)
>>> g
array([ 0., 0., 1., 2., 1., 2.])
"""
uniq_dict = {}
group = np.zeros(len(X))
for i in xrange(len(X)):
if not X[i] in uniq_dict:
uniq_dict.update({X[i] : len(uniq_dict)})
group[i] = uniq_dict[X[i]]
return group
def repanel_cov(groups, sigmas):
'''calculate error covariance matrix for random effects model
Parameters
----------
groups : array, (nobs, nre) or (nobs,)
array of group/category observations
    sigmas : array, (nre+1,)
array of standard deviations of random effects,
last element is the standard deviation of the
idiosyncratic error
Returns
-------
omega : array, (nobs, nobs)
covariance matrix of error
omegainv : array, (nobs, nobs)
inverse covariance matrix of error
omegainvsqrt : array, (nobs, nobs)
squareroot inverse covariance matrix of error
such that omega = omegainvsqrt * omegainvsqrt.T
Notes
-----
This does not use sparse matrices and constructs nobs by nobs
matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero
'''
if groups.ndim == 1:
groups = groups[:,None]
nobs, nre = groups.shape
omega = sigmas[-1]*np.eye(nobs)
for igr in range(nre):
group = groups[:,igr:igr+1]
groupuniq = np.unique(group)
dummygr = sigmas[igr] * (group == groupuniq).astype(float)
omega += np.dot(dummygr, dummygr.T)
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainvhalf = evec/np.sqrt(ev)
return omega, omegainv, omegainvhalf
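# For instance, with two grouping factors stacked as columns and standard
# deviations (group, period, idiosyncratic), one might call (values
# illustrative):
#   groups = np.column_stack((np.array([0, 0, 1, 1]), np.array([0, 1, 0, 1])))
#   omega, omegainv, omegainvhalf = repanel_cov(groups, np.array([1.0, 0.5, 0.3]))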
class PanelData(LongPanel):
pass
class PanelModel(object):
"""
An abstract statistical model class for panel (longitudinal) datasets.
Parameters
    ----------
endog : array-like or str
If a pandas object is used then endog should be the name of the
endogenous variable as a string.
# exog
# panel_arr
# time_arr
panel_data : pandas.LongPanel object
Notes
-----
If a pandas object is supplied it is assumed that the major_axis is time
and that the minor_axis has the panel variable.
"""
def __init__(self, endog=None, exog=None, panel=None, time=None,
xtnames=None, equation=None, panel_data=None):
if panel_data == None:
# if endog == None and exog == None and panel == None and \
# time == None:
# raise ValueError("If pandel_data is False then endog, exog, \
#panel_arr, and time_arr cannot be None.")
self.initialize(endog, exog, panel, time, xtnames, equation)
# elif aspandas != False:
# if not isinstance(endog, str):
# raise ValueError("If a pandas object is supplied then endog \
#must be a string containing the name of the endogenous variable")
# if not isinstance(aspandas, LongPanel):
# raise ValueError("Only pandas.LongPanel objects are supported")
# self.initialize_pandas(endog, aspandas, panel_name)
def initialize(self, endog, exog, panel, time, xtnames, equation):
"""
Initialize plain array model.
See PanelModel
"""
#TODO: for now, we are going assume a constant, and then make the first
#panel the base, add a flag for this....
# get names
names = equation.split(" ")
self.endog_name = names[0]
exog_names = names[1:] # this makes the order matter in the array
self.panel_name = xtnames[0]
self.time_name = xtnames[1]
novar = exog.var(0) == 0
if True in novar:
cons_index = np.where(novar == 1)[0][0] # constant col. num
exog_names.insert(cons_index, 'cons')
self._cons_index = novar # used again in fit_fixed
self.exog_names = exog_names
self.endog = np.squeeze(np.asarray(endog))
exog = np.asarray(exog)
self.exog = exog
self.panel = np.asarray(panel)
self.time = np.asarray(time)
self.paneluniq = np.unique(panel)
self.timeuniq = np.unique(time)
#TODO: this structure can possibly be extracted somewhat to deal with
#names in general
#TODO: add some dimension checks, etc.
# def initialize_pandas(self, endog, aspandas):
# """
# Initialize pandas objects.
#
# See PanelModel.
# """
# self.aspandas = aspandas
# endog = aspandas[endog].values
# self.endog = np.squeeze(endog)
# exog_name = aspandas.columns.tolist()
# exog_name.remove(endog)
# self.exog = aspandas.filterItems(exog_name).values
#TODO: can the above be simplified to slice notation?
# if panel_name != None:
# self.panel_name = panel_name
# self.exog_name = exog_name
# self.endog_name = endog
# self.time_arr = aspandas.major_axis
#TODO: is time always handled correctly in fromRecords?
# self.panel_arr = aspandas.minor_axis
#TODO: all of this might need to be refactored to explicitly rely (internally)
# on the pandas LongPanel structure for speed and convenience.
# not sure this part is finished...
#TODO: doesn't conform to new initialize
def initialize_pandas(self, panel_data, endog_name, exog_name):
self.panel_data = panel_data
endog = panel_data[endog_name].values # does this create a copy?
self.endog = np.squeeze(endog)
if exog_name == None:
exog_name = panel_data.columns.tolist()
exog_name.remove(endog_name)
self.exog = panel_data.filterItems(exog_name).values # copy?
self._exog_name = exog_name
self._endog_name = endog_name
self._timeseries = panel_data.major_axis # might not need these
self._panelseries = panel_data.minor_axis
#TODO: this could be pulled out and just have a by kwd that takes
# the panel or time array
#TODO: this also needs to be expanded for 'twoway'
def _group_mean(self, X, index='oneway', counts=False, dummies=False):
"""
Get group means of X by time or by panel.
index default is panel
"""
if index == 'oneway':
Y = self.panel
uniq = self.paneluniq
elif index == 'time':
Y = self.time
uniq = self.timeuniq
else:
raise ValueError("index %s not understood" % index)
#TODO: use sparse matrices
dummy = (Y == uniq[:,None]).astype(float)
if X.ndim > 1:
mean = np.dot(dummy,X)/dummy.sum(1)[:,None]
else:
mean = np.dot(dummy,X)/dummy.sum(1)
if counts == False and dummies == False:
return mean
elif counts == True and dummies == False:
return mean, dummy.sum(1)
elif counts == True and dummies == True:
return mean, dummy.sum(1), dummy
elif counts == False and dummies == True:
return mean, dummy
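    # Small worked sketch of the dummy-matrix mean above (illustrative values):
    # with panel = [0, 0, 1] and X = [1., 3., 5.], dummy = [[1, 1, 0], [0, 0, 1]],
    # so np.dot(dummy, X) = [4., 5.], dummy.sum(1) = [2., 1.], and the group
    # means are [2., 5.], ordered by np.unique(panel).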
#TODO: Use kwd arguments or have fit_method methods?
def fit(self, model=None, method=None, effects='oneway'):
"""
method : LSDV, demeaned, MLE, GLS, BE, FE, optional
model :
between
fixed
random
pooled
[gmm]
effects :
oneway
time
twoway
femethod : demeaned (only one implemented)
WLS
remethod :
swar -
amemiya
nerlove
walhus
Notes
------
This is unfinished. None of the method arguments work yet.
Only oneway effects should work.
"""
if method: # get rid of this with default
method = method.lower()
model = model.lower()
if method and method not in ["lsdv", "demeaned", "mle", "gls", "be",
"fe"]: # get rid of if method with default
raise ValueError("%s not a valid method" % method)
# if method == "lsdv":
# self.fit_lsdv(model)
if model == 'pooled':
return GLS(self.endog, self.exog).fit()
if model == 'between':
return self._fit_btwn(method, effects)
if model == 'fixed':
return self._fit_fixed(method, effects)
# def fit_lsdv(self, effects):
# """
# Fit using least squares dummy variables.
#
# Notes
# -----
# Should only be used for small `nobs`.
# """
# pdummies = None
# tdummies = None
def _fit_btwn(self, method, effects):
# group mean regression or WLS
if effects != "twoway":
endog = self._group_mean(self.endog, index=effects)
exog = self._group_mean(self.exog, index=effects)
else:
raise ValueError("%s effects is not valid for the between \
estimator" % s)
befit = GLS(endog, exog).fit()
return befit
def _fit_fixed(self, method, effects):
endog = self.endog
exog = self.exog
demeantwice = False
if effects in ["oneway","twoways"]:
if effects == "twoways":
demeantwice = True
effects = "oneway"
endog_mean, counts = self._group_mean(endog, index=effects,
counts=True)
exog_mean = self._group_mean(exog, index=effects)
counts = counts.astype(int)
endog = endog - np.repeat(endog_mean, counts)
exog = exog - np.repeat(exog_mean, counts, axis=0)
if demeantwice or effects == "time":
endog_mean, dummies = self._group_mean(endog, index="time",
dummies=True)
exog_mean = self._group_mean(exog, index="time")
# This allows unbalanced panels
endog = endog - np.dot(endog_mean, dummies)
exog = exog - np.dot(dummies.T, exog_mean)
fefit = GLS(endog, exog[:,-self._cons_index]).fit()
#TODO: might fail with one regressor
return fefit
class SURPanel(PanelModel):
pass
class SEMPanel(PanelModel):
pass
class DynamicPanel(PanelModel):
pass
if __name__ == "__main__":
try:
import pandas
pandas.version >= .1
except:
raise ImportError("pandas >= .10 not installed")
from pandas import LongPanel
import scikits.statsmodels.api as sm
import numpy.lib.recfunctions as nprf
data = sm.datasets.grunfeld.load()
# Baltagi doesn't include American Steel
endog = data.endog[:-20]
fullexog = data.exog[:-20]
# fullexog.sort(order=['firm','year'])
panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,
usemask=False)
panel_panda = LongPanel.fromRecords(panel_arr, major_field='year',
minor_field='firm')
# the most cumbersome way of doing it as far as preprocessing by hand
exog = fullexog[['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog)
panel = group(fullexog['firm'])
year = fullexog['year']
panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm','year'],
equation='invest value capital')
# note that equation doesn't actually do anything but name the variables
panel_ols = panel_mod.fit(model='pooled')
panel_be = panel_mod.fit(model='between', effects='oneway')
panel_fe = panel_mod.fit(model='fixed', effects='oneway')
panel_bet = panel_mod.fit(model='between', effects='time')
panel_fet = panel_mod.fit(model='fixed', effects='time')
panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')
#see also Baltagi (3rd edt) 3.3 THE RANDOM EFFECTS MODEL p.35
#for explicit formulas for spectral decomposition
#but this works also for unbalanced panel
#
#I also just saw: 9.4.2 The Random Effects Model p.176 which is
#partially almost the same as I did
#
#this needs to use sparse matrices for larger datasets
#
#"""
#
#import numpy as np
#
groups = np.array([0,0,0,1,1,2,2,2])
nobs = groups.shape[0]
groupuniq = np.unique(groups)
periods = np.array([0,1,2,1,2,0,1,2])
perioduniq = np.unique(periods)
dummygr = (groups[:,None] == groupuniq).astype(float)
dummype = (periods[:,None] == perioduniq).astype(float)
sigma = 1.
sigmagr = np.sqrt(2.)
sigmape = np.sqrt(3.)
#dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,
# sigmape*dummype]
#exclude constant ?
dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]
# omega is the error variance-covariance matrix for the stacked
# observations
omega = np.dot(dummyall, dummyall.T) + sigma* np.eye(nobs)
print omega
print np.linalg.cholesky(omega)
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainv2 = np.linalg.inv(omega)
omegacomp = np.dot(evec, (ev * evec).T)
print np.max(np.abs(omegacomp - omega))
#check
#print np.dot(omegainv,omega)
print np.max(np.abs(np.dot(omegainv,omega) - np.eye(nobs)))
omegainvhalf = evec/np.sqrt(ev) #not sure whether ev shouldn't be column
print np.max(np.abs(np.dot(omegainvhalf,omegainvhalf.T) - omegainv))
# now we can use omegainvhalf in GLS (instead of the cholesky)
sigmas2 = np.array([sigmagr, sigmape, sigma])
groups2 = np.column_stack((groups, periods))
omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)
print np.max(np.abs(omega_ - omega))
print np.max(np.abs(omegainv_ - omegainv))
print np.max(np.abs(omegainvhalf_ - omegainvhalf))
# notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)
Pgr = reduce(np.dot,[dummygr,
np.linalg.inv(np.dot(dummygr.T, dummygr)),dummygr.T])
Qgr = np.eye(nobs) - Pgr
# within group effect: np.dot(Qgr, groups)
# but this is not memory efficient, compared to groupstats
print np.max(np.abs(np.dot(Qgr, groups)))
|
|
"""
Read/write tools for nonuniform electric field .grd format.
Matthew Grawe, grawe2 (at) illinois.edu
January 2017
"""
import numpy as np
def next_line(grd_file):
"""
next_line
Function returns the next line in the file
that is not a blank line, unless the line is
'', which is a typical EOF marker.
"""
done = False
while not done:
line = grd_file.readline()
if line == '':
return line, False
elif line.strip():
return line, True
def read_block(grd_file, n_lats):
lats = []
# read+store until we have collected n_lats
go = True
while go:
fline, status = next_line(grd_file)
line = fline.split()
        # the line has lats in it
lats.extend(np.array(line).astype('float'))
        if len(lats) >= n_lats:
go = False
return np.array(lats)
def grd_read(grd_filename):
"""
Opens the .grd file grd_file and returns the following:
lon_grid : 1D numpy array of lons
lat_grid : 1D numpy array of lats
time_grid: 1D numpy array of times
DATA : 3D numpy array of the electric field data, such that
the electric field at (lon, lat) for time t
is accessed via DATA[lon, lat, t].
"""
with open(grd_filename, 'rb') as grd_file:
# read the header line
fline, status = next_line(grd_file)
line = fline.split()
lon_res = float(line[0])
lon_west = float(line[1])
n_lons = int(line[2])
lat_res = float(line[3])
lat_south = float(line[4])
n_lats = int(line[5])
DATA = []
times = []
go = True
while go:
# get the time index line
ftline, status = next_line(grd_file)
tline = ftline.split()
t = float(tline[0])
times.append(t)
SLICE = np.zeros([n_lons, n_lats])
for lon_index in range(0, n_lons):
data_slice = read_block(grd_file, n_lats)
SLICE[lon_index, :] = data_slice
DATA.append(SLICE.T)
# current line should have length one to indicate next time index
# make sure, then back up
before = grd_file.tell()
fline, status = next_line(grd_file)
line = fline.split()
if len(line) != 1:
if status == False:
# EOF, leave
break
else:
raise Exception('Unexpected number of lat entries.')
grd_file.seek(before)
DATA = np.array(DATA).T
lon_grid = np.arange(lon_west, lon_west + lon_res*n_lons, lon_res)
lat_grid = np.arange(lat_south, lat_south + lat_res*n_lats, lat_res)
time_grid = np.array(times)
        return lon_grid, lat_grid, time_grid, DATA
def write_lon_block(grd_file, n_lats, data):
"""
len(data) == n_lats should be True
"""
current_index = 0
go1 = True
while go1:
line = ['']*81
go2 = True
internal_index = 0
while go2:
datum = data[current_index]
line[16*internal_index:16*internal_index+16] = ('%.11g' % datum).rjust(16)
current_index += 1
internal_index += 1
if(current_index >= len(data)):
line[80] = '\n'
grd_file.write("".join(line))
go2 = False
go1 = False
elif(internal_index >= 5):
line[80] = '\n'
grd_file.write("".join(line))
go2 = False
def grd_write(grd_filename, lon_grid, lat_grid, time_grid, DATA):
"""
Writes out DATA corresponding to the locations
specified by lon_grid, lat_grid in the .grd format.
lon_grid must have the westmost point as lon_grid[0].
lat_grid must have the southmost point as lat_grid[0].
Assumptions made:
latitude/longitude resolutions are positive
number of latitude/longitude points in header is positive
at least one space must be between each number
data lines have no more than 5 entries
Assumed structure of header line:
# first 16 spaces allocated as longitude resolution
# next 16 spaces allocated as westmost longitude
# next 5 spaces allocated as number of longitude points
# next 11 spaces allocated as latitude resolution
# next 16 spaces allocated as southmost latitude
# next 16 spaces allocated for number of latitude points
# TOTAL: 80 characters
Assumed stucture of time line:
# 5 blank spaces
# next 16 spaces allocated for time
Assumed structure a data line:
# 16 spaces allocated for data entry
# .. .. ..
"""
with open(grd_filename, 'wb') as grd_file:
# convert the lon grid to -180 to 180 if necessary
lon_grid = np.array(lon_grid)
lon_grid = lon_grid % 360
lon_grid = ((lon_grid + 180) % 360) - 180
lon_res = np.abs(lon_grid[1] - lon_grid[0])
lon_west = lon_grid[0]
n_lons = len(lon_grid)
lat_res = np.abs(lat_grid[1] - lat_grid[0])
lat_south = lat_grid[0]
n_lats = len(lat_grid)
n_times = len(time_grid)
# write the header: 80 characters
header = ['']*81
header[0:16] = ('%.7g' % lon_res).rjust(16)
header[16:32] = ('%.15g' % lon_west).rjust(16)
header[32:37] = str(n_lons).rjust(5)
header[37:48] = ('%.7g' % lat_res).rjust(11)
header[48:64] = ('%.15g' % lat_south).rjust(16)
header[64:80] = str(n_lats).rjust(16)
header[80] = '\n'
header_str = "".join(header)
grd_file.write(header_str)
for i, t in enumerate(time_grid):
# write the time line
timeline = ['']*(16+5)
timeline[5:-1] = ('%.8g' % t).rjust(9)
timeline[-1] = '\n'
timeline_str = "".join(timeline)
grd_file.write(timeline_str)
for j, lon in enumerate(lon_grid):
# write the lon blocks
write_lon_block(grd_file, n_lats, DATA[j, :, i])
grd_file.close()
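# A minimal round-trip sketch (the file names below are placeholders):
#
#     lon_grid, lat_grid, time_grid, DATA = grd_read('efield.grd')
#     # DATA[i, j, k] is the field at lon_grid[i], lat_grid[j], time_grid[k]
#     grd_write('efield_copy.grd', lon_grid, lat_grid, time_grid, DATA)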
|
|
# -*- coding: utf-8 -*-
"""
babel.messages.pofile
~~~~~~~~~~~~~~~~~~~~~
Reading and writing of files in the ``gettext`` PO (portable object)
format.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import re
from babel.messages.catalog import Catalog, Message
from babel.util import wraptext
from babel._compat import text_type
def unescape(string):
r"""Reverse `escape` the given string.
>>> print(unescape('"Say:\\n \\"hello, world!\\"\\n"'))
Say:
"hello, world!"
<BLANKLINE>
:param string: the string to unescape
"""
def replace_escapes(match):
m = match.group(1)
if m == 'n':
return '\n'
elif m == 't':
return '\t'
elif m == 'r':
return '\r'
# m is \ or "
return m
return re.compile(r'\\([\\trn"])').sub(replace_escapes, string[1:-1])
def denormalize(string):
r"""Reverse the normalization done by the `normalize` function.
>>> print(denormalize(r'''""
... "Say:\n"
... " \"hello, world!\"\n"'''))
Say:
"hello, world!"
<BLANKLINE>
>>> print(denormalize(r'''""
... "Say:\n"
... " \"Lorem ipsum dolor sit "
... "amet, consectetur adipisicing"
... " elit, \"\n"'''))
Say:
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
<BLANKLINE>
:param string: the string to denormalize
"""
if '\n' in string:
escaped_lines = string.splitlines()
if string.startswith('""'):
escaped_lines = escaped_lines[1:]
lines = map(unescape, escaped_lines)
return ''.join(lines)
else:
return unescape(string)
class _NormalizedString(object):
def __init__(self, *args):
self._strs = []
for arg in args:
self.append(arg)
def append(self, s):
self._strs.append(s.strip())
def denormalize(self):
return ''.join(map(unescape, self._strs))
def __nonzero__(self):
return bool(self._strs)
class PoFileParser(object):
"""Support class to read messages from a ``gettext`` PO (portable object) file
and add them to a `Catalog`
See `read_po` for simple cases.
"""
_keywords = [
'msgid',
'msgstr',
'msgctxt',
'msgid_plural',
]
def __init__(self, catalog, ignore_obsolete=False):
self.catalog = catalog
self.ignore_obsolete = ignore_obsolete
self.counter = 0
self.offset = 0
self._reset_message_state()
def _reset_message_state(self):
self.messages = []
self.translations = []
self.locations = []
self.flags = []
self.user_comments = []
self.auto_comments = []
self.context = None
self.obsolete = False
self.in_msgid = False
self.in_msgstr = False
self.in_msgctxt = False
def _add_message(self):
"""
Add a message to the catalog based on the current parser state and
clear the state ready to process the next message.
"""
self.translations.sort()
if len(self.messages) > 1:
msgid = tuple([m.denormalize() for m in self.messages])
else:
msgid = self.messages[0].denormalize()
if isinstance(msgid, (list, tuple)):
string = ['' for _ in range(self.catalog.num_plurals)]
for idx, translation in self.translations:
if idx >= self.catalog.num_plurals:
self._invalid_pofile("", self.offset, "msg has more translations than num_plurals of catalog")
continue
string[idx] = translation.denormalize()
string = tuple(string)
else:
string = self.translations[0][1].denormalize()
if self.context:
msgctxt = self.context.denormalize()
else:
msgctxt = None
message = Message(msgid, string, list(self.locations), set(self.flags),
self.auto_comments, self.user_comments, lineno=self.offset + 1,
context=msgctxt)
if self.obsolete:
if not self.ignore_obsolete:
self.catalog.obsolete[msgid] = message
else:
self.catalog[msgid] = message
self.counter += 1
self._reset_message_state()
def _finish_current_message(self):
if self.messages:
self._add_message()
def _process_message_line(self, lineno, line, obsolete=False):
if line.startswith('"'):
self._process_string_continuation_line(line, lineno)
else:
self._process_keyword_line(lineno, line, obsolete)
def _process_keyword_line(self, lineno, line, obsolete=False):
for keyword in self._keywords:
if line.startswith(keyword) and line[len(keyword)] in [' ', '[']:
arg = line[len(keyword):]
break
else:
self._invalid_pofile(line, lineno, "Start of line didn't match any expected keyword.")
return
if keyword in ['msgid', 'msgctxt']:
self._finish_current_message()
self.obsolete = obsolete
# The line that has the msgid is stored as the offset of the msg
# should this be the msgctxt if it has one?
if keyword == 'msgid':
self.offset = lineno
if keyword in ['msgid', 'msgid_plural']:
self.in_msgctxt = False
self.in_msgid = True
self.messages.append(_NormalizedString(arg))
elif keyword == 'msgstr':
self.in_msgid = False
self.in_msgstr = True
if arg.startswith('['):
idx, msg = arg[1:].split(']', 1)
self.translations.append([int(idx), _NormalizedString(msg)])
else:
self.translations.append([0, _NormalizedString(arg)])
elif keyword == 'msgctxt':
self.in_msgctxt = True
self.context = _NormalizedString(arg)
def _process_string_continuation_line(self, line, lineno):
if self.in_msgid:
s = self.messages[-1]
elif self.in_msgstr:
s = self.translations[-1][1]
elif self.in_msgctxt:
s = self.context
else:
self._invalid_pofile(line, lineno, "Got line starting with \" but not in msgid, msgstr or msgctxt")
return
s.append(line)
def _process_comment(self, line):
self._finish_current_message()
if line[1:].startswith(':'):
for location in line[2:].lstrip().split():
pos = location.rfind(':')
if pos >= 0:
try:
lineno = int(location[pos + 1:])
except ValueError:
continue
self.locations.append((location[:pos], lineno))
else:
self.locations.append((location, None))
elif line[1:].startswith(','):
for flag in line[2:].lstrip().split(','):
self.flags.append(flag.strip())
elif line[1:].startswith('.'):
# These are called auto-comments
comment = line[2:].strip()
if comment: # Just check that we're not adding empty comments
self.auto_comments.append(comment)
else:
# These are called user comments
self.user_comments.append(line[1:].strip())
def parse(self, fileobj):
"""
Reads from the file-like object `fileobj` and adds any po file
units found in it to the `Catalog` supplied to the constructor.
"""
for lineno, line in enumerate(fileobj):
line = line.strip()
if not isinstance(line, text_type):
line = line.decode(self.catalog.charset)
if not line:
continue
if line.startswith('#'):
if line[1:].startswith('~'):
self._process_message_line(lineno, line[2:].lstrip(), obsolete=True)
else:
self._process_comment(line)
else:
self._process_message_line(lineno, line)
self._finish_current_message()
# No actual messages found, but there was some info in comments, from which
# we'll construct an empty header message
if not self.counter and (self.flags or self.user_comments or self.auto_comments):
self.messages.append(_NormalizedString(u'""'))
self.translations.append([0, _NormalizedString(u'""')])
self._add_message()
def _invalid_pofile(self, line, lineno, msg):
print("WARNING:", msg)
print("WARNING: Problem on line {0}: {1}".format(lineno + 1, line))
def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None):
"""Read messages from a ``gettext`` PO (portable object) file from the given
file-like object and return a `Catalog`.
>>> from datetime import datetime
>>> from babel._compat import StringIO
>>> buf = StringIO('''
... #: main.py:1
... #, fuzzy, python-format
... msgid "foo %(name)s"
... msgstr "quux %(name)s"
...
... # A user comment
... #. An auto comment
... #: main.py:3
... msgid "bar"
... msgid_plural "baz"
... msgstr[0] "bar"
... msgstr[1] "baaz"
... ''')
>>> catalog = read_po(buf)
>>> catalog.revision_date = datetime(2007, 4, 1)
>>> for message in catalog:
... if message.id:
... print((message.id, message.string))
... print(' ', (message.locations, sorted(list(message.flags))))
... print(' ', (message.user_comments, message.auto_comments))
(u'foo %(name)s', u'quux %(name)s')
([(u'main.py', 1)], [u'fuzzy', u'python-format'])
([], [])
((u'bar', u'baz'), (u'bar', u'baaz'))
([(u'main.py', 3)], [])
([u'A user comment'], [u'An auto comment'])
.. versionadded:: 1.0
Added support for explicit charset argument.
:param fileobj: the file-like object to read the PO file from
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param ignore_obsolete: whether to ignore obsolete messages in the input
:param charset: the character set of the catalog.
"""
catalog = Catalog(locale=locale, domain=domain, charset=charset)
parser = PoFileParser(catalog, ignore_obsolete)
parser.parse(fileobj)
return catalog
WORD_SEP = re.compile('('
r'\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w)' # em-dash
')')
def escape(string):
r"""Escape the given string so that it can be included in double-quoted
strings in ``PO`` files.
>>> escape('''Say:
... "hello, world!"
... ''')
'"Say:\\n \\"hello, world!\\"\\n"'
:param string: the string to escape
"""
return '"%s"' % string.replace('\\', '\\\\') \
.replace('\t', '\\t') \
.replace('\r', '\\r') \
.replace('\n', '\\n') \
.replace('\"', '\\"')
def normalize(string, prefix='', width=76):
r"""Convert a string into a format that is appropriate for .po files.
>>> print(normalize('''Say:
... "hello, world!"
... ''', width=None))
""
"Say:\n"
" \"hello, world!\"\n"
>>> print(normalize('''Say:
... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
... ''', width=32))
""
"Say:\n"
" \"Lorem ipsum dolor sit "
"amet, consectetur adipisicing"
" elit, \"\n"
:param string: the string to normalize
:param prefix: a string that should be prepended to every line
:param width: the maximum line width; use `None`, 0, or a negative number
to completely disable line wrapping
"""
if width and width > 0:
prefixlen = len(prefix)
lines = []
for line in string.splitlines(True):
if len(escape(line)) + prefixlen > width:
chunks = WORD_SEP.split(line)
chunks.reverse()
while chunks:
buf = []
size = 2
while chunks:
l = len(escape(chunks[-1])) - 2 + prefixlen
if size + l < width:
buf.append(chunks.pop())
size += l
else:
if not buf:
# handle long chunks by putting them on a
# separate line
buf.append(chunks.pop())
break
lines.append(u''.join(buf))
else:
lines.append(line)
else:
lines = string.splitlines(True)
if len(lines) <= 1:
return escape(string)
# Remove empty trailing line
if lines and not lines[-1]:
del lines[-1]
lines[-1] += '\n'
return u'""\n' + u'\n'.join([(prefix + escape(line)) for line in lines])
def write_po(fileobj, catalog, width=76, no_location=False, omit_header=False,
sort_output=False, sort_by_file=False, ignore_obsolete=False,
include_previous=False, include_lineno=True):
r"""Write a ``gettext`` PO (portable object) template file for a given
message catalog to the provided file-like object.
>>> catalog = Catalog()
>>> catalog.add(u'foo %(name)s', locations=[('main.py', 1)],
... flags=('fuzzy',))
<Message...>
>>> catalog.add((u'bar', u'baz'), locations=[('main.py', 3)])
<Message...>
>>> from babel._compat import BytesIO
>>> buf = BytesIO()
>>> write_po(buf, catalog, omit_header=True)
>>> print(buf.getvalue().decode("utf8"))
#: main.py:1
#, fuzzy, python-format
msgid "foo %(name)s"
msgstr ""
<BLANKLINE>
#: main.py:3
msgid "bar"
msgid_plural "baz"
msgstr[0] ""
msgstr[1] ""
<BLANKLINE>
<BLANKLINE>
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param width: the maximum line width for the generated output; use `None`,
0, or a negative number to completely disable line wrapping
:param no_location: do not emit a location comment for every message
:param omit_header: do not include the ``msgid ""`` entry at the top of the
output
:param sort_output: whether to sort the messages in the output by msgid
:param sort_by_file: whether to sort the messages in the output by their
locations
:param ignore_obsolete: whether to ignore obsolete messages and not include
them in the output; by default they are included as
comments
:param include_previous: include the old msgid as a comment when
updating the catalog
:param include_lineno: include line number in the location comment
"""
def _normalize(key, prefix=''):
return normalize(key, prefix=prefix, width=width)
def _write(text):
if isinstance(text, text_type):
text = text.encode(catalog.charset, 'backslashreplace')
fileobj.write(text)
def _write_comment(comment, prefix=''):
# xgettext always wraps comments even if --no-wrap is passed;
# provide the same behaviour
if width and width > 0:
_width = width
else:
_width = 76
for line in wraptext(comment, _width):
_write('#%s %s\n' % (prefix, line.strip()))
def _write_message(message, prefix=''):
if isinstance(message.id, (list, tuple)):
if message.context:
_write('%smsgctxt %s\n' % (prefix,
_normalize(message.context, prefix)))
_write('%smsgid %s\n' % (prefix, _normalize(message.id[0], prefix)))
_write('%smsgid_plural %s\n' % (
prefix, _normalize(message.id[1], prefix)
))
for idx in range(catalog.num_plurals):
try:
string = message.string[idx]
except IndexError:
string = ''
_write('%smsgstr[%d] %s\n' % (
prefix, idx, _normalize(string, prefix)
))
else:
if message.context:
_write('%smsgctxt %s\n' % (prefix,
_normalize(message.context, prefix)))
_write('%smsgid %s\n' % (prefix, _normalize(message.id, prefix)))
_write('%smsgstr %s\n' % (
prefix, _normalize(message.string or '', prefix)
))
sort_by = None
if sort_output:
sort_by = "message"
elif sort_by_file:
sort_by = "location"
for message in _sort_messages(catalog, sort_by=sort_by):
if not message.id: # This is the header "message"
if omit_header:
continue
comment_header = catalog.header_comment
if width and width > 0:
lines = []
for line in comment_header.splitlines():
lines += wraptext(line, width=width,
subsequent_indent='# ')
comment_header = u'\n'.join(lines)
_write(comment_header + u'\n')
for comment in message.user_comments:
_write_comment(comment)
for comment in message.auto_comments:
_write_comment(comment, prefix='.')
if not no_location:
locs = []
for filename, lineno in sorted(message.locations):
if lineno and include_lineno:
locs.append(u'%s:%d' % (filename.replace(os.sep, '/'), lineno))
else:
locs.append(u'%s' % filename.replace(os.sep, '/'))
_write_comment(' '.join(locs), prefix=':')
if message.flags:
_write('#%s\n' % ', '.join([''] + sorted(message.flags)))
if message.previous_id and include_previous:
_write_comment('msgid %s' % _normalize(message.previous_id[0]),
prefix='|')
if len(message.previous_id) > 1:
_write_comment('msgid_plural %s' % _normalize(
message.previous_id[1]
), prefix='|')
_write_message(message)
_write('\n')
if not ignore_obsolete:
for message in _sort_messages(
catalog.obsolete.values(),
sort_by=sort_by
):
for comment in message.user_comments:
_write_comment(comment)
_write_message(message, prefix='#~ ')
_write('\n')
def _sort_messages(messages, sort_by):
"""
Sort the given message iterable by the given criteria.
Always returns a list.
:param messages: An iterable of Messages.
:param sort_by: Sort by which criteria? Options are `message` and `location`.
:return: list[Message]
"""
messages = list(messages)
if sort_by == "message":
messages.sort()
elif sort_by == "location":
messages.sort(key=lambda m: m.locations)
return messages
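# A short round-trip sketch (catalog contents below are illustrative):
#
#     from babel._compat import StringIO, BytesIO
#     catalog = read_po(StringIO('msgid "foo"\nmsgstr "bar"\n'), locale='de')
#     buf = BytesIO()
#     write_po(buf, catalog, omit_header=True)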
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from unittest import mock
import fixtures
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import constants as db_const
from neutron_lib.exceptions import flavors as flav_exc
from neutron_lib.plugins import constants
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
from neutron.db.models import l3 as l3_models
from neutron.db import servicetype_db
from neutron.extensions import flavors
from neutron.objects import flavor as flavor_obj
from neutron.services.flavors import flavors_plugin
from neutron.services import provider_configuration as provconf
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit import dummy_plugin
from neutron.tests.unit.extensions import base as extension
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
_driver = ('neutron.tests.unit.extensions.test_flavors.'
'DummyServiceDriver')
_provider = dummy_plugin.RESOURCE_NAME
_long_name = 'x' * (db_const.NAME_FIELD_SIZE + 1)
_long_description = 'x' * (db_const.LONG_DESCRIPTION_FIELD_SIZE + 1)
class FlavorExtensionTestCase(extension.ExtensionTestCase):
def setUp(self):
super(FlavorExtensionTestCase, self).setUp()
self.setup_extension(
'neutron.services.flavors.flavors_plugin.FlavorsPlugin',
constants.FLAVORS, flavors.Flavors, '',
supported_extension_aliases=['flavors'])
def test_create_flavor(self):
tenant_id = uuidutils.generate_uuid()
# Use service_type FLAVORS since plugin must be loaded to validate
data = {'flavor': {'name': 'GOLD',
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'project_id': tenant_id,
'enabled': True}}
expected = copy.deepcopy(data)
expected['flavor']['service_profiles'] = []
instance = self.plugin.return_value
instance.create_flavor.return_value = expected['flavor']
res = self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_flavor.assert_called_with(mock.ANY,
flavor=expected)
res = self.deserialize(res)
self.assertIn('flavor', res)
self.assertEqual(expected, res)
def test_create_flavor_invalid_service_type(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': 'GOLD',
'service_type': 'BROKEN',
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_too_long_name(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': _long_name,
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_too_long_description(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': _long_name,
'service_type': constants.FLAVORS,
'description': _long_description,
'tenant_id': tenant_id,
'enabled': True}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_flavor_invalid_enabled(self):
tenant_id = uuidutils.generate_uuid()
data = {'flavor': {'name': _long_name,
'service_type': constants.FLAVORS,
'description': 'the best flavor',
'tenant_id': tenant_id,
'enabled': 'BROKEN'}}
self.api.post(_get_path('flavors', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': 'the best flavor',
'enabled': True}}
expected = copy.copy(data)
expected['flavor']['service_profiles'] = []
instance = self.plugin.return_value
instance.update_flavor.return_value = expected['flavor']
res = self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.update_flavor.assert_called_with(mock.ANY,
flavor_id,
flavor=expected)
res = self.deserialize(res)
self.assertIn('flavor', res)
self.assertEqual(expected, res)
def test_update_flavor_too_long_name(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': _long_name,
'description': 'the best flavor',
'enabled': True}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor_too_long_description(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': _long_description,
'enabled': True}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_flavor_invalid_enabled(self):
flavor_id = 'fake_id'
data = {'flavor': {'name': 'GOLD',
'description': _long_description,
'enabled': 'BROKEN'}}
self.api.put(_get_path('flavors', id=flavor_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_delete_flavor(self):
flavor_id = 'fake_id'
instance = self.plugin.return_value
self.api.delete(_get_path('flavors', id=flavor_id, fmt=self.fmt),
content_type='application/%s' % self.fmt)
instance.delete_flavor.assert_called_with(mock.ANY,
flavor_id)
def test_show_flavor(self):
flavor_id = 'fake_id'
expected = {'flavor': {'id': flavor_id,
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-1']}}
instance = self.plugin.return_value
instance.get_flavor.return_value = expected['flavor']
res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt))
instance.get_flavor.assert_called_with(mock.ANY,
flavor_id,
fields=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_get_flavors(self):
data = {'flavors': [{'id': 'id1',
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-1']},
{'id': 'id2',
'name': 'GOLD',
'description': 'the best flavor',
'enabled': True,
'service_profiles': ['profile-2', 'profile-1']}]}
instance = self.plugin.return_value
instance.get_flavors.return_value = data['flavors']
res = self.api.get(_get_path('flavors', fmt=self.fmt))
instance.get_flavors.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
res = self.deserialize(res)
self.assertEqual(data, res)
def test_create_service_profile(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': '',
'tenant_id': tenant_id,
'project_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
instance = self.plugin.return_value
instance.create_service_profile.return_value = (
expected['service_profile'])
res = self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.create_service_profile.assert_called_with(
mock.ANY,
service_profile=expected)
res = self.deserialize(res)
self.assertIn('service_profile', res)
self.assertEqual(expected, res)
def test_create_service_profile_too_long_description(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': _long_description,
'driver': '',
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_service_profile_too_long_driver(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': _long_description,
'tenant_id': tenant_id,
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_create_service_profile_invalid_enabled(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'description': 'the best sp',
'driver': '',
'tenant_id': tenant_id,
'enabled': 'BROKEN',
'metainfo': '{"data": "value"}'}}
self.api.post(_get_path('service_profiles', fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_service_profile(self):
sp_id = "fake_id"
expected = {'service_profile': {'description': 'the best sp',
'enabled': False,
'metainfo': '{"data1": "value3"}'}}
instance = self.plugin.return_value
instance.update_service_profile.return_value = (
expected['service_profile'])
res = self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.update_service_profile.assert_called_with(
mock.ANY,
sp_id,
service_profile=expected)
res = self.deserialize(res)
self.assertIn('service_profile', res)
self.assertEqual(expected, res)
def test_update_service_profile_too_long_description(self):
sp_id = "fake_id"
expected = {'service_profile': {'description': 'the best sp',
'enabled': 'BROKEN',
'metainfo': '{"data1": "value3"}'}}
self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_update_service_profile_invalid_enabled(self):
sp_id = "fake_id"
expected = {'service_profile': {'description': 'the best sp',
'enabled': 'BROKEN',
'metainfo': '{"data1": "value3"}'}}
self.api.put(_get_path('service_profiles',
id=sp_id, fmt=self.fmt),
self.serialize(expected),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
def test_delete_service_profile(self):
sp_id = 'fake_id'
instance = self.plugin.return_value
self.api.delete(_get_path('service_profiles', id=sp_id, fmt=self.fmt),
content_type='application/%s' % self.fmt)
instance.delete_service_profile.assert_called_with(mock.ANY,
sp_id)
def test_show_service_profile(self):
sp_id = 'fake_id'
expected = {'service_profile': {'id': 'id1',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True}}
instance = self.plugin.return_value
instance.get_service_profile.return_value = (
expected['service_profile'])
res = self.api.get(_get_path('service_profiles',
id=sp_id, fmt=self.fmt))
instance.get_service_profile.assert_called_with(mock.ANY,
sp_id,
fields=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_get_service_profiles(self):
expected = {'service_profiles': [{'id': 'id1',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True},
{'id': 'id2',
'driver': _driver,
'description': 'desc',
'metainfo': '{}',
'enabled': True}]}
instance = self.plugin.return_value
instance.get_service_profiles.return_value = (
expected['service_profiles'])
res = self.api.get(_get_path('service_profiles', fmt=self.fmt))
instance.get_service_profiles.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_associate_service_profile_with_flavor(self):
tenant_id = uuidutils.generate_uuid()
expected = {'service_profile': {'id': _uuid(),
'tenant_id': tenant_id,
'project_id': tenant_id}}
instance = self.plugin.return_value
instance.create_flavor_service_profile.return_value = (
expected['service_profile'])
res = self.api.post('/flavors/fl_id/service_profiles',
self.serialize(expected),
content_type='application/%s' % self.fmt)
instance.create_flavor_service_profile.assert_called_with(
mock.ANY, service_profile=expected, flavor_id='fl_id')
res = self.deserialize(res)
self.assertEqual(expected, res)
def test_disassociate_service_profile_with_flavor(self):
instance = self.plugin.return_value
instance.delete_flavor_service_profile.return_value = None
self.api.delete('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
content_type='application/%s' % self.fmt)
instance.delete_flavor_service_profile.assert_called_with(
mock.ANY,
'fake_spid',
flavor_id='fl_id')
def test_update_association_error(self):
"""Confirm that update is not permitted with user error."""
new_id = uuidutils.generate_uuid()
data = {'service_profile': {'id': new_id}}
self.api.put('/flavors/fl_id/service_profiles/%s' % 'fake_spid',
self.serialize(data),
content_type='application/%s' % self.fmt,
status=exc.HTTPBadRequest.code)
class DummyServicePlugin(object):
def driver_loaded(self, driver, service_profile):
pass
@classmethod
def get_plugin_type(cls):
return dummy_plugin.DUMMY_SERVICE_TYPE
def get_plugin_description(self):
return "Dummy service plugin, aware of flavors"
class DummyServiceDriver(object):
@staticmethod
def get_service_type():
return dummy_plugin.DUMMY_SERVICE_TYPE
def __init__(self, plugin):
pass
class FlavorPluginTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
base.PluginFixture):
def setUp(self):
super(FlavorPluginTestCase, self).setUp()
self.config_parse()
cfg.CONF.set_override(
'service_plugins',
['neutron.tests.unit.extensions.test_flavors.DummyServicePlugin'])
self.useFixture(
fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))
self.plugin = flavors_plugin.FlavorsPlugin()
self.ctx = context.get_admin_context()
providers = [DummyServiceDriver.get_service_type() +
":" + _provider + ":" + _driver]
self.service_manager = servicetype_db.ServiceTypeManager.get_instance()
self.service_providers = mock.patch.object(
provconf.NeutronModule, 'service_providers').start()
self.service_providers.return_value = providers
for provider in providers:
self.service_manager.add_provider_configuration(
provider.split(':')[0], provconf.ProviderConfiguration())
db_api.CONTEXT_WRITER.get_engine()
def _create_flavor(self, description=None):
flavor = {'flavor': {'name': 'GOLD',
'service_type': dummy_plugin.DUMMY_SERVICE_TYPE,
'description': description or 'the best flavor',
'enabled': True}}
return self.plugin.create_flavor(self.ctx, flavor), flavor
def test_create_flavor(self):
self._create_flavor()
res = flavor_obj.Flavor.get_objects(self.ctx)
self.assertEqual(1, len(res))
self.assertEqual('GOLD', res[0]['name'])
self.assertEqual(
dummy_plugin.DUMMY_SERVICE_TYPE, res[0]['service_type'])
def test_update_flavor(self):
fl, flavor = self._create_flavor()
flavor = {'flavor': {'name': 'Silver',
'enabled': False}}
self.plugin.update_flavor(self.ctx, fl['id'], flavor)
# don't reuse cached models from previous plugin call
self.ctx.session.expire_all()
res = flavor_obj.Flavor.get_object(self.ctx, id=fl['id'])
self.assertEqual('Silver', res['name'])
self.assertFalse(res['enabled'])
def test_delete_flavor(self):
fl, _ = self._create_flavor()
self.plugin.delete_flavor(self.ctx, fl['id'])
self.assertFalse(flavor_obj.Flavor.objects_exist(self.ctx))
def test_show_flavor(self):
fl, _ = self._create_flavor()
show_fl = self.plugin.get_flavor(self.ctx, fl['id'])
self.assertEqual(fl, show_fl)
def test_get_flavors(self):
fl, flavor = self._create_flavor()
flavor['flavor']['name'] = 'SILVER'
self.plugin.create_flavor(self.ctx, flavor)
show_fl = self.plugin.get_flavors(self.ctx)
self.assertEqual(2, len(show_fl))
def _create_service_profile(self, description=None):
data = {'service_profile':
{'description': description or 'the best sp',
'driver': _driver,
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
return sp, data
def test_create_service_profile(self):
sp, data = self._create_service_profile()
res = flavor_obj.ServiceProfile.get_object(self.ctx, id=sp['id'])
self.assertIsNotNone(res)
self.assertEqual(data['service_profile']['driver'], res.driver)
self.assertEqual(data['service_profile']['metainfo'], res.metainfo)
def test_create_service_profile_empty_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': '',
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
res = flavor_obj.ServiceProfile.get_object(self.ctx, id=sp['id'])
self.assertIsNotNone(res)
self.assertEqual(data['service_profile']['driver'], res.driver)
self.assertEqual(data['service_profile']['metainfo'], res.metainfo)
def test_create_service_profile_invalid_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': "Broken",
'enabled': True,
'metainfo': '{"data": "value"}'}}
self.assertRaises(flav_exc.ServiceProfileDriverNotFound,
self.plugin.create_service_profile,
self.ctx,
data)
def test_create_service_profile_invalid_empty(self):
data = {'service_profile':
{'description': '',
'driver': '',
'enabled': True,
'metainfo': ''}}
self.assertRaises(flav_exc.ServiceProfileEmpty,
self.plugin.create_service_profile,
self.ctx,
data)
def test_update_service_profile(self):
sp, data = self._create_service_profile()
data['service_profile']['metainfo'] = '{"data": "value1"}'
sp = self.plugin.update_service_profile(self.ctx, sp['id'],
data)
# don't reuse cached models from previous plugin call
self.ctx.session.expire_all()
res = flavor_obj.ServiceProfile.get_object(self.ctx, id=sp['id'])
self.assertEqual(data['service_profile']['metainfo'], res['metainfo'])
def test_delete_service_profile(self):
sp, data = self._create_service_profile()
self.plugin.delete_service_profile(self.ctx, sp['id'])
res = flavor_obj.ServiceProfile.get_objects(self.ctx)
self.assertFalse(res)
def test_show_service_profile(self):
sp, data = self._create_service_profile()
sp_show = self.plugin.get_service_profile(self.ctx, sp['id'])
self.assertEqual(sp, sp_show)
def test_get_service_profiles(self):
self._create_service_profile()
self._create_service_profile(description='another sp')
self.assertEqual(2, len(self.plugin.get_service_profiles(self.ctx)))
def test_associate_service_profile_with_flavor(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
binding = flavor_obj.FlavorServiceProfileBinding.get_objects(
self.ctx)[0]
self.assertEqual(fl['id'], binding['flavor_id'])
self.assertEqual(sp['id'], binding['service_profile_id'])
# don't reuse cached models from previous plugin call
self.ctx.session.expire_all()
res = self.plugin.get_flavor(self.ctx, fl['id'])
self.assertEqual(1, len(res['service_profiles']))
self.assertEqual(sp['id'], res['service_profiles'][0])
res = self.plugin.get_service_profile(self.ctx, sp['id'])
self.assertEqual(1, len(res['flavors']))
self.assertEqual(fl['id'], res['flavors'][0])
def test_autodelete_flavor_associations(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.plugin.delete_flavor(self.ctx, fl['id'])
self.assertFalse(
flavor_obj.FlavorServiceProfileBinding.objects_exist(self.ctx))
def test_associate_service_profile_with_flavor_exists(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(flav_exc.FlavorServiceProfileBindingExists,
self.plugin.create_flavor_service_profile,
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
def test_disassociate_service_profile_with_flavor(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.plugin.delete_flavor_service_profile(
self.ctx, sp['id'], fl['id'])
self.assertFalse(
flavor_obj.FlavorServiceProfileBinding.objects_exist(self.ctx))
self.assertRaises(
flav_exc.FlavorServiceProfileBindingNotFound,
self.plugin.delete_flavor_service_profile,
self.ctx, sp['id'], fl['id'])
def test_delete_service_profile_in_use(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flav_exc.ServiceProfileInUse,
self.plugin.delete_service_profile,
self.ctx,
sp['id'])
def test_delete_flavor_in_use(self):
# make use of router since it has a flavor id
fl, data = self._create_flavor()
with db_api.CONTEXT_WRITER.using(self.ctx):
self.ctx.session.add(l3_models.Router(flavor_id=fl['id']))
self.assertRaises(
flav_exc.FlavorInUse,
self.plugin.delete_flavor,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_no_binding(self):
fl, data = self._create_flavor()
self.assertRaises(
flav_exc.FlavorServiceProfileBindingNotFound,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_disabled(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': _driver,
'enabled': False,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flav_exc.ServiceProfileDisabled,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider_no_driver(self):
data = {'service_profile':
{'description': 'the best sp',
'driver': '',
'enabled': True,
'metainfo': '{"data": "value"}'}}
sp = self.plugin.create_service_profile(self.ctx,
data)
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
self.assertRaises(
flav_exc.ServiceProfileDriverNotFound,
self.plugin.get_flavor_next_provider,
self.ctx,
fl['id'])
def test_get_flavor_next_provider(self):
sp, data = self._create_service_profile()
fl, data = self._create_flavor()
self.plugin.create_flavor_service_profile(
self.ctx,
{'service_profile': {'id': sp['id']}},
fl['id'])
providers = self.plugin.get_flavor_next_provider(
self.ctx,
fl['id'])
self.assertEqual(_provider, providers[0].get('provider', None))
|
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <nils@we155.de>
# This program is published under a GPLv2 license
# scapy.contrib.description = Unified Diagnostic Service (UDS)
# scapy.contrib.status = loads
import struct
import time
from itertools import product
from scapy.fields import ByteEnumField, StrField, ConditionalField, \
BitEnumField, BitField, XByteField, FieldListField, \
XShortField, X3BytesField, XIntField, ByteField, \
ShortField, ObservableDict, XShortEnumField, XByteEnumField, StrLenField, \
FieldLenField
from scapy.packet import Packet, bind_layers, NoPayload
from scapy.config import conf
from scapy.error import log_loading
from scapy.utils import PeriodicSenderThread
from scapy.contrib.isotp import ISOTP
from scapy.compat import Dict, Union
"""
UDS
"""
try:
if conf.contribs['UDS']['treat-response-pending-as-answer']:
pass
except KeyError:
log_loading.info("Specify \"conf.contribs['UDS'] = "
"{'treat-response-pending-as-answer': True}\" to treat "
"a negative response 'requestCorrectlyReceived-"
"ResponsePending' as answer of a request. \n"
"The default value is False.")
conf.contribs['UDS'] = {'treat-response-pending-as-answer': False}
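# Illustrative configuration sketch (not part of the original module): the
# option described in the message above can be enabled before exchanging
# packets so that 'requestCorrectlyReceived-ResponsePending' negative
# responses are treated as answers, e.g.:
#   from scapy.config import conf
#   conf.contribs['UDS'] = {'treat-response-pending-as-answer': True}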
class UDS(ISOTP):
services = ObservableDict(
{0x10: 'DiagnosticSessionControl',
0x11: 'ECUReset',
0x14: 'ClearDiagnosticInformation',
0x19: 'ReadDTCInformation',
0x22: 'ReadDataByIdentifier',
0x23: 'ReadMemoryByAddress',
0x24: 'ReadScalingDataByIdentifier',
0x27: 'SecurityAccess',
0x28: 'CommunicationControl',
0x2A: 'ReadDataPeriodicIdentifier',
0x2C: 'DynamicallyDefineDataIdentifier',
0x2E: 'WriteDataByIdentifier',
0x2F: 'InputOutputControlByIdentifier',
0x31: 'RoutineControl',
0x34: 'RequestDownload',
0x35: 'RequestUpload',
0x36: 'TransferData',
0x37: 'RequestTransferExit',
0x38: 'RequestFileTransfer',
0x3D: 'WriteMemoryByAddress',
0x3E: 'TesterPresent',
0x50: 'DiagnosticSessionControlPositiveResponse',
0x51: 'ECUResetPositiveResponse',
0x54: 'ClearDiagnosticInformationPositiveResponse',
0x59: 'ReadDTCInformationPositiveResponse',
0x62: 'ReadDataByIdentifierPositiveResponse',
0x63: 'ReadMemoryByAddressPositiveResponse',
0x64: 'ReadScalingDataByIdentifierPositiveResponse',
0x67: 'SecurityAccessPositiveResponse',
0x68: 'CommunicationControlPositiveResponse',
0x6A: 'ReadDataPeriodicIdentifierPositiveResponse',
0x6C: 'DynamicallyDefineDataIdentifierPositiveResponse',
0x6E: 'WriteDataByIdentifierPositiveResponse',
0x6F: 'InputOutputControlByIdentifierPositiveResponse',
0x71: 'RoutineControlPositiveResponse',
0x74: 'RequestDownloadPositiveResponse',
0x75: 'RequestUploadPositiveResponse',
0x76: 'TransferDataPositiveResponse',
0x77: 'RequestTransferExitPositiveResponse',
0x78: 'RequestFileTransferPositiveResponse',
0x7D: 'WriteMemoryByAddressPositiveResponse',
0x7E: 'TesterPresentPositiveResponse',
0x83: 'AccessTimingParameter',
0x84: 'SecuredDataTransmission',
0x85: 'ControlDTCSetting',
0x86: 'ResponseOnEvent',
0x87: 'LinkControl',
0xC3: 'AccessTimingParameterPositiveResponse',
0xC4: 'SecuredDataTransmissionPositiveResponse',
0xC5: 'ControlDTCSettingPositiveResponse',
0xC6: 'ResponseOnEventPositiveResponse',
0xC7: 'LinkControlPositiveResponse',
0x7f: 'NegativeResponse'}) # type: Dict[int, str]
name = 'UDS'
fields_desc = [
XByteEnumField('service', 0, services)
]
def answers(self, other):
# type: (Union[UDS, Packet]) -> bool
if other.__class__ != self.__class__:
return False
if self.service == 0x7f:
return self.payload.answers(other)
if self.service == (other.service + 0x40):
if isinstance(self.payload, NoPayload) or \
isinstance(other.payload, NoPayload):
return len(self) <= len(other)
else:
return self.payload.answers(other.payload)
return False
def hashret(self):
# type: () -> bytes
if self.service == 0x7f:
return struct.pack('B', self.requestServiceId)
return struct.pack('B', self.service & ~0x40)
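# Note on the request/response convention implemented by answers()/hashret()
# above (illustrative comment, not part of the original module): a positive
# response carries the request service id plus 0x40, while a negative response
# always uses service 0x7f and echoes the rejected id in 'requestServiceId'.
#   req = UDS(service=0x10)   # DiagnosticSessionControl request
#   resp = UDS(service=0x50)  # matching positive response (0x10 + 0x40)
#   resp.answers(req)         # -> True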
# ########################DSC###################################
class UDS_DSC(Packet):
diagnosticSessionTypes = ObservableDict({
0x00: 'ISOSAEReserved',
0x01: 'defaultSession',
0x02: 'programmingSession',
0x03: 'extendedDiagnosticSession',
0x04: 'safetySystemDiagnosticSession',
0x7F: 'ISOSAEReserved'})
name = 'DiagnosticSessionControl'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0, diagnosticSessionTypes)
]
bind_layers(UDS, UDS_DSC, service=0x10)
class UDS_DSCPR(Packet):
name = 'DiagnosticSessionControlPositiveResponse'
fields_desc = [
ByteEnumField('diagnosticSessionType', 0,
UDS_DSC.diagnosticSessionTypes),
StrField('sessionParameterRecord', b"")
]
def answers(self, other):
return isinstance(other, UDS_DSC) and \
other.diagnosticSessionType == self.diagnosticSessionType
bind_layers(UDS, UDS_DSCPR, service=0x50)
# #########################ER###################################
class UDS_ER(Packet):
resetTypes = {
0x00: 'ISOSAEReserved',
0x01: 'hardReset',
0x02: 'keyOffOnReset',
0x03: 'softReset',
0x04: 'enableRapidPowerShutDown',
0x05: 'disableRapidPowerShutDown',
0x41: 'powerDown',
0x7F: 'ISOSAEReserved'}
name = 'ECUReset'
fields_desc = [
ByteEnumField('resetType', 0, resetTypes)
]
bind_layers(UDS, UDS_ER, service=0x11)
class UDS_ERPR(Packet):
name = 'ECUResetPositiveResponse'
fields_desc = [
ByteEnumField('resetType', 0, UDS_ER.resetTypes),
ConditionalField(ByteField('powerDownTime', 0),
lambda pkt: pkt.resetType == 0x04)
]
def answers(self, other):
return isinstance(other, UDS_ER)
bind_layers(UDS, UDS_ERPR, service=0x51)
# #########################SA###################################
class UDS_SA(Packet):
name = 'SecurityAccess'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securityAccessDataRecord', b""),
lambda pkt: pkt.securityAccessType % 2 == 1),
ConditionalField(StrField('securityKey', b""),
lambda pkt: pkt.securityAccessType % 2 == 0)
]
bind_layers(UDS, UDS_SA, service=0x27)
class UDS_SAPR(Packet):
name = 'SecurityAccessPositiveResponse'
fields_desc = [
ByteField('securityAccessType', 0),
ConditionalField(StrField('securitySeed', b""),
lambda pkt: pkt.securityAccessType % 2 == 1),
]
def answers(self, other):
return isinstance(other, UDS_SA) \
and other.securityAccessType == self.securityAccessType
bind_layers(UDS, UDS_SAPR, service=0x67)
# #########################CC###################################
class UDS_CC(Packet):
controlTypes = {
0x00: 'enableRxAndTx',
0x01: 'enableRxAndDisableTx',
0x02: 'disableRxAndEnableTx',
0x03: 'disableRxAndTx'
}
name = 'CommunicationControl'
fields_desc = [
ByteEnumField('controlType', 0, controlTypes),
BitEnumField('communicationType0', 0, 2,
{0: 'ISOSAEReserved',
1: 'normalCommunicationMessages',
2: 'networkManagmentCommunicationMessages',
3: 'networkManagmentCommunicationMessages and '
'normalCommunicationMessages'}),
BitField('communicationType1', 0, 2),
BitEnumField('communicationType2', 0, 4,
{0: 'Disable/Enable specified communication Type',
1: 'Disable/Enable specific subnet',
2: 'Disable/Enable specific subnet',
3: 'Disable/Enable specific subnet',
4: 'Disable/Enable specific subnet',
5: 'Disable/Enable specific subnet',
6: 'Disable/Enable specific subnet',
7: 'Disable/Enable specific subnet',
8: 'Disable/Enable specific subnet',
9: 'Disable/Enable specific subnet',
10: 'Disable/Enable specific subnet',
11: 'Disable/Enable specific subnet',
12: 'Disable/Enable specific subnet',
13: 'Disable/Enable specific subnet',
14: 'Disable/Enable specific subnet',
15: 'Disable/Enable network'})
]
bind_layers(UDS, UDS_CC, service=0x28)
class UDS_CCPR(Packet):
name = 'CommunicationControlPositiveResponse'
fields_desc = [
ByteEnumField('controlType', 0, UDS_CC.controlTypes)
]
def answers(self, other):
return isinstance(other, UDS_CC) \
and other.controlType == self.controlType
bind_layers(UDS, UDS_CCPR, service=0x68)
# #########################TP###################################
class UDS_TP(Packet):
name = 'TesterPresent'
fields_desc = [
ByteField('subFunction', 0)
]
bind_layers(UDS, UDS_TP, service=0x3E)
class UDS_TPPR(Packet):
name = 'TesterPresentPositiveResponse'
fields_desc = [
ByteField('zeroSubFunction', 0)
]
def answers(self, other):
return isinstance(other, UDS_TP)
bind_layers(UDS, UDS_TPPR, service=0x7E)
# #########################ATP###################################
class UDS_ATP(Packet):
timingParameterAccessTypes = {
0: 'ISOSAEReserved',
1: 'readExtendedTimingParameterSet',
2: 'setTimingParametersToDefaultValues',
3: 'readCurrentlyActiveTimingParameters',
4: 'setTimingParametersToGivenValues'
}
name = 'AccessTimingParameter'
fields_desc = [
ByteEnumField('timingParameterAccessType', 0,
timingParameterAccessTypes),
ConditionalField(StrField('timingParameterRequestRecord', b""),
lambda pkt: pkt.timingParameterAccessType == 0x4)
]
bind_layers(UDS, UDS_ATP, service=0x83)
class UDS_ATPPR(Packet):
name = 'AccessTimingParameterPositiveResponse'
fields_desc = [
ByteEnumField('timingParameterAccessType', 0,
UDS_ATP.timingParameterAccessTypes),
ConditionalField(StrField('timingParameterResponseRecord', b""),
lambda pkt: pkt.timingParameterAccessType == 0x3)
]
def answers(self, other):
return isinstance(other, UDS_ATP) \
and other.timingParameterAccessType == \
self.timingParameterAccessType
bind_layers(UDS, UDS_ATPPR, service=0xC3)
# #########################SDT###################################
class UDS_SDT(Packet):
name = 'SecuredDataTransmission'
fields_desc = [
StrField('securityDataRequestRecord', b"")
]
bind_layers(UDS, UDS_SDT, service=0x84)
class UDS_SDTPR(Packet):
name = 'SecuredDataTransmissionPositiveResponse'
fields_desc = [
StrField('securityDataResponseRecord', b"")
]
def answers(self, other):
return isinstance(other, UDS_SDT)
bind_layers(UDS, UDS_SDTPR, service=0xC4)
# #########################CDTCS###################################
class UDS_CDTCS(Packet):
DTCSettingTypes = {
0: 'ISOSAEReserved',
1: 'on',
2: 'off'
}
name = 'ControlDTCSetting'
fields_desc = [
ByteEnumField('DTCSettingType', 0, DTCSettingTypes),
StrField('DTCSettingControlOptionRecord', b"")
]
bind_layers(UDS, UDS_CDTCS, service=0x85)
class UDS_CDTCSPR(Packet):
name = 'ControlDTCSettingPositiveResponse'
fields_desc = [
ByteEnumField('DTCSettingType', 0, UDS_CDTCS.DTCSettingTypes)
]
def answers(self, other):
return isinstance(other, UDS_CDTCS)
bind_layers(UDS, UDS_CDTCSPR, service=0xC5)
# #########################ROE###################################
# TODO: improve this protocol implementation
class UDS_ROE(Packet):
eventTypes = {
0: 'doNotStoreEvent',
1: 'storeEvent'
}
name = 'ResponseOnEvent'
fields_desc = [
ByteEnumField('eventType', 0, eventTypes),
ByteField('eventWindowTime', 0),
StrField('eventTypeRecord', b"")
]
bind_layers(UDS, UDS_ROE, service=0x86)
class UDS_ROEPR(Packet):
name = 'ResponseOnEventPositiveResponse'
fields_desc = [
ByteEnumField('eventType', 0, UDS_ROE.eventTypes),
ByteField('numberOfIdentifiedEvents', 0),
ByteField('eventWindowTime', 0),
StrField('eventTypeRecord', b"")
]
def answers(self, other):
return isinstance(other, UDS_ROE) \
and other.eventType == self.eventType
bind_layers(UDS, UDS_ROEPR, service=0xC6)
# #########################LC###################################
class UDS_LC(Packet):
linkControlTypes = {
0: 'ISOSAEReserved',
1: 'verifyBaudrateTransitionWithFixedBaudrate',
2: 'verifyBaudrateTransitionWithSpecificBaudrate',
3: 'transitionBaudrate'
}
name = 'LinkControl'
fields_desc = [
ByteEnumField('linkControlType', 0, linkControlTypes),
ConditionalField(ByteField('baudrateIdentifier', 0),
lambda pkt: pkt.linkControlType == 0x1),
ConditionalField(ByteField('baudrateHighByte', 0),
lambda pkt: pkt.linkControlType == 0x2),
ConditionalField(ByteField('baudrateMiddleByte', 0),
lambda pkt: pkt.linkControlType == 0x2),
ConditionalField(ByteField('baudrateLowByte', 0),
lambda pkt: pkt.linkControlType == 0x2)
]
bind_layers(UDS, UDS_LC, service=0x87)
class UDS_LCPR(Packet):
name = 'LinkControlPositiveResponse'
fields_desc = [
ByteEnumField('linkControlType', 0, UDS_LC.linkControlTypes)
]
def answers(self, other):
return isinstance(other, UDS_LC) \
and other.linkControlType == self.linkControlType
bind_layers(UDS, UDS_LCPR, service=0xC7)
# #########################RDBI###################################
class UDS_RDBI(Packet):
dataIdentifiers = ObservableDict()
name = 'ReadDataByIdentifier'
fields_desc = [
FieldListField("identifiers", None,
XShortEnumField('dataIdentifier', 0,
dataIdentifiers))
]
bind_layers(UDS, UDS_RDBI, service=0x22)
class UDS_RDBIPR(Packet):
name = 'ReadDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers),
]
def answers(self, other):
return isinstance(other, UDS_RDBI) \
and self.dataIdentifier in other.identifiers
bind_layers(UDS, UDS_RDBIPR, service=0x62)
# #########################RMBA###################################
class UDS_RMBA(Packet):
name = 'ReadMemoryByAddress'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4),
]
bind_layers(UDS, UDS_RMBA, service=0x23)
class UDS_RMBAPR(Packet):
name = 'ReadMemoryByAddressPositiveResponse'
fields_desc = [
StrField('dataRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RMBA)
bind_layers(UDS, UDS_RMBAPR, service=0x63)
# #########################RSDBI###################################
class UDS_RSDBI(Packet):
name = 'ReadScalingDataByIdentifier'
dataIdentifiers = ObservableDict()
fields_desc = [
XShortEnumField('dataIdentifier', 0, dataIdentifiers)
]
bind_layers(UDS, UDS_RSDBI, service=0x24)
# TODO: Implement correct scaling here, instead of using just the dataRecord
class UDS_RSDBIPR(Packet):
name = 'ReadScalingDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0, UDS_RSDBI.dataIdentifiers),
ByteField('scalingByte', 0),
StrField('dataRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RSDBI) \
and other.dataIdentifier == self.dataIdentifier
bind_layers(UDS, UDS_RSDBIPR, service=0x64)
# #########################RDBPI###################################
class UDS_RDBPI(Packet):
transmissionModes = {
0: 'ISOSAEReserved',
1: 'sendAtSlowRate',
2: 'sendAtMediumRate',
3: 'sendAtFastRate',
4: 'stopSending'
}
name = 'ReadDataByPeriodicIdentifier'
fields_desc = [
ByteEnumField('transmissionMode', 0, transmissionModes),
ByteField('periodicDataIdentifier', 0),
StrField('furtherPeriodicDataIdentifier', b"", fmt="B")
]
bind_layers(UDS, UDS_RDBPI, service=0x2A)
# TODO: Implement correct scaling here, instead of using just the dataRecord
class UDS_RDBPIPR(Packet):
name = 'ReadDataByPeriodicIdentifierPositiveResponse'
fields_desc = [
ByteField('periodicDataIdentifier', 0),
StrField('dataRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RDBPI) \
and other.periodicDataIdentifier == self.periodicDataIdentifier
bind_layers(UDS, UDS_RDBPIPR, service=0x6A)
# #########################DDDI###################################
# TODO: Implement correct interpretation here,
# instead of using just the dataRecord
class UDS_DDDI(Packet):
name = 'DynamicallyDefineDataIdentifier'
subFunctions = {0x1: "defineByIdentifier",
0x2: "defineByMemoryAddress",
0x3: "clearDynamicallyDefinedDataIdentifier"}
fields_desc = [
ByteEnumField('subFunction', 0, subFunctions),
StrField('dataRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_DDDI, service=0x2C)
class UDS_DDDIPR(Packet):
name = 'DynamicallyDefineDataIdentifierPositiveResponse'
fields_desc = [
ByteEnumField('subFunction', 0, UDS_DDDI.subFunctions),
XShortField('dynamicallyDefinedDataIdentifier', 0)
]
def answers(self, other):
return isinstance(other, UDS_DDDI) \
and other.subFunction == self.subFunction
bind_layers(UDS, UDS_DDDIPR, service=0x6C)
# #########################WDBI###################################
class UDS_WDBI(Packet):
name = 'WriteDataByIdentifier'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers)
]
bind_layers(UDS, UDS_WDBI, service=0x2E)
class UDS_WDBIPR(Packet):
name = 'WriteDataByIdentifierPositiveResponse'
fields_desc = [
XShortEnumField('dataIdentifier', 0,
UDS_RDBI.dataIdentifiers),
]
def answers(self, other):
return isinstance(other, UDS_WDBI) \
and other.dataIdentifier == self.dataIdentifier
bind_layers(UDS, UDS_WDBIPR, service=0x6E)
# #########################WMBA###################################
class UDS_WMBA(Packet):
name = 'WriteMemoryByAddress'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4),
StrField('dataRecord', b'', fmt="B"),
]
bind_layers(UDS, UDS_WMBA, service=0x3D)
class UDS_WMBAPR(Packet):
name = 'WriteMemoryByAddressPositiveResponse'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
def answers(self, other):
return isinstance(other, UDS_WMBA) \
and other.memorySizeLen == self.memorySizeLen \
and other.memoryAddressLen == self.memoryAddressLen
bind_layers(UDS, UDS_WMBAPR, service=0x7D)
# #########################CDTCI###################################
class UDS_CDTCI(Packet):
name = 'ClearDiagnosticInformation'
fields_desc = [
ByteField('groupOfDTCHighByte', 0),
ByteField('groupOfDTCMiddleByte', 0),
ByteField('groupOfDTCLowByte', 0),
]
bind_layers(UDS, UDS_CDTCI, service=0x14)
class UDS_CDTCIPR(Packet):
name = 'ClearDiagnosticInformationPositiveResponse'
def answers(self, other):
return isinstance(other, UDS_CDTCI)
bind_layers(UDS, UDS_CDTCIPR, service=0x54)
# #########################RDTCI###################################
class UDS_RDTCI(Packet):
reportTypes = {
0: 'ISOSAEReserved',
1: 'reportNumberOfDTCByStatusMask',
2: 'reportDTCByStatusMask',
3: 'reportDTCSnapshotIdentification',
4: 'reportDTCSnapshotRecordByDTCNumber',
5: 'reportDTCSnapshotRecordByRecordNumber',
6: 'reportDTCExtendedDataRecordByDTCNumber',
7: 'reportNumberOfDTCBySeverityMaskRecord',
8: 'reportDTCBySeverityMaskRecord',
9: 'reportSeverityInformationOfDTC',
10: 'reportSupportedDTC',
11: 'reportFirstTestFailedDTC',
12: 'reportFirstConfirmedDTC',
13: 'reportMostRecentTestFailedDTC',
14: 'reportMostRecentConfirmedDTC',
15: 'reportMirrorMemoryDTCByStatusMask',
16: 'reportMirrorMemoryDTCExtendedDataRecordByDTCNumber',
17: 'reportNumberOfMirrorMemoryDTCByStatusMask',
18: 'reportNumberOfEmissionsRelatedOBDDTCByStatusMask',
19: 'reportEmissionsRelatedOBDDTCByStatusMask',
20: 'reportDTCFaultDetectionCounter',
21: 'reportDTCWithPermanentStatus'
}
name = 'ReadDTCInformation'
fields_desc = [
ByteEnumField('reportType', 0, reportTypes),
ConditionalField(ByteField('DTCSeverityMask', 0),
lambda pkt: pkt.reportType in [0x07, 0x08]),
ConditionalField(XByteField('DTCStatusMask', 0),
lambda pkt: pkt.reportType in [
0x01, 0x02, 0x07, 0x08, 0x0f, 0x11, 0x12, 0x13]),
ConditionalField(ByteField('DTCHighByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCMiddleByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCLowByte', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x6,
0x10, 0x09]),
ConditionalField(ByteField('DTCSnapshotRecordNumber', 0),
lambda pkt: pkt.reportType in [0x3, 0x4, 0x5]),
ConditionalField(ByteField('DTCExtendedDataRecordNumber', 0),
lambda pkt: pkt.reportType in [0x6, 0x10])
]
bind_layers(UDS, UDS_RDTCI, service=0x19)
class UDS_RDTCIPR(Packet):
name = 'ReadDTCInformationPositiveResponse'
fields_desc = [
ByteEnumField('reportType', 0, UDS_RDTCI.reportTypes),
ConditionalField(XByteField('DTCStatusAvailabilityMask', 0),
lambda pkt: pkt.reportType in [0x01, 0x07, 0x11,
0x12, 0x02, 0x0A,
0x0B, 0x0C, 0x0D,
0x0E, 0x0F, 0x13,
0x15]),
ConditionalField(ByteEnumField('DTCFormatIdentifier', 0,
{0: 'ISO15031-6DTCFormat',
1: 'UDS-1DTCFormat',
2: 'SAEJ1939-73DTCFormat',
3: 'ISO11992-4DTCFormat'}),
lambda pkt: pkt.reportType in [0x01, 0x07,
0x11, 0x12]),
ConditionalField(ShortField('DTCCount', 0),
lambda pkt: pkt.reportType in [0x01, 0x07,
0x11, 0x12]),
ConditionalField(StrField('DTCAndStatusRecord', b""),
lambda pkt: pkt.reportType in [0x02, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E,
0x0F, 0x13, 0x15]),
ConditionalField(StrField('dataRecord', b""),
lambda pkt: pkt.reportType in [0x03, 0x04, 0x05,
0x06, 0x08, 0x09,
0x10, 0x14])
]
def answers(self, other):
return isinstance(other, UDS_RDTCI) \
and other.reportType == self.reportType
bind_layers(UDS, UDS_RDTCIPR, service=0x59)
# #########################RC###################################
class UDS_RC(Packet):
routineControlTypes = {
0: 'ISOSAEReserved',
1: 'startRoutine',
2: 'stopRoutine',
3: 'requestRoutineResults'
}
routineControlIdentifiers = ObservableDict()
name = 'RoutineControl'
fields_desc = [
ByteEnumField('routineControlType', 0, routineControlTypes),
XShortEnumField('routineIdentifier', 0, routineControlIdentifiers)
]
bind_layers(UDS, UDS_RC, service=0x31)
class UDS_RCPR(Packet):
name = 'RoutineControlPositiveResponse'
fields_desc = [
ByteEnumField('routineControlType', 0, UDS_RC.routineControlTypes),
XShortEnumField('routineIdentifier', 0,
UDS_RC.routineControlIdentifiers),
]
def answers(self, other):
return isinstance(other, UDS_RC) \
and other.routineControlType == self.routineControlType \
and other.routineIdentifier == self.routineIdentifier
bind_layers(UDS, UDS_RCPR, service=0x71)
# #########################RD###################################
class UDS_RD(Packet):
dataFormatIdentifiers = ObservableDict({
0: 'noCompressionNoEncryption'
})
name = 'RequestDownload'
fields_desc = [
ByteEnumField('dataFormatIdentifier', 0, dataFormatIdentifiers),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
bind_layers(UDS, UDS_RD, service=0x34)
class UDS_RDPR(Packet):
name = 'RequestDownloadPositiveResponse'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('reserved', 0, 4),
StrField('maxNumberOfBlockLength', b"", fmt="B"),
]
def answers(self, other):
return isinstance(other, UDS_RD)
bind_layers(UDS, UDS_RDPR, service=0x74)
# #########################RU###################################
class UDS_RU(Packet):
name = 'RequestUpload'
fields_desc = [
ByteEnumField('dataFormatIdentifier', 0,
UDS_RD.dataFormatIdentifiers),
BitField('memorySizeLen', 0, 4),
BitField('memoryAddressLen', 0, 4),
ConditionalField(XByteField('memoryAddress1', 0),
lambda pkt: pkt.memoryAddressLen == 1),
ConditionalField(XShortField('memoryAddress2', 0),
lambda pkt: pkt.memoryAddressLen == 2),
ConditionalField(X3BytesField('memoryAddress3', 0),
lambda pkt: pkt.memoryAddressLen == 3),
ConditionalField(XIntField('memoryAddress4', 0),
lambda pkt: pkt.memoryAddressLen == 4),
ConditionalField(XByteField('memorySize1', 0),
lambda pkt: pkt.memorySizeLen == 1),
ConditionalField(XShortField('memorySize2', 0),
lambda pkt: pkt.memorySizeLen == 2),
ConditionalField(X3BytesField('memorySize3', 0),
lambda pkt: pkt.memorySizeLen == 3),
ConditionalField(XIntField('memorySize4', 0),
lambda pkt: pkt.memorySizeLen == 4)
]
bind_layers(UDS, UDS_RU, service=0x35)
class UDS_RUPR(Packet):
name = 'RequestUploadPositiveResponse'
fields_desc = [
BitField('memorySizeLen', 0, 4),
BitField('reserved', 0, 4),
StrField('maxNumberOfBlockLength', b"", fmt="B"),
]
def answers(self, other):
return isinstance(other, UDS_RU)
bind_layers(UDS, UDS_RUPR, service=0x75)
# #########################TD###################################
class UDS_TD(Packet):
name = 'TransferData'
fields_desc = [
ByteField('blockSequenceCounter', 0),
StrField('transferRequestParameterRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_TD, service=0x36)
class UDS_TDPR(Packet):
name = 'TransferDataPositiveResponse'
fields_desc = [
ByteField('blockSequenceCounter', 0),
StrField('transferResponseParameterRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_TD) \
and other.blockSequenceCounter == self.blockSequenceCounter
bind_layers(UDS, UDS_TDPR, service=0x76)
# #########################RTE###################################
class UDS_RTE(Packet):
name = 'RequestTransferExit'
fields_desc = [
StrField('transferRequestParameterRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_RTE, service=0x37)
class UDS_RTEPR(Packet):
name = 'RequestTransferExitPositiveResponse'
fields_desc = [
StrField('transferResponseParameterRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_RTE)
bind_layers(UDS, UDS_RTEPR, service=0x77)
# #########################RFT###################################
class UDS_RFT(Packet):
name = 'RequestFileTransfer'
modeOfOperations = {
0x00: "ISO/SAE Reserved",
0x01: "Add File",
0x02: "Delete File",
0x03: "Replace File",
0x04: "Read File",
0x05: "Read Directory"
}
@staticmethod
def _contains_file_size(packet):
return packet.modeOfOperation not in [2, 4, 5]
fields_desc = [
XByteEnumField('modeOfOperation', 0, modeOfOperations),
FieldLenField('filePathAndNameLength', 0,
length_of='filePathAndName', fmt='H'),
StrLenField('filePathAndName', b"",
length_from=lambda p: p.filePathAndNameLength),
ConditionalField(BitField('compressionMethod', 0, 4),
lambda p: p.modeOfOperation not in [2, 5]),
ConditionalField(BitField('encryptingMethod', 0, 4),
lambda p: p.modeOfOperation not in [2, 5]),
ConditionalField(FieldLenField('fileSizeParameterLength', 0, fmt="B",
length_of='fileSizeUnCompressed'),
lambda p: UDS_RFT._contains_file_size(p)),
ConditionalField(StrLenField('fileSizeUnCompressed', b"",
length_from=lambda p:
p.fileSizeParameterLength),
lambda p: UDS_RFT._contains_file_size(p)),
ConditionalField(StrLenField('fileSizeCompressed', b"",
length_from=lambda p:
p.fileSizeParameterLength),
lambda p: UDS_RFT._contains_file_size(p))
]
bind_layers(UDS, UDS_RFT, service=0x38)
class UDS_RFTPR(Packet):
name = 'RequestFileTransferPositiveResponse'
@staticmethod
def _contains_data_format_identifier(packet):
return packet.modeOfOperation != 0x02
fields_desc = [
XByteEnumField('modeOfOperation', 0, UDS_RFT.modeOfOperations),
ConditionalField(FieldLenField('lengthFormatIdentifier', 0,
length_of='maxNumberOfBlockLength',
fmt='B'),
lambda p: p.modeOfOperation != 2),
ConditionalField(StrLenField('maxNumberOfBlockLength', b"",
length_from=lambda p: p.lengthFormatIdentifier),
lambda p: p.modeOfOperation != 2),
ConditionalField(BitField('compressionMethod', 0, 4),
lambda p: p.modeOfOperation != 0x02),
ConditionalField(BitField('encryptingMethod', 0, 4),
lambda p: p.modeOfOperation != 0x02),
ConditionalField(FieldLenField('fileSizeOrDirInfoParameterLength', 0,
length_of='fileSizeUncompressedOrDirInfoLength'),
lambda p: p.modeOfOperation not in [1, 2, 3]),
ConditionalField(StrLenField('fileSizeUncompressedOrDirInfoLength',
b"",
length_from=lambda p:
p.fileSizeOrDirInfoParameterLength),
lambda p: p.modeOfOperation not in [1, 2, 3]),
ConditionalField(StrLenField('fileSizeCompressed', b"",
length_from=lambda p:
p.fileSizeOrDirInfoParameterLength),
lambda p: p.modeOfOperation not in [1, 2, 3, 5]),
]
def answers(self, other):
return isinstance(other, UDS_RFT)
bind_layers(UDS, UDS_RFTPR, service=0x78)
# #########################IOCBI###################################
class UDS_IOCBI(Packet):
name = 'InputOutputControlByIdentifier'
dataIdentifiers = ObservableDict()
fields_desc = [
XShortEnumField('dataIdentifier', 0, dataIdentifiers),
ByteField('controlOptionRecord', 0),
StrField('controlEnableMaskRecord', b"", fmt="B")
]
bind_layers(UDS, UDS_IOCBI, service=0x2F)
class UDS_IOCBIPR(Packet):
name = 'InputOutputControlByIdentifierPositiveResponse'
fields_desc = [
XShortField('dataIdentifier', 0),
StrField('controlStatusRecord', b"", fmt="B")
]
def answers(self, other):
return isinstance(other, UDS_IOCBI) \
and other.dataIdentifier == self.dataIdentifier
bind_layers(UDS, UDS_IOCBIPR, service=0x6F)
# #########################NR###################################
class UDS_NR(Packet):
negativeResponseCodes = {
0x00: 'positiveResponse',
0x10: 'generalReject',
0x11: 'serviceNotSupported',
0x12: 'subFunctionNotSupported',
0x13: 'incorrectMessageLengthOrInvalidFormat',
0x14: 'responseTooLong',
0x20: 'ISOSAEReserved',
0x21: 'busyRepeatRequest',
0x22: 'conditionsNotCorrect',
0x23: 'ISOSAEReserved',
0x24: 'requestSequenceError',
0x25: 'noResponseFromSubnetComponent',
0x26: 'failurePreventsExecutionOfRequestedAction',
0x31: 'requestOutOfRange',
0x33: 'securityAccessDenied',
0x35: 'invalidKey',
0x36: 'exceedNumberOfAttempts',
0x37: 'requiredTimeDelayNotExpired',
0x70: 'uploadDownloadNotAccepted',
0x71: 'transferDataSuspended',
0x72: 'generalProgrammingFailure',
0x73: 'wrongBlockSequenceCounter',
0x78: 'requestCorrectlyReceived-ResponsePending',
0x7E: 'subFunctionNotSupportedInActiveSession',
0x7F: 'serviceNotSupportedInActiveSession',
0x80: 'ISOSAEReserved',
0x81: 'rpmTooHigh',
0x82: 'rpmTooLow',
0x83: 'engineIsRunning',
0x84: 'engineIsNotRunning',
0x85: 'engineRunTimeTooLow',
0x86: 'temperatureTooHigh',
0x87: 'temperatureTooLow',
0x88: 'vehicleSpeedTooHigh',
0x89: 'vehicleSpeedTooLow',
0x8a: 'throttle/PedalTooHigh',
0x8b: 'throttle/PedalTooLow',
0x8c: 'transmissionRangeNotInNeutral',
0x8d: 'transmissionRangeNotInGear',
0x8e: 'ISOSAEReserved',
0x8f: 'brakeSwitch(es)NotClosed',
0x90: 'shifterLeverNotInPark',
0x91: 'torqueConverterClutchLocked',
0x92: 'voltageTooHigh',
0x93: 'voltageTooLow',
}
name = 'NegativeResponse'
fields_desc = [
XByteEnumField('requestServiceId', 0, UDS.services),
ByteEnumField('negativeResponseCode', 0, negativeResponseCodes)
]
def answers(self, other):
return self.requestServiceId == other.service and \
(self.negativeResponseCode != 0x78 or
conf.contribs['UDS']['treat-response-pending-as-answer'])
bind_layers(UDS, UDS_NR, service=0x7f)
# ##################################################################
# ######################## UTILS ###################################
# ##################################################################
class UDS_TesterPresentSender(PeriodicSenderThread):
def __init__(self, sock, pkt=UDS() / UDS_TP(), interval=2):
""" Thread to send TesterPresent messages packets periodically
Args:
sock: socket where packet is sent periodically
pkt: packet to send
interval: interval between two packets
"""
PeriodicSenderThread.__init__(self, sock, pkt, interval)
def run(self):
# type: () -> None
while not self._stopped.is_set():
for p in self._pkts:
self._socket.sr1(p, timeout=0.3, verbose=False)
time.sleep(self._interval)
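# Illustrative usage sketch (the ISOTP socket name is an assumption, not part
# of the original module): keep a diagnostic session alive while other
# requests are performed.
#   tp = UDS_TesterPresentSender(isotp_sock)
#   tp.start()   # sends UDS()/UDS_TP() every 2 seconds
#   ...          # run other diagnostic requests here
#   tp.stop()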
def UDS_SessionEnumerator(sock, session_range=range(0x100), reset_wait=1.5):
""" Enumerates session ID's in given range
and returns list of UDS()/UDS_DSC() packets
with valid session types
Args:
sock: socket where packets are sent
        session_range: range for session IDs
reset_wait: wait time in sec after every packet
"""
pkts = (req for tup in
product(UDS() / UDS_DSC(diagnosticSessionType=session_range),
UDS() / UDS_ER(resetType='hardReset')) for req in tup)
results, _ = sock.sr(pkts, timeout=len(session_range) * reset_wait * 2 + 1,
verbose=False, inter=reset_wait)
return [req for req, res in results if req is not None and
req.service != 0x11 and
(res.service == 0x50 or
res.negativeResponseCode not in [0x10, 0x11, 0x12])]
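# Illustrative usage sketch (socket name is an assumption, not part of the
# original module): probe which diagnostic sessions an ECU accepts and print
# the valid session types.
#   found = UDS_SessionEnumerator(isotp_sock, session_range=range(0x80))
#   for req in found:
#       print(req.sprintf("%UDS_DSC.diagnosticSessionType%"))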
def UDS_ServiceEnumerator(sock, session="DefaultSession",
filter_responses=True):
""" Enumerates every service ID
and returns list of tuples. Each tuple contains
    the session and the respective response
    Args:
        sock: socket where packets are sent
        session: session in which the services are enumerated
        filter_responses: if True, suppress negative responses with code
                          'generalReject' or 'serviceNotSupported'
    """
pkts = (UDS(service=x) for x in set(x & ~0x40 for x in range(0x100)))
found_services = sock.sr(pkts, timeout=5, verbose=False)
return [(session, p) for _, p in found_services[0] if
p.service != 0x7f or
(p.negativeResponseCode not in [0x10, 0x11] or not
filter_responses)]
def getTableEntry(tup):
""" Helping function for make_lined_table.
Returns the session and response code of tup.
Args:
tup: tuple with session and UDS response package
Example:
make_lined_table([('DefaultSession', UDS()/UDS_SAPR(),
'ExtendedDiagnosticSession', UDS()/UDS_IOCBI())],
getTableEntry)
"""
session, pkt = tup
if pkt.service == 0x7f:
return (session,
"0x%02x: %s" % (pkt.requestServiceId,
pkt.sprintf("%UDS_NR.requestServiceId%")),
pkt.sprintf("%UDS_NR.negativeResponseCode%"))
else:
return (session,
"0x%02x: %s" % (pkt.service & ~0x40,
pkt.get_field('service').
i2s[pkt.service & ~0x40]),
"PositiveResponse")
|
|
from collections import deque
### parent and children should be made private so don't mess up linking
### parent() and children() will return their data
### but to change parent, graft/trim should be used. same for children
### check all cases of parent/children to make sure can be replaced.
class TreeNode():
"""Nodes of a tree. Each node can act as the root of it's own tree
Public Interface:
Focused on Node:
data -- dictionary with arbitrary contents. accessible by [] notation
parent()
children()
children_by_data()
trim_children()
depth()
Focused on Tree:
traverse()
traverse_post_order()
graft()
route_to_root()
route_from_root()
leaves()
trim()
trim_dead_branch()
degenerate_to_leaf()
search()
"""
### get rid of either kwargs or data_. probably kwargs since less flexible
    def __init__(self, *args, data_=None, **kwargs):
        """Construct a TreeNode with optional data.
keyword-only arguments:
data_ -- a dictionary with arbitrary data
kwargs:
any set of name=value pairs will be (over)written in the node's data.
"""
self._parent = None
self._children = []
self.data = data_ if data_ else {}
for name, value in kwargs.items():
self.data[name] = value
    def __eq__(self, other):
        return self is other
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def parent(self):
"""Return the parent of this node."""
return self._parent
def children(self):
"""Generate each child of this node."""
for child in self._children:
yield child
def children_by_data(self, data_name, data_value):
"""Generate all children that have same data or None if not found."""
for child in self.children():
try:
if child.data[data_name] == data_value: yield child
except KeyError: continue
def graft(self, child):
"""Append child to self's children and set child's parent to self.
Note: For performance reasons, this assumes that child does not already
exist in any tree connected to self rather than testing for it."""
self._children.append(child)
child._parent = self
def trim(self):
"""Remove branch_root from any tree it was connected to."""
try:
self._parent._children.remove(self)
except AttributeError: pass #if parent is none, ignore.
#ValueError for child not being in parent will still propagate
self._parent = None
def trim_children(self):
"""Trim and return a list of children."""
child_list = list(self.children())
for child in child_list:
child.trim()
return child_list
def depth(self):
"""Find depth relative to full tree."""
d = 0
node = self
while node.parent():
node = node.parent()
d += 1
return d
def route_to_root(self):
""" Generate nodes ordered from self to root"""
node = self
yield node
while node.parent():
# create path to parent
node = node.parent()
yield node
def route_from_root(self):
""""Fake" generate nodes ordered from root to self."""
for node in reversed(list(self.route_to_root())): yield node
### untested in unittests
def route_to_ancestor(self, ancestor):
""""Fake" generate nodes ordered from self to ancestor."""
for node in reversed(list(self.route_from_ancestor(ancestor))):
yield node
def route_from_ancestor(self, ancestor):
"""Generate nodes ordered from ancestor to self."""
node = ancestor
yield node
while node != self:
node = node.parent()
            if node is None: raise ValueError('The node must be an ancestor of self.')
yield node
def traverse(self):
"""In-order generate all nodes in tree with self as root."""
stack = deque()
stack.append(self)
while stack:
node = stack.pop()
#use reversed to get more logical human order on the stack
for child in reversed(list(node.children())):
stack.append(child)
yield node
def traverse_post_order(self):
"""Post-order "Fake" generate all nodes in tree with self as root."""
###is there a way to do this without making a full list? probably.
for node in reversed(list(self.traverse())):
yield node
def leaves(self):
"""Generate all leaves of tree with self as root."""
for node in self.traverse():
if not list(node.children()): yield node
def trim_dead_branch(self,
stop_data_key = None,
stop_data_value = None):
"""Trim up from leaf and return root of dead branch. Stop conditions:
a)parent is non-degenerate
b)parent has stop data
c)parent is the full tree root
"""
#make sure this is a leaf
if list(self.children()):
raise ValueError('Start node must be a leaf')
#move up the branch until stop condition(s) met
dead_node = self
while dead_node.parent():
parent = dead_node.parent()
if len(list(parent.children())) > 1: break #parent is non-degenerate
try: #parent has stop data
if parent.data[stop_data_key] == stop_data_value: break
except KeyError: pass
if not parent.parent(): break #parent is full tree root
dead_node = parent
dead_node.trim()
return dead_node
def degenerate_to_leaf(self, leaf):
"""Make tree degenerate from self down to leaf.
arguments:
leaf -- leaf SimpleNode connected to tree of self
"""
#determine that there is a path before actually modifying the tree
path = [leaf]
current = leaf
while current != self:
try: path.append(current.parent())
except AttributeError: raise ValueError('There is no vertical path '\
'between self and leaf')
current = current.parent()
#if a vertical path was found, make path degenerate
#note: important to use trim so both connections are broken
parent = path.pop()
while list(parent.children()):
protected_child = path.pop()
children = list(parent.children())
for child in children:
if child != protected_child: child.trim()
parent = protected_child
def search(self, data_items):
"""Generate all matching nodes.
arguments:
data_items -- dict with key,value pairs to be checked for in tree
"""
for node in self.traverse():
for key, value in data_items.items():
found = True
try:
if node[key] != value:
found = False
break
except KeyError:
found = False
break
if found: yield node
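# A minimal usage sketch of the node API above. It assumes this module's
# SimpleNode class (referenced in degenerate_to_leaf's docstring) is
# constructed with a dict of node data; adjust to the actual constructor.
#
#   root = SimpleNode({'name': 'root'})
#   leaf = SimpleNode({'name': 'leaf'})
#   root.graft(leaf)
#   assert leaf.depth() == 1
#   assert list(root.leaves()) == [leaf]
#   assert list(root.search({'name': 'leaf'})) == [leaf]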
def main():
pass
if __name__ == '__main__':
main()
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image statistics generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_data_validation.statistics.generators import image_stats_generator
from tensorflow_data_validation.utils import test_util
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import statistics_pb2
class FakeImageDecoder(image_stats_generator.ImageDecoderInterface):
"""Fake ImageDecoderInterface implementation for testing."""
@staticmethod
def encode_image_metadata(image_format, image_height, image_width):
image_metadata = {
'format': image_format,
'height': image_height,
'width': image_width
}
return json.dumps(image_metadata)
def get_formats(self, value_list):
return np.array([json.loads(value)['format'] for value in value_list],
dtype=np.object)
def get_sizes(self, value_list):
loaded_metadata = [json.loads(value) for value in value_list]
return np.array([[meta['height'], meta['width']]
for meta in loaded_metadata])
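# For reference: FakeImageDecoder.encode_image_metadata('PNG', 2, 4) produces a
# JSON string such as '{"format": "PNG", "height": 2, "width": 4}' (key order
# may vary), which get_formats() and get_sizes() decode again in the tests
# below, so no real image bytes are needed.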
class ImageStatsGeneratorTest(test_util.CombinerFeatureStatsGeneratorTest,
parameterized.TestCase):
@parameterized.named_parameters(
('EmptyList', []), # Line-break comment for readability.
('EmptyBatch', [pa.array([])]),
('NumericalShouldInvalidateImageStats', [
pa.array([[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('JPEG', 1, 1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7),
]]),
pa.array([[1]]),
]))
def test_cases_with_no_image_stats(self, batches):
"""Test cases that should not generate image statistics."""
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator,
statistics_pb2.FeatureNameStatistics())
def test_image_stats_generator_with_missing_feature(self):
"""Test with missing values for a batch."""
batches = [
pa.array([]),
pa.array([[
FakeImageDecoder.encode_image_metadata('JPEG', 10, 1),
]]),
]
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'JPEG'
sample_count: 1
}
}
}
custom_stats {
name: 'image_max_width'
num: 1.0
}
custom_stats {
name: 'image_max_height'
num: 10.0
}""", statistics_pb2.FeatureNameStatistics())
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_values_threshold_check(self):
"""Check values_threshold with a feature that is all images."""
batches = [
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('JPEG', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
],
]),
pa.array([[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]]),
]
# With values_threshold = 7 statistics should not be generated.
image_decoder = FakeImageDecoder()
expected_result = statistics_pb2.FeatureNameStatistics()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=7,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
# With values_threshold = 6 statistics should be generated.
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 2
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 7.0
}
custom_stats {
name: 'image_max_height'
num: 5.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=6,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_check_is_image_ratio(self):
"""Check is_image_ratio with a feature that has partially images."""
# The image ratio is: 0.83
batches = [
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
],
]),
pa.array([[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]]),
]
# For image_ratio_threshold=0.85 we do not expect stats.
expected_result = statistics_pb2.FeatureNameStatistics()
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.85,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
# For image_ratio_threshold=0.8 we expect stats.
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 1
}
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 1
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 7.0
}
custom_stats {
name: 'image_max_height'
num: 5.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.8,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_disable_size_stats(self):
"""Test the enable_size_stats_option."""
# Identical input to test_image_stats_generator_check_is_image_ratio
batches = [
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
],
]),
pa.array([[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]]),
]
# Stats should be identical but without stats for image size.
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 1
}
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 1
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
""", statistics_pb2.FeatureNameStatistics())
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.8,
values_threshold=1,
enable_size_stats=False)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def _read_file(filepath):
"""Helper method for reading a file in binary mode."""
f = tf.io.gfile.GFile(filepath, mode='rb')
return f.read()
class ImageStatsGeneratorRealImageTest(
test_util.CombinerFeatureStatsGeneratorTest):
def test_image_stats_generator_real_image(self):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
batches = [
pa.array([
[
_read_file(os.path.join(test_data_dir, 'image1.gif')),
_read_file(os.path.join(test_data_dir, 'image2.png')),
_read_file(os.path.join(test_data_dir, 'image5.jpg')),
_read_file(os.path.join(test_data_dir, 'image6.jpg')),
_read_file(os.path.join(test_data_dir, 'not_a_image.abc'))
],
[
_read_file(os.path.join(test_data_dir, 'image3.bmp')),
b'not_a_image'
],
]),
pa.array([[
_read_file(os.path.join(test_data_dir, 'image4.png')),
]]),
]
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 2
}
buckets {
label: 'bmp'
sample_count: 1
}
buckets {
label: 'gif'
sample_count: 1
}
buckets {
label: 'jpeg'
sample_count: 2
}
buckets {
label: 'png'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 300.0
}
custom_stats {
name: 'image_max_height'
num: 300.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
is_image_ratio_threshold=0.6,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_pickle_success(self):
"""Ensure that decoder and generator implementations are pickle-able."""
image_decoder = image_stats_generator.TfImageDecoder()
pickle.dumps(image_decoder)
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.6,
values_threshold=1)
pickle.dumps(generator)
if __name__ == '__main__':
absltest.main()
|
|
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ACL rendering module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from lib import aclgenerator
from lib import naming
from lib import policy
import mock
GOOD_HEADER_1 = """
header {
comment:: "this is a test acl"
target:: mock
}
"""
GOOD_TERM_1 = """
term good-term-1 {
protocol:: icmp
action:: accept
}
"""
STATEFUL_ONLY_TERM = """
term stateful-only {
option:: established
action:: accept
}
"""
ICMPV6_TERM = """
term icmpv6-term {
protocol:: icmpv6
action:: accept
}
"""
SHORT_TERM_NAME = """
term short-term-name {
protocol:: tcp
action:: accept
}
"""
GOOD_LONG_TERM_NAME = """
term google-experiment-abbreviations {
protocol:: tcp
action:: accept
}
"""
BAD_LONG_TERM_NAME = """
term this-term-name-is-really-far-too-long {
protocol:: tcp
action:: accept
}
"""
# Print an info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
class ACLMock(aclgenerator.ACLGenerator):
_PLATFORM = 'mock'
_TERM_MAX_LENGTH = 24
def _TranslatePolicy(self, pol, exp_info):
pass
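# Note on the tests below: with _TERM_MAX_LENGTH = 24, GOOD_LONG_TERM_NAME
# ('google-experiment-abbreviations', 31 characters) exceeds the limit, so
# FixTermLength is expected to abbreviate it (keeping the '-abbreviations'
# suffix) or truncate it to 24 characters ('google-experiment-abbrev'),
# while BAD_LONG_TERM_NAME cannot be shortened and should raise
# TermNameTooLongError.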
class ACLGeneratorTest(unittest.TestCase):
def setUp(self):
self.naming = mock.create_autospec(naming.Naming)
def testEstablishedNostate(self):
# When using "nostate" filter and a term with "option:: established"
# have any protocol other than TCP and/or UDP should raise error.
pol = policy.ParsePolicy(GOOD_HEADER_1 + STATEFUL_ONLY_TERM, self.naming)
acl = ACLMock(pol, EXP_INFO)
for _, terms in pol.filters:
for term in terms:
self.assertRaises(aclgenerator.EstablishedError,
acl.FixHighPorts, term, 'inet', False)
def testSupportedAF(self):
# Unsupported address families should raise an error.
pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_TERM_1, self.naming)
acl = ACLMock(pol, EXP_INFO)
for _, terms in pol.filters:
for term in terms:
self.assertRaises(aclgenerator.UnsupportedAF,
acl.FixHighPorts, term, 'unsupported', False)
def testTermNameBelowLimit(self):
# A term name that is below the specified limit should come out unchanged,
# regardless of abbreviation and truncation settings.
pol = policy.ParsePolicy(GOOD_HEADER_1 + SHORT_TERM_NAME, self.naming)
acl = ACLMock(pol, EXP_INFO)
for _, terms in pol.filters:
for term in terms:
result = acl.FixTermLength(term.name, True, True)
self.assertEquals(term.name, result)
result = acl.FixTermLength(term.name, True, False)
self.assertEquals(term.name, result)
result = acl.FixTermLength(term.name, False, True)
self.assertEquals(term.name, result)
result = acl.FixTermLength(term.name, False, False)
self.assertEquals(term.name, result)
def testLongTermAbbreviation(self):
# A term name that is above the specified limit should come out abbreviated
# when abbreviation is enabled.
pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_LONG_TERM_NAME, self.naming)
acl = ACLMock(pol, EXP_INFO)
for _, terms in pol.filters:
for term in terms:
result = acl.FixTermLength(term.name, True, False)
self.failUnless('-abbreviations' in result,
'Our strings disappeared during abbreviation.')
def testTermNameTruncation(self):
# A term name that is above the specified limit should come out truncated
# when truncation is enabled.
pol = policy.ParsePolicy(GOOD_HEADER_1 + GOOD_LONG_TERM_NAME, self.naming)
acl = ACLMock(pol, EXP_INFO)
for _, terms in pol.filters:
for term in terms:
result = acl.FixTermLength(term.name, False, True)
self.assertEquals('google-experiment-abbrev', result)
def testLongTermName(self):
# A term name that is above the specified limit and impossible to abbreviate
# should raise an exception.
pol = policy.ParsePolicy(GOOD_HEADER_1 + BAD_LONG_TERM_NAME, self.naming)
acl = ACLMock(pol, EXP_INFO)
for _, terms in pol.filters:
for term in terms:
self.assertRaises(aclgenerator.TermNameTooLongError,
acl.FixTermLength, term.name, True, False)
def testProtocolNameToNumber(self):
protoMap = {'icmp': 1,
'ipip': 4,
'tcp': 6,
'gre': 47,
}
protoConvert = ['gre', 'tcp']
protocolList = ['icmp', 'gre', 'tcp', 'ipip']
expectedProtocolList = ['icmp', 47, 6, 'ipip']
retProtocolList = aclgenerator.ProtocolNameToNumber(protocolList,
protoConvert,
protoMap)
self.assertItemsEqual(expectedProtocolList, retProtocolList)
def testAddRepositoryTags(self):
# The format string prints the '$' into the RCS tags in order to prevent
# the tags from being interpolated here.
# Include all tags.
self.assertItemsEqual(
['%sId:%s' % ('$', '$'),
'%sDate:%s' % ('$', '$'),
'%sRevision:%s' % ('$', '$')], aclgenerator.AddRepositoryTags())
# Remove the revision tag.
self.assertItemsEqual(
['%sId:%s' % ('$', '$'),
'%sDate:%s' % ('$', '$')],
aclgenerator.AddRepositoryTags(revision=False))
# Only include the Id: tag.
self.assertItemsEqual(
['%sId:%s' % ('$', '$')],
aclgenerator.AddRepositoryTags(date=False, revision=False))
if __name__ == '__main__':
unittest.main()
|
|
from copy import copy
import six
from . import ast
from .visitor_meta import QUERY_DOCUMENT_KEYS, VisitorMeta
class Falsey(object):
def __nonzero__(self):
return False
def __bool__(self):
return False
BREAK = object()
REMOVE = Falsey()
class Stack(object):
__slots__ = 'in_array', 'index', 'keys', 'edits', 'prev'
def __init__(self, in_array, index, keys, edits, prev):
self.in_array = in_array
self.index = index
self.keys = keys
self.edits = edits
self.prev = prev
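# visit() below walks the AST iteratively (using the Stack objects above rather
# than recursion), calling visitor.enter()/leave() for every node. Return
# values from the visitor steer the walk: BREAK stops it, False skips the
# subtree on enter, REMOVE deletes the node from its parent list, and any other
# ast.Node replaces the current node; accumulated edits are applied on the way
# back up.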
def visit(root, visitor, key_map=None):
visitor_keys = key_map or QUERY_DOCUMENT_KEYS
stack = None
in_array = isinstance(root, list)
keys = [root]
index = -1
edits = []
parent = None
path = []
ancestors = []
new_root = root
leave = visitor.leave
enter = visitor.enter
path_pop = path.pop
ancestors_pop = ancestors.pop
path_append = path.append
ancestors_append = ancestors.append
while True:
index += 1
is_leaving = index == len(keys)
is_edited = is_leaving and edits
if is_leaving:
key = path_pop() if ancestors else None
node = parent
parent = ancestors_pop() if ancestors else None
if is_edited:
if in_array:
node = list(node)
else:
node = copy(node)
edit_offset = 0
for edit_key, edit_value in edits:
if in_array:
edit_key -= edit_offset
if in_array and edit_value is REMOVE:
node.pop(edit_key)
edit_offset += 1
else:
if isinstance(node, list):
node[edit_key] = edit_value
else:
setattr(node, edit_key, edit_value)
index = stack.index
keys = stack.keys
edits = stack.edits
in_array = stack.in_array
stack = stack.prev
else:
if parent:
key = index if in_array else keys[index]
if isinstance(parent, list):
node = parent[key]
else:
node = getattr(parent, key, None)
else:
key = None
node = new_root
if node is REMOVE or node is None:
continue
if parent:
path_append(key)
result = None
if not isinstance(node, list):
assert isinstance(node, ast.Node), 'Invalid AST Node: ' + repr(node)
if is_leaving:
result = leave(node, key, parent, path, ancestors)
else:
result = enter(node, key, parent, path, ancestors)
if result is BREAK:
break
if result is False:
if not is_leaving:
path_pop()
continue
elif result is not None:
edits.append((key, result))
if not is_leaving:
if isinstance(result, ast.Node):
node = result
else:
path_pop()
continue
if result is None and is_edited:
edits.append((key, node))
if not is_leaving:
stack = Stack(in_array, index, keys, edits, stack)
in_array = isinstance(node, list)
keys = node if in_array else visitor_keys.get(type(node), None) or []
index = -1
edits = []
if parent:
ancestors_append(parent)
parent = node
if not stack:
break
if edits:
new_root = edits[-1][1]
return new_root
@six.add_metaclass(VisitorMeta)
class Visitor(object):
__slots__ = ()
def enter(self, node, key, parent, path, ancestors):
method = self._get_enter_handler(type(node))
if method:
return method(self, node, key, parent, path, ancestors)
def leave(self, node, key, parent, path, ancestors):
method = self._get_leave_handler(type(node))
if method:
return method(self, node, key, parent, path, ancestors)
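# A minimal sketch of a concrete visitor: a subclass may simply override
# enter()/leave() directly. (Per-node-type handlers resolved by VisitorMeta are
# also possible, but their naming convention lives in visitor_meta and is not
# shown here.)
#
#   class NodeCounter(Visitor):
#       def enter(self, node, key, parent, path, ancestors):
#           self.count = getattr(self, 'count', 0) + 1
#
#   counter = NodeCounter()
#   visit(document_ast, counter)  # document_ast: some parsed ast.Document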
class ParallelVisitor(Visitor):
__slots__ = 'skipping', 'visitors'
def __init__(self, visitors):
self.visitors = visitors
self.skipping = [None] * len(visitors)
def enter(self, node, key, parent, path, ancestors):
for i, visitor in enumerate(self.visitors):
if not self.skipping[i]:
result = visitor.enter(node, key, parent, path, ancestors)
if result is False:
self.skipping[i] = node
elif result is BREAK:
self.skipping[i] = BREAK
elif result is not None:
return result
def leave(self, node, key, parent, path, ancestors):
for i, visitor in enumerate(self.visitors):
if not self.skipping[i]:
result = visitor.leave(node, key, parent, path, ancestors)
if result is BREAK:
self.skipping[i] = BREAK
elif result is not None and result is not False:
return result
elif self.skipping[i] == node:
self.skipping[i] = REMOVE
class TypeInfoVisitor(Visitor):
__slots__ = 'visitor', 'type_info'
def __init__(self, type_info, visitor):
self.type_info = type_info
self.visitor = visitor
def enter(self, node, key, parent, path, ancestors):
self.type_info.enter(node)
result = self.visitor.enter(node, key, parent, path, ancestors)
if result is not None:
self.type_info.leave(node)
if isinstance(result, ast.Node):
self.type_info.enter(result)
return result
def leave(self, node, key, parent, path, ancestors):
result = self.visitor.leave(node, key, parent, path, ancestors)
self.type_info.leave(node)
return result
|
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.utils.unittest import TestCase
from django_extensions.db.fields import json
from nose.tools import eq_, ok_
from .fixtures import create_project, create_user, create_template
from ..models import Project, Template
class JSONClient(Client):
def get(self, path, **extra):
extra.update({
'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest',
})
return super(JSONClient, self).get(path, **extra)
def post(self, path, data={}, content_type='application/json', **extra):
data = json.dumps(data)
extra.update({'data': data,
'content_type': content_type,
'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
return super(JSONClient, self).post(path, **extra)
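# JSONClient marks every request as an XMLHttpRequest and serializes POST
# bodies as JSON, so these tests presumably exercise the API the same way the
# AJAX front end does.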
VALID_DATA = {
"name": "Rad Project!",
"data": {"data": "foo"},
"template": "basic",
"html": "<!DOCTYPE html5>",
}
class ButterIntegrationTestCase(TestCase):
"""We use harcoded urls because the API is coupled to them"""
def setUp(self):
self.user = create_user('bob')
self.template = create_template(slug='base-template')
self.client = JSONClient()
self.client.login(username='bob', password='bob')
def tearDown(self):
self.client.logout()
for model in [Project, Template, User]:
model.objects.all().delete()
def test_add_project(self):
url = '/api/project/'
response = self.client.post(url, VALID_DATA)
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
ok_('_id' in response_data['project'])
project = Project.objects.get()
json.loads(project.metadata)
def test_get_detail_project_hidden(self):
project = create_project(author=self.user, status=Project.HIDDEN,
template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
ok_(isinstance(response_data['project'], basestring))
json.loads(response_data['project'])
def test_get_detail_project(self):
project = create_project(author=self.user, status=Project.LIVE,
template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
ok_(isinstance(response_data['project'], basestring))
json.loads(response_data['project'])
def test_get_detail_project_removed(self):
project = create_project(author=self.user, status=Project.REMOVED,
template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 404)
def test_post_detail_project(self):
project = create_project(author=self.user,
template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.post(url, VALID_DATA)
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
ok_('_id' in response_data['project'])
ok_('data' in response_data['project'])
def test_post_detail_project_invalid(self):
project = create_project(author=self.user, template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.post(url, {'template': 'invalid'})
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'error')
ok_('form_errors' in response_data)
def test_list_projects(self):
alex = create_user('alex')
create_project(author=alex, template=self.template)
create_project(author=self.user, template=self.template)
response = self.client.get('/api/projects')
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
eq_(len(response_data['projects']), 1)
def test_list_project_removed(self):
create_project(author=self.user, status=Project.REMOVED,
template=self.template)
response = self.client.get('/api/projects')
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
eq_(len(response_data['projects']), 0)
def test_publish_project_get(self):
project = create_project(author=self.user,
template=self.template)
url = '/api/publish/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 405)
def test_publish_project(self):
project = create_project(author=self.user, template=self.template)
url = '/api/publish/%s' % project.uuid
response = self.client.post(url, {})
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
ok_('url' in response_data)
def test_publish_project_removed(self):
project = create_project(author=self.user, status=Project.REMOVED,
template=self.template)
url = '/api/publish/%s' % project.uuid
response = self.client.post(url, {})
eq_(response.status_code, 403)
def test_whoami(self):
response = self.client.get('/api/whoami')
eq_(response.status_code, 200)
response_data = json.loads(response.content)
ok_(response_data['username'])
ok_(response_data['name'])
ok_(response_data['email'])
class ButterIntegrationTestCaseAnon(TestCase):
def setUp(self):
self.client = JSONClient()
self.user = create_user('bob')
def tearDown(self):
self.client.logout()
for model in [Project, Template, User]:
model.objects.all().delete()
def test_whoami(self):
response = self.client.get('/api/whoami')
eq_(response.status_code, 403)
def test_get_detail_project(self):
project = create_project(author=self.user)
url = '/api/project/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 403)
def test_post_detail_project(self):
project = create_project(author=self.user)
url = '/api/project/%s' % project.uuid
response = self.client.post(url, VALID_DATA)
eq_(response.status_code, 403)
def test_get_detail_project_removed(self):
project = create_project(author=self.user, status=Project.REMOVED)
url = '/api/project/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 403)
def test_publish_project(self):
project = create_project(author=self.user)
url = '/api/publish/%s' % project.uuid
response = self.client.post(url, {})
eq_(response.status_code, 403)
class ButterIntegrationTestCaseNotOwner(TestCase):
def setUp(self):
self.user = create_user('bob')
self.other = create_user('x')
self.template = create_template(slug='base-template')
self.client = JSONClient()
self.client.login(username='bob', password='bob')
def tearDown(self):
self.client.logout()
for model in [Project, Template, User]:
model.objects.all().delete()
def test_get_project_unpublished(self):
project = create_project(author=self.other, status=Project.HIDDEN,
template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 404)
def test_post_project_unpublished(self):
project = create_project(author=self.other, status=Project.HIDDEN,
template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.post(url, VALID_DATA)
eq_(response.status_code, 404)
def test_get_detail_project(self):
project = create_project(author=self.other, status=Project.LIVE,
is_forkable=False, template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.get(url)
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
ok_(isinstance(response_data['project'], basestring))
json.loads(response_data['project'])
def test_post_detail_project_not_forkable(self):
project = create_project(author=self.other, status=Project.LIVE,
is_forkable=False, template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.post(url, VALID_DATA)
eq_(response.status_code, 403)
def test_post_detail_project_forkable(self):
project = create_project(author=self.other, status=Project.LIVE,
is_forkable=True, template=self.template)
url = '/api/project/%s' % project.uuid
response = self.client.post(url, VALID_DATA)
eq_(response.status_code, 200)
response_data = json.loads(response.content)
eq_(response_data['error'], 'okay')
ok_('_id' in response_data['project'])
json.loads(project.metadata)
eq_(Project.objects.filter(author=self.user).count(), 1)
eq_(Project.objects.filter(author=self.other).count(), 1)
|
|
# encoding: utf-8
# Copyright 2013 maker
# License
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Add currencies to financial items"
try:
currency = orm['finance.Currency'].objects.get(is_default=True)
except:
currency = orm['finance.Currency'].objects.create()
currency.code = "USD"
currency.name = "USD United States of America, Dollars"
currency.symbol = u"$"
currency.is_default = True
currency.save()
for obj in orm['finance.Liability'].objects.all():
obj.value_currency = currency
obj.value_display = obj.value
obj.save()
for obj in orm['finance.Transaction'].objects.all():
obj.value_currency = currency
obj.value_display = obj.value
obj.save()
for obj in orm['finance.Account'].objects.all():
obj.balance_currency = currency
obj.balance_display = obj.balance
obj.save()
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.accessentity': {
'Meta': {'object_name': 'AccessEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
},
'core.tag': {
'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.AccessEntity']"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'finance.account': {
'Meta': {'ordering': "['name']", 'object_name': 'Account', '_ormbases': ['core.Object']},
'balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'balance_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'balance_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"})
},
'finance.asset': {
'Meta': {'ordering': "['-purchase_date']", 'object_name': 'Asset', '_ormbases': ['core.Object']},
'asset_type': ('django.db.models.fields.CharField', [], {'default': "'fixed'", 'max_length': '32'}),
'current_value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'depreciation_rate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'depreciation_type': ('django.db.models.fields.CharField', [], {'default': "'straight'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'endlife_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2', 'blank': 'True'}),
'initial_value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'lifetime': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '0', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
'purchase_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'})
},
'finance.category': {
'Meta': {'object_name': 'Category', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'finance.currency': {
'Meta': {'object_name': 'Currency', '_ormbases': ['core.Object']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'factor': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '10', 'decimal_places': '4'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'finance.equity': {
'Meta': {'ordering': "['-purchase_date']", 'object_name': 'Equity', '_ormbases': ['core.Object']},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'equity_type': ('django.db.models.fields.CharField', [], {'default': "'share'", 'max_length': '32'}),
'issue_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_equity_issued'", 'to': "orm['identities.Contact']"}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_equity_owned'", 'to': "orm['identities.Contact']"}),
'purchase_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'sell_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
'finance.liability': {
'Meta': {'ordering': "['-due_date']", 'object_name': 'Liability', '_ormbases': ['core.Object']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_source'", 'to': "orm['identities.Contact']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_target'", 'to': "orm['identities.Contact']"}),
'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
},
'finance.tax': {
'Meta': {'object_name': 'Tax', '_ormbases': ['core.Object']},
'compound': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'})
},
'finance.transaction': {
'Meta': {'ordering': "['-datetime']", 'object_name': 'Transaction', '_ormbases': ['core.Object']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'liability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Liability']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_source'", 'to': "orm['identities.Contact']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_target'", 'to': "orm['identities.Contact']"}),
'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
},
'identities.contact': {
'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'related_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.AccessEntity']", 'null': 'True', 'blank': 'True'})
},
'identities.contactfield': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'identities.contacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['finance']
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import shutil
import sys
import tempfile
import time
import unittest
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune import Trainable, TuneError
from ray.tune import register_env, register_trainable, run_experiments
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.schedulers import TrialScheduler, FIFOScheduler
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
from ray.tune.result import (DEFAULT_RESULTS_DIR, TIMESTEPS_TOTAL, DONE,
HOSTNAME, NODE_IP, PID, EPISODES_TOTAL,
TRAINING_ITERATION, TIMESTEPS_THIS_ITER,
TIME_THIS_ITER_S, TIME_TOTAL_S)
from ray.tune.logger import Logger
from ray.tune.util import pin_in_object_store, get_pinned_object
from ray.tune.experiment import Experiment
from ray.tune.trial import (Trial, ExportFormat, Resources, resources_to_json,
json_to_resources)
from ray.tune.trial_runner import TrialRunner
from ray.tune.suggest import grid_search, BasicVariantGenerator
from ray.tune.suggest.suggestion import (_MockSuggestionAlgorithm,
SuggestionAlgorithm)
from ray.tune.suggest.variant_generator import (RecursiveDependencyError,
resolve_nested_dict)
if sys.version_info >= (3, 3):
from unittest.mock import patch
else:
from mock import patch
class TrainableFunctionApiTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=4, num_gpus=0)
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def checkAndReturnConsistentLogs(self, results, sleep_per_iter=None):
"""Checks logging is the same between APIs.
Ignore "DONE" for logging but checks that the
scheduler is notified properly with the last result.
"""
class_results = copy.deepcopy(results)
function_results = copy.deepcopy(results)
class_output = []
function_output = []
scheduler_notif = []
class MockScheduler(FIFOScheduler):
def on_trial_complete(self, runner, trial, result):
scheduler_notif.append(result)
class ClassAPILogger(Logger):
def on_result(self, result):
class_output.append(result)
class FunctionAPILogger(Logger):
def on_result(self, result):
function_output.append(result)
class _WrappedTrainable(Trainable):
def _setup(self, config):
del config
self._result_iter = copy.deepcopy(class_results)
def _train(self):
if sleep_per_iter:
time.sleep(sleep_per_iter)
res = self._result_iter.pop(0) # This should not fail
if not self._result_iter: # Mark "Done" for last result
res[DONE] = True
return res
def _function_trainable(config, reporter):
for result in function_results:
if sleep_per_iter:
time.sleep(sleep_per_iter)
reporter(**result)
class_trainable_name = "class_trainable"
register_trainable(class_trainable_name, _WrappedTrainable)
trials = run_experiments(
{
"function_api": {
"run": _function_trainable,
"loggers": [FunctionAPILogger],
},
"class_api": {
"run": class_trainable_name,
"loggers": [ClassAPILogger],
},
},
raise_on_failed_trial=False,
scheduler=MockScheduler())
# Ignore these fields
NO_COMPARE_FIELDS = {
HOSTNAME,
NODE_IP,
PID,
TIME_THIS_ITER_S,
TIME_TOTAL_S,
DONE, # This is ignored because FunctionAPI has different handling
"timestamp",
"time_since_restore",
"experiment_id",
"date",
}
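# as_comparable_result() below strips these volatile fields so the logs from
# the class-based and function-based APIs can be compared entry by entry.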
self.assertEqual(len(class_output), len(results))
self.assertEqual(len(function_output), len(results))
def as_comparable_result(result):
return {
k: v
for k, v in result.items() if k not in NO_COMPARE_FIELDS
}
function_comparable = [
as_comparable_result(result) for result in function_output
]
class_comparable = [
as_comparable_result(result) for result in class_output
]
self.assertEqual(function_comparable, class_comparable)
self.assertEqual(sum(t.get(DONE) for t in scheduler_notif), 2)
self.assertEqual(
as_comparable_result(scheduler_notif[0]),
as_comparable_result(scheduler_notif[1]))
# Make sure the last result is the same.
self.assertEqual(
as_comparable_result(trials[0].last_result),
as_comparable_result(trials[1].last_result))
return function_output, trials
def testPinObject(self):
X = pin_in_object_store("hello")
@ray.remote
def f():
return get_pinned_object(X)
self.assertEqual(ray.get(f.remote()), "hello")
def testFetchPinned(self):
X = pin_in_object_store("hello")
def train(config, reporter):
get_pinned_object(X)
reporter(timesteps_total=100, done=True)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100)
def testRegisterEnv(self):
register_env("foo", lambda: None)
self.assertRaises(TypeError, lambda: register_env("foo", 2))
def testRegisterEnvOverwrite(self):
def train(config, reporter):
reporter(timesteps_total=100, done=True)
def train2(config, reporter):
reporter(timesteps_total=200, done=True)
register_trainable("f1", train)
register_trainable("f1", train2)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 200)
def testRegisterTrainable(self):
def train(config, reporter):
pass
class A(object):
pass
class B(Trainable):
pass
register_trainable("foo", train)
register_trainable("foo", B)
self.assertRaises(TypeError, lambda: register_trainable("foo", B()))
self.assertRaises(TypeError, lambda: register_trainable("foo", A))
def testRegisterTrainableCallable(self):
def dummy_fn(config, reporter, steps):
reporter(timesteps_total=steps, done=True)
from functools import partial
steps = 500
register_trainable("test", partial(dummy_fn, steps=steps))
[trial] = run_experiments({
"foo": {
"run": "test",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps)
def testBuiltInTrainableResources(self):
class B(Trainable):
@classmethod
def default_resource_request(cls, config):
return Resources(cpu=config["cpu"], gpu=config["gpu"])
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
register_trainable("B", B)
def f(cpus, gpus, queue_trials):
return run_experiments(
{
"foo": {
"run": "B",
"config": {
"cpu": cpus,
"gpu": gpus,
},
}
},
queue_trials=queue_trials)[0]
# Should all succeed
self.assertEqual(f(0, 0, False).status, Trial.TERMINATED)
self.assertEqual(f(1, 0, True).status, Trial.TERMINATED)
self.assertEqual(f(1, 0, True).status, Trial.TERMINATED)
# Infeasible even with queueing enabled (no gpus)
self.assertRaises(TuneError, lambda: f(1, 1, True))
# Too large resource request
self.assertRaises(TuneError, lambda: f(100, 100, False))
self.assertRaises(TuneError, lambda: f(0, 100, False))
self.assertRaises(TuneError, lambda: f(100, 0, False))
# TODO(ekl) how can we test this is queued (hangs)?
# f(100, 0, True)
def testRewriteEnv(self):
def train(config, reporter):
reporter(timesteps_total=1)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
"env": "CartPole-v0",
}
})
self.assertEqual(trial.config["env"], "CartPole-v0")
def testConfigPurity(self):
def train(config, reporter):
assert config == {"a": "b"}, config
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"config": {
"a": "b"
},
}
})
def testLogdir(self):
def train(config, reporter):
assert "/tmp/logdir/foo" in os.getcwd(), os.getcwd()
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": "/tmp/logdir",
"config": {
"a": "b"
},
}
})
def testUploadDirNone(self):
def train(config, reporter):
reporter(timesteps_total=1)
[trial] = run_experiments({
"foo": {
"run": train,
"upload_dir": None,
"config": {
"a": "b"
},
}
})
self.assertFalse(trial.upload_dir)
def testLogdirStartingWithTilde(self):
local_dir = "~/ray_results/local_dir"
def train(config, reporter):
cwd = os.getcwd()
assert cwd.startswith(os.path.expanduser(local_dir)), cwd
assert not cwd.startswith("~"), cwd
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": local_dir,
"config": {
"a": "b"
},
}
})
def testLongFilename(self):
def train(config, reporter):
assert "/tmp/logdir/foo" in os.getcwd(), os.getcwd()
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": "/tmp/logdir",
"config": {
"a" * 50: tune.sample_from(lambda spec: 5.0 / 7),
"b" * 50: tune.sample_from(lambda spec: "long" * 40),
},
}
})
def testBadParams(self):
def f():
run_experiments({"foo": {}})
self.assertRaises(TuneError, f)
def testBadParams2(self):
def f():
run_experiments({
"foo": {
"run": "asdf",
"bah": "this param is not allowed",
}
})
self.assertRaises(TuneError, f)
def testBadParams3(self):
def f():
run_experiments({
"foo": {
"run": grid_search("invalid grid search"),
}
})
self.assertRaises(TuneError, f)
def testBadParams4(self):
def f():
run_experiments({
"foo": {
"run": "asdf",
}
})
self.assertRaises(TuneError, f)
def testBadParams5(self):
def f():
run_experiments({"foo": {"run": "PPO", "stop": {"asdf": 1}}})
self.assertRaises(TuneError, f)
def testBadParams6(self):
def f():
run_experiments({
"foo": {
"run": "PPO",
"resources_per_trial": {
"asdf": 1
}
}
})
self.assertRaises(TuneError, f)
def testBadStoppingReturn(self):
def train(config, reporter):
reporter()
register_trainable("f1", train)
def f():
run_experiments({
"foo": {
"run": "f1",
"stop": {
"time": 10
},
}
})
self.assertRaises(TuneError, f)
def testEarlyReturn(self):
def train(config, reporter):
reporter(timesteps_total=100, done=True)
time.sleep(99999)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100)
def testErrorReturn(self):
def train(config, reporter):
raise Exception("uh oh")
register_trainable("f1", train)
def f():
run_experiments({
"foo": {
"run": "f1",
}
})
self.assertRaises(TuneError, f)
def testSuccess(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testNoRaiseFlag(self):
def train(config, reporter):
raise Exception()
register_trainable("f1", train)
[trial] = run_experiments(
{
"foo": {
"run": "f1",
}
}, raise_on_failed_trial=False)
self.assertEqual(trial.status, Trial.ERROR)
def testReportInfinity(self):
def train(config, reporter):
for i in range(100):
reporter(mean_accuracy=float("inf"))
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result["mean_accuracy"], float("inf"))
def testReportTimeStep(self):
        # Test that no timestep counts are logged if the Trainable never
        # returns any.
results1 = [dict(mean_accuracy=5, done=i == 99) for i in range(100)]
logs1, _ = self.checkAndReturnConsistentLogs(results1)
self.assertTrue(all(log[TIMESTEPS_TOTAL] is None for log in logs1))
# Test that no timesteps_this_iter are logged if only timesteps_total
# are returned.
results2 = [dict(timesteps_total=5, done=i == 9) for i in range(10)]
logs2, _ = self.checkAndReturnConsistentLogs(results2)
# Re-run the same trials but with added delay. This is to catch some
# inconsistent timestep counting that was present in the multi-threaded
# FunctionRunner. This part of the test can be removed once the
# multi-threaded FunctionRunner is removed from ray/tune.
# TODO: remove once the multi-threaded function runner is gone.
logs2, _ = self.checkAndReturnConsistentLogs(results2, 0.5)
# check all timesteps_total report the same value
self.assertTrue(all(log[TIMESTEPS_TOTAL] == 5 for log in logs2))
# check that none of the logs report timesteps_this_iter
self.assertFalse(
any(hasattr(log, TIMESTEPS_THIS_ITER) for log in logs2))
        # Test that timesteps_total and episodes_total are reported even when
        # timesteps_this_iter and episodes_this_iter only return zeros.
results3 = [
dict(timesteps_this_iter=0, episodes_this_iter=0)
for i in range(10)
]
logs3, _ = self.checkAndReturnConsistentLogs(results3)
self.assertTrue(all(log[TIMESTEPS_TOTAL] == 0 for log in logs3))
self.assertTrue(all(log[EPISODES_TOTAL] == 0 for log in logs3))
# Test that timesteps_total and episodes_total are properly counted
# when timesteps_this_iter and episodes_this_iter report non-zero
# values.
results4 = [
dict(timesteps_this_iter=3, episodes_this_iter=i)
for i in range(10)
]
logs4, _ = self.checkAndReturnConsistentLogs(results4)
# The last reported result should not be double-logged.
self.assertEqual(logs4[-1][TIMESTEPS_TOTAL], 30)
self.assertNotEqual(logs4[-2][TIMESTEPS_TOTAL],
logs4[-1][TIMESTEPS_TOTAL])
self.assertEqual(logs4[-1][EPISODES_TOTAL], 45)
self.assertNotEqual(logs4[-2][EPISODES_TOTAL],
logs4[-1][EPISODES_TOTAL])
def testAllValuesReceived(self):
results1 = [
dict(timesteps_total=(i + 1), my_score=i**2, done=i == 4)
for i in range(5)
]
logs1, _ = self.checkAndReturnConsistentLogs(results1)
# check if the correct number of results were reported
self.assertEqual(len(logs1), len(results1))
def check_no_missing(reported_result, result):
common_results = [reported_result[k] == result[k] for k in result]
return all(common_results)
# check that no result was dropped or modified
complete_results = [
check_no_missing(log, result)
for log, result in zip(logs1, results1)
]
self.assertTrue(all(complete_results))
# check if done was logged exactly once
self.assertEqual(len([r for r in logs1 if r.get("done")]), 1)
def testNoDoneReceived(self):
# repeat same test but without explicitly reporting done=True
results1 = [
dict(timesteps_total=(i + 1), my_score=i**2) for i in range(5)
]
logs1, trials = self.checkAndReturnConsistentLogs(results1)
# check if the correct number of results were reported.
self.assertEqual(len(logs1), len(results1))
def check_no_missing(reported_result, result):
common_results = [reported_result[k] == result[k] for k in result]
return all(common_results)
# check that no result was dropped or modified
complete_results1 = [
check_no_missing(log, result)
for log, result in zip(logs1, results1)
]
self.assertTrue(all(complete_results1))
def testCheckpointDict(self):
class TestTrain(Trainable):
def _setup(self, config):
self.state = {"hi": 1}
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
def _save(self, path):
return self.state
def _restore(self, state):
self.state = state
test_trainable = TestTrain()
result = test_trainable.save()
test_trainable.state["hi"] = 2
test_trainable.restore(result)
self.assertEqual(test_trainable.state["hi"], 1)
trials = run_experiments({
"foo": {
"run": TestTrain,
"checkpoint_at_end": True
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
def testMultipleCheckpoints(self):
class TestTrain(Trainable):
def _setup(self, config):
self.state = {"hi": 1, "iter": 0}
def _train(self):
self.state["iter"] += 1
return {"timesteps_this_iter": 1, "done": True}
def _save(self, path):
return self.state
def _restore(self, state):
self.state = state
test_trainable = TestTrain()
checkpoint_1 = test_trainable.save()
test_trainable.train()
checkpoint_2 = test_trainable.save()
self.assertNotEqual(checkpoint_1, checkpoint_2)
test_trainable.restore(checkpoint_2)
self.assertEqual(test_trainable.state["iter"], 1)
test_trainable.restore(checkpoint_1)
self.assertEqual(test_trainable.state["iter"], 0)
trials = run_experiments({
"foo": {
"run": TestTrain,
"checkpoint_at_end": True
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
def testIterationCounter(self):
def train(config, reporter):
for i in range(100):
reporter(itr=i, timesteps_this_iter=1)
register_trainable("exp", train)
config = {
"my_exp": {
"run": "exp",
"config": {
"iterations": 100,
},
"stop": {
"timesteps_total": 100
},
}
}
[trial] = run_experiments(config)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TRAINING_ITERATION], 100)
self.assertEqual(trial.last_result["itr"], 99)
class RunExperimentTest(unittest.TestCase):
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def testDict(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
trials = run_experiments({
"foo": {
"run": "f1",
},
"bar": {
"run": "f1",
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testExperiment(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
exp1 = Experiment(**{
"name": "foo",
"run": "f1",
})
[trial] = run_experiments(exp1)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testExperimentList(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
exp1 = Experiment(**{
"name": "foo",
"run": "f1",
})
exp2 = Experiment(**{
"name": "bar",
"run": "f1",
})
trials = run_experiments([exp1, exp2])
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testAutoregisterTrainable(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
class B(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
register_trainable("f1", train)
trials = run_experiments({
"foo": {
"run": train,
},
"bar": {
"run": B
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
def testCheckpointAtEnd(self):
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
def _save(self, path):
checkpoint = path + "/checkpoint"
with open(checkpoint, "w") as f:
f.write("OK")
return checkpoint
trials = run_experiments({
"foo": {
"run": train,
"checkpoint_at_end": True
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
def testExportFormats(self):
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
def _export_model(self, export_formats, export_dir):
path = export_dir + "/exported"
with open(path, "w") as f:
f.write("OK")
return {export_formats[0]: path}
trials = run_experiments({
"foo": {
"run": train,
"export_formats": ["format"]
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(
os.path.exists(os.path.join(trial.logdir, "exported")))
def testInvalidExportFormats(self):
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
def _export_model(self, export_formats, export_dir):
ExportFormat.validate(export_formats)
return {}
def fail_trial():
run_experiments({
"foo": {
"run": train,
"export_formats": ["format"]
}
})
self.assertRaises(TuneError, fail_trial)
def testCustomResources(self):
ray.shutdown()
ray.init(resources={"hi": 3})
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
trials = run_experiments({
"foo": {
"run": train,
"resources_per_trial": {
"cpu": 1,
"custom_resources": {
"hi": 2
}
}
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
def testCustomLogger(self):
class CustomLogger(Logger):
def on_result(self, result):
with open(os.path.join(self.logdir, "test.log"), "w") as f:
f.write("hi")
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"loggers": [CustomLogger]
}
})
self.assertTrue(os.path.exists(os.path.join(trial.logdir, "test.log")))
self.assertFalse(
os.path.exists(os.path.join(trial.logdir, "params.json")))
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
}
}
})
self.assertTrue(
os.path.exists(os.path.join(trial.logdir, "params.json")))
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"loggers": []
}
})
self.assertFalse(
os.path.exists(os.path.join(trial.logdir, "params.json")))
def testCustomTrialString(self):
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"trial_name_creator": tune.function(
lambda t: "{}_{}_321".format(t.trainable_name, t.trial_id))
}
})
self.assertEquals(
str(trial), "{}_{}_321".format(trial.trainable_name,
trial.trial_id))
def testSyncFunction(self):
def fail_sync_local():
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"upload_dir": "test",
"sync_function": "ls {remote_dir}"
}
})
self.assertRaises(AssertionError, fail_sync_local)
def fail_sync_remote():
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"upload_dir": "test",
"sync_function": "ls {local_dir}"
}
})
self.assertRaises(AssertionError, fail_sync_remote)
def sync_func(local, remote):
with open(os.path.join(local, "test.log"), "w") as f:
f.write(remote)
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"upload_dir": "test",
"sync_function": tune.function(sync_func)
}
})
self.assertTrue(os.path.exists(os.path.join(trial.logdir, "test.log")))
class VariantGeneratorTest(unittest.TestCase):
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def generate_trials(self, spec, name):
suggester = BasicVariantGenerator()
suggester.add_configurations({name: spec})
return suggester.next_trials()
def testParseToTrials(self):
trials = self.generate_trials({
"run": "PPO",
"num_samples": 2,
"max_failures": 5,
"config": {
"env": "Pong-v0",
"foo": "bar"
},
}, "tune-pong")
trials = list(trials)
self.assertEqual(len(trials), 2)
self.assertEqual(str(trials[0]), "PPO_Pong-v0_0")
self.assertEqual(trials[0].config, {"foo": "bar", "env": "Pong-v0"})
self.assertEqual(trials[0].trainable_name, "PPO")
self.assertEqual(trials[0].experiment_tag, "0")
self.assertEqual(trials[0].max_failures, 5)
self.assertEqual(trials[0].local_dir,
os.path.join(DEFAULT_RESULTS_DIR, "tune-pong"))
self.assertEqual(trials[1].experiment_tag, "1")
def testEval(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"foo": {
"eval": "2 + 2"
},
},
}, "eval")
trials = list(trials)
self.assertEqual(len(trials), 1)
self.assertEqual(trials[0].config, {"foo": 4})
self.assertEqual(trials[0].experiment_tag, "0_foo=4")
def testGridSearch(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"bar": {
"grid_search": [True, False]
},
"foo": {
"grid_search": [1, 2, 3]
},
},
}, "grid_search")
trials = list(trials)
self.assertEqual(len(trials), 6)
self.assertEqual(trials[0].config, {"bar": True, "foo": 1})
self.assertEqual(trials[0].experiment_tag, "0_bar=True,foo=1")
self.assertEqual(trials[1].config, {"bar": False, "foo": 1})
self.assertEqual(trials[1].experiment_tag, "1_bar=False,foo=1")
self.assertEqual(trials[2].config, {"bar": True, "foo": 2})
self.assertEqual(trials[3].config, {"bar": False, "foo": 2})
self.assertEqual(trials[4].config, {"bar": True, "foo": 3})
self.assertEqual(trials[5].config, {"bar": False, "foo": 3})
def testGridSearchAndEval(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"qux": tune.sample_from(lambda spec: 2 + 2),
"bar": grid_search([True, False]),
"foo": grid_search([1, 2, 3]),
},
}, "grid_eval")
trials = list(trials)
self.assertEqual(len(trials), 6)
self.assertEqual(trials[0].config, {"bar": True, "foo": 1, "qux": 4})
self.assertEqual(trials[0].experiment_tag, "0_bar=True,foo=1,qux=4")
def testConditionResolution(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"x": 1,
"y": tune.sample_from(lambda spec: spec.config.x + 1),
"z": tune.sample_from(lambda spec: spec.config.y + 1),
},
}, "condition_resolution")
trials = list(trials)
self.assertEqual(len(trials), 1)
self.assertEqual(trials[0].config, {"x": 1, "y": 2, "z": 3})
def testDependentLambda(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"x": grid_search([1, 2]),
"y": tune.sample_from(lambda spec: spec.config.x * 100),
},
}, "dependent_lambda")
trials = list(trials)
self.assertEqual(len(trials), 2)
self.assertEqual(trials[0].config, {"x": 1, "y": 100})
self.assertEqual(trials[1].config, {"x": 2, "y": 200})
def testDependentGridSearch(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"x": grid_search([
tune.sample_from(lambda spec: spec.config.y * 100),
tune.sample_from(lambda spec: spec.config.y * 200)
]),
"y": tune.sample_from(lambda spec: 1),
},
}, "dependent_grid_search")
trials = list(trials)
self.assertEqual(len(trials), 2)
self.assertEqual(trials[0].config, {"x": 100, "y": 1})
self.assertEqual(trials[1].config, {"x": 200, "y": 1})
def test_resolve_dict(self):
config = {
"a": {
"b": 1,
"c": 2,
},
"b": {
"a": 3
}
}
resolved = resolve_nested_dict(config)
for k, v in [(("a", "b"), 1), (("a", "c"), 2), (("b", "a"), 3)]:
self.assertEqual(resolved.get(k), v)
def testRecursiveDep(self):
try:
list(
self.generate_trials({
"run": "PPO",
"config": {
"foo": tune.sample_from(lambda spec: spec.config.foo),
},
}, "recursive_dep"))
except RecursiveDependencyError as e:
assert "`foo` recursively depends on" in str(e), e
else:
assert False
def testMaxConcurrentSuggestions(self):
"""Checks that next_trials() supports throttling."""
experiment_spec = {
"run": "PPO",
"num_samples": 6,
}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm(max_concurrent=4)
searcher.add_configurations(experiments)
trials = searcher.next_trials()
self.assertEqual(len(trials), 4)
self.assertEqual(searcher.next_trials(), [])
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
self.assertEqual(len(searcher.next_trials()), 1)
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
self.assertEqual(len(searcher.next_trials()), 1)
self.assertEqual(len(searcher.next_trials()), 0)
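# Builds a FIFOScheduler and a BasicVariantGenerator subclass that record
# which trials errored, so the failure-recovery tests can assert on them.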
def create_mock_components():
class _MockScheduler(FIFOScheduler):
errored_trials = []
def on_trial_error(self, trial_runner, trial):
self.errored_trials += [trial]
class _MockSearchAlg(BasicVariantGenerator):
errored_trials = []
def on_trial_complete(self, trial_id, error=False, **kwargs):
if error:
self.errored_trials += [trial_id]
searchalg = _MockSearchAlg()
scheduler = _MockScheduler()
return searchalg, scheduler
class TrialRunnerTest(unittest.TestCase):
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def testTrialStatus(self):
ray.init()
trial = Trial("__fake")
trial_executor = RayTrialExecutor()
self.assertEqual(trial.status, Trial.PENDING)
trial_executor.start_trial(trial)
self.assertEqual(trial.status, Trial.RUNNING)
trial_executor.stop_trial(trial)
self.assertEqual(trial.status, Trial.TERMINATED)
trial_executor.stop_trial(trial, error=True)
self.assertEqual(trial.status, Trial.ERROR)
def testExperimentTagTruncation(self):
ray.init()
def train(config, reporter):
reporter(timesteps_total=1)
trial_executor = RayTrialExecutor()
register_trainable("f1", train)
experiments = {
"foo": {
"run": "f1",
"config": {
"a" * 50: tune.sample_from(lambda spec: 5.0 / 7),
"b" * 50: tune.sample_from(lambda spec: "long" * 40)
},
}
}
for name, spec in experiments.items():
trial_generator = BasicVariantGenerator()
trial_generator.add_configurations({name: spec})
for trial in trial_generator.next_trials():
trial_executor.start_trial(trial)
self.assertLessEqual(len(trial.logdir), 200)
trial_executor.stop_trial(trial)
def testExtraResources(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(cpu=1, gpu=0, extra_cpu=3, extra_gpu=1),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
def testCustomResources(self):
ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(cpu=1, gpu=0, custom_resources={"a": 2}),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
def testExtraCustomResources(self):
ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(
cpu=1, gpu=0, extra_custom_resources={"a": 2}),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertTrue(sum(t.status == Trial.RUNNING for t in trials) < 2)
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
def testCustomResources2(self):
ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
runner = TrialRunner()
resource1 = Resources(cpu=1, gpu=0, extra_custom_resources={"a": 2})
self.assertTrue(runner.has_resources(resource1))
resource2 = Resources(cpu=1, gpu=0, custom_resources={"a": 2})
self.assertTrue(runner.has_resources(resource2))
resource3 = Resources(cpu=1, gpu=0, custom_resources={"a": 3})
self.assertFalse(runner.has_resources(resource3))
resource4 = Resources(cpu=1, gpu=0, extra_custom_resources={"a": 3})
self.assertFalse(runner.has_resources(resource4))
def testFractionalGpus(self):
ray.init(num_cpus=4, num_gpus=1)
runner = TrialRunner()
kwargs = {
"resources": Resources(cpu=1, gpu=0.5),
}
trials = [
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs)
]
for t in trials:
runner.add_trial(t)
for _ in range(10):
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(trials[2].status, Trial.PENDING)
self.assertEqual(trials[3].status, Trial.PENDING)
def testResourceScheduler(self):
ray.init(num_cpus=4, num_gpus=1)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(cpu=1, gpu=1),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.TERMINATED)
def testMultiStepRun(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 5
},
"resources": Resources(cpu=1, gpu=1),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.RUNNING)
def testMultiStepRun2(self):
"""Checks that runner.step throws when overstepping."""
ray.init(num_cpus=1)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 2
},
"resources": Resources(cpu=1, gpu=0),
}
trials = [Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertRaises(TuneError, runner.step)
def testChangeResources(self):
"""Checks that resource requirements can be changed on fly."""
ray.init(num_cpus=2)
class ChangingScheduler(FIFOScheduler):
def on_trial_result(self, trial_runner, trial, result):
if result["training_iteration"] == 1:
executor = trial_runner.trial_executor
executor.stop_trial(trial, stop_logger=False)
trial.update_resources(2, 0)
executor.start_trial(trial)
return TrialScheduler.CONTINUE
runner = TrialRunner(scheduler=ChangingScheduler())
kwargs = {
"stopping_criterion": {
"training_iteration": 2
},
"resources": Resources(cpu=1, gpu=0),
}
trials = [Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(runner.trial_executor._committed_resources.cpu, 1)
self.assertRaises(ValueError, lambda: trials[0].update_resources(2, 0))
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(runner.trial_executor._committed_resources.cpu, 2)
def testErrorHandling(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(cpu=1, gpu=1),
}
_global_registry.register(TRAINABLE_CLASS, "asdf", None)
trials = [Trial("asdf", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.ERROR)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.ERROR)
self.assertEqual(trials[1].status, Trial.RUNNING)
def testThrowOnOverstep(self):
ray.init(num_cpus=1, num_gpus=1)
runner = TrialRunner()
runner.step()
self.assertRaises(TuneError, runner.step)
def testFailureRecoveryDisabled(self):
ray.init(num_cpus=1, num_gpus=1)
searchalg, scheduler = create_mock_components()
runner = TrialRunner(searchalg, scheduler=scheduler)
kwargs = {
"resources": Resources(cpu=1, gpu=1),
"checkpoint_freq": 1,
"max_failures": 0,
"config": {
"mock_error": True,
},
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.ERROR)
self.assertEqual(trials[0].num_failures, 1)
self.assertEqual(len(searchalg.errored_trials), 1)
self.assertEqual(len(scheduler.errored_trials), 1)
def testFailureRecoveryEnabled(self):
ray.init(num_cpus=1, num_gpus=1)
searchalg, scheduler = create_mock_components()
runner = TrialRunner(searchalg, scheduler=scheduler)
kwargs = {
"resources": Resources(cpu=1, gpu=1),
"checkpoint_freq": 1,
"max_failures": 1,
"config": {
"mock_error": True,
},
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[0].num_failures, 1)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(len(searchalg.errored_trials), 0)
self.assertEqual(len(scheduler.errored_trials), 0)
def testFailureRecoveryNodeRemoval(self):
ray.init(num_cpus=1, num_gpus=1)
searchalg, scheduler = create_mock_components()
runner = TrialRunner(searchalg, scheduler=scheduler)
kwargs = {
"resources": Resources(cpu=1, gpu=1),
"checkpoint_freq": 1,
"max_failures": 1,
"config": {
"mock_error": True,
},
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
with patch("ray.cluster_resources") as resource_mock:
resource_mock.return_value = {"CPU": 1, "GPU": 1}
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
# Mimic a node failure
resource_mock.return_value = {"CPU": 0, "GPU": 0}
runner.step()
self.assertEqual(trials[0].status, Trial.PENDING)
self.assertEqual(trials[0].num_failures, 1)
self.assertEqual(len(searchalg.errored_trials), 0)
self.assertEqual(len(scheduler.errored_trials), 1)
def testFailureRecoveryMaxFailures(self):
ray.init(num_cpus=1, num_gpus=1)
runner = TrialRunner()
kwargs = {
"resources": Resources(cpu=1, gpu=1),
"checkpoint_freq": 1,
"max_failures": 2,
"config": {
"mock_error": True,
"persistent_error": True,
},
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[0].num_failures, 1)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[0].num_failures, 2)
runner.step()
self.assertEqual(trials[0].status, Trial.ERROR)
self.assertEqual(trials[0].num_failures, 3)
def testCheckpointing(self):
ray.init(num_cpus=1, num_gpus=1)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 1
},
"resources": Resources(cpu=1, gpu=1),
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
path = runner.trial_executor.save(trials[0])
kwargs["restore_path"] = path
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(ray.get(trials[1].runner.get_info.remote()), 1)
self.addCleanup(os.remove, path)
def testRestoreMetricsAfterCheckpointing(self):
ray.init(num_cpus=1, num_gpus=1)
runner = TrialRunner()
kwargs = {
"resources": Resources(cpu=1, gpu=1),
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
path = runner.trial_executor.save(trials[0])
runner.trial_executor.stop_trial(trials[0])
kwargs["restore_path"] = path
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[1].last_result["timesteps_since_restore"], 10)
self.assertEqual(trials[1].last_result["iterations_since_restore"], 1)
self.assertGreater(trials[1].last_result["time_since_restore"], 0)
runner.step()
self.assertEqual(trials[1].last_result["timesteps_since_restore"], 20)
self.assertEqual(trials[1].last_result["iterations_since_restore"], 2)
self.assertGreater(trials[1].last_result["time_since_restore"], 0)
self.addCleanup(os.remove, path)
def testCheckpointingAtEnd(self):
ray.init(num_cpus=1, num_gpus=1)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 2
},
"checkpoint_at_end": True,
"resources": Resources(cpu=1, gpu=1),
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
runner.step()
self.assertEqual(trials[0].last_result[DONE], True)
self.assertEqual(trials[0].has_checkpoint(), True)
def testResultDone(self):
"""Tests that last_result is marked `done` after trial is complete."""
ray.init(num_cpus=1, num_gpus=1)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 2
},
"resources": Resources(cpu=1, gpu=1),
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertNotEqual(trials[0].last_result[DONE], True)
runner.step()
self.assertEqual(trials[0].last_result[DONE], True)
def testPauseThenResume(self):
ray.init(num_cpus=1, num_gpus=1)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 2
},
"resources": Resources(cpu=1, gpu=1),
}
runner.add_trial(Trial("__fake", **kwargs))
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(ray.get(trials[0].runner.get_info.remote()), None)
self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
runner.trial_executor.pause_trial(trials[0])
self.assertEqual(trials[0].status, Trial.PAUSED)
runner.trial_executor.resume_trial(trials[0])
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(ray.get(trials[0].runner.get_info.remote()), 1)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
def testStepHook(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
def on_step_begin(self):
self._update_avail_resources()
cnt = self.pre_step if hasattr(self, "pre_step") else 0
setattr(self, "pre_step", cnt + 1)
def on_step_end(self):
cnt = self.pre_step if hasattr(self, "post_step") else 0
setattr(self, "post_step", 1 + cnt)
import types
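        # Bind the hooks onto this executor instance so the counters above
        # are updated every time the runner steps.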
runner.trial_executor.on_step_begin = types.MethodType(
on_step_begin, runner.trial_executor)
runner.trial_executor.on_step_end = types.MethodType(
on_step_end, runner.trial_executor)
kwargs = {
"stopping_criterion": {
"training_iteration": 5
},
"resources": Resources(cpu=1, gpu=1),
}
runner.add_trial(Trial("__fake", **kwargs))
runner.step()
self.assertEqual(runner.trial_executor.pre_step, 1)
self.assertEqual(runner.trial_executor.post_step, 1)
def testStopTrial(self):
ray.init(num_cpus=4, num_gpus=2)
runner = TrialRunner()
kwargs = {
"stopping_criterion": {
"training_iteration": 5
},
"resources": Resources(cpu=1, gpu=1),
}
trials = [
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs),
Trial("__fake", **kwargs)
]
for t in trials:
runner.add_trial(t)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
# Stop trial while running
runner.stop_trial(trials[0])
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.PENDING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(trials[-1].status, Trial.PENDING)
# Stop trial while pending
runner.stop_trial(trials[-1])
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(trials[-1].status, Trial.TERMINATED)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(trials[2].status, Trial.RUNNING)
self.assertEqual(trials[-1].status, Trial.TERMINATED)
def testSearchAlgNotification(self):
"""Checks notification of trial to the Search Algorithm."""
ray.init(num_cpus=4, num_gpus=2)
experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm(max_concurrent=10)
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher)
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(searcher.counter["result"], 1)
self.assertEqual(searcher.counter["complete"], 1)
def testSearchAlgFinished(self):
"""Checks that SearchAlg is Finished before all trials are done."""
ray.init(num_cpus=4, num_gpus=2)
experiment_spec = {"run": "__fake", "stop": {"training_iteration": 1}}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm(max_concurrent=10)
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher)
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertTrue(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(searcher.is_finished())
self.assertTrue(runner.is_finished())
def testSearchAlgSchedulerInteraction(self):
"""Checks that TrialScheduler killing trial will notify SearchAlg."""
class _MockScheduler(FIFOScheduler):
def on_trial_result(self, *args, **kwargs):
return TrialScheduler.STOP
ray.init(num_cpus=4, num_gpus=2)
experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm(max_concurrent=10)
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher, scheduler=_MockScheduler())
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertTrue(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(searcher.is_finished())
self.assertTrue(runner.is_finished())
def testSearchAlgStalled(self):
"""Checks that runner and searcher state is maintained when stalled."""
ray.init(num_cpus=4, num_gpus=2)
experiment_spec = {
"run": "__fake",
"num_samples": 3,
"stop": {
"training_iteration": 1
}
}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm(max_concurrent=1)
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher)
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[0].status, Trial.RUNNING)
runner.step()
self.assertEqual(trials[0].status, Trial.TERMINATED)
trials = runner.get_trials()
runner.step()
self.assertEqual(trials[1].status, Trial.RUNNING)
self.assertEqual(len(searcher.live_trials), 1)
searcher.stall = True
runner.step()
self.assertEqual(trials[1].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(all(trial.is_finished() for trial in trials))
self.assertFalse(searcher.is_finished())
self.assertFalse(runner.is_finished())
searcher.stall = False
runner.step()
trials = runner.get_trials()
self.assertEqual(trials[2].status, Trial.RUNNING)
self.assertEqual(len(searcher.live_trials), 1)
runner.step()
self.assertEqual(trials[2].status, Trial.TERMINATED)
self.assertEqual(len(searcher.live_trials), 0)
self.assertTrue(searcher.is_finished())
self.assertTrue(runner.is_finished())
def testSearchAlgFinishes(self):
"""Empty SearchAlg changing state in `next_trials` does not crash."""
class FinishFastAlg(SuggestionAlgorithm):
_index = 0
def next_trials(self):
trials = []
self._index += 1
for trial in self._trial_generator:
trials += [trial]
break
if self._index > 4:
self._finished = True
return trials
def _suggest(self, trial_id):
return {}
ray.init(num_cpus=2)
experiment_spec = {
"run": "__fake",
"num_samples": 2,
"stop": {
"training_iteration": 1
}
}
searcher = FinishFastAlg()
experiments = [Experiment.from_json("test", experiment_spec)]
searcher.add_configurations(experiments)
runner = TrialRunner(search_alg=searcher)
self.assertFalse(runner.is_finished())
runner.step() # This launches a new run
runner.step() # This launches a 2nd run
self.assertFalse(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step() # This kills the first run
self.assertFalse(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step() # This kills the 2nd run
self.assertFalse(searcher.is_finished())
self.assertFalse(runner.is_finished())
runner.step() # this converts self._finished to True
self.assertTrue(searcher.is_finished())
self.assertRaises(TuneError, runner.step)
def testTrialSaveRestore(self):
"""Creates different trials to test runner.checkpoint/restore."""
ray.init(num_cpus=3)
tmpdir = tempfile.mkdtemp()
runner = TrialRunner(metadata_checkpoint_dir=tmpdir)
trials = [
Trial(
"__fake",
trial_id="trial_terminate",
stopping_criterion={"training_iteration": 1},
checkpoint_freq=1)
]
runner.add_trial(trials[0])
runner.step() # start
runner.step()
self.assertEquals(trials[0].status, Trial.TERMINATED)
trials += [
Trial(
"__fake",
trial_id="trial_fail",
stopping_criterion={"training_iteration": 3},
checkpoint_freq=1,
config={"mock_error": True})
]
runner.add_trial(trials[1])
runner.step()
runner.step()
runner.step()
self.assertEquals(trials[1].status, Trial.ERROR)
trials += [
Trial(
"__fake",
trial_id="trial_succ",
stopping_criterion={"training_iteration": 2},
checkpoint_freq=1)
]
runner.add_trial(trials[2])
runner.step()
self.assertEquals(len(runner.trial_executor.get_checkpoints()), 3)
self.assertEquals(trials[2].status, Trial.RUNNING)
runner2 = TrialRunner.restore(tmpdir)
for tid in ["trial_terminate", "trial_fail"]:
original_trial = runner.get_trial(tid)
restored_trial = runner2.get_trial(tid)
self.assertEqual(original_trial.status, restored_trial.status)
restored_trial = runner2.get_trial("trial_succ")
self.assertEqual(Trial.PENDING, restored_trial.status)
runner2.step()
runner2.step()
runner2.step()
self.assertRaises(TuneError, runner2.step)
shutil.rmtree(tmpdir)
def testTrialNoSave(self):
"""Check that non-checkpointing trials are not saved."""
ray.init(num_cpus=3)
tmpdir = tempfile.mkdtemp()
runner = TrialRunner(metadata_checkpoint_dir=tmpdir)
runner.add_trial(
Trial(
"__fake",
trial_id="non_checkpoint",
stopping_criterion={"training_iteration": 2}))
while not all(t.status == Trial.TERMINATED
for t in runner.get_trials()):
runner.step()
runner.add_trial(
Trial(
"__fake",
trial_id="checkpoint",
checkpoint_at_end=True,
stopping_criterion={"training_iteration": 2}))
while not all(t.status == Trial.TERMINATED
for t in runner.get_trials()):
runner.step()
runner.add_trial(
Trial(
"__fake",
trial_id="pending",
stopping_criterion={"training_iteration": 2}))
runner.step()
runner.step()
runner2 = TrialRunner.restore(tmpdir)
new_trials = runner2.get_trials()
self.assertEquals(len(new_trials), 3)
self.assertTrue(
runner2.get_trial("non_checkpoint").status == Trial.TERMINATED)
self.assertTrue(
runner2.get_trial("checkpoint").status == Trial.TERMINATED)
self.assertTrue(runner2.get_trial("pending").status == Trial.PENDING)
self.assertTrue(not runner2.get_trial("pending").last_result)
runner2.step()
shutil.rmtree(tmpdir)
def testCheckpointWithFunction(self):
ray.init()
trial = Trial(
"__fake",
config={
"callbacks": {
"on_episode_start": tune.function(lambda i: i),
}
},
checkpoint_freq=1)
tmpdir = tempfile.mkdtemp()
runner = TrialRunner(metadata_checkpoint_dir=tmpdir)
runner.add_trial(trial)
for i in range(5):
runner.step()
# force checkpoint
runner.checkpoint()
runner2 = TrialRunner.restore(tmpdir)
new_trial = runner2.get_trials()[0]
self.assertTrue("callbacks" in new_trial.config)
self.assertTrue("on_episode_start" in new_trial.config["callbacks"])
shutil.rmtree(tmpdir)
def testCheckpointOverwrite(self):
def count_checkpoints(cdir):
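            # Count the experiment_state*.json metadata checkpoints on disk.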
return sum((fname.startswith("experiment_state")
and fname.endswith(".json"))
for fname in os.listdir(cdir))
ray.init()
trial = Trial("__fake", checkpoint_freq=1)
tmpdir = tempfile.mkdtemp()
runner = TrialRunner(metadata_checkpoint_dir=tmpdir)
runner.add_trial(trial)
for i in range(5):
runner.step()
# force checkpoint
runner.checkpoint()
self.assertEquals(count_checkpoints(tmpdir), 1)
runner2 = TrialRunner.restore(tmpdir)
for i in range(5):
runner2.step()
self.assertEquals(count_checkpoints(tmpdir), 2)
runner2.checkpoint()
self.assertEquals(count_checkpoints(tmpdir), 2)
shutil.rmtree(tmpdir)
class SearchAlgorithmTest(unittest.TestCase):
def testNestedSuggestion(self):
class TestSuggestion(SuggestionAlgorithm):
def _suggest(self, trial_id):
return {"a": {"b": {"c": {"d": 4, "e": 5}}}}
alg = TestSuggestion()
alg.add_configurations({"test": {"run": "__fake"}})
trial = alg.next_trials()[0]
self.assertTrue("e=5" in trial.experiment_tag)
self.assertTrue("d=4" in trial.experiment_tag)
class ResourcesTest(unittest.TestCase):
def testSubtraction(self):
resource_1 = Resources(
1,
0,
0,
1,
custom_resources={
"a": 1,
"b": 2
},
extra_custom_resources={
"a": 1,
"b": 1
})
resource_2 = Resources(
1,
0,
0,
1,
custom_resources={
"a": 1,
"b": 2
},
extra_custom_resources={
"a": 1,
"b": 1
})
new_res = Resources.subtract(resource_1, resource_2)
self.assertTrue(new_res.cpu == 0)
self.assertTrue(new_res.gpu == 0)
self.assertTrue(new_res.extra_cpu == 0)
self.assertTrue(new_res.extra_gpu == 0)
self.assertTrue(all(k == 0 for k in new_res.custom_resources.values()))
self.assertTrue(
all(k == 0 for k in new_res.extra_custom_resources.values()))
def testDifferentResources(self):
resource_1 = Resources(1, 0, 0, 1, custom_resources={"a": 1, "b": 2})
resource_2 = Resources(1, 0, 0, 1, custom_resources={"a": 1, "c": 2})
new_res = Resources.subtract(resource_1, resource_2)
assert "c" in new_res.custom_resources
assert "b" in new_res.custom_resources
self.assertTrue(new_res.cpu == 0)
self.assertTrue(new_res.gpu == 0)
self.assertTrue(new_res.extra_cpu == 0)
self.assertTrue(new_res.extra_gpu == 0)
self.assertTrue(new_res.get("a") == 0)
def testSerialization(self):
original = Resources(1, 0, 0, 1, custom_resources={"a": 1, "b": 2})
jsoned = resources_to_json(original)
new_resource = json_to_resources(jsoned)
self.assertEquals(original, new_resource)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
from denorm.fields import SumField
import django
from django.db import models
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from denorm import denormalized, depend_on_related, CountField, CacheKeyField, cached
from django.core.cache import cache
class CachedModelA(models.Model):
b = models.ForeignKey('CachedModelB')
@cached(cache)
@depend_on_related('CachedModelB')
def cached_data(self):
return {
'upper':self.b.data.upper(),
'lower':self.b.data.lower(),
}
class CachedModelB(models.Model):
data = models.CharField(max_length=255)
class Tag(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class TaggedModel(models.Model):
tags = GenericRelation(Tag)
@denormalized(models.TextField)
@depend_on_related(Tag)
def tags_string(self):
return ', '.join(sorted([t.name for t in self.tags.all()]))
class Meta:
abstract = True
class Forum(TaggedModel):
title = models.CharField(max_length=255)
# Simple count() aggregate
post_count = CountField('post_set')
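    # CacheKeyField stores a value that denorm refreshes when related Posts
    # change (per the depend_on_related declaration below), so it can be
    # mixed into cache keys for invalidation.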
cachekey = CacheKeyField()
cachekey.depend_on_related('Post')
@denormalized(models.CharField, max_length=255)
@depend_on_related('Post')
def author_names(self):
return ', '.join((m.author_name for m in self.post_set.all()))
@denormalized(models.ManyToManyField, 'Member', null=True, blank=True)
@depend_on_related('Post')
def authors(self):
return [m.author for m in self.post_set.all() if m.author]
    # let's say this forum supports subforums, sub-subforums and so forth
# so we can test depend_on_related('self') (for tree structures).
parent_forum = models.ForeignKey('self', blank=True, null=True)
@denormalized(models.TextField)
@depend_on_related('self', type='forward')
def path(self):
if self.parent_forum:
return self.parent_forum.path + self.title + '/'
else:
return '/' + self.title + '/'
class Post(TaggedModel):
forum = models.ForeignKey(Forum, blank=True, null=True)
author = models.ForeignKey('Member', blank=True, null=True)
response_to = models.ForeignKey('self', blank=True, null=True, related_name='responses')
title = models.CharField(max_length=255, blank=True)
# Brings down the forum title
@denormalized(models.CharField, max_length=255)
@depend_on_related(Forum)
def forum_title(self):
return self.forum.title
@denormalized(models.CharField, max_length=255)
@depend_on_related('Member', foreign_key="author")
def author_name(self):
if self.author:
return self.author.name
else:
return ''
@denormalized(models.PositiveIntegerField)
@depend_on_related('self', type='backward')
def response_count(self):
# Work around odd issue during testing with PostgresDB
if not self.pk:
return 0
rcount = self.responses.count()
rcount += sum((x.response_count for x in self.responses.all()))
return rcount
class Attachment(models.Model):
post = models.ForeignKey(Post, blank=True, null=True)
cachekey = CacheKeyField()
cachekey.depend_on_related('Post')
@denormalized(models.ForeignKey, Forum, blank=True, null=True)
@depend_on_related(Post)
def forum(self):
if self.post and self.post.forum:
return self.post.forum.pk
return None
class Member(models.Model):
first_name = models.CharField(max_length=255)
name = models.CharField(max_length=255)
bookmarks = models.ManyToManyField('Post', blank=True)
cachekey = CacheKeyField()
cachekey.depend_on_related('Post', foreign_key='bookmarks')
@denormalized(models.CharField, max_length=255)
def full_name(self):
return u"%s %s" % (self.first_name, self.name)
@denormalized(models.TextField)
@depend_on_related('Post', foreign_key="bookmarks")
def bookmark_titles(self):
if self.id:
return '\n'.join([p.title for p in self.bookmarks.all()])
class SkipPost(models.Model):
# Skip feature test main model.
text = models.TextField()
class SkipComment(models.Model):
post = models.ForeignKey(SkipPost)
text = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True, null=True, blank=True)
class Meta:
abstract = True
class SkipCommentWithoutSkip(SkipComment):
# Skip feature test model without a skip parameter on an updatable field.
    # The updatable field will not be skipped.
@denormalized(models.TextField)
@depend_on_related(SkipPost)
def post_text(self):
return self.post.text
class SkipCommentWithSkip(SkipComment):
# Skip feature test model with a skip parameter on an updatable field.
@denormalized(models.TextField, skip=('updated_on',))
@depend_on_related(SkipPost)
def post_text(self):
return self.post.text
class SkipCommentWithAttributeSkip(SkipComment):
@denormalized(models.TextField)
@depend_on_related(SkipPost)
def post_text(self):
return self.post.text
denorm_always_skip = ('updated_on',)
if not hasattr(django.db.backend,'sqlite3'):
class FilterSumModel(models.Model):
        # Sum() aggregate with a filter
active_item_sum = SumField('counts', field='active_item_count', filter = {'age__gte':18})
class FilterSumItem(models.Model):
parent = models.ForeignKey(FilterSumModel, related_name='counts')
age = models.IntegerField(default=18)
active_item_count = models.PositiveIntegerField(default=False)
class FilterCountModel(models.Model):
        # count() aggregate with a filter
active_item_count = CountField('items', filter = {'active__exact':True})
class FilterCountItem(models.Model):
parent = models.ForeignKey(FilterCountModel, related_name='items')
active = models.BooleanField(default=False)
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import urllib
from random import random
from time import time
from os.path import join
from swift import gettext_ as _
from collections import defaultdict, deque
import hashlib
from eventlet import sleep, Timeout
from eventlet.greenpool import GreenPool
from swift.common.daemon import Daemon
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, dump_recon_cache, split_path, \
Timestamp
from swift.common.http import HTTP_NOT_FOUND, HTTP_CONFLICT, \
HTTP_PRECONDITION_FAILED
from swift.container.reconciler import direct_delete_container_entry
MAX_OBJECTS_TO_CACHE = 100000
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden task accounts to discover objects
that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf, logger=None, swift=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.read_conf_for_queue_access(swift)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
# This option defines how long an un-processable expired object
# marker will be retried before it is abandoned. It is not coupled
# with the tombstone reclaim age in the consistency engine.
self.reclaim_age = int(conf.get('reclaim_age', 604800))
def read_conf_for_queue_access(self, swift):
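        # With the default prefix '.' and account name 'expiring_objects',
        # this resolves to the hidden '.expiring_objects' account.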
self.expiring_objects_account = \
(self.conf.get('auto_create_account_prefix') or '.') + \
(self.conf.get('expiring_objects_account_name') or
'expiring_objects')
        # Kept as a common parameter for the future general task queue.
self.task_container_prefix = ''
self.ic_conf_path = \
self.conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(self.conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
self.ic_conf_path, 'Swift Object Expirer', request_tries)
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
def report(self, final=False):
"""
        Emits a log line report of the progress so far, or the final progress
        if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %(time)ds; '
'%(objects)d objects expired') % {
'time': elapsed, 'objects': self.report_objects})
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %(time)ds; '
'%(objects)d objects expired') % {
'time': elapsed, 'objects': self.report_objects})
self.report_last_time = time()
def parse_task_obj(self, task_obj):
"""
:param task_obj: a task object name in format of
"<timestamp>-<target_account>/<target_container>" +
"/<target_obj>"
        :return: a 4-tuple of (delete_at_time, target_account,
                 target_container, target_obj)
"""
timestamp, target_path = task_obj.split('-', 1)
timestamp = Timestamp(timestamp)
target_account, target_container, target_obj = \
split_path('/' + target_path, 3, 3, True)
return timestamp, target_account, target_container, target_obj
def round_robin_order(self, task_iter):
"""
        Change the order of expiration tasks to avoid continuously deleting
        objects from the same container.
:param task_iter: An iterator of delete-task dicts, which should each
have a ``target_path`` key.
"""
obj_cache = defaultdict(deque)
cnt = 0
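        # Drain the per-container queues one task at a time, cycling through
        # the containers in sorted order so no single container dominates.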
def dump_obj_cache_in_round_robin():
while obj_cache:
for key in sorted(obj_cache):
if obj_cache[key]:
yield obj_cache[key].popleft()
else:
del obj_cache[key]
for delete_task in task_iter:
try:
target_account, target_container, _junk = \
split_path('/' + delete_task['target_path'], 3, 3, True)
cache_key = '%s/%s' % (target_account, target_container)
# sanity
except ValueError:
                self.logger.error('Unexpected error handling task %r' %
delete_task)
continue
obj_cache[cache_key].append(delete_task)
cnt += 1
if cnt > MAX_OBJECTS_TO_CACHE:
for task in dump_obj_cache_in_round_robin():
yield task
cnt = 0
for task in dump_obj_cache_in_round_robin():
yield task
def hash_mod(self, name, divisor):
"""
:param name: a task object name
:param divisor: a divisor number
:return: an integer to decide which expirer is assigned to the task
"""
# md5 is only used for shuffling mod
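        # e.g. with divisor=3 a task name always maps to the same value in
        # {0, 1, 2}, so exactly one of three expirers will claim it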
return int(hashlib.md5(name).hexdigest(), 16) % divisor
def iter_task_accounts_to_expire(self):
"""
Yields (task_account, my_index, divisor).
        my_index and divisor are used to assign each task object to exactly
        one expirer. In the expirer method, each expirer calculates the
        assigned index for every expiration task. The assigned index is in
        [0, 1, ..., divisor - 1]. Expirers have their own "my_index" for each
        task_account. The expirer whose "my_index" equals the assigned index
        executes the task. Because each expirer has a different "my_index",
        every task object is executed by only one expirer.
"""
if self.processes > 0:
yield self.expiring_objects_account, self.process, self.processes
else:
yield self.expiring_objects_account, 0, 1
def delete_at_time_of_task_container(self, task_container):
"""
get delete_at timestamp from task_container name
"""
# task_container name is timestamp
return Timestamp(task_container)
def iter_task_containers_to_expire(self, task_account):
"""
Yields task_container names under the task_account whose delete-at
timestamps are in the past.
"""
for c in self.swift.iter_containers(task_account,
prefix=self.task_container_prefix):
task_container = str(c['name'])
timestamp = self.delete_at_time_of_task_container(task_container)
if timestamp > Timestamp.now():
break
yield task_container
def iter_task_to_expire(self, task_account_container_list,
my_index, divisor):
"""
Yields a task-expiration info dict consisting of task_account,
task_container, task_object, delete_timestamp and target_path.
"""
for task_account, task_container in task_account_container_list:
for o in self.swift.iter_objects(task_account, task_container):
task_object = o['name'].encode('utf8')
try:
delete_timestamp, target_account, target_container, \
target_object = self.parse_task_obj(task_object)
except ValueError:
self.logger.exception('Unexpected error handling task %r' %
task_object)
continue
if delete_timestamp > Timestamp.now():
# don't yield objects that haven't reached their
# expiration date yet
break
# Only one expirer daemon assigned for one task
if self.hash_mod('%s/%s' % (task_container, task_object),
divisor) != my_index:
continue
yield {'task_account': task_account,
'task_container': task_container,
'task_object': task_object,
'target_path': '/'.join([
target_account, target_container, target_object]),
'delete_timestamp': delete_timestamp}
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug('Run begin')
task_account_container_list_to_delete = list()
for task_account, my_index, divisor in \
self.iter_task_accounts_to_expire():
container_count, obj_count = \
self.swift.get_account_info(task_account)
# the task account is skipped if it has no task containers
if not container_count:
continue
self.logger.info(_(
'Pass beginning for task account %(account)s; '
'%(container_count)s possible containers; '
'%(obj_count)s possible objects') % {
'account': task_account,
'container_count': container_count,
'obj_count': obj_count})
task_account_container_list = \
[(task_account, task_container) for task_container in
self.iter_task_containers_to_expire(task_account)]
task_account_container_list_to_delete.extend(
task_account_container_list)
# delete_task_iter is a generator that yields dicts of
# task_account, task_container, task_object, delete_timestamp and
# target_path, used to delete the actual object and pop the task
# from the queue.
delete_task_iter = \
self.round_robin_order(self.iter_task_to_expire(
task_account_container_list, my_index, divisor))
for delete_task in delete_task_iter:
pool.spawn_n(self.delete_object, **delete_task)
pool.waitall()
for task_account, task_container in \
task_account_container_list_to_delete:
try:
self.swift.delete_container(
task_account, task_container,
acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
except (Exception, Timeout) as err:
self.logger.exception(
_('Exception while deleting container %(account)s '
'%(container)s %(err)s') % {
'account': task_account,
'container': task_container, 'err': str(err)})
self.logger.debug('Run end')
self.report(final=True)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
def run_forever(self, *args, **kwargs):
"""
Executes passes forever, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon has no additional keyword args.
"""
sleep(random() * self.interval)
while True:
begin = time()
try:
self.run_once(*args, **kwargs)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
elapsed = time() - begin
if elapsed < self.interval:
sleep(random() * (self.interval - elapsed))
def get_process_values(self, kwargs):
"""
Sets self.processes and self.process from the kwargs if those
values exist; otherwise, leaves those values as they were set in
the config file.
:param kwargs: Keyword args passed into the run_forever(), run_once()
methods. They have values specified on the command
line when the daemon is run.
"""
if kwargs.get('processes') is not None:
self.processes = int(kwargs['processes'])
if kwargs.get('process') is not None:
self.process = int(kwargs['process'])
if self.process < 0:
raise ValueError(
'process must be an integer greater than or equal to 0')
if self.processes < 0:
raise ValueError(
'processes must be an integer greater than or equal to 0')
if self.processes and self.process >= self.processes:
raise ValueError(
'process must be less than processes')
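# Illustrative deployment (values are hypothetical): running three
# expirers with processes=3 and process=0, 1 and 2 respectively splits
# the queue so each task is handled by exactly one daemon; see
# hash_mod() and iter_task_to_expire() for the assignment.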
def delete_object(self, target_path, delete_timestamp,
task_account, task_container, task_object):
start_time = time()
try:
try:
self.delete_actual_object(target_path, delete_timestamp)
except UnexpectedResponse as err:
if err.resp.status_int not in {HTTP_NOT_FOUND,
HTTP_PRECONDITION_FAILED}:
raise
if float(delete_timestamp) > time() - self.reclaim_age:
# we'll have to retry the DELETE later
raise
self.pop_queue(task_account, task_container, task_object)
self.report_objects += 1
self.logger.increment('objects')
except UnexpectedResponse as err:
self.logger.increment('errors')
self.logger.error(
'Unexpected response while deleting object '
'%(account)s %(container)s %(obj)s: %(err)s' % {
'account': task_account, 'container': task_container,
'obj': task_object, 'err': str(err.resp.status_int)})
self.logger.debug(err.resp.body)
except (Exception, Timeout) as err:
self.logger.increment('errors')
self.logger.exception(
'Exception while deleting object %(account)s %(container)s '
'%(obj)s %(err)s' % {
'account': task_account, 'container': task_container,
'obj': task_object, 'err': str(err)})
self.logger.timing_since('timing', start_time)
self.report()
def pop_queue(self, task_account, task_container, task_object):
"""
Issue a delete object request to the task_container for the expiring
object queue entry.
"""
direct_delete_container_entry(self.swift.container_ring, task_account,
task_container, task_object)
def delete_actual_object(self, actual_obj, timestamp):
"""
Deletes the end-user object indicated by the actual object name given
'<account>/<container>/<object>' if and only if the X-Delete-At value
of the object is exactly the timestamp given.
:param actual_obj: The name of the end-user object to delete:
'<account>/<container>/<object>'
:param timestamp: The swift.common.utils.Timestamp instance the
X-Delete-At value must match to perform the actual
delete.
:raises UnexpectedResponse: if the delete was unsuccessful and
should be retried later
"""
path = '/v1/' + urllib.parse.quote(actual_obj.lstrip('/'))
self.swift.make_request(
'DELETE', path,
{'X-If-Delete-At': timestamp.normal,
'X-Timestamp': timestamp.normal,
'X-Backend-Clean-Expiring-Object-Queue': 'no'},
(2, HTTP_CONFLICT))
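# Illustrative request (the path is made up): for actual_obj
# 'AUTH_test/photos/cat.jpg' this issues
#   DELETE /v1/AUTH_test/photos/cat.jpg
# with X-If-Delete-At set to the queued timestamp, so an object that was
# re-uploaded with a different X-Delete-At is left untouched.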
|
|
import os
import sys
import asyncio
import logging
import functools
import traceback
import contextlib
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.glob as s_glob
import synapse.telepath as s_telepath
import synapse.lib.cmd as s_cmd
import synapse.lib.output as s_output
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
desc = '''
Manage permissions of users, roles, and objects in a remote cell.
'''
outp = None
min_authgate_vers = (0, 1, 33)
reqver = '>=0.2.0,<3.0.0'
denyallow = ['deny', 'allow']
def reprrule(rule):
head = denyallow[rule[0]]
text = '.'.join(rule[1])
return f'{head}: {text}'
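# Illustrative output (rule values are made up): the rule
# (True, ('node', 'add')) renders as 'allow: node.add' and
# (False, ('node', 'del')) renders as 'deny: node.del'.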
async def printuser(user, details=False, cell=None):
iden = user.get('iden')
name = user.get('name')
admin = user.get('admin')
authtype = user.get('type')
outp.printf(f'{name} ({iden})')
outp.printf(f'type: {authtype}')
if admin is not None:
outp.printf(f'admin: {admin}')
if authtype == 'user':
locked = user.get('locked')
outp.printf(f'locked: {locked}')
outp.printf('rules:')
i = 0
for rule in user.get('rules'):
rrep = reprrule(rule)
outp.printf(f' {i} {rrep}')
i += 1
for gateiden, gateinfo in user.get('authgates', {}).items():
outp.printf(f' auth gate: {gateiden}')
for rule in gateinfo.get('rules', ()):
rrep = reprrule(rule)
outp.printf(f' {i} {rrep}')
i += 1
outp.printf('')
if authtype == 'user':
outp.printf('roles:')
for rolename in user.get('roles'):
outp.printf(f' role: {rolename}')
if details:
i = 0
role = await cell.getAuthInfo(rolename)
for rule in role.get('rules', ()):
rrep = reprrule(rule)
outp.printf(f' {i} {rrep}')
i += 1
for gateiden, gateinfo in role.get('authgates', {}).items():
outp.printf(f' auth gate: {gateiden}')
for rule in gateinfo.get('rules', ()):
rrep = reprrule(rule)
outp.printf(f' {i} {rrep}')
i += 1
async def handleModify(opts):
cell_supports_authgate = False
if opts.object and not opts.addrule:
outp.printf('--object option only valid with --addrule')
return -1
try:
async with await s_telepath.openurl(opts.cellurl) as cell:
async def useriden(name):
udef = await cell.getUserDefByName(name)
return udef['iden']
async def roleiden(name):
rdef = await cell.getRoleDefByName(name)
return rdef['iden']
s_version.reqVersion(cell._getSynVers(), reqver)
if cell._getSynVers() >= min_authgate_vers:
cell_supports_authgate = True
if opts.adduser:
outp.printf(f'adding user: {opts.name}')
user = await cell.addUser(opts.name)
if opts.deluser:
outp.printf(f'deleting user: {opts.name}')
await cell.delUser(await useriden(opts.name))
if opts.addrole:
outp.printf(f'adding role: {opts.name}')
user = await cell.addRole(opts.name)
if opts.delrole:
outp.printf(f'deleting role: {opts.name}')
await cell.delRole(await roleiden(opts.name))
if opts.passwd:
outp.printf(f'setting passwd for: {opts.name}')
await cell.setUserPasswd(await useriden(opts.name), opts.passwd)
if opts.grant:
outp.printf(f'granting {opts.grant} to: {opts.name}')
await cell.addUserRole(await useriden(opts.name), await roleiden(opts.grant))
if opts.setroles:
outp.printf(f'setting roles {opts.setroles} for: {opts.name}')
roles = [await roleiden(role) for role in opts.setroles]
await cell.setUserRoles(await useriden(opts.name), roles)
if opts.revoke:
outp.printf(f'revoking {opts.revoke} from: {opts.name}')
await cell.delUserRole(await useriden(opts.name), await roleiden(opts.revoke))
if opts.admin:
outp.printf(f'granting admin status: {opts.name}')
await cell.setAuthAdmin(opts.name, True)
if opts.noadmin:
outp.printf(f'revoking admin status: {opts.name}')
await cell.setAuthAdmin(opts.name, False)
if opts.lock:
outp.printf(f'locking user: {opts.name}')
await cell.setUserLocked(await useriden(opts.name), True)
if opts.unlock:
outp.printf(f'unlocking user: {opts.name}')
await cell.setUserLocked(await useriden(opts.name), False)
if opts.addrule:
text = opts.addrule
# TODO: syntax for index...
allow = True
if text.startswith('!'):
allow = False
text = text[1:]
rule = (allow, text.split('.'))
outp.printf(f'adding rule to {opts.name}: {rule!r}')
if cell_supports_authgate:
await cell.addAuthRule(opts.name, rule, indx=None, gateiden=opts.object)
else:
await cell.addAuthRule(opts.name, rule, indx=None)
if opts.delrule is not None:
ruleind = opts.delrule
outp.printf(f'deleting rule index: {ruleind}')
user = await cell.getAuthInfo(opts.name)
userrules = user.get('rules', ())
delrule = None
delgate = None
if ruleind < len(userrules):
delrule = userrules[ruleind]
else:
i = len(userrules)
for gateiden, gateinfo in user.get('authgates', {}).items():
for rule in gateinfo.get('rules', ()):
if i == ruleind:
delrule = rule
delgate = gateiden
i += 1
if delrule is not None:
await cell.delAuthRule(opts.name, delrule, gateiden=delgate)
else:
outp.printf(f'rule index is out of range')
try:
user = await cell.getAuthInfo(opts.name)
except s_exc.NoSuchName:
outp.printf(f'no such user: {opts.name}')
return 1
await printuser(user)
except s_exc.BadVersion as e:
valu = s_version.fmtVersion(*e.get('valu'))
outp.printf(f'Cell version {valu} is outside of the cellauth supported range ({reqver}).')
outp.printf(f'Please use a version of Synapse which supports {valu}; current version is {s_version.verstring}.')
return 1
except (Exception, asyncio.CancelledError) as e: # pragma: no cover
if opts.debug:
traceback.print_exc()
outp.printf(str(e))
return 1
else:
return 0
async def handleList(opts):
try:
async with await s_telepath.openurl(opts.cellurl) as cell:
s_version.reqVersion(cell._getSynVers(), reqver)
if opts.name:
user = await cell.getAuthInfo(opts.name[0])
if user is None:
outp.printf(f'no such user: {opts.name[0]}')
return 1
await printuser(user, cell=cell, details=opts.detail)
return 0
outp.printf(f'getting users and roles')
outp.printf('users:')
for user in await cell.getAuthUsers():
outp.printf(f' {user.get("name")}')
outp.printf('roles:')
for role in await cell.getAuthRoles():
outp.printf(f' {role.get("name")}')
except s_exc.BadVersion as e:
valu = s_version.fmtVersion(*e.get('valu'))
outp.printf(f'Cell version {valu} is outside of the cellauth supported range ({reqver}).')
outp.printf(f'Please use a version of Synapse which supports {valu}; current version is {s_version.verstring}.')
return 1
except (Exception, asyncio.CancelledError) as e: # pragma: no cover
if opts.debug:
traceback.print_exc()
outp.printf(str(e))
return 1
else:
return 0
async def main(argv, outprint=None):
if outprint is None: # pragma: no cover
outprint = s_output.OutPut()
global outp
outp = outprint
async with contextlib.AsyncExitStack() as cm:
teleyaml = s_common.getSynPath('telepath.yaml')
if os.path.isfile(teleyaml):
fini = await s_telepath.loadTeleEnv(teleyaml)
cm.push_async_callback(fini)
pars = makeargparser()
try:
opts = pars.parse_args(argv)
except s_exc.ParserExit:
return -1
retn = await opts.func(opts)
return retn
def makeargparser():
global outp
pars = s_cmd.Parser('synapse.tools.cellauth', outp=outp, description=desc)
pars.add_argument('--debug', action='store_true', help='Show debug traceback on error.')
pars.add_argument('cellurl', help='The telepath URL to connect to a cell.')
subpars = pars.add_subparsers(required=True,
title='subcommands',
dest='cmd',
parser_class=functools.partial(s_cmd.Parser, outp=outp))
# list
pars_list = subpars.add_parser('list', help='List users/roles')
pars_list.add_argument('name', nargs='*', default=None, help='The name of the user/role to list')
pars_list.add_argument('-d', '--detail', default=False, action='store_true',
help='Show rule details for roles associated with a user.')
pars_list.set_defaults(func=handleList)
# create / modify / delete
pars_mod = subpars.add_parser('modify', help='Create, modify, or delete the named user/role')
muxp = pars_mod.add_mutually_exclusive_group()
muxp.add_argument('--adduser', action='store_true', help='Add the named user to the cortex.')
muxp.add_argument('--addrole', action='store_true', help='Add the named role to the cortex.')
muxp.add_argument('--deluser', action='store_true', help='Delete the named user from the cortex.')
muxp.add_argument('--delrole', action='store_true', help='Delete the named role from the cortex.')
muxp.add_argument('--admin', action='store_true', help='Grant admin powers to the user/role.')
muxp.add_argument('--noadmin', action='store_true', help='Revoke admin powers from the user/role.')
muxp.add_argument('--lock', action='store_true', help='Lock the user account.')
muxp.add_argument('--unlock', action='store_true', help='Unlock the user account.')
muxp.add_argument('--passwd', help='Set the user password.')
muxp.add_argument('--grant', help='Grant the specified role to the user.')
muxp.add_argument('--revoke', help='Revoke the specified role from the user.')
muxp.add_argument('--setroles', help='Set the roles for the user.', nargs='+')
muxp.add_argument('--addrule', help='Add the given rule to the user/role.')
muxp.add_argument('--delrule', type=int, help='Delete the given rule number from the user/role.')
pars_mod.add_argument('--object', type=str, help='The iden of the object to which to apply the new rule. Only '
'supported on Cells running Synapse >= 0.1.33.')
pars_mod.add_argument('name', help='The user/role to modify.')
pars_mod.set_defaults(func=handleModify)
return pars
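# Illustrative invocations (the cell URL is made up; assumes the tool is
# run as a module):
#   python -m synapse.tools.cellauth tcp://user:passwd@cell:27492/ list
#   python -m synapse.tools.cellauth tcp://user:passwd@cell:27492/ modify --adduser visi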
async def _main(): # pragma: no cover
s_common.setlogging(logger, 'DEBUG')
return await main(sys.argv[1:])
if __name__ == '__main__': # pragma: no cover
sys.exit(s_glob.sync(_main()))
|
|
from __future__ import unicode_literals, division, absolute_import
import copy
from datetime import datetime, timedelta, time as dt_time
import fnmatch
from hashlib import md5
import logging
import Queue
import threading
import time
import sys
from sqlalchemy import Column, String, DateTime
from flexget.config_schema import register_config_key, parse_time
from flexget.db_schema import versioned_base
from flexget.event import event
from flexget.logger import FlexGetFormatter
from flexget.manager import Session
log = logging.getLogger('scheduler')
Base = versioned_base('scheduler', 0)
UNITS = ['seconds', 'minutes', 'hours', 'days', 'weeks']
WEEKDAYS = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
yaml_schedule = {
'type': 'object',
'properties': {
'seconds': {'type': 'number'},
'minutes': {'type': 'number'},
'hours': {'type': 'number'},
'days': {'type': 'number'},
'weeks': {'type': 'number'},
'at_time': {'type': 'string', 'format': 'time'},
'on_day': {'type': 'string', 'enum': WEEKDAYS}
},
# Only allow one unit to be specified
'oneOf': [{'required': [unit]} for unit in UNITS],
'error_oneOf': 'Interval must be specified as one of %s' % ', '.join(UNITS),
'dependencies': {
'at_time': {
'properties': {'days': {'type': 'integer'}, 'weeks': {'type': 'integer'}},
'oneOf': [{'required': ['days']}, {'required': ['weeks']}],
'error': 'Interval must be an integer number of days or weeks when `at_time` is specified.',
},
'on_day': {
'properties': {'weeks': {'type': 'integer'}},
'required': ['weeks'],
'error': 'Interval must be an integer number of weeks when `on_day` is specified.'
}
},
'additionalProperties': False
}
main_schema = {
'type': 'array',
'items': {
'properties': {
'tasks': {'type': ['array', 'string'], 'items': {'type': 'string'}},
'interval': yaml_schedule
},
'required': ['tasks', 'interval'],
'additionalProperties': False
}
}
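# An example config accepted by this schema (values are illustrative):
#
#   schedules:
#     - tasks: '*'
#       interval:
#         hours: 1
#     - tasks: [tv, movies]
#       interval:
#         weeks: 1
#         on_day: friday
#         at_time: '03:00'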
class DBTrigger(Base):
__tablename__ = 'scheduler_triggers'
uid = Column(String, primary_key=True) # Hash of all trigger properties, to uniquely identify the trigger
last_run = Column(DateTime)
def __init__(self, uid, last_run=None):
self.uid = uid
self.last_run = last_run
@event('manager.config-loaded')
def create_triggers(manager):
manager.scheduler.load_schedules()
class Scheduler(threading.Thread):
# We use a regular list for periodic jobs, so you must hold this lock while using it
triggers_lock = threading.Lock()
def __init__(self, manager):
super(Scheduler, self).__init__(name='scheduler')
self.daemon = True
self.run_queue = Queue.PriorityQueue()
self.manager = manager
self.triggers = []
self.run_schedules = True
self._shutdown_now = False
self._shutdown_when_finished = False
def load_schedules(self):
"""Clears current schedules and loads them from the config."""
with self.triggers_lock:
self.triggers = []
if 'schedules' not in self.manager.config:
log.info('No schedules defined in config. Defaulting to run all tasks on a 1 hour interval.')
for item in self.manager.config.get('schedules', [{'tasks': ['*'], 'interval': {'hours': 1}}]):
tasks = item['tasks']
if not isinstance(tasks, list):
tasks = [tasks]
self.triggers.append(Trigger(item['interval'], tasks, options={'cron': True}))
def execute(self, options=None, output=None, priority=1, trigger_id=None):
"""
Add a task to the scheduler to be run immediately.
:param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution
:param output: If a file-like object is specified here, log messages and stdout from the execution will be
written to it.
:param priority: If there are other executions waiting to be run, they will be run in priority order,
lowest first.
:param trigger_id: If a trigger_id is specified, it will be attached to the :class:`Job` instance added to the
run queue. Used to check that triggers are not fired faster than they can be executed.
:returns: a list of :class:`threading.Event` instances which will be
set when each respective task has finished running
"""
if options is None:
options = copy.copy(self.manager.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.manager.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
tasks = self.manager.tasks
# Handle --tasks
if options.tasks:
# Create list of tasks to run, preserving order
tasks = []
for arg in options.tasks:
matches = [t for t in self.manager.tasks if fnmatch.fnmatchcase(t.lower(), arg.lower())]
if not matches:
log.error('`%s` does not match any tasks' % arg)
continue
tasks.extend(m for m in matches if m not in tasks)
# Set the option as a list of matching task names so plugins can use it easily
options.tasks = tasks
# TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
tasks = sorted(tasks, key=lambda t: self.manager.config['tasks'][t].get('priority', 65535))
finished_events = []
for task in tasks:
job = Job(task, options=options, output=output, priority=priority, trigger_id=trigger_id)
self.run_queue.put(job)
finished_events.append(job.finished_event)
return finished_events
def queue_pending_jobs(self):
# Add pending jobs to the run queue
with self.triggers_lock:
for trigger in self.triggers:
if trigger.should_run:
with self.run_queue.mutex:
if any(j.trigger_id == trigger.uid for j in self.run_queue.queue):
log.error('Not firing schedule %r. Tasks from last run have still not finished.' % trigger)
log.error('You may need to increase the interval for this schedule.')
continue
options = dict(trigger.options)
# If the user has specified all tasks with '*', don't add tasks option at all, so that manual
# tasks are not executed
if trigger.tasks != ['*']:
options['tasks'] = trigger.tasks
self.execute(options=options, priority=5, trigger_id=trigger.uid)
trigger.trigger()
def start(self, run_schedules=None):
if run_schedules is not None:
self.run_schedules = run_schedules
super(Scheduler, self).start()
def run(self):
from flexget.task import Task, TaskAbort
while not self._shutdown_now:
if self.run_schedules:
self.queue_pending_jobs()
# Grab the first job from the run queue and do it
try:
job = self.run_queue.get(timeout=0.5)
except Queue.Empty:
if self._shutdown_when_finished:
self._shutdown_now = True
continue
if job.output:
# Hook up our log and stdout to give back to the requester
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = Tee(job.output, sys.stdout), Tee(job.output, sys.stderr)
# TODO: Use a filter to capture only the logging for this execution?
streamhandler = logging.StreamHandler(job.output)
streamhandler.setFormatter(FlexGetFormatter())
logging.getLogger().addHandler(streamhandler)
try:
Task(self.manager, job.task, options=job.options).execute()
except TaskAbort as e:
log.debug('task %s aborted: %r' % (job.task, e))
finally:
self.run_queue.task_done()
job.finished_event.set()
if job.output:
sys.stdout, sys.stderr = old_stdout, old_stderr
logging.getLogger().removeHandler(streamhandler)
remaining_jobs = self.run_queue.qsize()
if remaining_jobs:
log.warning('Scheduler shut down with %s jobs remaining in the queue to run.' % remaining_jobs)
log.debug('scheduler shut down')
def wait(self):
"""
Waits for the thread to exit.
Similar to :meth:`Thread.join`, except that ctrl-c can still be caught.
"""
while self.is_alive():
time.sleep(0.1)
def shutdown(self, finish_queue=True):
"""
Ends the thread. If a job is running, waits for it to finish first.
:param bool finish_queue: If this is True, shutdown will wait until all queued tasks have finished.
"""
if finish_queue:
self._shutdown_when_finished = True
else:
self._shutdown_now = True
class Job(object):
"""A job for the scheduler to execute."""
#: Used to determine which job to run first when multiple jobs are waiting.
priority = 1
#: A datetime object when the job is scheduled to run. Jobs are sorted by this value when priority is the same.
run_at = None
#: The name of the task to execute
task = None
#: Options to run the task with
options = None
#: :class:`BufferQueue` to write the task execution output to. '[[END]]' will be sent to the queue when complete
output = None
def __init__(self, task, options=None, output=None, priority=1, trigger_id=None):
self.task = task
self.options = options
self.output = output
self.priority = priority
self.run_at = datetime.now()
self.finished_event = threading.Event()
# Used to make sure a certain trigger doesn't add jobs faster than they can run
self.trigger_id = trigger_id
# Lower priority if cron flag is present in either dict or Namespace form
try:
cron = self.options.cron
except AttributeError:
try:
cron = self.options.get('cron')
except AttributeError:
cron = False
if cron:
self.priority = 5
def __lt__(self, other):
return (self.priority, self.run_at) < (other.priority, other.run_at)
class Trigger(object):
def __init__(self, interval, tasks, options=None):
"""
:param dict interval: An interval dictionary from the config.
:param list tasks: List of task names specified to run. Wildcards are allowed.
:param dict options: Dictionary of options that should be applied to this run.
"""
self.tasks = tasks
self.options = options
self.unit = None
self.amount = None
self.on_day = None
self.at_time = None
self.last_run = None
self.run_at = None
self.interval = interval
self._get_db_last_run()
self.schedule_next_run()
@property
def uid(self):
"""A unique id which describes this trigger."""
# Determine uniqueness based on interval,
hashval = md5(str(sorted(self.interval)))
# and tasks run on that interval.
hashval.update(','.join(self.tasks).encode('utf-8'))
return hashval.hexdigest()
# Handles getting and setting interval in form validated by config
@property
def interval(self):
interval = {self.unit: self.amount}
if self.at_time:
interval['at_time'] = self.at_time
if self.on_day:
interval['on_day'] = self.on_day
return interval
@interval.setter
def interval(self, interval):
if not interval:
for attr in ['unit', 'amount', 'on_day', 'at_time']:
setattr(self, attr, None)
return
for unit in UNITS:
self.amount = interval.pop(unit, None)
if self.amount:
self.unit = unit
break
else:
raise ValueError('Schedule interval must provide a unit and amount')
self.at_time = interval.pop('at_time', None)
if self.at_time and not isinstance(self.at_time, dt_time):
self.at_time = parse_time(self.at_time)
self.on_day = interval.pop('on_day', None)
if interval:
raise ValueError('the following are not valid keys in a schedule interval dictionary: %s' %
', '.join(interval))
self.schedule_next_run()
def trigger(self):
"""Call when trigger is activated. Records current run time and schedules next run."""
self.last_run = datetime.now()
self._set_db_last_run()
self.schedule_next_run()
@property
def should_run(self):
return self.run_at and datetime.now() >= self.run_at
@property
def period(self):
return timedelta(**{self.unit: self.amount})
def schedule_next_run(self):
last_run = self.last_run
if not last_run:
# Pretend we ran one period ago
last_run = datetime.now() - self.period
if self.on_day:
days_ahead = WEEKDAYS.index(self.on_day) - last_run.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
self.run_at = last_run + timedelta(days=days_ahead, weeks=self.amount-1)
else:
self.run_at = last_run + self.period
if self.at_time:
self.run_at = self.run_at.replace(hour=self.at_time.hour, minute=self.at_time.minute,
second=self.at_time.second)
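# Illustrative scheduling (dates are made up): with weeks=1,
# on_day='friday' and at_time=03:00, a last_run on a Wednesday puts
# run_at on the coming Friday at 03:00; without on_day, run_at is simply
# last_run + self.period.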
def _get_db_last_run(self):
session = Session()
try:
db_trigger = session.query(DBTrigger).get(self.uid)
if db_trigger:
self.last_run = db_trigger.last_run
log.debug('loaded last_run from the database')
finally:
session.close()
def _set_db_last_run(self):
session = Session()
try:
db_trigger = session.query(DBTrigger).get(self.uid)
if not db_trigger:
db_trigger = DBTrigger(self.uid)
session.add(db_trigger)
db_trigger.last_run = self.last_run
session.commit()
finally:
session.close()
log.debug('recorded last_run to the database')
def __repr__(self):
return 'Trigger(tasks=%r, amount=%r, unit=%r)' % (self.tasks, self.amount, self.unit)
class Tee(object):
"""Used so that output to sys.stdout can be grabbed and still displayed."""
def __init__(self, *files):
self.files = files
def __getattr__(self, meth):
def method_runner(*args, **kwargs):
for f in self.files:
try:
getattr(f, meth)(*args, **kwargs)
except AttributeError:
# We don't really care if all of our 'files' fully support the file api
pass
return method_runner
class BufferQueue(Queue.Queue):
"""Used in place of a file-like object to capture text and access it safely from another thread."""
# Allow access to the Empty error from here
Empty = Queue.Empty
def write(self, line):
self.put(line)
@event('config.register')
def register_config():
register_config_key('schedules', main_schema)
|
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class ServersSampleBase(api_sample_base.ApiSampleTestBaseV3):
def _post_server(self, use_common_server_api_samples=True):
# param use_common_server_api_samples: Boolean to set whether tests use
# common sample files for server post request and response.
# Default is True, which means the _get_sample_path method will fetch
# the common server sample files from the 'servers' directory.
# Set to False if tests need to use extension-specific sample files.
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'glance_host': self._get_glance_host(),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': '80fe::'
}
orig_value = self.__class__._use_common_server_api_samples
orig_sample_dir = self.__class__.sample_dir
try:
self.__class__._use_common_server_api_samples = (
use_common_server_api_samples)
# TODO(gmann): This is a temporary hack to let other tests
# inherited from ServersSampleBase run successfully.
# Once all inherited tests are merged, the if condition
# below needs to be removed.
if ((self._api_version == 'v3') and
use_common_server_api_samples):
self.__class__._use_common_server_api_samples = False
self.__class__.sample_dir = 'servers_v21'
response = self._do_post('servers', 'server-post-req', subs)
subs = self._get_regexes()
status = self._verify_response('server-post-resp', subs,
response, 202)
return status
finally:
self.__class__._use_common_server_api_samples = orig_value
self.__class__.sample_dir = orig_sample_dir
class ServersSampleJsonTest(ServersSampleBase):
sample_dir = 'servers'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
request_api_version = None
def _get_flags(self):
f = super(ServersSampleBase, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips.Extended_ips')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.extended_ips_mac.'
'Extended_ips_mac')
return f
def test_servers_post(self):
return self._post_server()
def test_servers_get(self):
uuid = self.test_servers_post()
response = self._do_get('servers/%s' % uuid,
api_version=self.request_api_version)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('server-get-resp', subs, response, 200)
def test_servers_list(self):
uuid = self._post_server()
response = self._do_get('servers',
api_version=self.request_api_version)
subs = self._get_regexes()
subs['id'] = uuid
self._verify_response('servers-list-resp', subs, response, 200)
def test_servers_details(self):
uuid = self._post_server()
response = self._do_get('servers/detail',
api_version=self.request_api_version)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('servers-details-resp', subs, response, 200)
class ServersSampleJson29Test(ServersSampleJsonTest):
request_api_version = '2.9'
# NOTE(gmann): microversion tests do not need to run for v2 API
# so defining scenarios only for v2.9 which will run the original tests
# by appending '(v2_9)' in test_id.
scenarios = [('v2_9', {})]
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _get_flags(self):
f = super(ServerSortKeysJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.server_sort_keys.'
'Server_sort_keys')
return f
def test_servers_list(self):
self._post_server()
response = self._do_get('servers?sort_key=display_name&sort_dir=asc')
subs = self._get_regexes()
self._verify_response('server-sort-keys-list-resp', subs, response,
200)
class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
all_extensions = True
class ServersActionsJsonTest(ServersSampleBase):
sample_dir = 'servers'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _test_server_action(self, uuid, action, req_tpl,
subs=None, resp_tpl=None, code=202):
subs = subs or {}
subs.update({'action': action,
'glance_host': self._get_glance_host()})
response = self._do_post('servers/%s/action' % uuid,
req_tpl,
subs)
if resp_tpl:
subs.update(self._get_regexes())
self._verify_response(resp_tpl, subs, response, code)
else:
self.assertEqual(response.status_code, code)
self.assertEqual(response.content, "")
def test_server_reboot_hard(self):
uuid = self._post_server()
self._test_server_action(uuid, "reboot",
'server-action-reboot',
{"type": "HARD"})
def test_server_reboot_soft(self):
uuid = self._post_server()
self._test_server_action(uuid, "reboot",
'server-action-reboot',
{"type": "SOFT"})
def test_server_rebuild(self):
uuid = self._post_server()
image = fake.get_valid_image_id()
subs = {'host': self._get_host(),
'uuid': image,
'name': 'foobar',
'pass': 'seekr3t',
'hostid': '[a-f0-9]+',
'access_ip_v4': '1.2.3.4',
'access_ip_v6': '80fe::',
}
self._test_server_action(uuid, 'rebuild',
'server-action-rebuild',
subs,
'server-action-rebuild-resp')
def test_server_resize(self):
self.flags(allow_resize_to_same_host=True)
uuid = self._post_server()
self._test_server_action(uuid, "resize",
'server-action-resize',
{"id": 2,
"host": self._get_host()})
return uuid
def test_server_revert_resize(self):
uuid = self.test_server_resize()
self._test_server_action(uuid, "revertResize",
'server-action-revert-resize')
def test_server_confirm_resize(self):
uuid = self.test_server_resize()
self._test_server_action(uuid, "confirmResize",
'server-action-confirm-resize',
code=204)
def test_server_create_image(self):
uuid = self._post_server()
self._test_server_action(uuid, 'createImage',
'server-action-create-image',
{'name': 'foo-image'})
class ServersActionsAllJsonTest(ServersActionsJsonTest):
all_extensions = True
class ServerStartStopJsonTest(ServersSampleBase):
sample_dir = 'servers'
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _get_flags(self):
f = super(ServerStartStopJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.server_start_stop.'
'Server_start_stop')
return f
def _test_server_action(self, uuid, action, req_tpl):
response = self._do_post('servers/%s/action' % uuid,
req_tpl,
{'action': action})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_server_start(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-stop', 'server-action-stop')
self._test_server_action(uuid, 'os-start', 'server-action-start')
def test_server_stop(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-stop', 'server-action-stop')
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsappflowcollector(base_resource) :
""" Configuration for appflowCollector resource. """
def __init__(self) :
self._name = ""
self._ipaddress = ""
self._port = 0
self.___count = 0
@property
def name(self) :
ur"""Name of the AppFlow collector.<br/>Minimum length = 1<br/>Maximum length = 127.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the AppFlow collector.<br/>Minimum length = 1<br/>Maximum length = 127
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ipaddress(self) :
ur"""The IPv4 address of the AppFlow collector.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
ur"""The IPv4 address of the AppFlow collector.
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def port(self) :
ur"""The UDP port on which the AppFlow collector is listening.<br/>Default value: 4739.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
ur"""The UDP port on which the AppFlow collector is listening.<br/>Default value: 4739
"""
try :
self._port = port
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsappflowcollector_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsappflowcollector
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add nsappflowcollector.
"""
try :
if type(resource) is not list :
addresource = nsappflowcollector()
addresource.name = resource.name
addresource.ipaddress = resource.ipaddress
addresource.port = resource.port
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nsappflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].ipaddress = resource[i].ipaddress
addresources[i].port = resource[i].port
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete nsappflowcollector.
"""
try :
if type(resource) is not list :
deleteresource = nsappflowcollector()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nsappflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nsappflowcollector() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the nsappflowcollector resources that are configured on netscaler.
"""
try :
if not name :
obj = nsappflowcollector()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = nsappflowcollector()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [nsappflowcollector() for _ in range(len(name))]
obj = [nsappflowcollector() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = nsappflowcollector()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of nsappflowcollector resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = nsappflowcollector()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the nsappflowcollector resources configured on NetScaler.
"""
try :
obj = nsappflowcollector()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of nsappflowcollector resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nsappflowcollector()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class nsappflowcollector_response(base_response) :
def __init__(self, length=1) :
self.nsappflowcollector = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsappflowcollector = [nsappflowcollector() for _ in range(length)]
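# Minimal usage sketch (connection details and values are made up; assumes
# the standard nitro_service client from this SDK):
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   collector = nsappflowcollector()
#   collector.name = "col1"
#   collector.ipaddress = "10.0.0.2"
#   collector.port = 4739
#   nsappflowcollector.add(client, collector)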
|
|
import urllib2
import os.path
import sys
import re
default_encoding = sys.getfilesystemencoding()
if default_encoding.lower() == 'ascii':
default_encoding = 'utf-8'
def to_native_string(s):
if type(s) == unicode:
return s.encode(default_encoding)
else:
return s
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
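# Illustrative example (pattern and text are made up):
#   r1(r'title="([^"]+)"', '<a title="hello">') returns 'hello', and
#   r1_of tries each pattern in turn, returning the first group that matches.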
def unescape_html(html):
import xml.sax.saxutils
html = xml.sax.saxutils.unescape(html)
html = re.sub(r'&#(\d+);', lambda x: unichr(int(x.group(1))), html)
return html
def ungzip(s):
from StringIO import StringIO
import gzip
buffer = StringIO(s)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(s):
import zlib
return zlib.decompress(s, -zlib.MAX_WBITS)
def get_response(url):
response = urllib2.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
def get_html(url, encoding=None):
content = get_response(url).data
if encoding:
content = content.decode(encoding)
return content
def get_decoded_html(url):
response = get_response(url)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset)
else:
return data
def url_save(url, filepath, bar, refer=None):
headers = {}
if refer:
headers['Referer'] = refer
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
file_size = int(response.headers['content-length'])
assert file_size
if os.path.exists(filepath):
if file_size == os.path.getsize(filepath):
if bar:
bar.done()
print 'Skip %s: file already exists' % os.path.basename(filepath)
return
else:
if bar:
bar.done()
print 'Overwriting', os.path.basename(filepath), '...'
with open(filepath, 'wb') as output:
received = 0
while True:
buffer = response.read(1024*256)
if not buffer:
break
received += len(buffer)
output.write(buffer)
if bar:
bar.update_received(len(buffer))
assert received == file_size == os.path.getsize(filepath), '%s == %s == %s' % (received, file_size, os.path.getsize(filepath))
def url_size(url):
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
response = urllib2.urlopen(request)
size = int(response.headers['content-length'])
return size
def url_size(url):
size = int(urllib2.urlopen(url).headers['content-length'])
return size
def urls_size(urls):
return sum(map(url_size, urls))
class SimpleProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar_size = 40
percent = self.received*100.0/self.total_size
if percent > 100:
percent = 100.0
bar_rate = 100.0 / bar_size
dots = percent / bar_rate
dots = int(dots)
plus = percent / bar_rate - dots
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '-'
else:
plus = ''
bar = '=' * dots + plus
bar = '{0:>3.0f}% [{1:<40}] {2}/{3}'.format(percent, bar, self.current_piece, self.total_pieces)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>3}%[{1:<40}] {2}/{3}'.format('?', '?'*40, self.current_piece, self.total_pieces)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True):
assert urls
assert ext in ('flv', 'mp4')
if not total_size:
try:
total_size = urls_size(urls)
except:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
pass
title = to_native_string(title)
title = escape_file_path(title)
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
if total_size:
if os.path.exists(filepath) and os.path.getsize(filepath) >= total_size * 0.9:
print 'Skip %s: file already exists' % filepath
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print 'Downloading %s ...' % filename
url_save(url, filepath, bar, refer=refer)
bar.done()
else:
flvs = []
print 'Downloading %s.%s ...' % (title, ext)
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
flvs.append(filepath)
#print 'Downloading %s [%s/%s]...' % (filename, i+1, len(urls))
bar.update_piece(i+1)
url_save(url, filepath, bar, refer=refer)
bar.done()
if not merge:
return
if ext == 'flv':
from flv_join import concat_flvs
concat_flvs(flvs, os.path.join(output_dir, title+'.flv'))
for flv in flvs:
os.remove(flv)
elif ext == 'mp4':
from mp4_join import concat_mp4s
concat_mp4s(flvs, os.path.join(output_dir, title+'.mp4'))
for flv in flvs:
os.remove(flv)
else:
print "Can't join %s files" % ext
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Play list is not supported for '+name)
return f
def script_main(script_name, download, download_playlist=None):
if download_playlist:
help = 'python %s.py [--playlist] [-c|--create-dir] [--no-merge] url ...' % script_name
short_opts = 'hc'
opts = ['help', 'playlist', 'create-dir', 'no-merge']
else:
help = 'python %s.py [--no-merge] url ...' % script_name
short_opts = 'h'
opts = ['help', 'no-merge']
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError, err:
print help
sys.exit(1)
playlist = False
create_dir = False
merge = True
for o, a in opts:
if o in ('-h', '--help'):
print help
sys.exit()
elif o in ('--playlist',):
playlist = True
elif o in ('-c', '--create-dir'):
create_dir = True
elif o in ('--no-merge',):
merge = False
else:
print help
sys.exit(1)
if not args:
print help
sys.exit(1)
for url in args:
if playlist:
download_playlist(url, create_dir=create_dir, merge=merge)
else:
download(url, merge=merge)
|
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import auth as ks_auth
from keystoneclient import session as ks_session
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy.orm import attributes as sql_attr
from neutron.common import constants
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)
VIF_UNPLUGGED = 'network-vif-unplugged'
VIF_PLUGGED = 'network-vif-plugged'
VIF_DELETED = 'network-vif-deleted'
NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed',
constants.PORT_STATUS_ERROR: 'failed',
constants.PORT_STATUS_DOWN: 'completed'}
NOVA_API_VERSION = "2"
class Notifier(object):
def __init__(self):
# FIXME(jamielennox): A notifier is being created for each Controller
# and each Notifier is handling its own auth. That means that we are
# authenticating the exact same thing len(controllers) times. This
# should be an easy thing to optimize.
auth = ks_auth.load_from_conf_options(cfg.CONF, 'nova')
session = ks_session.Session.load_from_conf_options(cfg.CONF,
'nova',
auth=auth)
extensions = [
ext for ext in nova_client.discover_extensions(NOVA_API_VERSION)
if ext.name == "server_external_events"]
self.nclient = nova_client.Client(
NOVA_API_VERSION,
session=session,
region_name=cfg.CONF.nova.region_name,
extensions=extensions)
self.batch_notifier = batch_notifier.BatchNotifier(
cfg.CONF.send_events_interval, self.send_events)
def _is_compute_port(self, port):
try:
if (port['device_id'] and uuidutils.is_uuid_like(port['device_id'])
and port['device_owner'].startswith(
constants.DEVICE_OWNER_COMPUTE_PREFIX)):
return True
except (KeyError, AttributeError):
pass
return False
def _get_network_changed_event(self, device_id):
return {'name': 'network-changed',
'server_uuid': device_id}
def _get_port_delete_event(self, port):
return {'server_uuid': port['device_id'],
'name': VIF_DELETED,
'tag': port['id']}
@property
def _plugin(self):
# NOTE(arosen): this cannot be set in __init__ currently since
# this class is initialized at the same time as NeutronManager()
# which is decorated with synchronized()
if not hasattr(self, '_plugin_ref'):
self._plugin_ref = manager.NeutronManager.get_plugin()
return self._plugin_ref
def send_network_change(self, action, original_obj,
returned_obj):
"""Called when a network change is made that nova cares about.
:param action: the event that occurred.
:param original_obj: the previous value of resource before action.
:param returned_obj: the body returned to client as result of action.
"""
if not cfg.CONF.notify_nova_on_port_data_changes:
return
# When neutron re-assigns a floating ip from an original instance
# port to a new instance port without disassociating it first, an
# event should be sent for the original instance so that nova
# knows about it and can update its database.
if (action == 'update_floatingip'
and returned_obj['floatingip'].get('port_id')
and original_obj.get('port_id')):
disassociate_returned_obj = {'floatingip': {'port_id': None}}
event = self.create_port_changed_event(action, original_obj,
disassociate_returned_obj)
self.batch_notifier.queue_event(event)
event = self.create_port_changed_event(action, original_obj,
returned_obj)
self.batch_notifier.queue_event(event)
def create_port_changed_event(self, action, original_obj, returned_obj):
port = None
if action in ['update_port', 'delete_port']:
port = returned_obj['port']
elif action in ['update_floatingip', 'create_floatingip',
'delete_floatingip']:
# NOTE(arosen) if we are associating a floatingip the
# port_id is in the returned_obj. Otherwise on disassociate
# it's in the original_object
port_id = (returned_obj['floatingip'].get('port_id') or
original_obj.get('port_id'))
if port_id is None:
return
ctx = context.get_admin_context()
port = self._plugin.get_port(ctx, port_id)
if port and self._is_compute_port(port):
if action == 'delete_port':
return self._get_port_delete_event(port)
else:
return self._get_network_changed_event(port['device_id'])
def record_port_status_changed(self, port, current_port_status,
previous_port_status, initiator):
"""Determine if nova needs to be notified due to port status change.
"""
# clear out previous _notify_event
port._notify_event = None
# If there is no device_id set there is nothing we can do here.
if not port.device_id:
LOG.debug("device_id is not set on port yet.")
return
if not port.id:
LOG.warning(_LW("Port ID not set! Nova will not be notified of "
"port status change."))
return
# We only want to notify about nova ports.
if not self._is_compute_port(port):
return
# We notify nova when a vif is unplugged which only occurs when
# the status goes from ACTIVE to DOWN.
if (previous_port_status == constants.PORT_STATUS_ACTIVE and
current_port_status == constants.PORT_STATUS_DOWN):
event_name = VIF_UNPLUGGED
# We only notify nova when a vif is plugged which only occurs
# when the status goes from:
# NO_VALUE/DOWN/BUILD -> ACTIVE/ERROR.
elif (previous_port_status in [sql_attr.NO_VALUE,
constants.PORT_STATUS_DOWN,
constants.PORT_STATUS_BUILD]
and current_port_status in [constants.PORT_STATUS_ACTIVE,
constants.PORT_STATUS_ERROR]):
event_name = VIF_PLUGGED
# All the remaining state transitions are of no interest to nova
else:
LOG.debug("Ignoring state change previous_port_status: "
"%(pre_status)s current_port_status: %(cur_status)s"
" port_id %(id)s",
{'pre_status': previous_port_status,
'cur_status': current_port_status,
'id': port.id})
return
port._notify_event = (
{'server_uuid': port.device_id,
'name': event_name,
'status': NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status),
'tag': port.id})
def send_port_status(self, mapper, connection, port):
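        """Queue the notification stored by record_port_status_changed.
        The mapper and connection arguments are unused here; they only match
        the signature expected by the caller."""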
event = getattr(port, "_notify_event", None)
self.batch_notifier.queue_event(event)
port._notify_event = None
def send_events(self, batched_events):
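        """Send a batch of events to nova and log each event's response.
        This is the callback handed to the BatchNotifier in __init__."""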
LOG.debug("Sending events: %s", batched_events)
try:
response = self.nclient.server_external_events.create(
batched_events)
except nova_exceptions.NotFound:
LOG.warning(_LW("Nova returned NotFound for event: %s"),
batched_events)
except Exception:
LOG.exception(_LE("Failed to notify nova on events: %s"),
batched_events)
else:
if not isinstance(response, list):
LOG.error(_LE("Error response returned from nova: %s"),
response)
return
response_error = False
for event in response:
try:
code = event['code']
except KeyError:
response_error = True
continue
if code != 200:
LOG.warning(_LW("Nova event: %s returned with failed "
"status"), event)
else:
LOG.info(_LI("Nova event response: %s"), event)
if response_error:
LOG.error(_LE("Error response returned from nova: %s"),
response)
|
|
from iam_syncr.errors import SyncrError, BadConfiguration, InvalidConfiguration, NoConfiguration
from iam_syncr.amazon.base import Amazon
from iam_syncr.syncer import Sync
from iam_syncr import VERSION
from rainbow_logging_handler import RainbowLoggingHandler
import argparse
import logging
import fnmatch
import yaml
import sys
import os
log = logging.getLogger("iam_sync.executor")
def setup_logging(verbose=False):
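    """Attach a coloured handler to the root logger.
    Level is DEBUG when verbose is True and INFO otherwise; boto logging is
    restricted to CRITICAL."""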
log = logging.getLogger("")
handler = RainbowLoggingHandler(sys.stderr)
handler._column_color['%(asctime)s'] = ('cyan', None, False)
handler._column_color['%(levelname)-7s'] = ('green', None, False)
handler._column_color['%(message)s'][logging.INFO] = ('blue', None, False)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-7s %(name)-15s %(message)s"))
log.addHandler(handler)
log.setLevel([logging.INFO, logging.DEBUG][verbose])
logging.getLogger("boto").level = logging.CRITICAL
def argparse_readable_folder(value):
"""Argparse type for a readable folder"""
if not os.path.exists(value):
raise argparse.ArgumentTypeError("{0} doesn't exist".format(value))
if not os.path.isdir(value):
raise argparse.ArgumentTypeError("{0} exists but isn't a folder".format(value))
if not os.access(value, os.R_OK):
raise argparse.ArgumentTypeError("{0} exists and is a folder but isn't readable".format(value))
return os.path.abspath(value)
def make_parser():
"""Make us a parser"""
parser = argparse.ArgumentParser(description="Sync script, supply your own creds!")
parser.add_argument("-v", "--verbose"
, help = "Show debug logging"
, action = "store_true"
)
parser.add_argument("folder"
, help = "The folder containing the roles we want to sync"
, type = argparse_readable_folder
)
parser.add_argument("--accounts-location"
, help = "Path to accounts.yaml holding the map of human names to accounts ids"
)
parser.add_argument("--filename-match"
, help = "A glob to match the path of the configuration against (relative to the specified folder)"
, default = "*.yaml"
)
parser.add_argument("--only-consider"
, help = "Only sync these (i.e. roles, remove_roles, users)"
, action = "append"
)
parser.add_argument("--dry-run"
, help = "Print out what policies would be set/removed"
, action = "store_true"
)
return parser
def accounts_from(location):
"""Get the accounts dictionary"""
if not os.path.exists(location):
raise SyncrError("Could not find an accounts.yaml", location=location)
if not os.access(location, os.R_OK):
raise SyncrError("Could not read the accounts.yaml", location=location)
try:
        with open(location) as accounts_fle:
            accounts = yaml.safe_load(accounts_fle)
except yaml.parser.ParserError as error:
raise SyncrError("Failed to parse the accounts yaml file", location=location, error_typ=error.__class__.__name__, error=error)
for account_id in list(accounts.values()):
if account_id not in accounts:
accounts[account_id] = account_id
return accounts
def make_amazon(folder, accounts_location=None, dry_run=False):
"""Find the account we're using and return a setup Amazon object"""
if not accounts_location:
accounts_location = os.path.join(folder, '..', 'accounts.yaml')
accounts = accounts_from(accounts_location)
account_name = os.path.basename(folder)
if account_name not in accounts:
raise SyncrError("Please add this account to accounts.yaml", accounts_yaml_location=accounts_location, account_name=account_name)
account_id = accounts[account_name]
amazon = Amazon(account_id, account_name, accounts, dry_run=dry_run)
amazon.setup()
return amazon
def do_sync(amazon, found, only_consider=None):
"""Sync the configuration from this folder"""
try:
parsed = parse_configurations(found)
except BadConfiguration as err:
log.error("Failed to parse all the yaml specifications")
for _, error in sorted(err.kwargs["parse_errors"].items()):
log.error(error)
raise BadConfiguration()
sync = Sync(amazon)
sync.register_default_types()
if only_consider:
dont_consider = [considering for considering in only_consider if considering not in sync.types]
if dont_consider:
raise SyncrError("Told to sync unknown types", only_sync=list(sync.types.keys()), unknown_types=dont_consider)
for location, configuration in sorted(parsed.items()):
sync.add(configuration, location, only_consider)
try:
log.info("Combining configuration")
combined = sync.combine_configurations()
except BadConfiguration as err:
log.error("Your configuration didn't make sense")
for error in err.kwargs["errors"]:
log.error(error)
raise BadConfiguration()
log.info("Starting sync")
sync.sync(combined)
def parse_configurations(locations):
"""
Return a dictionary of {location: <parsed_yaml>} for .yaml files in this folder
Or Raise A BadConfiguration(parse_errors={<location>: <parse_error>})
With all the errors that are encountered
"""
parsed = {}
parse_errors = {}
for location in locations:
try:
            with open(location) as config_fle:
                config = yaml.safe_load(config_fle)
if not isinstance(config, dict):
parse_errors[location] = InvalidConfiguration("Configuration is not a dictionary", location=location, found=type(config))
else:
parsed[location] = config
except yaml.parser.ParserError as err:
parse_errors[location] = InvalidConfiguration("Couldn't parse the yaml", location=location, err_type=err.__class__.__name__, err=err)
if parse_errors:
raise BadConfiguration(parse_errors=parse_errors)
return parsed
def find_configurations(folder, filename_match):
"""Find all the configurations in this folder"""
found = []
for root, dirs, files in os.walk(folder):
for filename in files:
location = os.path.join(root, filename)
relative_location = os.path.relpath(location, start=folder)
if fnmatch.fnmatch(relative_location, filename_match):
found.append(location)
if not found:
raise NoConfiguration(folder=folder)
return found
def main(argv=None):
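    """Entry point: parse the cli options, connect to amazon, find the yaml
    configurations in the folder and sync them."""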
parser = make_parser()
args = parser.parse_args(argv)
setup_logging(verbose=args.verbose)
Amazon.set_boto_useragent("iam_syncr", VERSION)
try:
log.info("Making a connection to amazon")
amazon = make_amazon(folder=args.folder, accounts_location=args.accounts_location, dry_run=args.dry_run)
log.info("Finding the configuration")
found = find_configurations(args.folder, args.filename_match)
log.info("Syncing for account %s from %s", amazon.account_id, args.folder)
do_sync(amazon, found, args.only_consider)
if not amazon.changes:
log.info("No changes were made!")
except SyncrError as err:
print("!" * 80)
print("Something went wrong => {0} |:| {1}".format(err.__class__.__name__, err))
sys.exit(1)
if __name__ == '__main__':
main()
|
|
# WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
import json
import requests
import responses
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_none,
assert_is_not_none,
assert_not_equal,
assert_raises
)
from gocardless_pro.errors import MalformedResponseError
from gocardless_pro import resources
from gocardless_pro import list_response
from .. import helpers
@responses.activate
def test_billing_requests_list():
fixture = helpers.load_fixture('billing_requests')['list']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.list(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, list_response.ListResponse)
assert_is_instance(response.records[0], resources.BillingRequest)
assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal([r.actions for r in response.records],
[b.get('actions') for b in body])
assert_equal([r.created_at for r in response.records],
[b.get('created_at') for b in body])
assert_equal([r.id for r in response.records],
[b.get('id') for b in body])
assert_equal([r.metadata for r in response.records],
[b.get('metadata') for b in body])
assert_equal([r.status for r in response.records],
[b.get('status') for b in body])
@responses.activate
def test_timeout_billing_requests_list_retries():
fixture = helpers.load_fixture('billing_requests')['list']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.billing_requests.list(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['billing_requests']
assert_is_instance(response, list_response.ListResponse)
assert_is_instance(response.records[0], resources.BillingRequest)
assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
def test_502_billing_requests_list_retries():
fixture = helpers.load_fixture('billing_requests')['list']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.billing_requests.list(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['billing_requests']
assert_is_instance(response, list_response.ListResponse)
assert_is_instance(response.records[0], resources.BillingRequest)
assert_equal(response.before, fixture['body']['meta']['cursors']['before'])
assert_equal(response.after, fixture['body']['meta']['cursors']['after'])
@responses.activate
def test_billing_requests_all():
fixture = helpers.load_fixture('billing_requests')['list']
def callback(request):
if 'after=123' in request.url:
fixture['body']['meta']['cursors']['after'] = None
else:
fixture['body']['meta']['cursors']['after'] = '123'
return [200, {}, json.dumps(fixture['body'])]
url = 'http://example.com' + fixture['path_template']
responses.add_callback(fixture['method'], url, callback)
all_records = list(helpers.client.billing_requests.all())
assert_equal(len(all_records), len(fixture['body']['billing_requests']) * 2)
for record in all_records:
assert_is_instance(record, resources.BillingRequest)
@responses.activate
def test_billing_requests_create():
fixture = helpers.load_fixture('billing_requests')['create']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.create(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
@responses.activate
def test_billing_requests_create_new_idempotency_key_for_each_call():
fixture = helpers.load_fixture('billing_requests')['create']
helpers.stub_response(fixture)
helpers.client.billing_requests.create(*fixture['url_params'])
helpers.client.billing_requests.create(*fixture['url_params'])
assert_not_equal(responses.calls[0].request.headers.get('Idempotency-Key'),
responses.calls[1].request.headers.get('Idempotency-Key'))
def test_timeout_billing_requests_create_idempotency_conflict():
create_fixture = helpers.load_fixture('billing_requests')['create']
get_fixture = helpers.load_fixture('billing_requests')['get']
with helpers.stub_timeout_then_idempotency_conflict(create_fixture, get_fixture) as rsps:
response = helpers.client.billing_requests.create(*create_fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_is_instance(response, resources.BillingRequest)
@responses.activate
def test_timeout_billing_requests_create_retries():
fixture = helpers.load_fixture('billing_requests')['create']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.billing_requests.create(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
def test_502_billing_requests_create_retries():
fixture = helpers.load_fixture('billing_requests')['create']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.billing_requests.create(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
@responses.activate
def test_billing_requests_get():
fixture = helpers.load_fixture('billing_requests')['get']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.get(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
@responses.activate
def test_timeout_billing_requests_get_retries():
fixture = helpers.load_fixture('billing_requests')['get']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.billing_requests.get(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
def test_502_billing_requests_get_retries():
fixture = helpers.load_fixture('billing_requests')['get']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.billing_requests.get(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
@responses.activate
def test_billing_requests_collect_customer_details():
fixture = helpers.load_fixture('billing_requests')['collect_customer_details']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.collect_customer_details(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
def test_timeout_billing_requests_collect_customer_details_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['collect_customer_details']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.billing_requests.collect_customer_details(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_billing_requests_collect_customer_details_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['collect_customer_details']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.billing_requests.collect_customer_details(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
@responses.activate
def test_billing_requests_collect_bank_account():
fixture = helpers.load_fixture('billing_requests')['collect_bank_account']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.collect_bank_account(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
def test_timeout_billing_requests_collect_bank_account_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['collect_bank_account']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.billing_requests.collect_bank_account(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_billing_requests_collect_bank_account_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['collect_bank_account']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.billing_requests.collect_bank_account(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
@responses.activate
def test_billing_requests_fulfil():
fixture = helpers.load_fixture('billing_requests')['fulfil']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.fulfil(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
def test_timeout_billing_requests_fulfil_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['fulfil']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.billing_requests.fulfil(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_billing_requests_fulfil_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['fulfil']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.billing_requests.fulfil(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
@responses.activate
def test_billing_requests_confirm_payer_details():
fixture = helpers.load_fixture('billing_requests')['confirm_payer_details']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.confirm_payer_details(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
def test_timeout_billing_requests_confirm_payer_details_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['confirm_payer_details']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.billing_requests.confirm_payer_details(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_billing_requests_confirm_payer_details_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['confirm_payer_details']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.billing_requests.confirm_payer_details(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
@responses.activate
def test_billing_requests_cancel():
fixture = helpers.load_fixture('billing_requests')['cancel']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.cancel(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
def test_timeout_billing_requests_cancel_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['cancel']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.billing_requests.cancel(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_billing_requests_cancel_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['cancel']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.billing_requests.cancel(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
@responses.activate
def test_billing_requests_notify():
fixture = helpers.load_fixture('billing_requests')['notify']
helpers.stub_response(fixture)
response = helpers.client.billing_requests.notify(*fixture['url_params'])
body = fixture['body']['billing_requests']
assert_is_instance(response, resources.BillingRequest)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.actions, body.get('actions'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.metadata, body.get('metadata'))
assert_equal(response.status, body.get('status'))
assert_equal(response.links.bank_authorisation,
body.get('links')['bank_authorisation'])
assert_equal(response.links.creditor,
body.get('links')['creditor'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.customer_bank_account,
body.get('links')['customer_bank_account'])
assert_equal(response.links.customer_billing_detail,
body.get('links')['customer_billing_detail'])
assert_equal(response.links.mandate_request,
body.get('links')['mandate_request'])
assert_equal(response.links.mandate_request_mandate,
body.get('links')['mandate_request_mandate'])
assert_equal(response.links.payment_request,
body.get('links')['payment_request'])
assert_equal(response.links.payment_request_payment,
body.get('links')['payment_request_payment'])
assert_equal(response.mandate_request.currency,
body.get('mandate_request')['currency'])
assert_equal(response.mandate_request.links,
body.get('mandate_request')['links'])
assert_equal(response.mandate_request.scheme,
body.get('mandate_request')['scheme'])
assert_equal(response.mandate_request.verify,
body.get('mandate_request')['verify'])
assert_equal(response.payment_request.amount,
body.get('payment_request')['amount'])
assert_equal(response.payment_request.app_fee,
body.get('payment_request')['app_fee'])
assert_equal(response.payment_request.currency,
body.get('payment_request')['currency'])
assert_equal(response.payment_request.description,
body.get('payment_request')['description'])
assert_equal(response.payment_request.links,
body.get('payment_request')['links'])
assert_equal(response.payment_request.scheme,
body.get('payment_request')['scheme'])
assert_equal(response.resources.customer,
body.get('resources')['customer'])
assert_equal(response.resources.customer_bank_account,
body.get('resources')['customer_bank_account'])
assert_equal(response.resources.customer_billing_detail,
body.get('resources')['customer_billing_detail'])
def test_timeout_billing_requests_notify_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['notify']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.billing_requests.notify(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_billing_requests_notify_doesnt_retry():
fixture = helpers.load_fixture('billing_requests')['notify']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.billing_requests.notify(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
|
|
from .AbstractAction import *
from .Logger import *
from .Range import Range, Position
from .Buffer import LastLinebreakLostExecption
from .Interpreter import Interpreter, InterpreterActionManager
class Idle(AbstractAction):
def act(self, callback = None):
operator = self.command.lpOperator()
return self.actionManager.action("normal", operator).act()
class Append(AbstractAction):
def act(self, callback = None):
self.cursor.appendInLine()
return "insert", self.actionManager.action("insert", "inserting")
class AppendToLine(AbstractAction):
" No count "
def act(self):
self.cursor.endOfLine()
self.cursor.appendInLine()
return "insert", self.actionManager.action("insert", "inserting")
class BeginningOfLine(AbstractAction):
" No count "
def act(self, callback = None):
motion = self.motions.beginningOfLine()
if callback:
return callback.call(motion.exclusive())
else:
return self.moveAndFinshToIdle(motion)
class BackWord(AbstractWord):
backwards = True
pattern = r'\w+'
class BackWORD(AbstractWord):
backwards = True
pattern = r'\S+'
class Change(AbstractPendingAction):
def call(self, motion):
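        """Delete the text covered by the motion and switch to insert mode.
        For a linewise motion a fresh empty line is inserted at the top of the
        deleted range so there is a line left to type into."""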
if motion:
self.deleteAndRegister(motion)
if motion.isLines():
y = motion.upperY()
self.buffer.insert(Position(y, 1), "\n")
self.cursor.gotoPositionRelaxed(Position(y, 1))
else:
self.cursor.gotoPositionRelaxed(motion.upperPosition())
return "insert", self.actionManager.action("insert", "inserting")
class ChangeLines(AbstractAction):
def act(self, callback = None):
factor = self.command.multiplyAll()
if callback:
""" S and cc acts linewise """
if not self.command.previous().operator == "c":
return self.skipToIdle()
motion = self.motions.down(factor - 1).limitVertical().linewise()
elif self.command.lpOperator() == "S":
motion = self.motions.down(factor - 1).limitVertical().linewise()
else:
""" C acts from current position """
motion = self.motions.endOfLine(factor)
return self.actionManager.action("normal", "c").call(motion)
class CommandLine(AbstractAction):
def act(self):
return "insert", self.actionManager.action("command", "inserting")
class Delete(AbstractPendingAction):
def call(self, motion):
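        """Delete the text covered by the motion, move the cursor to the start
        of the deleted range and return to normal mode."""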
if motion:
self.deleteAndRegister(motion)
if motion.isLines():
y = motion.upperY()
self.cursor.gotoPositionStrict(Position(y, 1))
else:
self.cursor.gotoPositionStrict(motion.upperPosition())
self.finish()
return "normal", self.actionManager.action("normal", "idle")
class DeleteCharacters(AbstractAction):
def act(self):
factor = self.command.multiplyAll()
motion = self.motions.right(factor - 1).forceLimits()
self.deleteAndRegister(motion)
self.finish()
return "normal", self.actionManager.action("normal", "idle")
class DeleteCharactersBefore(AbstractAction):
def act(self):
factor = self.command.multiplyAll()
motion = self.motions.left(factor)
motion = motion.forceLimits()
motion = motion.exclusive()
self.deleteAndRegister(motion)
return self.moveAndFinshToIdle(motion)
class DeleteLines(AbstractAction):
def act(self, callback = None):
factor = self.command.multiplyAll()
if callback:
""" dd acts linewise """
if not self.command.previous().operator == "d":
return self.skipToIdle()
motion = self.motions.down(factor - 1).limitVertical().linewise()
else:
""" D acts from current position """
motion = self.motions.endOfLine(factor).forceLimits()
return self.actionManager.action("normal", "d").call(motion)
class Down(AbstractAction):
def act(self, callback = None):
factor = self.command.multiplyAll()
motion = self.motions.down(factor).limitVertical()
if callback:
return callback.call(motion.linewise())
else:
return self.moveAndFinshToIdle(motion)
class EndOfLine(AbstractAction):
def act(self, callback = None):
factor = self.command.multiplyAll()
motion = self.motions.endOfLine(factor)
motion = motion.forceLimits()
if callback:
return callback.call(motion)
else:
return self.moveAndFinshToIdle(motion)
class EndOfWord(AbstractWord):
backwards = False
pattern = r'\w\W'
matchEmptyLines = False
exclusive = False
class EndOfWORD(AbstractWord):
backwards = False
pattern = r'\S\s'
matchEmptyLines = False
exclusive = False
class FindInLine(AbstractFindInLine):
backwards = False
pattern = "(%s)"
class FindInLineCharacterBefore(AbstractFindInLine):
backwards = False
pattern = "(.)%s"
class FindInLineBackwards(AbstractFindInLine):
backwards = True
pattern = "(%s)"
class FindInLineBackwardsCharacterBefore(AbstractFindInLine):
backwards = True
pattern = "%s(.)"
class GotoLine(AbstractAction):
def act(self, callback = None):
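        """Move to the line given by the count, or to the last line of the
        buffer when no count is given."""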
if self.command.hasNoCounts():
y = self.buffer.countOfLines()
else:
y = self.command.multiplyAll()
motion = self.motions.makeMotion(Position(y,1))
motion = motion.limitVertical()
if callback:
return callback.call(motion.linewise())
else:
return self.moveAndFinshToIdle(motion)
class GCommand(AbstractAction):
def __init__(self):
self.callback = None
def act(self, callback = None):
"""
If act() is called from a pending action that
action is given as callback. While collecting the
g command components this callback is stored into
**self.callback**. When the command is ready and
finally called, that callback is submitted with
act(). It's call() method is called directly,
hence no call() command needed here. """
mode = self.dispatcher.currentMode
if mode == "gPending":
operator = self.command.lpOperator()
if self.callback:
return self.actionManager.action(mode, operator).act(self.callback)
else:
return self.actionManager.action(mode, operator).act()
else:
if callback: self.callback = callback
self.dispatcher.extend()
return "gPending", self
class Insert(AbstractAction):
def act(self):
return "insert", self.actionManager.action("insert", "inserting")
class Inserting(AbstractAction):
def act(self):
self.step(self.command.lpInsert())
return ("insert", self)
def step(self, token):
if not "startPosition" in dir(self): self.start()
if False: pass
elif token == chr(127): self.backspace()
else: self.insert(token)
def start(self):
self.startPosition = self.cursor.position()
def finish(self):
super().finish()
self.cursor.left()
def insert(self, char):
if self.buffer.isEmpty():
self.buffer.insert(Position(1,1), "\n")
self.cursor.position(Position(1,1))
self.buffer.insert(self.cursor.position(), char)
def backspace(self):
y = self.cursor.y
x = self.cursor.x - 1
startY, startX = self.startPosition.toPositionTuple()
if (y > startY or x >= startX) and x > 0:
self.buffer.delete(Position(y, x))
class InsertingCommandLine(Inserting):
def act(self):
token = self.command.lpInsert()
if not "startPosition" in dir(self): self.start()
if False: pass
elif token == chr(10):
return self.commit()
elif token == chr(127):
self.backspace()
return ("insert", self)
else:
self.insert(token)
return ("insert", self)
def commit(self):
interpreter = Interpreter()
interpreter.actionManager = InterpreterActionManager()
interpreter.actionManager.window = self.windowManager.currentWindow()
command = self.buffer.copy(Range(1,1)).strip()
interpreter.interpret(command)
return self.skipToIdle()
def finish(self):
super().finish()
self.buffer.delete(Range(1,self.buffer.countOfLines()))
self.windowManager.currentWindow().focus()
class InsertBeforeLine(AbstractAction):
def act(self):
self.cursor.beginningOfLine()
return "insert", self.actionManager.action("insert", "inserting")
class JoinLinesWithAdjustments(AbstractAction):
def act(self):
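        """Join the following line onto the current line, count times.
        The next line's leading whitespace is stripped; a single space is used
        as the joint unless the first line already ends in whitespace."""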
factor = self.command.multiplyAll()
y = self.cursor.y
for i in range(factor):
if y < self.buffer.countOfLines():
joinPosition = Position(y, self.buffer.lengthOfLine(y))
# firstLine without newline
firstLine = self.buffer.copy(Range(y,y))
firstLine = firstLine[:-1]
firstLengthTrimmed = len(firstLine.rstrip())
if (len(firstLine) - firstLengthTrimmed) > 0: joint = ""
else: joint = " "
# last line left stripped
lastLine = self.buffer.copy(Range(y+1, y+1)).lstrip()
if lastLine == "": lastLine = "\n"
self.buffer.delete(Range(y, y+1))
joined = firstLine + joint + lastLine
self.buffer.insert(Position(y, 1), joined)
self.cursor.gotoPositionStrict(joinPosition)
return "normal", self.actionManager.action("normal", "idle")
class Left(AbstractAction):
def act(self, callback = None):
factor = self.command.multiplyAll()
motion = self.motions.left(factor)
motion = motion.forceLimits()
motion = motion.exclusive()
if callback:
return callback.call(motion)
else:
return self.moveAndFinshToIdle(motion)
class OpenLineAbove(AbstractAction):
def act(self):
self.buffer.insert(Position(self.cursor.y, 1), "\n")
self.cursor.up()
return "insert", self.actionManager.action("insert", "inserting")
class OpenLineBelow(AbstractAction):
def act(self):
self.buffer.insert(Position(self.cursor.y + 1, 1), "\n")
self.cursor.down()
return "insert", self.actionManager.action("insert", "inserting")
class PutBefore(AbstractAction):
def act(self):
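        """Paste the contents of the unnamed register before the cursor;
        linewise content is inserted above the current line."""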
count = self.command.lpCount()
        if count is None: count = 1
string, linewise = self.registerManager.fetch('"')
string *= count
if linewise:
y = self.cursor.y
position = Position(y if y else 1, 1)
self.buffer.insert(position, string)
self.cursor.position(position)
else:
if self.buffer.isEmpty():
self.buffer.fill("\n")
self.cursor.beginningOfBuffer()
self.buffer.insert(self.cursor.position(), string)
self.cursor.left()
self.finish()
return "normal", self.actionManager.action("normal", "idle")
class PutAfter(AbstractAction):
def act(self):
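        """Paste the contents of the unnamed register after the cursor;
        linewise content is inserted below the current line."""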
count = self.command.lpCount()
        if count is None: count = 1
string, linewise = self.registerManager.fetch('"')
string *= count
if linewise:
position = Position(self.cursor.y + 1, 1)
self.buffer.insert(position, string)
self.cursor.position(position)
else:
if self.buffer.isEmpty():
self.buffer.fill("\n")
self.cursor.beginningOfBuffer()
elif self.buffer.lengthOfLine(self.cursor.y) > 1:
self.cursor.appendInLine()
self.buffer.insert(self.cursor.position(), string)
self.cursor.right(len(string))
self.finish()
return "normal", self.actionManager.action("normal", "idle")
class Right(AbstractAction):
def act(self, callback = None):
factor = self.command.multiplyAll()
motion = self.motions.right(factor)
if callback:
motion = motion.forceLimits(1)
return callback.call(motion.exclusive())
else:
motion = motion.forceLimits()
return self.moveAndFinshToIdle(motion)
class RepeatFindInLine(AbstractFindInLine):
repeat = "normal"
class RepeatFindInLineInversed(AbstractFindInLine):
repeat = "inversed"
class ReplaceCharacters(AbstractAction):
def act(self):
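        """Overwrite the next count characters with a single character.
        The first call switches to pending mode to collect the replacement
        character; the second call performs the substitution."""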
mode = self.dispatcher.currentMode
if mode == "pending":
position = self.cursor.position()
factor = self.command.multiplyAll()
motion = self.motions.right(factor - 1)
motion = motion.forceLimits()
self.buffer.delete(motion)
self.buffer.insert(position,
factor*self.command.lpOperator())
self.cursor.move(position)
self.cursor.right(factor - 1)
self.finish()
return "normal", self.actionManager.action("normal", "idle")
else:
self.dispatcher.extend()
return "pending", self
class SubstituteCharacters(AbstractAction):
def act(self):
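        """Delete the next count characters and enter insert mode."""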
position = self.cursor.position()
factor = self.command.multiplyAll()
motion = self.motions.right(factor - 1).forceLimits()
self.deleteAndRegister(motion)
self.cursor.move(position)
return "insert", self.actionManager.action("insert", "inserting")
class Up(AbstractAction):
def act(self, callback = None):
factor = self.command.multiplyAll()
motion = self.motions.up(factor).limitVertical()
if callback:
return callback.call(motion.linewise())
else:
return self.moveAndFinshToIdle(motion)
class Word(AbstractWord):
backwards = False
pattern = r'\w+'
changeAlternativePattern = r'\w\W'
class WORD(AbstractWord):
backwards = False
pattern = r'\S+'
changeAlternativePattern = r'\S\s'
class Yank(AbstractPendingAction):
def call(self, motion):
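        """Copy the text covered by the motion into register 0 and move the
        cursor to the top of the yanked range."""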
if not motion:
string = ""
self.registerManager.store('0', string)
else:
string = self.buffer.copy(motion)
self.registerManager.store('0', string, motion.isLines())
if motion.isLines():
y = motion.upperY()
x = self.cursor.x
self.cursor.gotoPositionStrict(Position(y, x))
else:
self.cursor.gotoPositionStrict(motion.upperPosition())
self.finish()
return "normal", self.actionManager.action("normal", "idle")
class YankLines(AbstractAction):
def act(self, callback = None):
if callback and not self.command.previous().operator == "y":
""" yy and Y work both linewise """
return self.skipToIdle()
else:
factor = self.command.multiplyAll()
yRange = self.motions.down(factor - 1).limitVertical().linewise()
return self.actionManager.action("normal", "y").call(yRange)
|
|
#!/usr/bin/env python2
from __future__ import division
import sys
import itertools
import re
import warnings
from statepoint import StatePoint
alphanum = re.compile(r"[\W_]+")
err = False
################################################################################
def parse_options():
"""Process command line arguments"""
def tallies_callback(option, opt, value, parser):
"""Option parser function for list of tallies"""
global err
try:
setattr(parser.values, option.dest, [int(v) for v in value.split(',')])
except:
p.print_help()
err = True
def scores_callback(option, opt, value, parser):
"""Option parser function for list of scores"""
global err
try:
scores = {}
entries = value.split(',')
for e in entries:
tally,score = [int(i) for i in e.split('.')]
if not tally in scores: scores[tally] = []
scores[tally].append(score)
setattr(parser.values, option.dest, scores)
except:
p.print_help()
err = True
def filters_callback(option, opt, value, parser):
"""Option parser function for list of filters"""
global err
try:
filters = {}
entries = value.split(',')
for e in entries:
tally,filter_,bin = [i for i in e.split('.')]
tally,bin = int(tally),int(bin)
if not tally in filters: filters[tally] = {}
if not filter_ in filters[tally]: filters[tally][filter_] = []
filters[tally][filter_].append(bin)
setattr(parser.values, option.dest, filters)
except:
p.print_help()
err = True
from optparse import OptionParser
usage = r"""%prog [options] <statepoint_file>
The default is to process all tallies and all scores into one file. Subsets
can be chosen using the options. For example, to only process tallies 2 and 4
with all scores on tally 2 and only scores 1 and 3 on tally 4:
%prog -t 2,4 -s 4.1,4.3 <statepoint_file>
Likewise if you have additional filters on a tally you can specify a subset of
bins for each filter for that tally. For example to process all tallies and
scores, but only energyin bin #1 in tally 2:
%prog -f 2.energyin.1 <statepoint_file>
You can list the available tallies, scores, and filters with the -l option:
%prog -l <statepoint_file>"""
p = OptionParser(usage=usage)
p.add_option('-t', '--tallies', dest='tallies', type='string', default=None,
action='callback', callback=tallies_callback,
help='List of tally indices to process, separated by commas.' \
' Default is to process all tallies.')
p.add_option('-s', '--scores', dest='scores', type='string', default=None,
action='callback', callback=scores_callback,
help='List of score indices to process, separated by commas, ' \
'specified as {tallyid}.{scoreid}.' \
' Default is to process all scores in each tally.')
p.add_option('-f', '--filters', dest='filters', type='string', default=None,
action='callback', callback=filters_callback,
help='List of filter bins to process, separated by commas, ' \
'specified as {tallyid}.{filter}.{binid}. ' \
                      'Default is to process all filter combinations for each score.')
p.add_option('-l', '--list', dest='list', action='store_true',
help='List the tally and score indices available in the file.')
p.add_option('-o', '--output', action='store', dest='output',
default='tally', help='path to output SILO file.')
p.add_option('-e', '--error', dest='valerr', default=False,
action='store_true',
help='Flag to extract errors instead of values.')
p.add_option('-v', '--vtk', action='store_true', dest='vtk',
default=False, help='Flag to convert to VTK instead of SILO.')
parsed = p.parse_args()
if not parsed[1]:
p.print_help()
return parsed, err
if parsed[0].valerr:
parsed[0].valerr = 1
else:
parsed[0].valerr = 0
return parsed, err
################################################################################
def main(file_, o):
"""Main program"""
sp = StatePoint(file_)
sp.read_results()
validate_options(sp, o)
if o.list:
print_available(sp)
return
if o.vtk:
if not o.output[-4:] == ".vtm": o.output += ".vtm"
else:
if not o.output[-5:] == ".silo": o.output += ".silo"
if o.vtk:
try:
import vtk
except:
print 'The vtk python bindings do not appear to be installed properly.\n'+\
'On Ubuntu: sudo apt-get install python-vtk\n'+\
'See: http://www.vtk.org/'
return
else:
try:
import silomesh
except:
print 'The silomesh package does not appear to be installed properly.\n'+\
'See: https://github.com/nhorelik/silomesh/'
return
if o.vtk:
blocks = vtk.vtkMultiBlockDataSet()
blocks.SetNumberOfBlocks(5)
block_idx = 0
else:
silomesh.init_silo(o.output)
# Tally loop #################################################################
for tally in sp.tallies:
# skip non-mesh tallies or non-user-specified tallies
if o.tallies and not tally.id in o.tallies: continue
if not 'mesh' in tally.filters: continue
print "Processing Tally {}...".format(tally.id)
# extract filter options and mesh parameters for this tally
filtercombos = get_filter_combos(tally)
meshparms = get_mesh_parms(sp, tally)
nx,ny,nz = meshparms[:3]
ll = meshparms[3:6]
ur = meshparms[6:9]
if o.vtk:
ww = [(u-l)/n for u,l,n in zip(ur,ll,(nx,ny,nz))]
grid = vtk.vtkImageData()
grid.SetDimensions(nx+1,ny+1,nz+1)
grid.SetOrigin(*ll)
grid.SetSpacing(*ww)
else:
silomesh.init_mesh('Tally_{}'.format(tally.id), *meshparms)
# Score loop ###############################################################
for sid,score in enumerate(tally.scores):
# skip non-user-specified scores for this tally
if o.scores and tally.id in o.scores and not sid in o.scores[tally.id]:
continue
# Filter loop ############################################################
for filterspec in filtercombos:
# skip non-user-specified filter bins
skip = False
if o.filters and tally.id in o.filters:
for filter_,bin in filterspec[1:]:
if filter_ in o.filters[tally.id] and \
not bin in o.filters[tally.id][filter_]:
skip = True
break
if skip: continue
# find and sanitize the variable name for this score
varname = get_sanitized_filterspec_name(tally, score, filterspec)
if o.vtk:
vtkdata = vtk.vtkDoubleArray()
vtkdata.SetName(varname)
dataforvtk = {}
else:
silomesh.init_var(varname)
lbl = "\t Score {}.{} {}:\t\t{}".format(tally.id, sid+1, score, varname)
# Mesh fill loop #######################################################
for x in range(1,nx+1):
sys.stdout.write(lbl+" {0}%\r".format(int(x/nx*100)))
sys.stdout.flush()
for y in range(1,ny+1):
for z in range(1,nz+1):
filterspec[0][1] = (x,y,z)
val = sp.get_value(tally.id-1, filterspec, sid)[o.valerr]
if o.vtk:
# VTK stores cell data with x varying fastest (then y, then z), so compute
# the flat cell index now and insert the stored values in that order later
i = (z-1)*nx*ny + (y-1)*nx + x-1
dataforvtk[i] = float(val)
else:
silomesh.set_value(float(val), x, y, z)
# end mesh fill loop
print
if o.vtk:
for i in range(nx*ny*nz):
vtkdata.InsertNextValue(dataforvtk[i])
grid.GetCellData().AddArray(vtkdata)
del vtkdata
else:
silomesh.finalize_var()
# end filter loop
# end score loop
if o.vtk:
blocks.SetBlock(block_idx, grid)
block_idx += 1
else:
silomesh.finalize_mesh()
# end tally loop
if o.vtk:
writer = vtk.vtkXMLMultiBlockDataWriter()
writer.SetFileName(o.output)
writer.SetInput(blocks)
writer.Write()
else:
silomesh.finalize_silo()
################################################################################
def get_sanitized_filterspec_name(tally, score, filterspec):
"""Returns a name fit for silo vars for a given filterspec, tally and score"""
comboname = "_"+" ".join(["{}_{}".format(filter_, bin)
for filter_, bin in filterspec[1:]])
if len(filterspec[1:]) == 0: comboname = ''
varname = 'Tally_{}_{}{}'.format(tally.id, score, comboname)
varname = alphanum.sub('_', varname)
return varname
################################################################################
def get_filter_combos(tally):
"""Returns a list of all filter spec combinations, excluding meshes
Each combo has the mesh spec as the first element, to be set later.
These filter specs correspond with the second argument to StatePoint.get_value
"""
specs = []
if len(tally.filters) == 1:
return [[['mesh', [1, 1, 1]]]]
filters = tally.filters.keys()
filters.pop(filters.index('mesh'))
nbins = [tally.filters[f].length for f in filters]
combos = [ [b] for b in range(nbins[0])]
for i,b in enumerate(nbins[1:]):
prod = list(itertools.product(combos, range(b)))
if i == 0:
combos = prod
else:
combos = [[v for v in p[0]] + [p[1]] for p in prod]
for c in combos:
spec = [['mesh', [1, 1, 1]]]
for i,bin in enumerate(c):
spec.append((filters[i], bin))
specs.append(spec)
return specs
################################################################################
def get_mesh_parms(sp, tally):
meshid = tally.filters['mesh'].bins[0]
for i,m in enumerate(sp.meshes):
if m.id == meshid:
mesh = m
return mesh.dimension + mesh.lower_left + mesh.upper_right
################################################################################
def print_available(sp):
"""Prints available tallies/scores in a statepoint"""
print "Available tally and score indices:"
for tally in sp.tallies:
mesh = ""
if not 'mesh' in tally.filters: mesh = "(no mesh)"
print "\tTally {} {}".format(tally.id, mesh)
scores = ["{}.{}: {}".format(tally.id, sid, score)
for sid, score in enumerate(tally.scores)]
for score in scores:
print "\t\tScore {}".format(score)
for filter_ in tally.filters:
if filter_ == 'mesh': continue
for bin in range(tally.filters[filter_].length):
print "\t\t\tFilters: {}.{}.{}".format(tally.id, filter_, bin)
################################################################################
def validate_options(sp,o):
"""Validates specified tally/score options for the current statepoint"""
available_tallies = [t.id for t in sp.tallies]
if o.tallies:
for otally in o.tallies:
if not otally in available_tallies:
warnings.warn('Tally {} not in statepoint file'.format(otally))
continue
else:
for tally in sp.tallies:
if tally.id == otally: break
if not 'mesh' in tally.filters:
warnings.warn('Tally {} contains no mesh'.format(otally))
if o.scores and otally in o.scores.keys():
for oscore in o.scores[otally]:
if oscore > len(tally.scores):
warnings.warn('No score {} in tally {}'.format(oscore, otally))
if o.scores:
for otally in o.scores.keys():
if not otally in available_tallies:
warnings.warn('Tally {} not in statepoint file'.format(otally))
continue
if o.tallies and not otally in o.tallies:
warnings.warn(
'Skipping scores for tally {}, excluded by tally list'.format(otally))
continue
if o.filters:
for otally in o.filters.keys():
if not otally in available_tallies:
warnings.warn('Tally {} not in statepoint file'.format(otally))
continue
if o.tallies and not otally in o.tallies:
warnings.warn(
'Skipping filters for tally {}, excluded by tally list'.format(otally))
continue
for tally in sp.tallies:
if tally.id == otally: break
for filter_ in o.filters[otally]:
if filter_ == 'mesh':
warnings.warn('Cannot specify mesh filter bins')
continue
if not filter_ in tally.filters.keys():
warnings.warn(
'Tally {} does not contain filter {}'.format(otally, filter_))
continue
for bin in o.filters[otally][filter_]:
if bin >= tally.filters[filter_].length:
warnings.warn(
'No bin {} in tally {} filter {}'.format(bin, otally, filter_))
################################################################################
# monkeypatch to suppress the source echo produced by warnings
def formatwarning(message, category, filename, lineno, line):
return "{}:{}: {}: {}\n".format(filename, lineno, category.__name__, message)
warnings.formatwarning = formatwarning
################################################################################
if __name__ == '__main__':
(options, args), err = parse_options()
if args and not err:
main(args[0],options)
|
|
# -*- coding: utf-8 -*-
"""
Core client, used for all API requests.
"""
import os
import platform
from collections import namedtuple
from plivo.base import ResponseObject
from plivo.exceptions import (AuthenticationError, InvalidRequestError,
PlivoRestError, PlivoServerError,
ResourceNotFoundError, ValidationError)
from plivo.resources import (Accounts, Addresses, Applications, Calls,
Conferences, Endpoints, Identities,
Messages, Powerpacks, Media, Lookup, Brand, Campaign,
Numbers, Pricings, Recordings, Subaccounts, CallFeedback, MultiPartyCalls)
from plivo.resources.live_calls import LiveCalls
from plivo.resources.queued_calls import QueuedCalls
from plivo.resources.regulatory_compliance import EndUsers, ComplianceDocumentTypes, ComplianceDocuments, \
ComplianceRequirements, ComplianceApplications
from plivo.utils import is_valid_mainaccount, is_valid_subaccount
from plivo.version import __version__
from requests import Request, Session
AuthenticationCredentials = namedtuple('AuthenticationCredentials',
'auth_id auth_token')
PLIVO_API = 'https://api.plivo.com'
PLIVO_API_BASE_URI = '/'.join([PLIVO_API, 'v1/Account'])
# Will change these urls before putting this change in production
API_VOICE = 'https://api.plivo.com'
API_VOICE_BASE_URI = '/'.join([API_VOICE, 'v1/Account'])
API_VOICE_FALLBACK_1 = 'https://api.plivo.com'
API_VOICE_FALLBACK_2 = 'https://api.plivo.com'
API_VOICE_BASE_URI_FALLBACK_1 = '/'.join([API_VOICE_FALLBACK_1, 'v1/Account'])
API_VOICE_BASE_URI_FALLBACK_2 = '/'.join([API_VOICE_FALLBACK_2, 'v1/Account'])
CALLINSIGHTS_BASE_URL = 'https://stats.plivo.com'
def get_user_agent():
return 'plivo-python/%s (Python: %s)' % (__version__,
platform.python_version())
def fetch_credentials(auth_id, auth_token):
"""Fetches the right credentials either from params or from environment"""
if not (auth_id and auth_token):
try:
auth_id = os.environ['PLIVO_AUTH_ID']
auth_token = os.environ['PLIVO_AUTH_TOKEN']
except KeyError:
raise AuthenticationError('The Plivo Python SDK '
'could not find your auth credentials.')
if not (is_valid_mainaccount(auth_id) or is_valid_subaccount(auth_id)):
raise AuthenticationError('Invalid auth_id supplied: %s' % auth_id)
return AuthenticationCredentials(auth_id=auth_id, auth_token=auth_token)
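# Note: fetch_credentials(None, None) falls back to the PLIVO_AUTH_ID and
# PLIVO_AUTH_TOKEN environment variables and raises AuthenticationError if
# neither source yields a valid auth_id.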
class Client(object):
def __init__(self, auth_id=None, auth_token=None, proxies=None, timeout=5):
"""
The Plivo API client.
Deals with all the API requests to be made.
"""
self.base_uri = PLIVO_API_BASE_URI
self.session = Session()
self.session.headers.update({
'User-Agent': get_user_agent(),
'Content-Type': 'application/json',
'Accept': 'application/json',
})
self.session.auth = fetch_credentials(auth_id, auth_token)
self.multipart_session = Session()
self.multipart_session.headers.update({
'User-Agent': get_user_agent(),
'Cache-Control': 'no-cache',
})
self.multipart_session.auth = fetch_credentials(auth_id, auth_token)
self.proxies = proxies
self.timeout = timeout
self.account = Accounts(self)
self.subaccounts = Subaccounts(self)
self.applications = Applications(self)
self.calls = Calls(self)
self.live_calls = LiveCalls(self)
self.queued_calls = QueuedCalls(self)
self.conferences = Conferences(self)
self.endpoints = Endpoints(self)
self.messages = Messages(self)
self.lookup = Lookup(self)
self.numbers = Numbers(self)
self.powerpacks = Powerpacks(self)
self.brand = Brand(self)
self.campaign = Campaign(self)
self.media = Media(self)
self.pricing = Pricings(self)
self.recordings = Recordings(self)
self.addresses = Addresses(self)
self.identities = Identities(self)
self.call_feedback = CallFeedback(self)
self.end_users = EndUsers(self)
self.compliance_document_types = ComplianceDocumentTypes(self)
self.compliance_documents = ComplianceDocuments(self)
self.compliance_requirements = ComplianceRequirements(self)
self.compliance_applications = ComplianceApplications(self)
self.multi_party_calls = MultiPartyCalls(self)
self.voice_retry_count = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
self.multipart_session.close()
def process_response(self,
method,
response,
response_type=None,
objects_type=None):
"""Processes the API response based on the status codes and method used
to access the API
"""
try:
response_json = response.json(
object_hook=lambda x: ResponseObject(x) if isinstance(x, dict) else x)
if response_type:
r = response_type(self, response_json.__dict__)
response_json = r
if 'objects' in response_json and objects_type:
response_json.objects = [
objects_type(self, obj.__dict__)
for obj in response_json.objects
]
except ValueError:
response_json = None
if response.status_code == 400:
if response_json is not None and 'error' in response_json:
raise ValidationError(response_json.error)
raise ValidationError(
'A parameter is missing or is invalid while accessing resource '
'at: {url}'.format(url=response.url))
if response.status_code == 401:
if response_json and 'error' in response_json:
raise AuthenticationError(response_json.error)
raise AuthenticationError(
'Failed to authenticate while accessing resource at: '
'{url}'.format(url=response.url))
if response.status_code == 404:
if response_json and 'error' in response_json:
raise ResourceNotFoundError(response_json.error)
raise ResourceNotFoundError(
'Resource not found at: {url}'.format(url=response.url))
if response.status_code == 405:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'HTTP method "{method}" not allowed to access resource at: '
'{url}'.format(method=method, url=response.url))
if response.status_code == 409:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'Conflict: '
'{url}'.format(url=response.url))
if response.status_code == 422:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'Unprocessable Entity: '
'{url}'.format(url=response.url))
if response.status_code == 500:
if response_json and 'error' in response_json:
raise PlivoServerError(response_json.error)
raise PlivoServerError(
'A server error occurred while accessing resource at: '
'{url}'.format(url=response.url))
if method == 'DELETE':
if response.status_code not in [200, 204]:
raise PlivoRestError('Resource at {url} could not be '
'deleted'.format(url=response.url))
elif response.status_code not in [200, 201, 202, 204, 207]:
raise PlivoRestError(
'Received status code {status_code} for the HTTP method '
'"{method}"'.format(
status_code=response.status_code, method=method))
self.voice_retry_count = 0
return response_json
def create_request(self, method, path=None, data=None, **kwargs):
# The abstraction created by request() and create_request() is moot
# now that several pieces of product-specific handling have been added.
# Requires a refactor.
if 'is_callinsights_request' in kwargs:
url = '/'.join([CALLINSIGHTS_BASE_URL, kwargs['callinsights_request_path']])
req = Request(method, url, **({'params': data} if method == 'GET' else {'json': data}))
elif kwargs.get('is_lookup_request', False):
path = path or []
url = '/'.join(list([str(p) for p in path]))
req = Request(method, url, **({'params': data} if method == 'GET' else {'json': data}))
else:
path = path or []
req = Request(method, '/'.join([self.base_uri, self.session.auth[0]] +
list([str(p) for p in path])) + '/',
**({
'params': data
} if method == 'GET' else {
'json': data
}))
return self.session.prepare_request(req)
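# Illustrative URL shapes (sketch; auth_id and path segments are examples):
#   default:       https://api.plivo.com/v1/Account/<auth_id>/<path segments>/
#   callinsights:  https://stats.plivo.com/<callinsights_request_path>
#   lookup:        the path segments joined as-is, with no base URI prefixed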
def create_multipart_request(self,
method,
path=None,
data=None,
files=None):
path = path or []
data_args = {}
if method == 'GET':
data_args['params'] = data
else:
data_args['data'] = data
try:
if files:
data_args['files'] = files
except Exception as e:
print(e)
url = '/'.join([self.base_uri, self.multipart_session.auth[0]] + list([str(p) for p in path])) + '/'
req = Request(method, url, **data_args)
return self.multipart_session.prepare_request(req)
def send_request(self, request, **kwargs):
if 'session' in kwargs:
session = kwargs['session']
del kwargs['session']
else:
session = self.session
return session.send(
request, proxies=self.proxies, timeout=self.timeout, **kwargs)
def request(self,
method,
path=None,
data=None,
response_type=None,
objects_type=None,
files=None,
**kwargs):
if files is not None:
req = self.create_multipart_request(method, path, data, files)
session = self.multipart_session
else:
if not kwargs.get("is_voice_request", False):
self.base_uri = PLIVO_API_BASE_URI
if data and 'is_callinsights_request' in data:
params_dict = {}
if 'callinsights_request_path' in data:
params_dict['is_callinsights_request'] = data['is_callinsights_request']
params_dict['callinsights_request_path'] = data['callinsights_request_path']
del data['is_callinsights_request']
del data['callinsights_request_path']
req = self.create_request(method, path, data, **params_dict)
elif kwargs.get("is_voice_request", False):
del kwargs["is_voice_request"]
if self.voice_retry_count == 0:
self.base_uri = API_VOICE_BASE_URI
req = self.create_request(method, path, data)
session = self.session
kwargs['session'] = session
response = self.send_request(req, **kwargs)
if response.status_code >= 500:
print('Fallback for URL: {}. Retry {}'.format(response.url, self.voice_retry_count))
self.voice_retry_count += 1
if self.voice_retry_count == 1:
self.base_uri = API_VOICE_BASE_URI_FALLBACK_1
elif self.voice_retry_count == 2:
self.base_uri = API_VOICE_BASE_URI_FALLBACK_2
else:
return self.process_response(method, response, response_type, objects_type)
kwargs["is_voice_request"] = True
return self.request(method, path, data, **kwargs)
return self.process_response(method, response, response_type, objects_type)
elif kwargs.get('is_lookup_request', False):
req = self.create_request(method, path, data, is_lookup_request=True)
del kwargs['is_lookup_request']
else:
req = self.create_request(method, path, data)
session = self.session
kwargs['session'] = session
res = self.send_request(req, **kwargs)
return self.process_response(method, res, response_type, objects_type)
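# Usage sketch (assumes PLIVO_AUTH_ID / PLIVO_AUTH_TOKEN are exported, or an
# explicit auth_id/auth_token pair is passed in):
#
#   with Client() as client:
#       ...  # resource helpers such as client.messages, client.calls and
#            # client.account all funnel through Client.request() above
#
# The context manager closes both the JSON and multipart sessions on exit.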
|
|
import re
from functools import partial
from inspect import signature
from typing import Any, Dict, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_bool
from zerver.lib.webhooks.common import (
check_send_webhook_message,
validate_extract_webhook_http_header,
)
from zerver.lib.webhooks.git import (
EMPTY_SHA,
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,
get_commits_comment_action_message,
get_issue_event_message,
get_pull_request_event_message,
get_push_commits_event_message,
get_push_tag_event_message,
get_remove_branch_event_message,
)
from zerver.models import UserProfile
def fixture_to_headers(fixture_name: str) -> Dict[str, Any]:
if fixture_name.startswith("build"):
return {} # Since there are 2 possible event types.
# Map "push_hook__push_commits_more_than_limit.json" into GitLab's
# HTTP event title "Push Hook".
return {"HTTP_X_GITLAB_EVENT": fixture_name.split("__")[0].replace("_", " ").title()}
def get_push_event_body(payload: Dict[str, Any]) -> str:
if payload.get("after") == EMPTY_SHA:
return get_remove_branch_event_body(payload)
return get_normal_push_event_body(payload)
def get_normal_push_event_body(payload: Dict[str, Any]) -> str:
compare_url = "{}/compare/{}...{}".format(
get_project_homepage(payload),
payload["before"],
payload["after"],
)
commits = [
{
"name": commit.get("author").get("name"),
"sha": commit.get("id"),
"message": commit.get("message"),
"url": commit.get("url"),
}
for commit in payload["commits"]
]
return get_push_commits_event_message(
get_user_name(payload),
compare_url,
get_branch_name(payload),
commits,
)
def get_remove_branch_event_body(payload: Dict[str, Any]) -> str:
return get_remove_branch_event_message(
get_user_name(payload),
get_branch_name(payload),
)
def get_tag_push_event_body(payload: Dict[str, Any]) -> str:
return get_push_tag_event_message(
get_user_name(payload),
get_tag_name(payload),
action="pushed" if payload.get("checkout_sha") else "removed",
)
def get_issue_created_event_body(payload: Dict[str, Any], include_title: bool = False) -> str:
description = payload["object_attributes"].get("description")
# Filter out multiline hidden comments
if description is not None:
description = re.sub("<!--.*?-->", "", description, 0, re.DOTALL)
description = description.rstrip()
return get_issue_event_message(
get_issue_user_name(payload),
"created",
get_object_url(payload),
payload["object_attributes"].get("iid"),
description,
assignees=replace_assignees_username_with_name(get_assignees(payload)),
title=payload["object_attributes"].get("title") if include_title else None,
)
def get_issue_event_body(payload: Dict[str, Any], action: str, include_title: bool = False) -> str:
return get_issue_event_message(
get_issue_user_name(payload),
action,
get_object_url(payload),
payload["object_attributes"].get("iid"),
title=payload["object_attributes"].get("title") if include_title else None,
)
def get_merge_request_updated_event_body(
payload: Dict[str, Any], include_title: bool = False
) -> str:
if payload["object_attributes"].get("oldrev"):
return get_merge_request_event_body(
payload,
"added commit(s) to",
include_title=include_title,
)
return get_merge_request_open_or_updated_body(
payload,
"updated",
include_title=include_title,
)
def get_merge_request_event_body(
payload: Dict[str, Any], action: str, include_title: bool = False
) -> str:
pull_request = payload["object_attributes"]
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get("url"),
pull_request.get("iid"),
type="MR",
title=payload["object_attributes"].get("title") if include_title else None,
)
def get_merge_request_open_or_updated_body(
payload: Dict[str, Any], action: str, include_title: bool = False
) -> str:
pull_request = payload["object_attributes"]
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get("url"),
pull_request.get("iid"),
pull_request.get("source_branch"),
pull_request.get("target_branch"),
pull_request.get("description"),
assignees=replace_assignees_username_with_name(get_assignees(payload)),
type="MR",
title=payload["object_attributes"].get("title") if include_title else None,
)
def get_assignees(payload: Dict[str, Any]) -> List[Dict[str, str]]:
assignee_details = payload.get("assignees")
if assignee_details is None:
single_assignee_details = payload.get("assignee")
if single_assignee_details is None:
assignee_details = []
else:
assignee_details = [single_assignee_details]
return assignee_details
def replace_assignees_username_with_name(assignees: List[Dict[str, str]]) -> List[Dict[str, str]]:
"""Replace the username of each assignee with their (full) name.
This is a hack-like adaptor so that when assignees are passed to
`get_pull_request_event_message` we can use the assignee's name
and not their username (for more consistency).
"""
for assignee in assignees:
assignee["username"] = assignee["name"]
return assignees
def get_commented_commit_event_body(payload: Dict[str, Any]) -> str:
comment = payload["object_attributes"]
action = "[commented]({})".format(comment["url"])
return get_commits_comment_action_message(
get_issue_user_name(payload),
action,
payload["commit"].get("url"),
payload["commit"].get("id"),
comment["note"],
)
def get_commented_merge_request_event_body(
payload: Dict[str, Any], include_title: bool = False
) -> str:
comment = payload["object_attributes"]
action = "[commented]({}) on".format(comment["url"])
url = "{}/merge_requests/{}".format(
payload["project"].get("web_url"),
payload["merge_request"].get("iid"),
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload["merge_request"].get("iid"),
message=comment["note"],
type="MR",
title=payload["merge_request"].get("title") if include_title else None,
)
def get_commented_issue_event_body(payload: Dict[str, Any], include_title: bool = False) -> str:
comment = payload["object_attributes"]
action = "[commented]({}) on".format(comment["url"])
url = "{}/issues/{}".format(
payload["project"].get("web_url"),
payload["issue"].get("iid"),
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload["issue"].get("iid"),
message=comment["note"],
type="issue",
title=payload["issue"].get("title") if include_title else None,
)
def get_commented_snippet_event_body(payload: Dict[str, Any], include_title: bool = False) -> str:
comment = payload["object_attributes"]
action = "[commented]({}) on".format(comment["url"])
url = "{}/snippets/{}".format(
payload["project"].get("web_url"),
payload["snippet"].get("id"),
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload["snippet"].get("id"),
message=comment["note"],
type="snippet",
title=payload["snippet"].get("title") if include_title else None,
)
def get_wiki_page_event_body(payload: Dict[str, Any], action: str) -> str:
return '{} {} [wiki page "{}"]({}).'.format(
get_issue_user_name(payload),
action,
payload["object_attributes"].get("title"),
payload["object_attributes"].get("url"),
)
def get_build_hook_event_body(payload: Dict[str, Any]) -> str:
build_status = payload.get("build_status")
if build_status == "created":
action = "was created"
elif build_status == "running":
action = "started"
else:
action = f"changed status to {build_status}"
return "Build {} from {} stage {}.".format(
payload.get("build_name"),
payload.get("build_stage"),
action,
)
def get_test_event_body(payload: Dict[str, Any]) -> str:
return f"Webhook for **{get_repo_name(payload)}** has been configured successfully! :tada:"
def get_pipeline_event_body(payload: Dict[str, Any]) -> str:
pipeline_status = payload["object_attributes"].get("status")
if pipeline_status == "pending":
action = "was created"
elif pipeline_status == "running":
action = "started"
else:
action = f"changed status to {pipeline_status}"
project_homepage = get_project_homepage(payload)
pipeline_url = "{}/pipelines/{}".format(
project_homepage,
payload["object_attributes"].get("id"),
)
builds_status = ""
for build in payload["builds"]:
build_url = "{}/-/jobs/{}".format(
project_homepage,
build.get("id"),
)
artifact_filename = build.get("artifacts_file", {}).get("filename", None)
if artifact_filename:
artifact_download_url = f"{build_url}/artifacts/download"
artifact_browse_url = f"{build_url}/artifacts/browse"
artifact_string = f" * built artifact: *{artifact_filename}* [[Browse]({artifact_browse_url})|[Download]({artifact_download_url})]\n"
else:
artifact_string = ""
builds_status += "* [{}]({}) - {}\n{}".format(
build.get("name"),
build_url,
build.get("status"),
artifact_string,
)
return "[Pipeline ({})]({}) {} with build(s):\n{}.".format(
payload["object_attributes"].get("id"),
pipeline_url,
action,
builds_status[:-1],
)
def get_repo_name(payload: Dict[str, Any]) -> str:
if "project" in payload:
return payload["project"]["name"]
# Apparently, Job Hook payloads don't have a `project` section,
# but the repository name is accessible from the `repository`
# section.
return payload["repository"]["name"]
def get_user_name(payload: Dict[str, Any]) -> str:
return payload["user_name"]
def get_issue_user_name(payload: Dict[str, Any]) -> str:
return payload["user"]["name"]
def get_project_homepage(payload: Dict[str, Any]) -> str:
if "project" in payload:
return payload["project"]["web_url"]
return payload["repository"]["homepage"]
def get_branch_name(payload: Dict[str, Any]) -> str:
return payload["ref"].replace("refs/heads/", "")
def get_tag_name(payload: Dict[str, Any]) -> str:
return payload["ref"].replace("refs/tags/", "")
def get_object_url(payload: Dict[str, Any]) -> str:
return payload["object_attributes"]["url"]
EVENT_FUNCTION_MAPPER = {
"Push Hook": get_push_event_body,
"Tag Push Hook": get_tag_push_event_body,
"Test Hook": get_test_event_body,
"Issue Hook open": get_issue_created_event_body,
"Issue Hook close": partial(get_issue_event_body, action="closed"),
"Issue Hook reopen": partial(get_issue_event_body, action="reopened"),
"Issue Hook update": partial(get_issue_event_body, action="updated"),
"Confidential Issue Hook open": get_issue_created_event_body,
"Confidential Issue Hook close": partial(get_issue_event_body, action="closed"),
"Confidential Issue Hook reopen": partial(get_issue_event_body, action="reopened"),
"Confidential Issue Hook update": partial(get_issue_event_body, action="updated"),
"Note Hook Commit": get_commented_commit_event_body,
"Note Hook MergeRequest": get_commented_merge_request_event_body,
"Note Hook Issue": get_commented_issue_event_body,
"Confidential Note Hook Issue": get_commented_issue_event_body,
"Note Hook Snippet": get_commented_snippet_event_body,
"Merge Request Hook approved": partial(get_merge_request_event_body, action="approved"),
"Merge Request Hook unapproved": partial(get_merge_request_event_body, action="unapproved"),
"Merge Request Hook open": partial(get_merge_request_open_or_updated_body, action="created"),
"Merge Request Hook update": get_merge_request_updated_event_body,
"Merge Request Hook merge": partial(get_merge_request_event_body, action="merged"),
"Merge Request Hook close": partial(get_merge_request_event_body, action="closed"),
"Merge Request Hook reopen": partial(get_merge_request_event_body, action="reopened"),
"Wiki Page Hook create": partial(get_wiki_page_event_body, action="created"),
"Wiki Page Hook update": partial(get_wiki_page_event_body, action="updated"),
"Job Hook": get_build_hook_event_body,
"Build Hook": get_build_hook_event_body,
"Pipeline Hook": get_pipeline_event_body,
}
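# The keys above are the X-Gitlab-Event header value, optionally suffixed by
# get_event() below with the payload's "action" (issue, merge request and wiki
# hooks) or its "noteable_type" (note hooks); e.g. a merge request payload with
# action "open" is dispatched as "Merge Request Hook open".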
@webhook_view("GitLab")
@has_request_variables
def api_gitlab_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
branches: Optional[str] = REQ(default=None),
use_merge_request_title: bool = REQ(default=True, json_validator=check_bool),
user_specified_topic: Optional[str] = REQ("topic", default=None),
) -> HttpResponse:
event = get_event(request, payload, branches)
if event is not None:
event_body_function = get_body_based_on_event(event)
if "include_title" in signature(event_body_function).parameters:
body = event_body_function(
payload,
include_title=user_specified_topic is not None,
)
else:
body = event_body_function(payload)
# Add a link to the project if a custom topic is set
if user_specified_topic:
project_url = f"[{get_repo_name(payload)}]({get_project_homepage(payload)})"
body = f"[{project_url}] {body}"
topic = get_subject_based_on_event(event, payload, use_merge_request_title)
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
def get_body_based_on_event(event: str) -> Any:
return EVENT_FUNCTION_MAPPER[event]
def get_subject_based_on_event(
event: str, payload: Dict[str, Any], use_merge_request_title: bool
) -> str:
if event == "Push Hook":
return f"{get_repo_name(payload)} / {get_branch_name(payload)}"
elif event == "Job Hook" or event == "Build Hook":
return "{} / {}".format(payload["repository"].get("name"), get_branch_name(payload))
elif event == "Pipeline Hook":
return "{} / {}".format(
get_repo_name(payload),
payload["object_attributes"].get("ref").replace("refs/heads/", ""),
)
elif event.startswith("Merge Request Hook"):
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type="MR",
id=payload["object_attributes"].get("iid"),
title=payload["object_attributes"].get("title") if use_merge_request_title else "",
)
elif event.startswith("Issue Hook") or event.startswith("Confidential Issue Hook"):
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type="issue",
id=payload["object_attributes"].get("iid"),
title=payload["object_attributes"].get("title"),
)
elif event == "Note Hook Issue" or event == "Confidential Note Hook Issue":
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type="issue",
id=payload["issue"].get("iid"),
title=payload["issue"].get("title"),
)
elif event == "Note Hook MergeRequest":
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type="MR",
id=payload["merge_request"].get("iid"),
title=payload["merge_request"].get("title") if use_merge_request_title else "",
)
elif event == "Note Hook Snippet":
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type="snippet",
id=payload["snippet"].get("id"),
title=payload["snippet"].get("title"),
)
return get_repo_name(payload)
def get_event(
request: HttpRequest, payload: Dict[str, Any], branches: Optional[str]
) -> Optional[str]:
event = validate_extract_webhook_http_header(request, "X_GITLAB_EVENT", "GitLab")
if event == "System Hook":
# Convert the event name to a GitLab event title
event_name = payload.get("event_name", payload.get("object_kind"))
event = event_name.split("__")[0].replace("_", " ").title()
event = f"{event} Hook"
if event in ["Confidential Issue Hook", "Issue Hook", "Merge Request Hook", "Wiki Page Hook"]:
action = payload["object_attributes"].get("action", "open")
event = f"{event} {action}"
elif event in ["Confidential Note Hook", "Note Hook"]:
action = payload["object_attributes"].get("noteable_type")
event = f"{event} {action}"
elif event == "Push Hook":
if branches is not None:
branch = get_branch_name(payload)
if branches.find(branch) == -1:
return None
if event in EVENT_FUNCTION_MAPPER:
return event
raise UnsupportedWebhookEventType(event)
|
|
import os
import socket
import StringIO
import traceback
import requests
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from kombu import Connection
from PIL import Image
import redis as redislib
import olympia.core.logger
from olympia.amo import search
from olympia.amo.templatetags.jinja_helpers import user_media_path
from olympia.applications.management.commands import dump_apps
monitor_log = olympia.core.logger.getLogger('z.monitor')
def memcache():
memcache = getattr(settings, 'CACHES', {}).get('default')
memcache_results = []
status = ''
if memcache and 'memcache' in memcache['BACKEND']:
hosts = memcache['LOCATION']
using_twemproxy = False
if not isinstance(hosts, (tuple, list)):
hosts = [hosts]
for host in hosts:
ip, port = host.split(':')
if ip == '127.0.0.1':
using_twemproxy = True
try:
s = socket.socket()
s.connect((ip, int(port)))
except Exception, e:
result = False
status = 'Failed to connect to memcached (%s): %s' % (host, e)
monitor_log.critical(status)
else:
result = True
finally:
s.close()
memcache_results.append((ip, port, result))
if not using_twemproxy and len(memcache_results) < 2:
status = ('2+ memcache servers are required. '
'%s available') % len(memcache_results)
monitor_log.warning(status)
if not memcache_results:
status = 'Memcache is not configured'
monitor_log.info(status)
return status, memcache_results
def libraries():
# Check Libraries and versions
libraries_results = []
status = ''
try:
Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
libraries_results.append(('PIL+JPEG', True, 'Got it!'))
except Exception, e:
msg = "Failed to create a jpeg image: %s" % e
libraries_results.append(('PIL+JPEG', False, msg))
missing_libs = [l for l, s, m in libraries_results if not s]
if missing_libs:
status = 'missing libs: %s' % ",".join(missing_libs)
return status, libraries_results
def elastic():
elastic_results = None
status = ''
try:
es = search.get_es()
health = es.cluster.health()
if health['status'] == 'red':
status = 'ES is red'
elastic_results = health
except Exception:
elastic_results = {'exception': traceback.format_exc()}
return status, elastic_results
def path():
# Check file paths / permissions
rw = (settings.TMP_PATH,
settings.MEDIA_ROOT,
user_media_path('addons'),
user_media_path('guarded_addons'),
user_media_path('addon_icons'),
user_media_path('collection_icons'),
user_media_path('previews'),
user_media_path('userpics'),
user_media_path('reviewer_attachments'),
dump_apps.Command.JSON_PATH,)
r = [os.path.join(settings.ROOT, 'locale'),
# The deploy process will want write access to this.
# We do not want Django to have write access though.
settings.PROD_DETAILS_DIR]
filepaths = [(path, os.R_OK | os.W_OK, 'We want read + write')
for path in rw]
filepaths += [(path, os.R_OK, 'We want read') for path in r]
filepath_results = []
filepath_status = True
for path, perms, notes in filepaths:
path_exists = os.path.exists(path)
path_perms = os.access(path, perms)
filepath_status = filepath_status and path_exists and path_perms
if not isinstance(path, str):
notes += ' / should be a bytestring!'
filepath_results.append((path, path_exists, path_perms, notes))
status = ''
if not filepath_status:
status = 'check main status page for broken perms / values'
return status, filepath_results
def rabbitmq():
# Check rabbitmq
rabbitmq_results = []
status = ''
with Connection(settings.CELERY_BROKER_URL, connect_timeout=2) as broker:
hostname = broker.hostname
try:
broker.connect()
rabbitmq_results.append((hostname, True))
except Exception, e:
rabbitmq_results.append((hostname, False))
status = 'Failed to chat with rabbitmq %s: %s' % (hostname, e)
monitor_log.critical(status)
return status, rabbitmq_results
def redis():
# Check Redis
redis_results = [None, 'REDIS_BACKENDS is not set']
status = 'REDIS_BACKENDS is not set'
if getattr(settings, 'REDIS_BACKENDS', False):
status = []
redis_results = {}
for alias, backend in settings.REDIS_BACKENDS.items():
if not isinstance(backend, dict):
raise ImproperlyConfigured(
'REDIS_BACKENDS is now required to be a dictionary.')
host = backend.get('HOST')
port = backend.get('PORT')
db = backend.get('DB', 0)
password = backend.get('PASSWORD', None)
socket_timeout = backend.get('OPTIONS', {}).get('socket_timeout')
try:
redis_connection = redislib.Redis(
host=host, port=port, db=db, password=password,
socket_timeout=socket_timeout)
redis_results[alias] = redis_connection.info()
except Exception, e:
redis_results[alias] = None
status.append('Failed to chat with redis:%s' % alias)
monitor_log.critical('Failed to chat with redis: (%s)' % e)
status = ','.join(status)
return status, redis_results
def signer():
# Check Signing Server Endpoint
signer_results = None
status = ''
if getattr(settings, 'SIGNING_SERVER', False):
try:
response = requests.get('%s/status' % settings.SIGNING_SERVER,
timeout=settings.SIGNING_SERVER_TIMEOUT)
if response.status_code != 200:
status = (
'Failed to chat with signing service. '
'Invalid HTTP response code.')
monitor_log.critical(status)
signer_results = False
else:
signer_results = True
except Exception, e:
status = 'Failed to chat with signing service: %s' % e
monitor_log.critical(status)
signer_results = False
else:
status = 'SIGNING_SERVER is not set'
monitor_log.critical(status)
signer_results = False
return status, signer_results
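# Each check above follows the same contract: it returns a (status, results)
# tuple where an empty/falsy status means the check passed and results carries
# whatever diagnostic detail the status page wants to render.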
|
|
# -*- coding: utf-8 -*-
"""
Unit tests for the neo_tools module.
:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
from itertools import chain
import unittest
from neo.test.generate_datasets import fake_neo, get_fake_values
from neo.test.tools import assert_same_sub_schema
from numpy.testing.utils import assert_array_equal
import elephant.neo_tools as nt
# A list of neo object attributes that contain arrays.
ARRAY_ATTRS = ['waveforms',
'times',
'durations',
'labels',
'index',
'channel_names',
'channel_ids',
'coordinates',
'array_annotations'
]
def strip_iter_values(targ, array_attrs=ARRAY_ATTRS):
"""Remove iterable, non-string values from a dictionary.
`elephant.neo_tools.extract_neo_attrs` automatically strips out
non-scalar values from attributes. This function does the same to a
manually-extracted dictionary.
Parameters
----------
targ : dict
The dictionary of values to process.
array_attrs : list of str objects, optional
The list of attribute names to remove. If not specified,
uses `elephant.test.test_neo_tools.ARRAY_ATTRS`.
Returns
-------
dict
A copy of `targ` with the target values (if present) removed.
Notes
-----
Always returns a copy, even if nothing was removed.
This function has the values to remove hard-coded. This is intentional
to make sure that `extract_neo_attrs` is removing all the attributes
it is supposed to and only the attributes it is supposed to. Please do
NOT change this to any sort of automatic detection; if it is missing
values, please add them manually.
"""
targ = targ.copy()
for attr in array_attrs:
targ.pop(attr, None)
return targ
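# For illustration (hypothetical dict): strip_iter_values({'name': 'st',
# 'times': [1, 2]}) would return {'name': 'st'}, since 'times' is listed in
# ARRAY_ATTRS while 'name' is not.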
class GetAllObjsTestCase(unittest.TestCase):
def test__get_all_objs__float_valueerror(self):
value = 5.
with self.assertRaises(ValueError):
nt._get_all_objs(value, 'Block')
def test__get_all_objs__list_float_valueerror(self):
value = [5.]
with self.assertRaises(ValueError):
nt._get_all_objs(value, 'Block')
def test__get_all_objs__epoch_for_event_valueerror(self):
value = fake_neo('Epoch', n=10, seed=0)
with self.assertRaises(ValueError):
nt._get_all_objs(value, 'Event')
def test__get_all_objs__empty_list(self):
targ = []
value = []
res = nt._get_all_objs(value, 'Block')
self.assertEqual(targ, res)
def test__get_all_objs__empty_nested_list(self):
targ = []
value = [[], [[], [[]]]]
res = nt._get_all_objs(value, 'Block')
self.assertEqual(targ, res)
def test__get_all_objs__empty_dict(self):
targ = []
value = {}
res = nt._get_all_objs(value, 'Block')
self.assertEqual(targ, res)
def test__get_all_objs__empty_nested_dict(self):
targ = []
value = {'a': {}, 'b': {'c': {}, 'd': {'e': {}}}}
res = nt._get_all_objs(value, 'Block')
self.assertEqual(targ, res)
def test__get_all_objs__empty_itert(self):
targ = []
value = iter([])
res = nt._get_all_objs(value, 'Block')
self.assertEqual(targ, res)
def test__get_all_objs__empty_nested_iter(self):
targ = []
value = iter([iter([]), iter([iter([]), iter([iter([])])])])
res = nt._get_all_objs(value, 'Block')
self.assertEqual(targ, res)
def test__get_all_objs__empty_nested_many(self):
targ = []
value = iter([[], {'c': [], 'd':(iter([]),)}])
res = nt._get_all_objs(value, 'Block')
self.assertEqual(targ, res)
def test__get_all_objs__spiketrain(self):
targ = [fake_neo('SpikeTrain', n=10, seed=0)]
value = fake_neo('SpikeTrain', n=10, seed=0)
res = nt._get_all_objs(value, 'SpikeTrain')
assert_same_sub_schema(targ, res)
def test__get_all_objs__list_spiketrain(self):
targ = [fake_neo('SpikeTrain', n=10, seed=0),
fake_neo('SpikeTrain', n=10, seed=1)]
value = [fake_neo('SpikeTrain', n=10, seed=0),
fake_neo('SpikeTrain', n=10, seed=1)]
res = nt._get_all_objs(value, 'SpikeTrain')
assert_same_sub_schema(targ, res)
def test__get_all_objs__nested_list_epoch(self):
targ = [fake_neo('Epoch', n=10, seed=0),
fake_neo('Epoch', n=10, seed=1)]
value = [[fake_neo('Epoch', n=10, seed=0)],
fake_neo('Epoch', n=10, seed=1)]
res = nt._get_all_objs(value, 'Epoch')
assert_same_sub_schema(targ, res)
def test__get_all_objs__iter_spiketrain(self):
targ = [fake_neo('SpikeTrain', n=10, seed=0),
fake_neo('SpikeTrain', n=10, seed=1)]
value = iter([fake_neo('SpikeTrain', n=10, seed=0),
fake_neo('SpikeTrain', n=10, seed=1)])
res = nt._get_all_objs(value, 'SpikeTrain')
assert_same_sub_schema(targ, res)
def test__get_all_objs__nested_iter_epoch(self):
targ = [fake_neo('Epoch', n=10, seed=0),
fake_neo('Epoch', n=10, seed=1)]
value = iter([iter([fake_neo('Epoch', n=10, seed=0)]),
fake_neo('Epoch', n=10, seed=1)])
res = nt._get_all_objs(value, 'Epoch')
assert_same_sub_schema(targ, res)
def test__get_all_objs__dict_spiketrain(self):
targ = [fake_neo('SpikeTrain', n=10, seed=0),
fake_neo('SpikeTrain', n=10, seed=1)]
value = {'a': fake_neo('SpikeTrain', n=10, seed=0),
'b': fake_neo('SpikeTrain', n=10, seed=1)}
res = nt._get_all_objs(value, 'SpikeTrain')
self.assertEqual(len(targ), len(res))
for i, itarg in enumerate(targ):
for ires in res:
if itarg.annotations['seed'] == ires.annotations['seed']:
assert_same_sub_schema(itarg, ires)
break
else:
raise ValueError('Target %s not in result' % i)
def test__get_all_objs__nested_dict_spiketrain(self):
targ = [fake_neo('SpikeTrain', n=10, seed=0),
fake_neo('SpikeTrain', n=10, seed=1)]
value = {'a': fake_neo('SpikeTrain', n=10, seed=0),
'b': {'c': fake_neo('SpikeTrain', n=10, seed=1)}}
res = nt._get_all_objs(value, 'SpikeTrain')
self.assertEqual(len(targ), len(res))
for i, itarg in enumerate(targ):
for ires in res:
if itarg.annotations['seed'] == ires.annotations['seed']:
assert_same_sub_schema(itarg, ires)
break
else:
raise ValueError('Target %s not in result' % i)
def test__get_all_objs__nested_many_spiketrain(self):
targ = [fake_neo('SpikeTrain', n=10, seed=0),
fake_neo('SpikeTrain', n=10, seed=1)]
value = {'a': [fake_neo('SpikeTrain', n=10, seed=0)],
'b': iter([fake_neo('SpikeTrain', n=10, seed=1)])}
res = nt._get_all_objs(value, 'SpikeTrain')
self.assertEqual(len(targ), len(res))
for i, itarg in enumerate(targ):
for ires in res:
if itarg.annotations['seed'] == ires.annotations['seed']:
assert_same_sub_schema(itarg, ires)
break
else:
raise ValueError('Target %s not in result' % i)
def test__get_all_objs__unit_spiketrain(self):
value = fake_neo('Unit', n=3, seed=0)
targ = [fake_neo('SpikeTrain', n=3, seed=train.annotations['seed'])
for train in value.spiketrains]
for train in value.spiketrains:
train.annotations.pop('i', None)
train.annotations.pop('j', None)
res = nt._get_all_objs(value, 'SpikeTrain')
assert_same_sub_schema(targ, res)
def test__get_all_objs__block_epoch(self):
value = fake_neo('Block', n=3, seed=0)
targ = [fake_neo('Epoch', n=3, seed=train.annotations['seed'])
for train in value.list_children_by_class('Epoch')]
for epoch in value.list_children_by_class('Epoch'):
epoch.annotations.pop('i', None)
epoch.annotations.pop('j', None)
res = nt._get_all_objs(value, 'Epoch')
assert_same_sub_schema(targ, res)
class ExtractNeoAttrsTestCase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.block = fake_neo('Block', seed=0)
def assert_dicts_equal(self, d1, d2):
"""Assert that two dictionaries are equal, taking into account arrays.
Normally, `unittest.TestCase.assertEqual` doesn't work with
dictionaries containing arrays. This works around that.
Parameters
----------
d1, d2 : dict
The dictionaries to compare
Returns
-------
Nothing
Raises
------
AssertionError : If the `d1` and `d2` are not equal.
"""
try:
self.assertEqual(d1, d2)
except ValueError:
for (key1, value1), (key2, value2) in zip(sorted(d1.items()),
sorted(d2.items())):
self.assertEqual(key1, key2)
try:
if hasattr(value1, 'keys') and hasattr(value2, 'keys'):
self.assert_dicts_equal(value1, value2)
elif hasattr(value1, 'dtype') and hasattr(value2, 'dtype'):
assert_array_equal(value1, value2)
else:
self.assertEqual(value1, value2)
except BaseException as exc:
exc.args += ('key: %s' % key1,)
raise
def test__extract_neo_attrs__spiketrain_noarray(self):
obj = fake_neo('SpikeTrain', seed=0)
targ = get_fake_values('SpikeTrain', seed=0)
targ = strip_iter_values(targ)
res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True)
res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True)
res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
self.assertEqual(targ, res00)
self.assertEqual(targ, res10)
self.assertEqual(targ, res20)
self.assertEqual(targ, res01)
self.assertEqual(targ, res11)
self.assertEqual(targ, res21)
def test__extract_neo_attrs__spiketrain_noarray_skip_none(self):
obj = fake_neo('SpikeTrain', seed=0)
targ = get_fake_values('SpikeTrain', seed=0)
targ = strip_iter_values(targ)
for key, value in targ.copy().items():
if value is None:
del targ[key]
res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
skip_none=True)
res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True, skip_none=True)
res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False, skip_none=True)
res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True,
skip_none=True)
res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True, skip_none=True)
res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False, skip_none=True)
self.assertEqual(targ, res00)
self.assertEqual(targ, res10)
self.assertEqual(targ, res20)
self.assertEqual(targ, res01)
self.assertEqual(targ, res11)
self.assertEqual(targ, res21)
def test__extract_neo_attrs__epoch_noarray(self):
obj = fake_neo('Epoch', seed=0)
targ = get_fake_values('Epoch', seed=0)
targ = strip_iter_values(targ)
res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True)
res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True)
res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
self.assertEqual(targ, res00)
self.assertEqual(targ, res10)
self.assertEqual(targ, res20)
self.assertEqual(targ, res01)
self.assertEqual(targ, res11)
self.assertEqual(targ, res21)
def test__extract_neo_attrs__event_noarray(self):
obj = fake_neo('Event', seed=0)
targ = get_fake_values('Event', seed=0)
targ = strip_iter_values(targ)
res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=True)
res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
res01 = nt.extract_neo_attrs(obj, parents=True, skip_array=True)
res11 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res21 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
self.assertEqual(targ, res00)
self.assertEqual(targ, res10)
self.assertEqual(targ, res20)
self.assertEqual(targ, res01)
self.assertEqual(targ, res11)
self.assertEqual(targ, res21)
def test__extract_neo_attrs__spiketrain_parents_empty_array(self):
obj = fake_neo('SpikeTrain', seed=0)
targ = get_fake_values('SpikeTrain', seed=0)
del targ['times']
res000 = nt.extract_neo_attrs(obj, parents=False)
res100 = nt.extract_neo_attrs(obj, parents=False, child_first=True)
res200 = nt.extract_neo_attrs(obj, parents=False, child_first=False)
res010 = nt.extract_neo_attrs(obj, parents=False, skip_array=False)
res110 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=True)
res210 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=False)
res001 = nt.extract_neo_attrs(obj, parents=True)
res101 = nt.extract_neo_attrs(obj, parents=True, child_first=True)
res201 = nt.extract_neo_attrs(obj, parents=True, child_first=False)
res011 = nt.extract_neo_attrs(obj, parents=True, skip_array=False)
res111 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=True)
res211 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=False)
self.assert_dicts_equal(targ, res000)
self.assert_dicts_equal(targ, res100)
self.assert_dicts_equal(targ, res200)
self.assert_dicts_equal(targ, res010)
self.assert_dicts_equal(targ, res110)
self.assert_dicts_equal(targ, res210)
self.assert_dicts_equal(targ, res001)
self.assert_dicts_equal(targ, res101)
self.assert_dicts_equal(targ, res201)
self.assert_dicts_equal(targ, res011)
self.assert_dicts_equal(targ, res111)
self.assert_dicts_equal(targ, res211)
@staticmethod
def _fix_neo_issue_749(obj, targ):
# TODO: remove once fixed
# https://github.com/NeuralEnsemble/python-neo/issues/749
num_times = len(targ['times'])
obj = obj[:num_times]
del targ['array_annotations']
return obj
def test__extract_neo_attrs__epoch_parents_empty_array(self):
obj = fake_neo('Epoch', seed=0)
targ = get_fake_values('Epoch', seed=0)
obj = self._fix_neo_issue_749(obj, targ)
del targ['times']
res000 = nt.extract_neo_attrs(obj, parents=False)
res100 = nt.extract_neo_attrs(obj, parents=False, child_first=True)
res200 = nt.extract_neo_attrs(obj, parents=False, child_first=False)
res010 = nt.extract_neo_attrs(obj, parents=False, skip_array=False)
res110 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=True)
res210 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=False)
res001 = nt.extract_neo_attrs(obj, parents=True)
res101 = nt.extract_neo_attrs(obj, parents=True, child_first=True)
res201 = nt.extract_neo_attrs(obj, parents=True, child_first=False)
res011 = nt.extract_neo_attrs(obj, parents=True, skip_array=False)
res111 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=True)
res211 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=False)
self.assert_dicts_equal(targ, res000)
self.assert_dicts_equal(targ, res100)
self.assert_dicts_equal(targ, res200)
self.assert_dicts_equal(targ, res010)
self.assert_dicts_equal(targ, res110)
self.assert_dicts_equal(targ, res210)
self.assert_dicts_equal(targ, res001)
self.assert_dicts_equal(targ, res101)
self.assert_dicts_equal(targ, res201)
self.assert_dicts_equal(targ, res011)
self.assert_dicts_equal(targ, res111)
self.assert_dicts_equal(targ, res211)
def test__extract_neo_attrs__event_parents_empty_array(self):
obj = fake_neo('Event', seed=0)
targ = get_fake_values('Event', seed=0)
del targ['times']
res000 = nt.extract_neo_attrs(obj, parents=False)
res100 = nt.extract_neo_attrs(obj, parents=False, child_first=True)
res200 = nt.extract_neo_attrs(obj, parents=False, child_first=False)
res010 = nt.extract_neo_attrs(obj, parents=False, skip_array=False)
res110 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=True)
res210 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=False)
res001 = nt.extract_neo_attrs(obj, parents=True)
res101 = nt.extract_neo_attrs(obj, parents=True, child_first=True)
res201 = nt.extract_neo_attrs(obj, parents=True, child_first=False)
res011 = nt.extract_neo_attrs(obj, parents=True, skip_array=False)
res111 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=True)
res211 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=False)
self.assert_dicts_equal(targ, res000)
self.assert_dicts_equal(targ, res100)
self.assert_dicts_equal(targ, res200)
self.assert_dicts_equal(targ, res010)
self.assert_dicts_equal(targ, res110)
self.assert_dicts_equal(targ, res210)
self.assert_dicts_equal(targ, res001)
self.assert_dicts_equal(targ, res101)
self.assert_dicts_equal(targ, res201)
self.assert_dicts_equal(targ, res011)
self.assert_dicts_equal(targ, res111)
self.assert_dicts_equal(targ, res211)
def test__extract_neo_attrs__spiketrain_noparents_noarray(self):
obj = self.block.list_children_by_class('SpikeTrain')[0]
targ = get_fake_values('SpikeTrain', seed=obj.annotations['seed'])
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=False, skip_array=True)
res1 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res2 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
del res0['i']
del res1['i']
del res2['i']
del res0['j']
del res1['j']
del res2['j']
self.assertEqual(targ, res0)
self.assertEqual(targ, res1)
self.assertEqual(targ, res2)
def test__extract_neo_attrs__epoch_noparents_noarray(self):
obj = self.block.list_children_by_class('Epoch')[0]
targ = get_fake_values('Epoch', seed=obj.annotations['seed'])
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=False, skip_array=True)
res1 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res2 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
del res0['i']
del res1['i']
del res2['i']
del res0['j']
del res1['j']
del res2['j']
self.assertEqual(targ, res0)
self.assertEqual(targ, res1)
self.assertEqual(targ, res2)
def test__extract_neo_attrs__event_noparents_noarray(self):
obj = self.block.list_children_by_class('Event')[0]
targ = get_fake_values('Event', seed=obj.annotations['seed'])
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=False, skip_array=True)
res1 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=True)
res2 = nt.extract_neo_attrs(obj, parents=False, skip_array=True,
child_first=False)
del res0['i']
del res1['i']
del res2['i']
del res0['j']
del res1['j']
del res2['j']
self.assertEqual(targ, res0)
self.assertEqual(targ, res1)
self.assertEqual(targ, res2)
def test__extract_neo_attrs__spiketrain_noparents_array(self):
obj = self.block.list_children_by_class('SpikeTrain')[0]
targ = get_fake_values('SpikeTrain', seed=obj.annotations['seed'])
del targ['times']
res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=False)
res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=True)
res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=False)
res01 = nt.extract_neo_attrs(obj, parents=False)
res11 = nt.extract_neo_attrs(obj, parents=False, child_first=True)
res21 = nt.extract_neo_attrs(obj, parents=False, child_first=False)
del res00['i']
del res10['i']
del res20['i']
del res01['i']
del res11['i']
del res21['i']
del res00['j']
del res10['j']
del res20['j']
del res01['j']
del res11['j']
del res21['j']
self.assert_dicts_equal(targ, res00)
self.assert_dicts_equal(targ, res10)
self.assert_dicts_equal(targ, res20)
self.assert_dicts_equal(targ, res01)
self.assert_dicts_equal(targ, res11)
self.assert_dicts_equal(targ, res21)
def test__extract_neo_attrs__epoch_noparents_array(self):
obj = self.block.list_children_by_class('Epoch')[0]
targ = get_fake_values('Epoch', seed=obj.annotations['seed'])
# 'times' is not in obj._necessary_attrs + obj._recommended_attrs
obj = self._fix_neo_issue_749(obj, targ)
del targ['times']
res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=False)
res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=True)
res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=False)
res01 = nt.extract_neo_attrs(obj, parents=False)
res11 = nt.extract_neo_attrs(obj, parents=False, child_first=True)
res21 = nt.extract_neo_attrs(obj, parents=False, child_first=False)
del res00['i']
del res10['i']
del res20['i']
del res01['i']
del res11['i']
del res21['i']
del res00['j']
del res10['j']
del res20['j']
del res01['j']
del res11['j']
del res21['j']
self.assert_dicts_equal(targ, res00)
self.assert_dicts_equal(targ, res10)
self.assert_dicts_equal(targ, res20)
self.assert_dicts_equal(targ, res01)
self.assert_dicts_equal(targ, res11)
self.assert_dicts_equal(targ, res21)
def test__extract_neo_attrs__event_noparents_array(self):
obj = self.block.list_children_by_class('Event')[0]
targ = get_fake_values('Event', seed=obj.annotations['seed'])
del targ['times']
res00 = nt.extract_neo_attrs(obj, parents=False, skip_array=False)
res10 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=True)
res20 = nt.extract_neo_attrs(obj, parents=False, skip_array=False,
child_first=False)
res01 = nt.extract_neo_attrs(obj, parents=False)
res11 = nt.extract_neo_attrs(obj, parents=False, child_first=True)
res21 = nt.extract_neo_attrs(obj, parents=False, child_first=False)
del res00['i']
del res10['i']
del res20['i']
del res01['i']
del res11['i']
del res21['i']
del res00['j']
del res10['j']
del res20['j']
del res01['j']
del res11['j']
del res21['j']
self.assert_dicts_equal(targ, res00)
self.assert_dicts_equal(targ, res10)
self.assert_dicts_equal(targ, res20)
self.assert_dicts_equal(targ, res01)
self.assert_dicts_equal(targ, res11)
self.assert_dicts_equal(targ, res21)
def test__extract_neo_attrs__spiketrain_parents_childfirst_noarray(self):
obj = self.block.list_children_by_class('SpikeTrain')[0]
blk = self.block
seg = self.block.segments[0]
rcg = self.block.channel_indexes[0]
unit = self.block.channel_indexes[0].units[0]
targ = get_fake_values('Block', seed=blk.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('ChannelIndex',
seed=rcg.annotations['seed']))
targ.update(get_fake_values('Unit', seed=unit.annotations['seed']))
targ.update(get_fake_values('SpikeTrain',
seed=obj.annotations['seed']))
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True)
res1 = nt.extract_neo_attrs(obj, parents=True, skip_array=True,
child_first=True)
del res0['i']
del res1['i']
del res0['j']
del res1['j']
del res0['index'] # name clash between Block.index and ChannelIndex.index
del res1['index']
self.assertEqual(targ, res0)
self.assertEqual(targ, res1)
def test__extract_neo_attrs__epoch_parents_childfirst_noarray(self):
obj = self.block.list_children_by_class('Epoch')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Block', seed=blk.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Epoch', seed=obj.annotations['seed']))
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True)
res1 = nt.extract_neo_attrs(obj, parents=True, skip_array=True,
child_first=True)
del res0['i']
del res1['i']
del res0['j']
del res1['j']
del res0['index'] # name clash between Block.index and ChannelIndex.index
del res1['index']
self.assertEqual(targ, res0)
self.assertEqual(targ, res1)
def test__extract_neo_attrs__event_parents_childfirst_noarray(self):
obj = self.block.list_children_by_class('Event')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Block', seed=blk.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Event', seed=obj.annotations['seed']))
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True)
res1 = nt.extract_neo_attrs(obj, parents=True, skip_array=True,
child_first=True)
del res0['i']
del res1['i']
del res0['j']
del res1['j']
del res0['index'] # name clash between Block.index and ChannelIndex.index
del res1['index']
self.assertEqual(targ, res0)
self.assertEqual(targ, res1)
def test__extract_neo_attrs__spiketrain_parents_parentfirst_noarray(self):
obj = self.block.list_children_by_class('SpikeTrain')[0]
blk = self.block
seg = self.block.segments[0]
rcg = self.block.channel_indexes[0]
unit = self.block.channel_indexes[0].units[0]
targ = get_fake_values('SpikeTrain', seed=obj.annotations['seed'])
targ.update(get_fake_values('Unit', seed=unit.annotations['seed']))
targ.update(get_fake_values('ChannelIndex',
seed=rcg.annotations['seed']))
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Block', seed=blk.annotations['seed']))
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True,
child_first=False)
del res0['i']
del res0['j']
del res0['index'] # name clash between Block.index and ChannelIndex.index
self.assertEqual(targ, res0)
def test__extract_neo_attrs__epoch_parents_parentfirst_noarray(self):
obj = self.block.list_children_by_class('Epoch')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Epoch', seed=obj.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Block', seed=blk.annotations['seed']))
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True,
child_first=False)
del res0['i']
del res0['j']
del res0['index'] # name clash between Block.index and ChannelIndex.index
self.assertEqual(targ, res0)
def test__extract_neo_attrs__event_parents_parentfirst_noarray(self):
obj = self.block.list_children_by_class('Event')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Event', seed=obj.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Block', seed=blk.annotations['seed']))
targ = strip_iter_values(targ)
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=True,
child_first=False)
del res0['i']
del res0['j']
del res0['index'] # name clash between Block.index and ChannelIndex.index
self.assertEqual(targ, res0)
def test__extract_neo_attrs__spiketrain_parents_childfirst_array(self):
obj = self.block.list_children_by_class('SpikeTrain')[0]
blk = self.block
seg = self.block.segments[0]
rcg = self.block.channel_indexes[0]
unit = self.block.channel_indexes[0].units[0]
targ = get_fake_values('Block', seed=blk.annotations['seed'])
targ.update(get_fake_values('ChannelIndex',
seed=rcg.annotations['seed']))
targ['channel_names'] = rcg.channel_names
targ.update(get_fake_values('Unit', seed=unit.annotations['seed']))
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('SpikeTrain',
seed=obj.annotations['seed']))
del targ['times']
res00 = nt.extract_neo_attrs(obj, parents=True, skip_array=False)
res10 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=True)
res01 = nt.extract_neo_attrs(obj, parents=True)
res11 = nt.extract_neo_attrs(obj, parents=True, child_first=True)
del res00['i']
del res10['i']
del res01['i']
del res11['i']
del res00['j']
del res10['j']
del res01['j']
del res11['j']
self.assert_dicts_equal(targ, res00)
self.assert_dicts_equal(targ, res10)
self.assert_dicts_equal(targ, res01)
self.assert_dicts_equal(targ, res11)
def test__extract_neo_attrs__epoch_parents_childfirst_array(self):
obj = self.block.list_children_by_class('Epoch')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Block', seed=blk.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Epoch', seed=obj.annotations['seed']))
obj = self._fix_neo_issue_749(obj, targ)
del targ['times']
res00 = nt.extract_neo_attrs(obj, parents=True, skip_array=False)
res10 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=True)
res01 = nt.extract_neo_attrs(obj, parents=True)
res11 = nt.extract_neo_attrs(obj, parents=True, child_first=True)
del res00['i']
del res10['i']
del res01['i']
del res11['i']
del res00['j']
del res10['j']
del res01['j']
del res11['j']
self.assert_dicts_equal(targ, res00)
self.assert_dicts_equal(targ, res10)
self.assert_dicts_equal(targ, res01)
self.assert_dicts_equal(targ, res11)
def test__extract_neo_attrs__event_parents_childfirst_array(self):
obj = self.block.list_children_by_class('Event')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Block', seed=blk.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Event', seed=obj.annotations['seed']))
del targ['times']
res00 = nt.extract_neo_attrs(obj, parents=True, skip_array=False)
res10 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=True)
res01 = nt.extract_neo_attrs(obj, parents=True)
res11 = nt.extract_neo_attrs(obj, parents=True, child_first=True)
del res00['i']
del res10['i']
del res01['i']
del res11['i']
del res00['j']
del res10['j']
del res01['j']
del res11['j']
self.assert_dicts_equal(targ, res00)
self.assert_dicts_equal(targ, res10)
self.assert_dicts_equal(targ, res01)
self.assert_dicts_equal(targ, res11)
def test__extract_neo_attrs__spiketrain_parents_parentfirst_array(self):
obj = self.block.list_children_by_class('SpikeTrain')[0]
blk = self.block
seg = self.block.segments[0]
rcg = self.block.channel_indexes[0]
unit = self.block.channel_indexes[0].units[0]
targ = get_fake_values('SpikeTrain', seed=obj.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Unit', seed=unit.annotations['seed']))
targ.update(get_fake_values('ChannelIndex',
seed=rcg.annotations['seed']))
targ.update(get_fake_values('Block', seed=blk.annotations['seed']))
del targ['times']
del targ['index']
del targ['channel_names']
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=False)
res1 = nt.extract_neo_attrs(obj, parents=True, child_first=False)
del res0['i']
del res1['i']
del res0['j']
del res1['j']
del res0['index']
del res1['index']
del res0['channel_names']
del res1['channel_names']
self.assert_dicts_equal(targ, res0)
self.assert_dicts_equal(targ, res1)
def test__extract_neo_attrs__epoch_parents_parentfirst_array(self):
obj = self.block.list_children_by_class('Epoch')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Epoch', seed=obj.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Block', seed=blk.annotations['seed']))
obj = self._fix_neo_issue_749(obj, targ)
del targ['times']
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=False)
res1 = nt.extract_neo_attrs(obj, parents=True, child_first=False)
del res0['i']
del res1['i']
del res0['j']
del res1['j']
self.assert_dicts_equal(targ, res0)
self.assert_dicts_equal(targ, res1)
def test__extract_neo_attrs__event_parents_parentfirst_array(self):
obj = self.block.list_children_by_class('Event')[0]
blk = self.block
seg = self.block.segments[0]
targ = get_fake_values('Event', seed=obj.annotations['seed'])
targ.update(get_fake_values('Segment', seed=seg.annotations['seed']))
targ.update(get_fake_values('Block', seed=blk.annotations['seed']))
del targ['times']
res0 = nt.extract_neo_attrs(obj, parents=True, skip_array=False,
child_first=False)
res1 = nt.extract_neo_attrs(obj, parents=True, child_first=False)
del res0['i']
del res1['i']
del res0['j']
del res1['j']
self.assert_dicts_equal(targ, res0)
self.assert_dicts_equal(targ, res1)
class GetAllSpiketrainsTestCase(unittest.TestCase):
def test__get_all_spiketrains__spiketrain(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = nt.get_all_spiketrains(obj)
targ = obj
self.assertEqual(1, len(res0))
assert_same_sub_schema(targ, res0[0])
def test__get_all_spiketrains__unit(self):
obj = fake_neo('Unit', seed=0, n=7)
obj.spiketrains.append(obj.spiketrains[0])
res0 = nt.get_all_spiketrains(obj)
targ = fake_neo('Unit', seed=0, n=7).spiketrains
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_spiketrains__segment(self):
obj = fake_neo('Segment', seed=0, n=5)
obj.spiketrains.extend(obj.spiketrains)
res0 = nt.get_all_spiketrains(obj)
targ = fake_neo('Segment', seed=0, n=5).spiketrains
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_spiketrains__block(self):
obj = fake_neo('Block', seed=0, n=3)
iobj1 = obj.channel_indexes[0].units[0]
obj.channel_indexes[0].units.append(iobj1)
iobj2 = obj.channel_indexes[0].units[2].spiketrains[1]
obj.channel_indexes[1].units[1].spiketrains.append(iobj2)
res0 = nt.get_all_spiketrains(obj)
targ = fake_neo('Block', seed=0, n=3)
targ = targ.list_children_by_class('SpikeTrain')
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_spiketrains__list(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].channel_indexes[0].units[0]
obj[2].channel_indexes[0].units.append(iobj1)
iobj2 = obj[1].channel_indexes[1].units[2].spiketrains[1]
obj[2].channel_indexes[0].units[1].spiketrains.append(iobj2)
obj.append(obj[-1])
res0 = nt.get_all_spiketrains(obj)
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('SpikeTrain') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_spiketrains__tuple(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].channel_indexes[0].units[0]
obj[2].channel_indexes[0].units.append(iobj1)
iobj2 = obj[1].channel_indexes[1].units[2].spiketrains[1]
obj[2].channel_indexes[0].units[1].spiketrains.append(iobj2)
obj.append(obj[0])
res0 = nt.get_all_spiketrains(tuple(obj))
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('SpikeTrain') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_spiketrains__iter(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].channel_indexes[0].units[0]
obj[2].channel_indexes[0].units.append(iobj1)
iobj2 = obj[1].channel_indexes[1].units[2].spiketrains[1]
obj[2].channel_indexes[0].units[1].spiketrains.append(iobj2)
obj.append(obj[1])
res0 = nt.get_all_spiketrains(iter(obj))
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('SpikeTrain') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_spiketrains__dict(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].channel_indexes[0].units[0]
obj[2].channel_indexes[0].units.append(iobj1)
iobj2 = obj[1].channel_indexes[1].units[2].spiketrains[1]
obj[2].channel_indexes[0].units[1].spiketrains.append(iobj2)
obj.append(obj[1])
obj = dict((i, iobj) for i, iobj in enumerate(obj))
res0 = nt.get_all_spiketrains(obj)
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('SpikeTrain') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
class GetAllEventsTestCase(unittest.TestCase):
def test__get_all_events__event(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = nt.get_all_events(obj)
targ = obj
self.assertEqual(1, len(res0))
assert_same_sub_schema(targ, res0[0])
def test__get_all_events__segment(self):
obj = fake_neo('Segment', seed=0, n=5)
obj.events.extend(obj.events)
res0 = nt.get_all_events(obj)
targ = fake_neo('Segment', seed=0, n=5).events
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_events__block(self):
obj = fake_neo('Block', seed=0, n=3)
iobj1 = obj.segments[0]
obj.segments.append(iobj1)
iobj2 = obj.segments[0].events[1]
obj.segments[1].events.append(iobj2)
res0 = nt.get_all_events(obj)
targ = fake_neo('Block', seed=0, n=3)
targ = targ.list_children_by_class('Event')
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_events__list(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].events[1]
obj[2].segments[1].events.append(iobj2)
obj.append(obj[-1])
res0 = nt.get_all_events(obj)
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Event') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_events__tuple(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].events[1]
obj[2].segments[1].events.append(iobj2)
obj.append(obj[0])
res0 = nt.get_all_events(tuple(obj))
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Event') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_events__iter(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].events[1]
obj[2].segments[1].events.append(iobj2)
obj.append(obj[0])
res0 = nt.get_all_events(iter(obj))
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Event') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_events__dict(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].events[1]
obj[2].segments[1].events.append(iobj2)
obj.append(obj[0])
obj = dict((i, iobj) for i, iobj in enumerate(obj))
res0 = nt.get_all_events(obj)
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Event') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
class GetAllEpochsTestCase(unittest.TestCase):
def test__get_all_epochs__epoch(self):
obj = fake_neo('Epoch', seed=0, n=5)
res0 = nt.get_all_epochs(obj)
targ = obj
self.assertEqual(1, len(res0))
assert_same_sub_schema(targ, res0[0])
def test__get_all_epochs__segment(self):
obj = fake_neo('Segment', seed=0, n=5)
obj.epochs.extend(obj.epochs)
res0 = nt.get_all_epochs(obj)
targ = fake_neo('Segment', seed=0, n=5).epochs
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_epochs__block(self):
obj = fake_neo('Block', seed=0, n=3)
iobj1 = obj.segments[0]
obj.segments.append(iobj1)
iobj2 = obj.segments[0].epochs[1]
obj.segments[1].epochs.append(iobj2)
res0 = nt.get_all_epochs(obj)
targ = fake_neo('Block', seed=0, n=3)
targ = targ.list_children_by_class('Epoch')
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_epochs__list(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].epochs[1]
obj[2].segments[1].epochs.append(iobj2)
obj.append(obj[-1])
res0 = nt.get_all_epochs(obj)
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Epoch') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_epochs__tuple(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].epochs[1]
obj[2].segments[1].epochs.append(iobj2)
obj.append(obj[0])
res0 = nt.get_all_epochs(tuple(obj))
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Epoch') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_epochs__iter(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].epochs[1]
obj[2].segments[1].epochs.append(iobj2)
obj.append(obj[0])
res0 = nt.get_all_epochs(iter(obj))
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Epoch') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
def test__get_all_epochs__dict(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
obj.append(obj[-1])
iobj1 = obj[2].segments[0]
obj[2].segments.append(iobj1)
iobj2 = obj[1].segments[2].epochs[1]
obj[2].segments[1].epochs.append(iobj2)
obj.append(obj[0])
obj = dict((i, iobj) for i, iobj in enumerate(obj))
res0 = nt.get_all_epochs(obj)
targ = [fake_neo('Block', seed=i, n=3) for i in range(3)]
targ = [iobj.list_children_by_class('Epoch') for iobj in targ]
targ = list(chain.from_iterable(targ))
self.assertTrue(len(res0) > 0)
self.assertEqual(len(targ), len(res0))
assert_same_sub_schema(targ, res0)
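# Illustrative usage sketch (not part of the test suite above). A minimal,
# hedged example of the neo_tools calls exercised by these tests:
# extract_neo_attrs() flattens an object's scalar attributes into a dict,
# optionally merging in its parents' attributes (child_first decides which
# side wins on name clashes), while get_all_spiketrains() collects every
# SpikeTrain reachable from a container. The import path and the object
# construction below are assumptions for illustration; only the nt.* calls
# mirror the test code.
def _example_neo_tools_usage():
    import neo
    import quantities as pq
    from elephant import neo_tools as nt  # assumed import path

    st = neo.SpikeTrain([1.0, 2.0, 3.0] * pq.s, t_stop=10.0 * pq.s,
                        name='example')
    seg = neo.Segment(name='seg0')
    seg.spiketrains.append(st)
    st.segment = seg  # attach the parent explicitly

    # Scalar attributes of the spike train only, no parents.
    attrs = nt.extract_neo_attrs(st, parents=False, skip_array=True)

    # Include parent attributes; the child's values win on name clashes.
    attrs_with_parents = nt.extract_neo_attrs(
        st, parents=True, skip_array=True, child_first=True)

    # Every SpikeTrain reachable from the segment.
    all_trains = nt.get_all_spiketrains(seg)
    return attrs, attrs_with_parents, all_trains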
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'bot_update',
'gclient',
'gerrit',
'tryserver',
'recipe_engine/buildbucket',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/runtime',
]
from recipe_engine import types
from PB.go.chromium.org.luci.buildbucket.proto.build import Build
def RunSteps(api):
api.gclient.use_mirror = True
commit = api.buildbucket.build.input.gitiles_commit
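# Build a minimal gclient config for a Chromium checkout rooted at 'src',
# pinned to the gitiles commit from buildbucket when one is provided.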
src_cfg = api.gclient.make_config(CACHE_DIR='[GIT_CACHE]')
soln = src_cfg.solutions.add()
soln.name = 'src'
soln.url = 'https://chromium.googlesource.com/chromium/src.git'
soln.revision = commit.id or commit.ref or None
api.gclient.c = src_cfg
api.gclient.c.revisions.update(api.properties.get('revisions', {}))
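# Prefer the newer reverse mapping (output property -> checkout path), which
# lets several properties track the same solution; fall back to the legacy
# got_revision_mapping only when explicitly requested via a property.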
if api.properties.get('deprecated_got_revision_mapping'):
api.gclient.c.got_revision_mapping['src'] = 'got_cr_revision'
else:
api.gclient.c.got_revision_reverse_mapping['got_cr_revision'] = 'src'
api.gclient.c.got_revision_reverse_mapping['got_revision'] = 'src'
api.gclient.c.got_revision_reverse_mapping['got_v8_revision'] = 'src/v8'
api.gclient.c.got_revision_reverse_mapping['got_angle_revision'] = (
'src/third_party/angle')
api.gclient.c.repo_path_map.update({
'https://chromium.googlesource.com/angle/angle': (
'src/third_party/angle', 'HEAD'),
'https://chromium.googlesource.com/v8/v8': ('src/v8', 'HEAD'),
'https://webrtc.googlesource.com/src': ('src/third_party/webrtc', 'HEAD'),
})
patch = api.properties.get('patch', True)
clobber = bool(api.properties.get('clobber'))
with_branch_heads = api.properties.get('with_branch_heads', False)
with_tags = api.properties.get('with_tags', False)
refs = api.properties.get('refs', [])
root_solution_revision = api.properties.get('root_solution_revision')
suffix = api.properties.get('suffix')
gerrit_no_reset = bool(api.properties.get('gerrit_no_reset'))
gerrit_no_rebase_patch_ref = bool(
api.properties.get('gerrit_no_rebase_patch_ref'))
manifest_name = api.properties.get('manifest_name')
patch_refs = api.properties.get('patch_refs')
set_output_commit = api.properties.get('set_output_commit', True)
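# Tests in GenTests below may inject canned bot_update JSON output through
# the 'bot_update_output' property; wrap it as step_test_data so the
# simulated bot_update step returns it.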
step_test_data = None
bot_update_output = types.thaw(api.properties.get('bot_update_output'))
if bot_update_output:
step_test_data = lambda: api.json.test_api.output(bot_update_output)
bot_update_step = api.bot_update.ensure_checkout(
patch=patch,
with_branch_heads=with_branch_heads,
with_tags=with_tags,
refs=refs,
clobber=clobber,
root_solution_revision=root_solution_revision,
suffix=suffix,
gerrit_no_reset=gerrit_no_reset,
gerrit_no_rebase_patch_ref=gerrit_no_rebase_patch_ref,
disable_syntax_validation=True,
manifest_name=manifest_name,
patch_refs=patch_refs,
set_output_commit=set_output_commit,
step_test_data=step_test_data,
)
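# When a patch was applied, also exercise the "bot_update (without patch)"
# re-sync path.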
if patch:
api.bot_update.deapply_patch(bot_update_step)
if api.properties.get('resolve_chromium_fixed_version'):
api.bot_update.resolve_fixed_revision(bot_update_step.json.output, 'src')
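# Simulation test cases: each yielded test pairs buildbucket input (CI or try
# job) with property overrides and, where needed, canned step data.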
def GenTests(api):
def try_build(**kwargs):
kwargs.setdefault(
'git_repo', 'https://chromium.googlesource.com/chromium/src')
return api.buildbucket.try_build('chromium/src', 'try', 'linux', **kwargs)
def ci_build(**kwargs):
kwargs.setdefault(
'git_repo', 'https://chromium.googlesource.com/chromium/src')
return (
api.buildbucket.ci_build('chromium/src', 'ci', 'linux', **kwargs) +
api.properties(patch=False)
)
yield (
api.test('basic') +
ci_build()
)
yield (
api.test('input_commit_with_id_without_repo') +
api.buildbucket.build(Build(
input={
'gitiles_commit': {
'id': 'a' * 40,
},
},
))
)
yield (
api.test('unrecognized_commit_repo') +
ci_build(git_repo='https://unrecognized/repo')
)
yield (
api.test('basic_luci') +
ci_build() +
api.runtime(is_experimental=False, is_luci=True)
)
yield (
api.test('with_manifest_name') +
ci_build() +
api.properties(
manifest_name='checkout',
set_output_commit=False,
) +
api.step_data('bot_update (without patch)', api.json.output({
'source_manifest': {
'directories': {
'src': {
'git_checkout': {
'repo_url': (
'https://chromium.googlesource.com/chromium/src.git'),
'revision': 'ea17a292ecfb3dcdaa8dd226e67d6504fc13c15a'
},
},
},
},
}))
)
yield (
api.test('resolve_chromium_fixed_version') +
ci_build() +
api.properties(resolve_chromium_fixed_version=True)
)
yield (
api.test('basic_with_branch_heads') +
ci_build() +
api.properties(
with_branch_heads=True,
suffix='with branch heads'
)
)
yield (
api.test('with_tags') +
api.properties(with_tags=True)
)
yield (
api.test('deprecated_got_revision_mapping') +
try_build() +
api.properties(
deprecated_got_revision_mapping=True,
set_output_commit=False,
)
)
yield (
api.test('refs') +
api.properties(refs=['+refs/change/1/2/333'])
)
yield (
api.test('tryjob_fail') +
try_build() +
api.step_data('bot_update', api.json.invalid(None), retcode=1)
)
yield (
api.test('tryjob_fail_patch') +
try_build() +
api.properties(fail_patch='apply') +
api.step_data('bot_update', retcode=88)
)
yield (
api.test('tryjob_fail_patch_download') +
try_build() +
api.properties(fail_patch='download') +
api.step_data('bot_update', retcode=87)
)
yield (
api.test('clobber') +
api.properties(clobber=1)
)
yield (
api.test('reset_root_solution_revision') +
api.properties(root_solution_revision='revision')
)
yield (
api.test('gerrit_no_reset') +
api.properties(gerrit_no_reset=1)
)
yield (
api.test('gerrit_no_rebase_patch_ref') +
api.properties(gerrit_no_rebase_patch_ref=True)
)
yield (
api.test('tryjob_v8') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
api.properties(revisions={'src/v8': 'abc'})
)
yield (
api.test('tryjob_v8_head_by_default') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8')
)
yield (
api.test('tryjob_gerrit_angle') +
try_build(git_repo='https://chromium.googlesource.com/angle/angle')
)
yield (
api.test('no_apply_patch_on_gclient') +
try_build(git_repo='https://chromium.googlesource.com/angle/angle')
)
yield (
api.test('tryjob_gerrit_v8_feature_branch') +
try_build(git_repo='https://chromium.googlesource.com/v8/v8') +
api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
)
yield (
api.test('tryjob_gerrit_feature_branch') +
try_build() +
api.tryserver.gerrit_change_target_ref('refs/heads/experimental/feature')
)
yield (
api.test('tryjob_gerrit_branch_heads') +
try_build() +
api.tryserver.gerrit_change_target_ref('refs/branch-heads/67')
)
yield (
api.test('tryjob_gerrit_webrtc') +
try_build(git_repo='https://webrtc.googlesource.com/src')
)
yield (
api.test('multiple_patch_refs') +
api.properties(
patch_refs=[
('https://chromium.googlesource.com/chromium/src@'
'refs/changes/12/34/5'),
'https://chromium.googlesource.com/v8/v8@refs/changes/124/45/6',
],
)
)
yield (
api.test('no_cp_checkout_a_specific_commit') +
ci_build(revision='a' * 40) +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_master') +
ci_build(revision='') +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_a_branch_head') +
ci_build(revision='', git_ref='refs/branch-heads/x') +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
yield (
api.test('no_cp_checkout_HEAD') +
ci_build(revision='HEAD') +
api.properties(
revisions={'got_revision': 'src'},
bot_update_output={
'properties': {
'got_revision': 'a' * 40,
},
'manifest': {
'src': {
'revision': 'a' * 40,
'repository': 'https://chromium.googlesource.com/chromium/src',
}
}
}
)
)
|
|
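# Mapping from X11 keysym names to keysym codes, following X11's keysymdef.h;
# entries at 0x1000000 and above use the Unicode convention
# (0x01000000 | codepoint).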
keysyms = {
'XF86AudioLowerVolume': 0x1008ff11,
'XF86AudioMute': 0x1008ff12,
'XF86AudioRaiseVolume': 0x1008ff13,
'XF86AudioPlay': 0x1008ff14,
'XF86AudioStop': 0x1008ff15,
'XF86AudioPrev': 0x1008ff16,
'XF86AudioNext': 0x1008ff17,
'VoidSymbol': 0xffffff,
'BackSpace': 0xff08,
'Tab': 0xff09,
'Linefeed': 0xff0a,
'Clear': 0xff0b,
'Return': 0xff0d,
'Pause': 0xff13,
'Scroll_Lock': 0xff14,
'Sys_Req': 0xff15,
'Escape': 0xff1b,
'Delete': 0xffff,
'Multi_key': 0xff20,
'Codeinput': 0xff37,
'SingleCandidate': 0xff3c,
'MultipleCandidate': 0xff3d,
'PreviousCandidate': 0xff3e,
'Kanji': 0xff21,
'Muhenkan': 0xff22,
'Henkan_Mode': 0xff23,
'Henkan': 0xff23,
'Romaji': 0xff24,
'Hiragana': 0xff25,
'Katakana': 0xff26,
'Hiragana_Katakana': 0xff27,
'Zenkaku': 0xff28,
'Hankaku': 0xff29,
'Zenkaku_Hankaku': 0xff2a,
'Touroku': 0xff2b,
'Massyo': 0xff2c,
'Kana_Lock': 0xff2d,
'Kana_Shift': 0xff2e,
'Eisu_Shift': 0xff2f,
'Eisu_toggle': 0xff30,
'Kanji_Bangou': 0xff37,
'Zen_Koho': 0xff3d,
'Mae_Koho': 0xff3e,
'Home': 0xff50,
'Left': 0xff51,
'Up': 0xff52,
'Right': 0xff53,
'Down': 0xff54,
'Prior': 0xff55,
'Page_Up': 0xff55,
'Next': 0xff56,
'Page_Down': 0xff56,
'End': 0xff57,
'Begin': 0xff58,
'Select': 0xff60,
'Print': 0xff61,
'Execute': 0xff62,
'Insert': 0xff63,
'Undo': 0xff65,
'Redo': 0xff66,
'Menu': 0xff67,
'Find': 0xff68,
'Cancel': 0xff69,
'Help': 0xff6a,
'Break': 0xff6b,
'Mode_switch': 0xff7e,
'script_switch': 0xff7e,
'Num_Lock': 0xff7f,
'KP_Space': 0xff80,
'KP_Tab': 0xff89,
'KP_Enter': 0xff8d,
'KP_F1': 0xff91,
'KP_F2': 0xff92,
'KP_F3': 0xff93,
'KP_F4': 0xff94,
'KP_Home': 0xff95,
'KP_Left': 0xff96,
'KP_Up': 0xff97,
'KP_Right': 0xff98,
'KP_Down': 0xff99,
'KP_Prior': 0xff9a,
'KP_Page_Up': 0xff9a,
'KP_Next': 0xff9b,
'KP_Page_Down': 0xff9b,
'KP_End': 0xff9c,
'KP_Begin': 0xff9d,
'KP_Insert': 0xff9e,
'KP_Delete': 0xff9f,
'KP_Equal': 0xffbd,
'KP_Multiply': 0xffaa,
'KP_Add': 0xffab,
'KP_Separator': 0xffac,
'KP_Subtract': 0xffad,
'KP_Decimal': 0xffae,
'KP_Divide': 0xffaf,
'KP_0': 0xffb0,
'KP_1': 0xffb1,
'KP_2': 0xffb2,
'KP_3': 0xffb3,
'KP_4': 0xffb4,
'KP_5': 0xffb5,
'KP_6': 0xffb6,
'KP_7': 0xffb7,
'KP_8': 0xffb8,
'KP_9': 0xffb9,
'F1': 0xffbe,
'F2': 0xffbf,
'F3': 0xffc0,
'F4': 0xffc1,
'F5': 0xffc2,
'F6': 0xffc3,
'F7': 0xffc4,
'F8': 0xffc5,
'F9': 0xffc6,
'F10': 0xffc7,
'F11': 0xffc8,
'L1': 0xffc8,
'F12': 0xffc9,
'L2': 0xffc9,
'F13': 0xffca,
'L3': 0xffca,
'F14': 0xffcb,
'L4': 0xffcb,
'F15': 0xffcc,
'L5': 0xffcc,
'F16': 0xffcd,
'L6': 0xffcd,
'F17': 0xffce,
'L7': 0xffce,
'F18': 0xffcf,
'L8': 0xffcf,
'F19': 0xffd0,
'L9': 0xffd0,
'F20': 0xffd1,
'L10': 0xffd1,
'F21': 0xffd2,
'R1': 0xffd2,
'F22': 0xffd3,
'R2': 0xffd3,
'F23': 0xffd4,
'R3': 0xffd4,
'F24': 0xffd5,
'R4': 0xffd5,
'F25': 0xffd6,
'R5': 0xffd6,
'F26': 0xffd7,
'R6': 0xffd7,
'F27': 0xffd8,
'R7': 0xffd8,
'F28': 0xffd9,
'R8': 0xffd9,
'F29': 0xffda,
'R9': 0xffda,
'F30': 0xffdb,
'R10': 0xffdb,
'F31': 0xffdc,
'R11': 0xffdc,
'F32': 0xffdd,
'R12': 0xffdd,
'F33': 0xffde,
'R13': 0xffde,
'F34': 0xffdf,
'R14': 0xffdf,
'F35': 0xffe0,
'R15': 0xffe0,
'Shift_L': 0xffe1,
'Shift_R': 0xffe2,
'Control_L': 0xffe3,
'Control_R': 0xffe4,
'Caps_Lock': 0xffe5,
'Shift_Lock': 0xffe6,
'Meta_L': 0xffe7,
'Meta_R': 0xffe8,
'Alt_L': 0xffe9,
'Alt_R': 0xffea,
'Super_L': 0xffeb,
'Super_R': 0xffec,
'Hyper_L': 0xffed,
'Hyper_R': 0xffee,
'ISO_Lock': 0xfe01,
'ISO_Level2_Latch': 0xfe02,
'ISO_Level3_Shift': 0xfe03,
'ISO_Level3_Latch': 0xfe04,
'ISO_Level3_Lock': 0xfe05,
'ISO_Level5_Shift': 0xfe11,
'ISO_Level5_Latch': 0xfe12,
'ISO_Level5_Lock': 0xfe13,
'ISO_Group_Shift': 0xff7e,
'ISO_Group_Latch': 0xfe06,
'ISO_Group_Lock': 0xfe07,
'ISO_Next_Group': 0xfe08,
'ISO_Next_Group_Lock': 0xfe09,
'ISO_Prev_Group': 0xfe0a,
'ISO_Prev_Group_Lock': 0xfe0b,
'ISO_First_Group': 0xfe0c,
'ISO_First_Group_Lock': 0xfe0d,
'ISO_Last_Group': 0xfe0e,
'ISO_Last_Group_Lock': 0xfe0f,
'ISO_Left_Tab': 0xfe20,
'ISO_Move_Line_Up': 0xfe21,
'ISO_Move_Line_Down': 0xfe22,
'ISO_Partial_Line_Up': 0xfe23,
'ISO_Partial_Line_Down': 0xfe24,
'ISO_Partial_Space_Left': 0xfe25,
'ISO_Partial_Space_Right': 0xfe26,
'ISO_Set_Margin_Left': 0xfe27,
'ISO_Set_Margin_Right': 0xfe28,
'ISO_Release_Margin_Left': 0xfe29,
'ISO_Release_Margin_Right': 0xfe2a,
'ISO_Release_Both_Margins': 0xfe2b,
'ISO_Fast_Cursor_Left': 0xfe2c,
'ISO_Fast_Cursor_Right': 0xfe2d,
'ISO_Fast_Cursor_Up': 0xfe2e,
'ISO_Fast_Cursor_Down': 0xfe2f,
'ISO_Continuous_Underline': 0xfe30,
'ISO_Discontinuous_Underline': 0xfe31,
'ISO_Emphasize': 0xfe32,
'ISO_Center_Object': 0xfe33,
'ISO_Enter': 0xfe34,
'dead_grave': 0xfe50,
'dead_acute': 0xfe51,
'dead_circumflex': 0xfe52,
'dead_tilde': 0xfe53,
'dead_perispomeni': 0xfe53,
'dead_macron': 0xfe54,
'dead_breve': 0xfe55,
'dead_abovedot': 0xfe56,
'dead_diaeresis': 0xfe57,
'dead_abovering': 0xfe58,
'dead_doubleacute': 0xfe59,
'dead_caron': 0xfe5a,
'dead_cedilla': 0xfe5b,
'dead_ogonek': 0xfe5c,
'dead_iota': 0xfe5d,
'dead_voiced_sound': 0xfe5e,
'dead_semivoiced_sound': 0xfe5f,
'dead_belowdot': 0xfe60,
'dead_hook': 0xfe61,
'dead_horn': 0xfe62,
'dead_stroke': 0xfe63,
'dead_abovecomma': 0xfe64,
'dead_psili': 0xfe64,
'dead_abovereversedcomma': 0xfe65,
'dead_dasia': 0xfe65,
'dead_doublegrave': 0xfe66,
'dead_belowring': 0xfe67,
'dead_belowmacron': 0xfe68,
'dead_belowcircumflex': 0xfe69,
'dead_belowtilde': 0xfe6a,
'dead_belowbreve': 0xfe6b,
'dead_belowdiaeresis': 0xfe6c,
'dead_invertedbreve': 0xfe6d,
'dead_belowcomma': 0xfe6e,
'dead_currency': 0xfe6f,
'dead_a': 0xfe80,
'dead_A': 0xfe81,
'dead_e': 0xfe82,
'dead_E': 0xfe83,
'dead_i': 0xfe84,
'dead_I': 0xfe85,
'dead_o': 0xfe86,
'dead_O': 0xfe87,
'dead_u': 0xfe88,
'dead_U': 0xfe89,
'dead_small_schwa': 0xfe8a,
'dead_capital_schwa': 0xfe8b,
'First_Virtual_Screen': 0xfed0,
'Prev_Virtual_Screen': 0xfed1,
'Next_Virtual_Screen': 0xfed2,
'Last_Virtual_Screen': 0xfed4,
'Terminate_Server': 0xfed5,
'AccessX_Enable': 0xfe70,
'AccessX_Feedback_Enable': 0xfe71,
'RepeatKeys_Enable': 0xfe72,
'SlowKeys_Enable': 0xfe73,
'BounceKeys_Enable': 0xfe74,
'StickyKeys_Enable': 0xfe75,
'MouseKeys_Enable': 0xfe76,
'MouseKeys_Accel_Enable': 0xfe77,
'Overlay1_Enable': 0xfe78,
'Overlay2_Enable': 0xfe79,
'AudibleBell_Enable': 0xfe7a,
'Pointer_Left': 0xfee0,
'Pointer_Right': 0xfee1,
'Pointer_Up': 0xfee2,
'Pointer_Down': 0xfee3,
'Pointer_UpLeft': 0xfee4,
'Pointer_UpRight': 0xfee5,
'Pointer_DownLeft': 0xfee6,
'Pointer_DownRight': 0xfee7,
'Pointer_Button_Dflt': 0xfee8,
'Pointer_Button1': 0xfee9,
'Pointer_Button2': 0xfeea,
'Pointer_Button3': 0xfeeb,
'Pointer_Button4': 0xfeec,
'Pointer_Button5': 0xfeed,
'Pointer_DblClick_Dflt': 0xfeee,
'Pointer_DblClick1': 0xfeef,
'Pointer_DblClick2': 0xfef0,
'Pointer_DblClick3': 0xfef1,
'Pointer_DblClick4': 0xfef2,
'Pointer_DblClick5': 0xfef3,
'Pointer_Drag_Dflt': 0xfef4,
'Pointer_Drag1': 0xfef5,
'Pointer_Drag2': 0xfef6,
'Pointer_Drag3': 0xfef7,
'Pointer_Drag4': 0xfef8,
'Pointer_Drag5': 0xfefd,
'Pointer_EnableKeys': 0xfef9,
'Pointer_Accelerate': 0xfefa,
'Pointer_DfltBtnNext': 0xfefb,
'Pointer_DfltBtnPrev': 0xfefc,
'3270_Duplicate': 0xfd01,
'3270_FieldMark': 0xfd02,
'3270_Right2': 0xfd03,
'3270_Left2': 0xfd04,
'3270_BackTab': 0xfd05,
'3270_EraseEOF': 0xfd06,
'3270_EraseInput': 0xfd07,
'3270_Reset': 0xfd08,
'3270_Quit': 0xfd09,
'3270_PA1': 0xfd0a,
'3270_PA2': 0xfd0b,
'3270_PA3': 0xfd0c,
'3270_Test': 0xfd0d,
'3270_Attn': 0xfd0e,
'3270_CursorBlink': 0xfd0f,
'3270_AltCursor': 0xfd10,
'3270_KeyClick': 0xfd11,
'3270_Jump': 0xfd12,
'3270_Ident': 0xfd13,
'3270_Rule': 0xfd14,
'3270_Copy': 0xfd15,
'3270_Play': 0xfd16,
'3270_Setup': 0xfd17,
'3270_Record': 0xfd18,
'3270_ChangeScreen': 0xfd19,
'3270_DeleteWord': 0xfd1a,
'3270_ExSelect': 0xfd1b,
'3270_CursorSelect': 0xfd1c,
'3270_PrintScreen': 0xfd1d,
'3270_Enter': 0xfd1e,
'space': 0x0020,
'exclam': 0x0021,
'quotedbl': 0x0022,
'numbersign': 0x0023,
'dollar': 0x0024,
'percent': 0x0025,
'ampersand': 0x0026,
'apostrophe': 0x0027,
'quoteright': 0x0027,
'parenleft': 0x0028,
'parenright': 0x0029,
'asterisk': 0x002a,
'plus': 0x002b,
'comma': 0x002c,
'minus': 0x002d,
'period': 0x002e,
'slash': 0x002f,
'0': 0x0030,
'1': 0x0031,
'2': 0x0032,
'3': 0x0033,
'4': 0x0034,
'5': 0x0035,
'6': 0x0036,
'7': 0x0037,
'8': 0x0038,
'9': 0x0039,
'colon': 0x003a,
'semicolon': 0x003b,
'less': 0x003c,
'equal': 0x003d,
'greater': 0x003e,
'question': 0x003f,
'at': 0x0040,
'A': 0x0041,
'B': 0x0042,
'C': 0x0043,
'D': 0x0044,
'E': 0x0045,
'F': 0x0046,
'G': 0x0047,
'H': 0x0048,
'I': 0x0049,
'J': 0x004a,
'K': 0x004b,
'L': 0x004c,
'M': 0x004d,
'N': 0x004e,
'O': 0x004f,
'P': 0x0050,
'Q': 0x0051,
'R': 0x0052,
'S': 0x0053,
'T': 0x0054,
'U': 0x0055,
'V': 0x0056,
'W': 0x0057,
'X': 0x0058,
'Y': 0x0059,
'Z': 0x005a,
'bracketleft': 0x005b,
'backslash': 0x005c,
'bracketright': 0x005d,
'asciicircum': 0x005e,
'underscore': 0x005f,
'grave': 0x0060,
'quoteleft': 0x0060,
'a': 0x0061,
'b': 0x0062,
'c': 0x0063,
'd': 0x0064,
'e': 0x0065,
'f': 0x0066,
'g': 0x0067,
'h': 0x0068,
'i': 0x0069,
'j': 0x006a,
'k': 0x006b,
'l': 0x006c,
'm': 0x006d,
'n': 0x006e,
'o': 0x006f,
'p': 0x0070,
'q': 0x0071,
'r': 0x0072,
's': 0x0073,
't': 0x0074,
'u': 0x0075,
'v': 0x0076,
'w': 0x0077,
'x': 0x0078,
'y': 0x0079,
'z': 0x007a,
'braceleft': 0x007b,
'bar': 0x007c,
'braceright': 0x007d,
'asciitilde': 0x007e,
'nobreakspace': 0x00a0,
'exclamdown': 0x00a1,
'cent': 0x00a2,
'sterling': 0x00a3,
'currency': 0x00a4,
'yen': 0x00a5,
'brokenbar': 0x00a6,
'section': 0x00a7,
'diaeresis': 0x00a8,
'copyright': 0x00a9,
'ordfeminine': 0x00aa,
'guillemotleft': 0x00ab,
'notsign': 0x00ac,
'hyphen': 0x00ad,
'registered': 0x00ae,
'macron': 0x00af,
'degree': 0x00b0,
'plusminus': 0x00b1,
'twosuperior': 0x00b2,
'threesuperior': 0x00b3,
'acute': 0x00b4,
'mu': 0x00b5,
'paragraph': 0x00b6,
'periodcentered': 0x00b7,
'cedilla': 0x00b8,
'onesuperior': 0x00b9,
'masculine': 0x00ba,
'guillemotright': 0x00bb,
'onequarter': 0x00bc,
'onehalf': 0x00bd,
'threequarters': 0x00be,
'questiondown': 0x00bf,
'Agrave': 0x00c0,
'Aacute': 0x00c1,
'Acircumflex': 0x00c2,
'Atilde': 0x00c3,
'Adiaeresis': 0x00c4,
'Aring': 0x00c5,
'AE': 0x00c6,
'Ccedilla': 0x00c7,
'Egrave': 0x00c8,
'Eacute': 0x00c9,
'Ecircumflex': 0x00ca,
'Ediaeresis': 0x00cb,
'Igrave': 0x00cc,
'Iacute': 0x00cd,
'Icircumflex': 0x00ce,
'Idiaeresis': 0x00cf,
'ETH': 0x00d0,
'Eth': 0x00d0,
'Ntilde': 0x00d1,
'Ograve': 0x00d2,
'Oacute': 0x00d3,
'Ocircumflex': 0x00d4,
'Otilde': 0x00d5,
'Odiaeresis': 0x00d6,
'multiply': 0x00d7,
'Oslash': 0x00d8,
'Ooblique': 0x00d8,
'Ugrave': 0x00d9,
'Uacute': 0x00da,
'Ucircumflex': 0x00db,
'Udiaeresis': 0x00dc,
'Yacute': 0x00dd,
'THORN': 0x00de,
'Thorn': 0x00de,
'ssharp': 0x00df,
'agrave': 0x00e0,
'aacute': 0x00e1,
'acircumflex': 0x00e2,
'atilde': 0x00e3,
'adiaeresis': 0x00e4,
'aring': 0x00e5,
'ae': 0x00e6,
'ccedilla': 0x00e7,
'egrave': 0x00e8,
'eacute': 0x00e9,
'ecircumflex': 0x00ea,
'ediaeresis': 0x00eb,
'igrave': 0x00ec,
'iacute': 0x00ed,
'icircumflex': 0x00ee,
'idiaeresis': 0x00ef,
'eth': 0x00f0,
'ntilde': 0x00f1,
'ograve': 0x00f2,
'oacute': 0x00f3,
'ocircumflex': 0x00f4,
'otilde': 0x00f5,
'odiaeresis': 0x00f6,
'division': 0x00f7,
'oslash': 0x00f8,
'ooblique': 0x00f8,
'ugrave': 0x00f9,
'uacute': 0x00fa,
'ucircumflex': 0x00fb,
'udiaeresis': 0x00fc,
'yacute': 0x00fd,
'thorn': 0x00fe,
'ydiaeresis': 0x00ff,
'Aogonek': 0x01a1,
'breve': 0x01a2,
'Lstroke': 0x01a3,
'Lcaron': 0x01a5,
'Sacute': 0x01a6,
'Scaron': 0x01a9,
'Scedilla': 0x01aa,
'Tcaron': 0x01ab,
'Zacute': 0x01ac,
'Zcaron': 0x01ae,
'Zabovedot': 0x01af,
'aogonek': 0x01b1,
'ogonek': 0x01b2,
'lstroke': 0x01b3,
'lcaron': 0x01b5,
'sacute': 0x01b6,
'caron': 0x01b7,
'scaron': 0x01b9,
'scedilla': 0x01ba,
'tcaron': 0x01bb,
'zacute': 0x01bc,
'doubleacute': 0x01bd,
'zcaron': 0x01be,
'zabovedot': 0x01bf,
'Racute': 0x01c0,
'Abreve': 0x01c3,
'Lacute': 0x01c5,
'Cacute': 0x01c6,
'Ccaron': 0x01c8,
'Eogonek': 0x01ca,
'Ecaron': 0x01cc,
'Dcaron': 0x01cf,
'Dstroke': 0x01d0,
'Nacute': 0x01d1,
'Ncaron': 0x01d2,
'Odoubleacute': 0x01d5,
'Rcaron': 0x01d8,
'Uring': 0x01d9,
'Udoubleacute': 0x01db,
'Tcedilla': 0x01de,
'racute': 0x01e0,
'abreve': 0x01e3,
'lacute': 0x01e5,
'cacute': 0x01e6,
'ccaron': 0x01e8,
'eogonek': 0x01ea,
'ecaron': 0x01ec,
'dcaron': 0x01ef,
'dstroke': 0x01f0,
'nacute': 0x01f1,
'ncaron': 0x01f2,
'odoubleacute': 0x01f5,
'udoubleacute': 0x01fb,
'rcaron': 0x01f8,
'uring': 0x01f9,
'tcedilla': 0x01fe,
'abovedot': 0x01ff,
'Hstroke': 0x02a1,
'Hcircumflex': 0x02a6,
'Iabovedot': 0x02a9,
'Gbreve': 0x02ab,
'Jcircumflex': 0x02ac,
'hstroke': 0x02b1,
'hcircumflex': 0x02b6,
'idotless': 0x02b9,
'gbreve': 0x02bb,
'jcircumflex': 0x02bc,
'Cabovedot': 0x02c5,
'Ccircumflex': 0x02c6,
'Gabovedot': 0x02d5,
'Gcircumflex': 0x02d8,
'Ubreve': 0x02dd,
'Scircumflex': 0x02de,
'cabovedot': 0x02e5,
'ccircumflex': 0x02e6,
'gabovedot': 0x02f5,
'gcircumflex': 0x02f8,
'ubreve': 0x02fd,
'scircumflex': 0x02fe,
'kra': 0x03a2,
'kappa': 0x03a2,
'Rcedilla': 0x03a3,
'Itilde': 0x03a5,
'Lcedilla': 0x03a6,
'Emacron': 0x03aa,
'Gcedilla': 0x03ab,
'Tslash': 0x03ac,
'rcedilla': 0x03b3,
'itilde': 0x03b5,
'lcedilla': 0x03b6,
'emacron': 0x03ba,
'gcedilla': 0x03bb,
'tslash': 0x03bc,
'ENG': 0x03bd,
'eng': 0x03bf,
'Amacron': 0x03c0,
'Iogonek': 0x03c7,
'Eabovedot': 0x03cc,
'Imacron': 0x03cf,
'Ncedilla': 0x03d1,
'Omacron': 0x03d2,
'Kcedilla': 0x03d3,
'Uogonek': 0x03d9,
'Utilde': 0x03dd,
'Umacron': 0x03de,
'amacron': 0x03e0,
'iogonek': 0x03e7,
'eabovedot': 0x03ec,
'imacron': 0x03ef,
'ncedilla': 0x03f1,
'omacron': 0x03f2,
'kcedilla': 0x03f3,
'uogonek': 0x03f9,
'utilde': 0x03fd,
'umacron': 0x03fe,
'Babovedot': 0x1001e02,
'babovedot': 0x1001e03,
'Dabovedot': 0x1001e0a,
'Wgrave': 0x1001e80,
'Wacute': 0x1001e82,
'dabovedot': 0x1001e0b,
'Ygrave': 0x1001ef2,
'Fabovedot': 0x1001e1e,
'fabovedot': 0x1001e1f,
'Mabovedot': 0x1001e40,
'mabovedot': 0x1001e41,
'Pabovedot': 0x1001e56,
'wgrave': 0x1001e81,
'pabovedot': 0x1001e57,
'wacute': 0x1001e83,
'Sabovedot': 0x1001e60,
'ygrave': 0x1001ef3,
'Wdiaeresis': 0x1001e84,
'wdiaeresis': 0x1001e85,
'sabovedot': 0x1001e61,
'Wcircumflex': 0x1000174,
'Tabovedot': 0x1001e6a,
'Ycircumflex': 0x1000176,
'wcircumflex': 0x1000175,
'tabovedot': 0x1001e6b,
'ycircumflex': 0x1000177,
'OE': 0x13bc,
'oe': 0x13bd,
'Ydiaeresis': 0x13be,
'overline': 0x047e,
'kana_fullstop': 0x04a1,
'kana_openingbracket': 0x04a2,
'kana_closingbracket': 0x04a3,
'kana_comma': 0x04a4,
'kana_conjunctive': 0x04a5,
'kana_middledot': 0x04a5,
'kana_WO': 0x04a6,
'kana_a': 0x04a7,
'kana_i': 0x04a8,
'kana_u': 0x04a9,
'kana_e': 0x04aa,
'kana_o': 0x04ab,
'kana_ya': 0x04ac,
'kana_yu': 0x04ad,
'kana_yo': 0x04ae,
'kana_tsu': 0x04af,
'kana_tu': 0x04af,
'prolongedsound': 0x04b0,
'kana_A': 0x04b1,
'kana_I': 0x04b2,
'kana_U': 0x04b3,
'kana_E': 0x04b4,
'kana_O': 0x04b5,
'kana_KA': 0x04b6,
'kana_KI': 0x04b7,
'kana_KU': 0x04b8,
'kana_KE': 0x04b9,
'kana_KO': 0x04ba,
'kana_SA': 0x04bb,
'kana_SHI': 0x04bc,
'kana_SU': 0x04bd,
'kana_SE': 0x04be,
'kana_SO': 0x04bf,
'kana_TA': 0x04c0,
'kana_CHI': 0x04c1,
'kana_TI': 0x04c1,
'kana_TSU': 0x04c2,
'kana_TU': 0x04c2,
'kana_TE': 0x04c3,
'kana_TO': 0x04c4,
'kana_NA': 0x04c5,
'kana_NI': 0x04c6,
'kana_NU': 0x04c7,
'kana_NE': 0x04c8,
'kana_NO': 0x04c9,
'kana_HA': 0x04ca,
'kana_HI': 0x04cb,
'kana_FU': 0x04cc,
'kana_HU': 0x04cc,
'kana_HE': 0x04cd,
'kana_HO': 0x04ce,
'kana_MA': 0x04cf,
'kana_MI': 0x04d0,
'kana_MU': 0x04d1,
'kana_ME': 0x04d2,
'kana_MO': 0x04d3,
'kana_YA': 0x04d4,
'kana_YU': 0x04d5,
'kana_YO': 0x04d6,
'kana_RA': 0x04d7,
'kana_RI': 0x04d8,
'kana_RU': 0x04d9,
'kana_RE': 0x04da,
'kana_RO': 0x04db,
'kana_WA': 0x04dc,
'kana_N': 0x04dd,
'voicedsound': 0x04de,
'semivoicedsound': 0x04df,
'kana_switch': 0xff7e,
'Farsi_0': 0x10006f0,
'Farsi_1': 0x10006f1,
'Farsi_2': 0x10006f2,
'Farsi_3': 0x10006f3,
'Farsi_4': 0x10006f4,
'Farsi_5': 0x10006f5,
'Farsi_6': 0x10006f6,
'Farsi_7': 0x10006f7,
'Farsi_8': 0x10006f8,
'Farsi_9': 0x10006f9,
'Arabic_percent': 0x100066a,
'Arabic_superscript_alef': 0x1000670,
'Arabic_tteh': 0x1000679,
'Arabic_peh': 0x100067e,
'Arabic_tcheh': 0x1000686,
'Arabic_ddal': 0x1000688,
'Arabic_rreh': 0x1000691,
'Arabic_comma': 0x05ac,
'Arabic_fullstop': 0x10006d4,
'Arabic_0': 0x1000660,
'Arabic_1': 0x1000661,
'Arabic_2': 0x1000662,
'Arabic_3': 0x1000663,
'Arabic_4': 0x1000664,
'Arabic_5': 0x1000665,
'Arabic_6': 0x1000666,
'Arabic_7': 0x1000667,
'Arabic_8': 0x1000668,
'Arabic_9': 0x1000669,
'Arabic_semicolon': 0x05bb,
'Arabic_question_mark': 0x05bf,
'Arabic_hamza': 0x05c1,
'Arabic_maddaonalef': 0x05c2,
'Arabic_hamzaonalef': 0x05c3,
'Arabic_hamzaonwaw': 0x05c4,
'Arabic_hamzaunderalef': 0x05c5,
'Arabic_hamzaonyeh': 0x05c6,
'Arabic_alef': 0x05c7,
'Arabic_beh': 0x05c8,
'Arabic_tehmarbuta': 0x05c9,
'Arabic_teh': 0x05ca,
'Arabic_theh': 0x05cb,
'Arabic_jeem': 0x05cc,
'Arabic_hah': 0x05cd,
'Arabic_khah': 0x05ce,
'Arabic_dal': 0x05cf,
'Arabic_thal': 0x05d0,
'Arabic_ra': 0x05d1,
'Arabic_zain': 0x05d2,
'Arabic_seen': 0x05d3,
'Arabic_sheen': 0x05d4,
'Arabic_sad': 0x05d5,
'Arabic_dad': 0x05d6,
'Arabic_tah': 0x05d7,
'Arabic_zah': 0x05d8,
'Arabic_ain': 0x05d9,
'Arabic_ghain': 0x05da,
'Arabic_tatweel': 0x05e0,
'Arabic_feh': 0x05e1,
'Arabic_qaf': 0x05e2,
'Arabic_kaf': 0x05e3,
'Arabic_lam': 0x05e4,
'Arabic_meem': 0x05e5,
'Arabic_noon': 0x05e6,
'Arabic_ha': 0x05e7,
'Arabic_heh': 0x05e7,
'Arabic_waw': 0x05e8,
'Arabic_alefmaksura': 0x05e9,
'Arabic_yeh': 0x05ea,
'Arabic_fathatan': 0x05eb,
'Arabic_dammatan': 0x05ec,
'Arabic_kasratan': 0x05ed,
'Arabic_fatha': 0x05ee,
'Arabic_damma': 0x05ef,
'Arabic_kasra': 0x05f0,
'Arabic_shadda': 0x05f1,
'Arabic_sukun': 0x05f2,
'Arabic_madda_above': 0x1000653,
'Arabic_hamza_above': 0x1000654,
'Arabic_hamza_below': 0x1000655,
'Arabic_jeh': 0x1000698,
'Arabic_veh': 0x10006a4,
'Arabic_keheh': 0x10006a9,
'Arabic_gaf': 0x10006af,
'Arabic_noon_ghunna': 0x10006ba,
'Arabic_heh_doachashmee': 0x10006be,
'Farsi_yeh': 0x10006cc,
'Arabic_farsi_yeh': 0x10006cc,
'Arabic_yeh_baree': 0x10006d2,
'Arabic_heh_goal': 0x10006c1,
'Arabic_switch': 0xff7e,
'Cyrillic_GHE_bar': 0x1000492,
'Cyrillic_ghe_bar': 0x1000493,
'Cyrillic_ZHE_descender': 0x1000496,
'Cyrillic_zhe_descender': 0x1000497,
'Cyrillic_KA_descender': 0x100049a,
'Cyrillic_ka_descender': 0x100049b,
'Cyrillic_KA_vertstroke': 0x100049c,
'Cyrillic_ka_vertstroke': 0x100049d,
'Cyrillic_EN_descender': 0x10004a2,
'Cyrillic_en_descender': 0x10004a3,
'Cyrillic_U_straight': 0x10004ae,
'Cyrillic_u_straight': 0x10004af,
'Cyrillic_U_straight_bar': 0x10004b0,
'Cyrillic_u_straight_bar': 0x10004b1,
'Cyrillic_HA_descender': 0x10004b2,
'Cyrillic_ha_descender': 0x10004b3,
'Cyrillic_CHE_descender': 0x10004b6,
'Cyrillic_che_descender': 0x10004b7,
'Cyrillic_CHE_vertstroke': 0x10004b8,
'Cyrillic_che_vertstroke': 0x10004b9,
'Cyrillic_SHHA': 0x10004ba,
'Cyrillic_shha': 0x10004bb,
'Cyrillic_SCHWA': 0x10004d8,
'Cyrillic_schwa': 0x10004d9,
'Cyrillic_I_macron': 0x10004e2,
'Cyrillic_i_macron': 0x10004e3,
'Cyrillic_O_bar': 0x10004e8,
'Cyrillic_o_bar': 0x10004e9,
'Cyrillic_U_macron': 0x10004ee,
'Cyrillic_u_macron': 0x10004ef,
'Serbian_dje': 0x06a1,
'Macedonia_gje': 0x06a2,
'Cyrillic_io': 0x06a3,
'Ukrainian_ie': 0x06a4,
'Ukranian_je': 0x06a4,
'Macedonia_dse': 0x06a5,
'Ukrainian_i': 0x06a6,
'Ukranian_i': 0x06a6,
'Ukrainian_yi': 0x06a7,
'Ukranian_yi': 0x06a7,
'Cyrillic_je': 0x06a8,
'Serbian_je': 0x06a8,
'Cyrillic_lje': 0x06a9,
'Serbian_lje': 0x06a9,
'Cyrillic_nje': 0x06aa,
'Serbian_nje': 0x06aa,
'Serbian_tshe': 0x06ab,
'Macedonia_kje': 0x06ac,
'Ukrainian_ghe_with_upturn': 0x06ad,
'Byelorussian_shortu': 0x06ae,
'Cyrillic_dzhe': 0x06af,
'Serbian_dze': 0x06af,
'numerosign': 0x06b0,
'Serbian_DJE': 0x06b1,
'Macedonia_GJE': 0x06b2,
'Cyrillic_IO': 0x06b3,
'Ukrainian_IE': 0x06b4,
'Ukranian_JE': 0x06b4,
'Macedonia_DSE': 0x06b5,
'Ukrainian_I': 0x06b6,
'Ukranian_I': 0x06b6,
'Ukrainian_YI': 0x06b7,
'Ukranian_YI': 0x06b7,
'Cyrillic_JE': 0x06b8,
'Serbian_JE': 0x06b8,
'Cyrillic_LJE': 0x06b9,
'Serbian_LJE': 0x06b9,
'Cyrillic_NJE': 0x06ba,
'Serbian_NJE': 0x06ba,
'Serbian_TSHE': 0x06bb,
'Macedonia_KJE': 0x06bc,
'Ukrainian_GHE_WITH_UPTURN': 0x06bd,
'Byelorussian_SHORTU': 0x06be,
'Cyrillic_DZHE': 0x06bf,
'Serbian_DZE': 0x06bf,
'Cyrillic_yu': 0x06c0,
'Cyrillic_a': 0x06c1,
'Cyrillic_be': 0x06c2,
'Cyrillic_tse': 0x06c3,
'Cyrillic_de': 0x06c4,
'Cyrillic_ie': 0x06c5,
'Cyrillic_ef': 0x06c6,
'Cyrillic_ghe': 0x06c7,
'Cyrillic_ha': 0x06c8,
'Cyrillic_i': 0x06c9,
'Cyrillic_shorti': 0x06ca,
'Cyrillic_ka': 0x06cb,
'Cyrillic_el': 0x06cc,
'Cyrillic_em': 0x06cd,
'Cyrillic_en': 0x06ce,
'Cyrillic_o': 0x06cf,
'Cyrillic_pe': 0x06d0,
'Cyrillic_ya': 0x06d1,
'Cyrillic_er': 0x06d2,
'Cyrillic_es': 0x06d3,
'Cyrillic_te': 0x06d4,
'Cyrillic_u': 0x06d5,
'Cyrillic_zhe': 0x06d6,
'Cyrillic_ve': 0x06d7,
'Cyrillic_softsign': 0x06d8,
'Cyrillic_yeru': 0x06d9,
'Cyrillic_ze': 0x06da,
'Cyrillic_sha': 0x06db,
'Cyrillic_e': 0x06dc,
'Cyrillic_shcha': 0x06dd,
'Cyrillic_che': 0x06de,
'Cyrillic_hardsign': 0x06df,
'Cyrillic_YU': 0x06e0,
'Cyrillic_A': 0x06e1,
'Cyrillic_BE': 0x06e2,
'Cyrillic_TSE': 0x06e3,
'Cyrillic_DE': 0x06e4,
'Cyrillic_IE': 0x06e5,
'Cyrillic_EF': 0x06e6,
'Cyrillic_GHE': 0x06e7,
'Cyrillic_HA': 0x06e8,
'Cyrillic_I': 0x06e9,
'Cyrillic_SHORTI': 0x06ea,
'Cyrillic_KA': 0x06eb,
'Cyrillic_EL': 0x06ec,
'Cyrillic_EM': 0x06ed,
'Cyrillic_EN': 0x06ee,
'Cyrillic_O': 0x06ef,
'Cyrillic_PE': 0x06f0,
'Cyrillic_YA': 0x06f1,
'Cyrillic_ER': 0x06f2,
'Cyrillic_ES': 0x06f3,
'Cyrillic_TE': 0x06f4,
'Cyrillic_U': 0x06f5,
'Cyrillic_ZHE': 0x06f6,
'Cyrillic_VE': 0x06f7,
'Cyrillic_SOFTSIGN': 0x06f8,
'Cyrillic_YERU': 0x06f9,
'Cyrillic_ZE': 0x06fa,
'Cyrillic_SHA': 0x06fb,
'Cyrillic_E': 0x06fc,
'Cyrillic_SHCHA': 0x06fd,
'Cyrillic_CHE': 0x06fe,
'Cyrillic_HARDSIGN': 0x06ff,
'Greek_ALPHAaccent': 0x07a1,
'Greek_EPSILONaccent': 0x07a2,
'Greek_ETAaccent': 0x07a3,
'Greek_IOTAaccent': 0x07a4,
'Greek_IOTAdieresis': 0x07a5,
'Greek_IOTAdiaeresis': 0x07a5,
'Greek_OMICRONaccent': 0x07a7,
'Greek_UPSILONaccent': 0x07a8,
'Greek_UPSILONdieresis': 0x07a9,
'Greek_OMEGAaccent': 0x07ab,
'Greek_accentdieresis': 0x07ae,
'Greek_horizbar': 0x07af,
'Greek_alphaaccent': 0x07b1,
'Greek_epsilonaccent': 0x07b2,
'Greek_etaaccent': 0x07b3,
'Greek_iotaaccent': 0x07b4,
'Greek_iotadieresis': 0x07b5,
'Greek_iotaaccentdieresis': 0x07b6,
'Greek_omicronaccent': 0x07b7,
'Greek_upsilonaccent': 0x07b8,
'Greek_upsilondieresis': 0x07b9,
'Greek_upsilonaccentdieresis': 0x07ba,
'Greek_omegaaccent': 0x07bb,
'Greek_ALPHA': 0x07c1,
'Greek_BETA': 0x07c2,
'Greek_GAMMA': 0x07c3,
'Greek_DELTA': 0x07c4,
'Greek_EPSILON': 0x07c5,
'Greek_ZETA': 0x07c6,
'Greek_ETA': 0x07c7,
'Greek_THETA': 0x07c8,
'Greek_IOTA': 0x07c9,
'Greek_KAPPA': 0x07ca,
'Greek_LAMDA': 0x07cb,
'Greek_LAMBDA': 0x07cb,
'Greek_MU': 0x07cc,
'Greek_NU': 0x07cd,
'Greek_XI': 0x07ce,
'Greek_OMICRON': 0x07cf,
'Greek_PI': 0x07d0,
'Greek_RHO': 0x07d1,
'Greek_SIGMA': 0x07d2,
'Greek_TAU': 0x07d4,
'Greek_UPSILON': 0x07d5,
'Greek_PHI': 0x07d6,
'Greek_CHI': 0x07d7,
'Greek_PSI': 0x07d8,
'Greek_OMEGA': 0x07d9,
'Greek_alpha': 0x07e1,
'Greek_beta': 0x07e2,
'Greek_gamma': 0x07e3,
'Greek_delta': 0x07e4,
'Greek_epsilon': 0x07e5,
'Greek_zeta': 0x07e6,
'Greek_eta': 0x07e7,
'Greek_theta': 0x07e8,
'Greek_iota': 0x07e9,
'Greek_kappa': 0x07ea,
'Greek_lamda': 0x07eb,
'Greek_lambda': 0x07eb,
'Greek_mu': 0x07ec,
'Greek_nu': 0x07ed,
'Greek_xi': 0x07ee,
'Greek_omicron': 0x07ef,
'Greek_pi': 0x07f0,
'Greek_rho': 0x07f1,
'Greek_sigma': 0x07f2,
'Greek_finalsmallsigma': 0x07f3,
'Greek_tau': 0x07f4,
'Greek_upsilon': 0x07f5,
'Greek_phi': 0x07f6,
'Greek_chi': 0x07f7,
'Greek_psi': 0x07f8,
'Greek_omega': 0x07f9,
'Greek_switch': 0xff7e,
'leftradical': 0x08a1,
'topleftradical': 0x08a2,
'horizconnector': 0x08a3,
'topintegral': 0x08a4,
'botintegral': 0x08a5,
'vertconnector': 0x08a6,
'topleftsqbracket': 0x08a7,
'botleftsqbracket': 0x08a8,
'toprightsqbracket': 0x08a9,
'botrightsqbracket': 0x08aa,
'topleftparens': 0x08ab,
'botleftparens': 0x08ac,
'toprightparens': 0x08ad,
'botrightparens': 0x08ae,
'leftmiddlecurlybrace': 0x08af,
'rightmiddlecurlybrace': 0x08b0,
'topleftsummation': 0x08b1,
'botleftsummation': 0x08b2,
'topvertsummationconnector': 0x08b3,
'botvertsummationconnector': 0x08b4,
'toprightsummation': 0x08b5,
'botrightsummation': 0x08b6,
'rightmiddlesummation': 0x08b7,
'lessthanequal': 0x08bc,
'notequal': 0x08bd,
'greaterthanequal': 0x08be,
'integral': 0x08bf,
'therefore': 0x08c0,
'variation': 0x08c1,
'infinity': 0x08c2,
'nabla': 0x08c5,
'approximate': 0x08c8,
'similarequal': 0x08c9,
'ifonlyif': 0x08cd,
'implies': 0x08ce,
'identical': 0x08cf,
'radical': 0x08d6,
'includedin': 0x08da,
'includes': 0x08db,
'intersection': 0x08dc,
'union': 0x08dd,
'logicaland': 0x08de,
'logicalor': 0x08df,
'partialderivative': 0x08ef,
'function': 0x08f6,
'leftarrow': 0x08fb,
'uparrow': 0x08fc,
'rightarrow': 0x08fd,
'downarrow': 0x08fe,
'blank': 0x09df,
'soliddiamond': 0x09e0,
'checkerboard': 0x09e1,
'ht': 0x09e2,
'ff': 0x09e3,
'cr': 0x09e4,
'lf': 0x09e5,
'nl': 0x09e8,
'vt': 0x09e9,
'lowrightcorner': 0x09ea,
'uprightcorner': 0x09eb,
'upleftcorner': 0x09ec,
'lowleftcorner': 0x09ed,
'crossinglines': 0x09ee,
'horizlinescan1': 0x09ef,
'horizlinescan3': 0x09f0,
'horizlinescan5': 0x09f1,
'horizlinescan7': 0x09f2,
'horizlinescan9': 0x09f3,
'leftt': 0x09f4,
'rightt': 0x09f5,
'bott': 0x09f6,
'topt': 0x09f7,
'vertbar': 0x09f8,
'emspace': 0x0aa1,
'enspace': 0x0aa2,
'em3space': 0x0aa3,
'em4space': 0x0aa4,
'digitspace': 0x0aa5,
'punctspace': 0x0aa6,
'thinspace': 0x0aa7,
'hairspace': 0x0aa8,
'emdash': 0x0aa9,
'endash': 0x0aaa,
'signifblank': 0x0aac,
'ellipsis': 0x0aae,
'doubbaselinedot': 0x0aaf,
'onethird': 0x0ab0,
'twothirds': 0x0ab1,
'onefifth': 0x0ab2,
'twofifths': 0x0ab3,
'threefifths': 0x0ab4,
'fourfifths': 0x0ab5,
'onesixth': 0x0ab6,
'fivesixths': 0x0ab7,
'careof': 0x0ab8,
'figdash': 0x0abb,
'leftanglebracket': 0x0abc,
'decimalpoint': 0x0abd,
'rightanglebracket': 0x0abe,
'marker': 0x0abf,
'oneeighth': 0x0ac3,
'threeeighths': 0x0ac4,
'fiveeighths': 0x0ac5,
'seveneighths': 0x0ac6,
'trademark': 0x0ac9,
'signaturemark': 0x0aca,
'trademarkincircle': 0x0acb,
'leftopentriangle': 0x0acc,
'rightopentriangle': 0x0acd,
'emopencircle': 0x0ace,
'emopenrectangle': 0x0acf,
'leftsinglequotemark': 0x0ad0,
'rightsinglequotemark': 0x0ad1,
'leftdoublequotemark': 0x0ad2,
'rightdoublequotemark': 0x0ad3,
'prescription': 0x0ad4,
'minutes': 0x0ad6,
'seconds': 0x0ad7,
'latincross': 0x0ad9,
'hexagram': 0x0ada,
'filledrectbullet': 0x0adb,
'filledlefttribullet': 0x0adc,
'filledrighttribullet': 0x0add,
'emfilledcircle': 0x0ade,
'emfilledrect': 0x0adf,
'enopencircbullet': 0x0ae0,
'enopensquarebullet': 0x0ae1,
'openrectbullet': 0x0ae2,
'opentribulletup': 0x0ae3,
'opentribulletdown': 0x0ae4,
'openstar': 0x0ae5,
'enfilledcircbullet': 0x0ae6,
'enfilledsqbullet': 0x0ae7,
'filledtribulletup': 0x0ae8,
'filledtribulletdown': 0x0ae9,
'leftpointer': 0x0aea,
'rightpointer': 0x0aeb,
'club': 0x0aec,
'diamond': 0x0aed,
'heart': 0x0aee,
'maltesecross': 0x0af0,
'dagger': 0x0af1,
'doubledagger': 0x0af2,
'checkmark': 0x0af3,
'ballotcross': 0x0af4,
'musicalsharp': 0x0af5,
'musicalflat': 0x0af6,
'malesymbol': 0x0af7,
'femalesymbol': 0x0af8,
'telephone': 0x0af9,
'telephonerecorder': 0x0afa,
'phonographcopyright': 0x0afb,
'caret': 0x0afc,
'singlelowquotemark': 0x0afd,
'doublelowquotemark': 0x0afe,
'cursor': 0x0aff,
'leftcaret': 0x0ba3,
'rightcaret': 0x0ba6,
'downcaret': 0x0ba8,
'upcaret': 0x0ba9,
'overbar': 0x0bc0,
'downtack': 0x0bc2,
'upshoe': 0x0bc3,
'downstile': 0x0bc4,
'underbar': 0x0bc6,
'jot': 0x0bca,
'quad': 0x0bcc,
'uptack': 0x0bce,
'circle': 0x0bcf,
'upstile': 0x0bd3,
'downshoe': 0x0bd6,
'rightshoe': 0x0bd8,
'leftshoe': 0x0bda,
'lefttack': 0x0bdc,
'righttack': 0x0bfc,
'hebrew_doublelowline': 0x0cdf,
'hebrew_aleph': 0x0ce0,
'hebrew_bet': 0x0ce1,
'hebrew_beth': 0x0ce1,
'hebrew_gimel': 0x0ce2,
'hebrew_gimmel': 0x0ce2,
'hebrew_dalet': 0x0ce3,
'hebrew_daleth': 0x0ce3,
'hebrew_he': 0x0ce4,
'hebrew_waw': 0x0ce5,
'hebrew_zain': 0x0ce6,
'hebrew_zayin': 0x0ce6,
'hebrew_chet': 0x0ce7,
'hebrew_het': 0x0ce7,
'hebrew_tet': 0x0ce8,
'hebrew_teth': 0x0ce8,
'hebrew_yod': 0x0ce9,
'hebrew_finalkaph': 0x0cea,
'hebrew_kaph': 0x0ceb,
'hebrew_lamed': 0x0cec,
'hebrew_finalmem': 0x0ced,
'hebrew_mem': 0x0cee,
'hebrew_finalnun': 0x0cef,
'hebrew_nun': 0x0cf0,
'hebrew_samech': 0x0cf1,
'hebrew_samekh': 0x0cf1,
'hebrew_ayin': 0x0cf2,
'hebrew_finalpe': 0x0cf3,
'hebrew_pe': 0x0cf4,
'hebrew_finalzade': 0x0cf5,
'hebrew_finalzadi': 0x0cf5,
'hebrew_zade': 0x0cf6,
'hebrew_zadi': 0x0cf6,
'hebrew_qoph': 0x0cf7,
'hebrew_kuf': 0x0cf7,
'hebrew_resh': 0x0cf8,
'hebrew_shin': 0x0cf9,
'hebrew_taw': 0x0cfa,
'hebrew_taf': 0x0cfa,
'Hebrew_switch': 0xff7e,
'Thai_kokai': 0x0da1,
'Thai_khokhai': 0x0da2,
'Thai_khokhuat': 0x0da3,
'Thai_khokhwai': 0x0da4,
'Thai_khokhon': 0x0da5,
'Thai_khorakhang': 0x0da6,
'Thai_ngongu': 0x0da7,
'Thai_chochan': 0x0da8,
'Thai_choching': 0x0da9,
'Thai_chochang': 0x0daa,
'Thai_soso': 0x0dab,
'Thai_chochoe': 0x0dac,
'Thai_yoying': 0x0dad,
'Thai_dochada': 0x0dae,
'Thai_topatak': 0x0daf,
'Thai_thothan': 0x0db0,
'Thai_thonangmontho': 0x0db1,
'Thai_thophuthao': 0x0db2,
'Thai_nonen': 0x0db3,
'Thai_dodek': 0x0db4,
'Thai_totao': 0x0db5,
'Thai_thothung': 0x0db6,
'Thai_thothahan': 0x0db7,
'Thai_thothong': 0x0db8,
'Thai_nonu': 0x0db9,
'Thai_bobaimai': 0x0dba,
'Thai_popla': 0x0dbb,
'Thai_phophung': 0x0dbc,
'Thai_fofa': 0x0dbd,
'Thai_phophan': 0x0dbe,
'Thai_fofan': 0x0dbf,
'Thai_phosamphao': 0x0dc0,
'Thai_moma': 0x0dc1,
'Thai_yoyak': 0x0dc2,
'Thai_rorua': 0x0dc3,
'Thai_ru': 0x0dc4,
'Thai_loling': 0x0dc5,
'Thai_lu': 0x0dc6,
'Thai_wowaen': 0x0dc7,
'Thai_sosala': 0x0dc8,
'Thai_sorusi': 0x0dc9,
'Thai_sosua': 0x0dca,
'Thai_hohip': 0x0dcb,
'Thai_lochula': 0x0dcc,
'Thai_oang': 0x0dcd,
'Thai_honokhuk': 0x0dce,
'Thai_paiyannoi': 0x0dcf,
'Thai_saraa': 0x0dd0,
'Thai_maihanakat': 0x0dd1,
'Thai_saraaa': 0x0dd2,
'Thai_saraam': 0x0dd3,
'Thai_sarai': 0x0dd4,
'Thai_saraii': 0x0dd5,
'Thai_saraue': 0x0dd6,
'Thai_sarauee': 0x0dd7,
'Thai_sarau': 0x0dd8,
'Thai_sarauu': 0x0dd9,
'Thai_phinthu': 0x0dda,
'Thai_maihanakat_maitho': 0x0dde,
'Thai_baht': 0x0ddf,
'Thai_sarae': 0x0de0,
'Thai_saraae': 0x0de1,
'Thai_sarao': 0x0de2,
'Thai_saraaimaimuan': 0x0de3,
'Thai_saraaimaimalai': 0x0de4,
'Thai_lakkhangyao': 0x0de5,
'Thai_maiyamok': 0x0de6,
'Thai_maitaikhu': 0x0de7,
'Thai_maiek': 0x0de8,
'Thai_maitho': 0x0de9,
'Thai_maitri': 0x0dea,
'Thai_maichattawa': 0x0deb,
'Thai_thanthakhat': 0x0dec,
'Thai_nikhahit': 0x0ded,
'Thai_leksun': 0x0df0,
'Thai_leknung': 0x0df1,
'Thai_leksong': 0x0df2,
'Thai_leksam': 0x0df3,
'Thai_leksi': 0x0df4,
'Thai_lekha': 0x0df5,
'Thai_lekhok': 0x0df6,
'Thai_lekchet': 0x0df7,
'Thai_lekpaet': 0x0df8,
'Thai_lekkao': 0x0df9,
'Hangul': 0xff31,
'Hangul_Start': 0xff32,
'Hangul_End': 0xff33,
'Hangul_Hanja': 0xff34,
'Hangul_Jamo': 0xff35,
'Hangul_Romaja': 0xff36,
'Hangul_Codeinput': 0xff37,
'Hangul_Jeonja': 0xff38,
'Hangul_Banja': 0xff39,
'Hangul_PreHanja': 0xff3a,
'Hangul_PostHanja': 0xff3b,
'Hangul_SingleCandidate': 0xff3c,
'Hangul_MultipleCandidate': 0xff3d,
'Hangul_PreviousCandidate': 0xff3e,
'Hangul_Special': 0xff3f,
'Hangul_switch': 0xff7e,
'Hangul_Kiyeog': 0x0ea1,
'Hangul_SsangKiyeog': 0x0ea2,
'Hangul_KiyeogSios': 0x0ea3,
'Hangul_Nieun': 0x0ea4,
'Hangul_NieunJieuj': 0x0ea5,
'Hangul_NieunHieuh': 0x0ea6,
'Hangul_Dikeud': 0x0ea7,
'Hangul_SsangDikeud': 0x0ea8,
'Hangul_Rieul': 0x0ea9,
'Hangul_RieulKiyeog': 0x0eaa,
'Hangul_RieulMieum': 0x0eab,
'Hangul_RieulPieub': 0x0eac,
'Hangul_RieulSios': 0x0ead,
'Hangul_RieulTieut': 0x0eae,
'Hangul_RieulPhieuf': 0x0eaf,
'Hangul_RieulHieuh': 0x0eb0,
'Hangul_Mieum': 0x0eb1,
'Hangul_Pieub': 0x0eb2,
'Hangul_SsangPieub': 0x0eb3,
'Hangul_PieubSios': 0x0eb4,
'Hangul_Sios': 0x0eb5,
'Hangul_SsangSios': 0x0eb6,
'Hangul_Ieung': 0x0eb7,
'Hangul_Jieuj': 0x0eb8,
'Hangul_SsangJieuj': 0x0eb9,
'Hangul_Cieuc': 0x0eba,
'Hangul_Khieuq': 0x0ebb,
'Hangul_Tieut': 0x0ebc,
'Hangul_Phieuf': 0x0ebd,
'Hangul_Hieuh': 0x0ebe,
'Hangul_A': 0x0ebf,
'Hangul_AE': 0x0ec0,
'Hangul_YA': 0x0ec1,
'Hangul_YAE': 0x0ec2,
'Hangul_EO': 0x0ec3,
'Hangul_E': 0x0ec4,
'Hangul_YEO': 0x0ec5,
'Hangul_YE': 0x0ec6,
'Hangul_O': 0x0ec7,
'Hangul_WA': 0x0ec8,
'Hangul_WAE': 0x0ec9,
'Hangul_OE': 0x0eca,
'Hangul_YO': 0x0ecb,
'Hangul_U': 0x0ecc,
'Hangul_WEO': 0x0ecd,
'Hangul_WE': 0x0ece,
'Hangul_WI': 0x0ecf,
'Hangul_YU': 0x0ed0,
'Hangul_EU': 0x0ed1,
'Hangul_YI': 0x0ed2,
'Hangul_I': 0x0ed3,
'Hangul_J_Kiyeog': 0x0ed4,
'Hangul_J_SsangKiyeog': 0x0ed5,
'Hangul_J_KiyeogSios': 0x0ed6,
'Hangul_J_Nieun': 0x0ed7,
'Hangul_J_NieunJieuj': 0x0ed8,
'Hangul_J_NieunHieuh': 0x0ed9,
'Hangul_J_Dikeud': 0x0eda,
'Hangul_J_Rieul': 0x0edb,
'Hangul_J_RieulKiyeog': 0x0edc,
'Hangul_J_RieulMieum': 0x0edd,
'Hangul_J_RieulPieub': 0x0ede,
'Hangul_J_RieulSios': 0x0edf,
'Hangul_J_RieulTieut': 0x0ee0,
'Hangul_J_RieulPhieuf': 0x0ee1,
'Hangul_J_RieulHieuh': 0x0ee2,
'Hangul_J_Mieum': 0x0ee3,
'Hangul_J_Pieub': 0x0ee4,
'Hangul_J_PieubSios': 0x0ee5,
'Hangul_J_Sios': 0x0ee6,
'Hangul_J_SsangSios': 0x0ee7,
'Hangul_J_Ieung': 0x0ee8,
'Hangul_J_Jieuj': 0x0ee9,
'Hangul_J_Cieuc': 0x0eea,
'Hangul_J_Khieuq': 0x0eeb,
'Hangul_J_Tieut': 0x0eec,
'Hangul_J_Phieuf': 0x0eed,
'Hangul_J_Hieuh': 0x0eee,
'Hangul_RieulYeorinHieuh': 0x0eef,
'Hangul_SunkyeongeumMieum': 0x0ef0,
'Hangul_SunkyeongeumPieub': 0x0ef1,
'Hangul_PanSios': 0x0ef2,
'Hangul_KkogjiDalrinIeung': 0x0ef3,
'Hangul_SunkyeongeumPhieuf': 0x0ef4,
'Hangul_YeorinHieuh': 0x0ef5,
'Hangul_AraeA': 0x0ef6,
'Hangul_AraeAE': 0x0ef7,
'Hangul_J_PanSios': 0x0ef8,
'Hangul_J_KkogjiDalrinIeung': 0x0ef9,
'Hangul_J_YeorinHieuh': 0x0efa,
'Korean_Won': 0x0eff,
'Armenian_ligature_ew': 0x1000587,
'Armenian_full_stop': 0x1000589,
'Armenian_verjaket': 0x1000589,
'Armenian_separation_mark': 0x100055d,
'Armenian_but': 0x100055d,
'Armenian_hyphen': 0x100058a,
'Armenian_yentamna': 0x100058a,
'Armenian_exclam': 0x100055c,
'Armenian_amanak': 0x100055c,
'Armenian_accent': 0x100055b,
'Armenian_shesht': 0x100055b,
'Armenian_question': 0x100055e,
'Armenian_paruyk': 0x100055e,
'Armenian_AYB': 0x1000531,
'Armenian_ayb': 0x1000561,
'Armenian_BEN': 0x1000532,
'Armenian_ben': 0x1000562,
'Armenian_GIM': 0x1000533,
'Armenian_gim': 0x1000563,
'Armenian_DA': 0x1000534,
'Armenian_da': 0x1000564,
'Armenian_YECH': 0x1000535,
'Armenian_yech': 0x1000565,
'Armenian_ZA': 0x1000536,
'Armenian_za': 0x1000566,
'Armenian_E': 0x1000537,
'Armenian_e': 0x1000567,
'Armenian_AT': 0x1000538,
'Armenian_at': 0x1000568,
'Armenian_TO': 0x1000539,
'Armenian_to': 0x1000569,
'Armenian_ZHE': 0x100053a,
'Armenian_zhe': 0x100056a,
'Armenian_INI': 0x100053b,
'Armenian_ini': 0x100056b,
'Armenian_LYUN': 0x100053c,
'Armenian_lyun': 0x100056c,
'Armenian_KHE': 0x100053d,
'Armenian_khe': 0x100056d,
'Armenian_TSA': 0x100053e,
'Armenian_tsa': 0x100056e,
'Armenian_KEN': 0x100053f,
'Armenian_ken': 0x100056f,
'Armenian_HO': 0x1000540,
'Armenian_ho': 0x1000570,
'Armenian_DZA': 0x1000541,
'Armenian_dza': 0x1000571,
'Armenian_GHAT': 0x1000542,
'Armenian_ghat': 0x1000572,
'Armenian_TCHE': 0x1000543,
'Armenian_tche': 0x1000573,
'Armenian_MEN': 0x1000544,
'Armenian_men': 0x1000574,
'Armenian_HI': 0x1000545,
'Armenian_hi': 0x1000575,
'Armenian_NU': 0x1000546,
'Armenian_nu': 0x1000576,
'Armenian_SHA': 0x1000547,
'Armenian_sha': 0x1000577,
'Armenian_VO': 0x1000548,
'Armenian_vo': 0x1000578,
'Armenian_CHA': 0x1000549,
'Armenian_cha': 0x1000579,
'Armenian_PE': 0x100054a,
'Armenian_pe': 0x100057a,
'Armenian_JE': 0x100054b,
'Armenian_je': 0x100057b,
'Armenian_RA': 0x100054c,
'Armenian_ra': 0x100057c,
'Armenian_SE': 0x100054d,
'Armenian_se': 0x100057d,
'Armenian_VEV': 0x100054e,
'Armenian_vev': 0x100057e,
'Armenian_TYUN': 0x100054f,
'Armenian_tyun': 0x100057f,
'Armenian_RE': 0x1000550,
'Armenian_re': 0x1000580,
'Armenian_TSO': 0x1000551,
'Armenian_tso': 0x1000581,
'Armenian_VYUN': 0x1000552,
'Armenian_vyun': 0x1000582,
'Armenian_PYUR': 0x1000553,
'Armenian_pyur': 0x1000583,
'Armenian_KE': 0x1000554,
'Armenian_ke': 0x1000584,
'Armenian_O': 0x1000555,
'Armenian_o': 0x1000585,
'Armenian_FE': 0x1000556,
'Armenian_fe': 0x1000586,
'Armenian_apostrophe': 0x100055a,
'Georgian_an': 0x10010d0,
'Georgian_ban': 0x10010d1,
'Georgian_gan': 0x10010d2,
'Georgian_don': 0x10010d3,
'Georgian_en': 0x10010d4,
'Georgian_vin': 0x10010d5,
'Georgian_zen': 0x10010d6,
'Georgian_tan': 0x10010d7,
'Georgian_in': 0x10010d8,
'Georgian_kan': 0x10010d9,
'Georgian_las': 0x10010da,
'Georgian_man': 0x10010db,
'Georgian_nar': 0x10010dc,
'Georgian_on': 0x10010dd,
'Georgian_par': 0x10010de,
'Georgian_zhar': 0x10010df,
'Georgian_rae': 0x10010e0,
'Georgian_san': 0x10010e1,
'Georgian_tar': 0x10010e2,
'Georgian_un': 0x10010e3,
'Georgian_phar': 0x10010e4,
'Georgian_khar': 0x10010e5,
'Georgian_ghan': 0x10010e6,
'Georgian_qar': 0x10010e7,
'Georgian_shin': 0x10010e8,
'Georgian_chin': 0x10010e9,
'Georgian_can': 0x10010ea,
'Georgian_jil': 0x10010eb,
'Georgian_cil': 0x10010ec,
'Georgian_char': 0x10010ed,
'Georgian_xan': 0x10010ee,
'Georgian_jhan': 0x10010ef,
'Georgian_hae': 0x10010f0,
'Georgian_he': 0x10010f1,
'Georgian_hie': 0x10010f2,
'Georgian_we': 0x10010f3,
'Georgian_har': 0x10010f4,
'Georgian_hoe': 0x10010f5,
'Georgian_fi': 0x10010f6,
'Xabovedot': 0x1001e8a,
'Ibreve': 0x100012c,
'Zstroke': 0x10001b5,
'Gcaron': 0x10001e6,
'Ocaron': 0x10001d1,
'Obarred': 0x100019f,
'xabovedot': 0x1001e8b,
'ibreve': 0x100012d,
'zstroke': 0x10001b6,
'gcaron': 0x10001e7,
'ocaron': 0x10001d2,
'obarred': 0x1000275,
'SCHWA': 0x100018f,
'schwa': 0x1000259,
'Lbelowdot': 0x1001e36,
'lbelowdot': 0x1001e37,
'Abelowdot': 0x1001ea0,
'abelowdot': 0x1001ea1,
'Ahook': 0x1001ea2,
'ahook': 0x1001ea3,
'Acircumflexacute': 0x1001ea4,
'acircumflexacute': 0x1001ea5,
'Acircumflexgrave': 0x1001ea6,
'acircumflexgrave': 0x1001ea7,
'Acircumflexhook': 0x1001ea8,
'acircumflexhook': 0x1001ea9,
'Acircumflextilde': 0x1001eaa,
'acircumflextilde': 0x1001eab,
'Acircumflexbelowdot': 0x1001eac,
'acircumflexbelowdot': 0x1001ead,
'Abreveacute': 0x1001eae,
'abreveacute': 0x1001eaf,
'Abrevegrave': 0x1001eb0,
'abrevegrave': 0x1001eb1,
'Abrevehook': 0x1001eb2,
'abrevehook': 0x1001eb3,
'Abrevetilde': 0x1001eb4,
'abrevetilde': 0x1001eb5,
'Abrevebelowdot': 0x1001eb6,
'abrevebelowdot': 0x1001eb7,
'Ebelowdot': 0x1001eb8,
'ebelowdot': 0x1001eb9,
'Ehook': 0x1001eba,
'ehook': 0x1001ebb,
'Etilde': 0x1001ebc,
'etilde': 0x1001ebd,
'Ecircumflexacute': 0x1001ebe,
'ecircumflexacute': 0x1001ebf,
'Ecircumflexgrave': 0x1001ec0,
'ecircumflexgrave': 0x1001ec1,
'Ecircumflexhook': 0x1001ec2,
'ecircumflexhook': 0x1001ec3,
'Ecircumflextilde': 0x1001ec4,
'ecircumflextilde': 0x1001ec5,
'Ecircumflexbelowdot': 0x1001ec6,
'ecircumflexbelowdot': 0x1001ec7,
'Ihook': 0x1001ec8,
'ihook': 0x1001ec9,
'Ibelowdot': 0x1001eca,
'ibelowdot': 0x1001ecb,
'Obelowdot': 0x1001ecc,
'obelowdot': 0x1001ecd,
'Ohook': 0x1001ece,
'ohook': 0x1001ecf,
'Ocircumflexacute': 0x1001ed0,
'ocircumflexacute': 0x1001ed1,
'Ocircumflexgrave': 0x1001ed2,
'ocircumflexgrave': 0x1001ed3,
'Ocircumflexhook': 0x1001ed4,
'ocircumflexhook': 0x1001ed5,
'Ocircumflextilde': 0x1001ed6,
'ocircumflextilde': 0x1001ed7,
'Ocircumflexbelowdot': 0x1001ed8,
'ocircumflexbelowdot': 0x1001ed9,
'Ohornacute': 0x1001eda,
'ohornacute': 0x1001edb,
'Ohorngrave': 0x1001edc,
'ohorngrave': 0x1001edd,
'Ohornhook': 0x1001ede,
'ohornhook': 0x1001edf,
'Ohorntilde': 0x1001ee0,
'ohorntilde': 0x1001ee1,
'Ohornbelowdot': 0x1001ee2,
'ohornbelowdot': 0x1001ee3,
'Ubelowdot': 0x1001ee4,
'ubelowdot': 0x1001ee5,
'Uhook': 0x1001ee6,
'uhook': 0x1001ee7,
'Uhornacute': 0x1001ee8,
'uhornacute': 0x1001ee9,
'Uhorngrave': 0x1001eea,
'uhorngrave': 0x1001eeb,
'Uhornhook': 0x1001eec,
'uhornhook': 0x1001eed,
'Uhorntilde': 0x1001eee,
'uhorntilde': 0x1001eef,
'Uhornbelowdot': 0x1001ef0,
'uhornbelowdot': 0x1001ef1,
'Ybelowdot': 0x1001ef4,
'ybelowdot': 0x1001ef5,
'Yhook': 0x1001ef6,
'yhook': 0x1001ef7,
'Ytilde': 0x1001ef8,
'ytilde': 0x1001ef9,
'Ohorn': 0x10001a0,
'ohorn': 0x10001a1,
'Uhorn': 0x10001af,
'uhorn': 0x10001b0,
'EcuSign': 0x10020a0,
'ColonSign': 0x10020a1,
'CruzeiroSign': 0x10020a2,
'FFrancSign': 0x10020a3,
'LiraSign': 0x10020a4,
'MillSign': 0x10020a5,
'NairaSign': 0x10020a6,
'PesetaSign': 0x10020a7,
'RupeeSign': 0x10020a8,
'WonSign': 0x10020a9,
'NewSheqelSign': 0x10020aa,
'DongSign': 0x10020ab,
'EuroSign': 0x20ac,
'zerosuperior': 0x1002070,
'foursuperior': 0x1002074,
'fivesuperior': 0x1002075,
'sixsuperior': 0x1002076,
'sevensuperior': 0x1002077,
'eightsuperior': 0x1002078,
'ninesuperior': 0x1002079,
'zerosubscript': 0x1002080,
'onesubscript': 0x1002081,
'twosubscript': 0x1002082,
'threesubscript': 0x1002083,
'foursubscript': 0x1002084,
'fivesubscript': 0x1002085,
'sixsubscript': 0x1002086,
'sevensubscript': 0x1002087,
'eightsubscript': 0x1002088,
'ninesubscript': 0x1002089,
'partdifferential': 0x1002202,
'emptyset': 0x1002205,
'elementof': 0x1002208,
'notelementof': 0x1002209,
'containsas': 0x100220B,
'squareroot': 0x100221A,
'cuberoot': 0x100221B,
'fourthroot': 0x100221C,
'dintegral': 0x100222C,
'tintegral': 0x100222D,
'because': 0x1002235,
'approxeq': 0x1002248,
'notapproxeq': 0x1002247,
'notidentical': 0x1002262,
'stricteq': 0x1002263,
'braille_dot_1': 0xfff1,
'braille_dot_2': 0xfff2,
'braille_dot_3': 0xfff3,
'braille_dot_4': 0xfff4,
'braille_dot_5': 0xfff5,
'braille_dot_6': 0xfff6,
'braille_dot_7': 0xfff7,
'braille_dot_8': 0xfff8,
'braille_dot_9': 0xfff9,
'braille_dot_10': 0xfffa,
'braille_blank': 0x1002800,
'braille_dots_1': 0x1002801,
'braille_dots_2': 0x1002802,
'braille_dots_12': 0x1002803,
'braille_dots_3': 0x1002804,
'braille_dots_13': 0x1002805,
'braille_dots_23': 0x1002806,
'braille_dots_123': 0x1002807,
'braille_dots_4': 0x1002808,
'braille_dots_14': 0x1002809,
'braille_dots_24': 0x100280a,
'braille_dots_124': 0x100280b,
'braille_dots_34': 0x100280c,
'braille_dots_134': 0x100280d,
'braille_dots_234': 0x100280e,
'braille_dots_1234': 0x100280f,
'braille_dots_5': 0x1002810,
'braille_dots_15': 0x1002811,
'braille_dots_25': 0x1002812,
'braille_dots_125': 0x1002813,
'braille_dots_35': 0x1002814,
'braille_dots_135': 0x1002815,
'braille_dots_235': 0x1002816,
'braille_dots_1235': 0x1002817,
'braille_dots_45': 0x1002818,
'braille_dots_145': 0x1002819,
'braille_dots_245': 0x100281a,
'braille_dots_1245': 0x100281b,
'braille_dots_345': 0x100281c,
'braille_dots_1345': 0x100281d,
'braille_dots_2345': 0x100281e,
'braille_dots_12345': 0x100281f,
'braille_dots_6': 0x1002820,
'braille_dots_16': 0x1002821,
'braille_dots_26': 0x1002822,
'braille_dots_126': 0x1002823,
'braille_dots_36': 0x1002824,
'braille_dots_136': 0x1002825,
'braille_dots_236': 0x1002826,
'braille_dots_1236': 0x1002827,
'braille_dots_46': 0x1002828,
'braille_dots_146': 0x1002829,
'braille_dots_246': 0x100282a,
'braille_dots_1246': 0x100282b,
'braille_dots_346': 0x100282c,
'braille_dots_1346': 0x100282d,
'braille_dots_2346': 0x100282e,
'braille_dots_12346': 0x100282f,
'braille_dots_56': 0x1002830,
'braille_dots_156': 0x1002831,
'braille_dots_256': 0x1002832,
'braille_dots_1256': 0x1002833,
'braille_dots_356': 0x1002834,
'braille_dots_1356': 0x1002835,
'braille_dots_2356': 0x1002836,
'braille_dots_12356': 0x1002837,
'braille_dots_456': 0x1002838,
'braille_dots_1456': 0x1002839,
'braille_dots_2456': 0x100283a,
'braille_dots_12456': 0x100283b,
'braille_dots_3456': 0x100283c,
'braille_dots_13456': 0x100283d,
'braille_dots_23456': 0x100283e,
'braille_dots_123456': 0x100283f,
'braille_dots_7': 0x1002840,
'braille_dots_17': 0x1002841,
'braille_dots_27': 0x1002842,
'braille_dots_127': 0x1002843,
'braille_dots_37': 0x1002844,
'braille_dots_137': 0x1002845,
'braille_dots_237': 0x1002846,
'braille_dots_1237': 0x1002847,
'braille_dots_47': 0x1002848,
'braille_dots_147': 0x1002849,
'braille_dots_247': 0x100284a,
'braille_dots_1247': 0x100284b,
'braille_dots_347': 0x100284c,
'braille_dots_1347': 0x100284d,
'braille_dots_2347': 0x100284e,
'braille_dots_12347': 0x100284f,
'braille_dots_57': 0x1002850,
'braille_dots_157': 0x1002851,
'braille_dots_257': 0x1002852,
'braille_dots_1257': 0x1002853,
'braille_dots_357': 0x1002854,
'braille_dots_1357': 0x1002855,
'braille_dots_2357': 0x1002856,
'braille_dots_12357': 0x1002857,
'braille_dots_457': 0x1002858,
'braille_dots_1457': 0x1002859,
'braille_dots_2457': 0x100285a,
'braille_dots_12457': 0x100285b,
'braille_dots_3457': 0x100285c,
'braille_dots_13457': 0x100285d,
'braille_dots_23457': 0x100285e,
'braille_dots_123457': 0x100285f,
'braille_dots_67': 0x1002860,
'braille_dots_167': 0x1002861,
'braille_dots_267': 0x1002862,
'braille_dots_1267': 0x1002863,
'braille_dots_367': 0x1002864,
'braille_dots_1367': 0x1002865,
'braille_dots_2367': 0x1002866,
'braille_dots_12367': 0x1002867,
'braille_dots_467': 0x1002868,
'braille_dots_1467': 0x1002869,
'braille_dots_2467': 0x100286a,
'braille_dots_12467': 0x100286b,
'braille_dots_3467': 0x100286c,
'braille_dots_13467': 0x100286d,
'braille_dots_23467': 0x100286e,
'braille_dots_123467': 0x100286f,
'braille_dots_567': 0x1002870,
'braille_dots_1567': 0x1002871,
'braille_dots_2567': 0x1002872,
'braille_dots_12567': 0x1002873,
'braille_dots_3567': 0x1002874,
'braille_dots_13567': 0x1002875,
'braille_dots_23567': 0x1002876,
'braille_dots_123567': 0x1002877,
'braille_dots_4567': 0x1002878,
'braille_dots_14567': 0x1002879,
'braille_dots_24567': 0x100287a,
'braille_dots_124567': 0x100287b,
'braille_dots_34567': 0x100287c,
'braille_dots_134567': 0x100287d,
'braille_dots_234567': 0x100287e,
'braille_dots_1234567': 0x100287f,
'braille_dots_8': 0x1002880,
'braille_dots_18': 0x1002881,
'braille_dots_28': 0x1002882,
'braille_dots_128': 0x1002883,
'braille_dots_38': 0x1002884,
'braille_dots_138': 0x1002885,
'braille_dots_238': 0x1002886,
'braille_dots_1238': 0x1002887,
'braille_dots_48': 0x1002888,
'braille_dots_148': 0x1002889,
'braille_dots_248': 0x100288a,
'braille_dots_1248': 0x100288b,
'braille_dots_348': 0x100288c,
'braille_dots_1348': 0x100288d,
'braille_dots_2348': 0x100288e,
'braille_dots_12348': 0x100288f,
'braille_dots_58': 0x1002890,
'braille_dots_158': 0x1002891,
'braille_dots_258': 0x1002892,
'braille_dots_1258': 0x1002893,
'braille_dots_358': 0x1002894,
'braille_dots_1358': 0x1002895,
'braille_dots_2358': 0x1002896,
'braille_dots_12358': 0x1002897,
'braille_dots_458': 0x1002898,
'braille_dots_1458': 0x1002899,
'braille_dots_2458': 0x100289a,
'braille_dots_12458': 0x100289b,
'braille_dots_3458': 0x100289c,
'braille_dots_13458': 0x100289d,
'braille_dots_23458': 0x100289e,
'braille_dots_123458': 0x100289f,
'braille_dots_68': 0x10028a0,
'braille_dots_168': 0x10028a1,
'braille_dots_268': 0x10028a2,
'braille_dots_1268': 0x10028a3,
'braille_dots_368': 0x10028a4,
'braille_dots_1368': 0x10028a5,
'braille_dots_2368': 0x10028a6,
'braille_dots_12368': 0x10028a7,
'braille_dots_468': 0x10028a8,
'braille_dots_1468': 0x10028a9,
'braille_dots_2468': 0x10028aa,
'braille_dots_12468': 0x10028ab,
'braille_dots_3468': 0x10028ac,
'braille_dots_13468': 0x10028ad,
'braille_dots_23468': 0x10028ae,
'braille_dots_123468': 0x10028af,
'braille_dots_568': 0x10028b0,
'braille_dots_1568': 0x10028b1,
'braille_dots_2568': 0x10028b2,
'braille_dots_12568': 0x10028b3,
'braille_dots_3568': 0x10028b4,
'braille_dots_13568': 0x10028b5,
'braille_dots_23568': 0x10028b6,
'braille_dots_123568': 0x10028b7,
'braille_dots_4568': 0x10028b8,
'braille_dots_14568': 0x10028b9,
'braille_dots_24568': 0x10028ba,
'braille_dots_124568': 0x10028bb,
'braille_dots_34568': 0x10028bc,
'braille_dots_134568': 0x10028bd,
'braille_dots_234568': 0x10028be,
'braille_dots_1234568': 0x10028bf,
'braille_dots_78': 0x10028c0,
'braille_dots_178': 0x10028c1,
'braille_dots_278': 0x10028c2,
'braille_dots_1278': 0x10028c3,
'braille_dots_378': 0x10028c4,
'braille_dots_1378': 0x10028c5,
'braille_dots_2378': 0x10028c6,
'braille_dots_12378': 0x10028c7,
'braille_dots_478': 0x10028c8,
'braille_dots_1478': 0x10028c9,
'braille_dots_2478': 0x10028ca,
'braille_dots_12478': 0x10028cb,
'braille_dots_3478': 0x10028cc,
'braille_dots_13478': 0x10028cd,
'braille_dots_23478': 0x10028ce,
'braille_dots_123478': 0x10028cf,
'braille_dots_578': 0x10028d0,
'braille_dots_1578': 0x10028d1,
'braille_dots_2578': 0x10028d2,
'braille_dots_12578': 0x10028d3,
'braille_dots_3578': 0x10028d4,
'braille_dots_13578': 0x10028d5,
'braille_dots_23578': 0x10028d6,
'braille_dots_123578': 0x10028d7,
'braille_dots_4578': 0x10028d8,
'braille_dots_14578': 0x10028d9,
'braille_dots_24578': 0x10028da,
'braille_dots_124578': 0x10028db,
'braille_dots_34578': 0x10028dc,
'braille_dots_134578': 0x10028dd,
'braille_dots_234578': 0x10028de,
'braille_dots_1234578': 0x10028df,
'braille_dots_678': 0x10028e0,
'braille_dots_1678': 0x10028e1,
'braille_dots_2678': 0x10028e2,
'braille_dots_12678': 0x10028e3,
'braille_dots_3678': 0x10028e4,
'braille_dots_13678': 0x10028e5,
'braille_dots_23678': 0x10028e6,
'braille_dots_123678': 0x10028e7,
'braille_dots_4678': 0x10028e8,
'braille_dots_14678': 0x10028e9,
'braille_dots_24678': 0x10028ea,
'braille_dots_124678': 0x10028eb,
'braille_dots_34678': 0x10028ec,
'braille_dots_134678': 0x10028ed,
'braille_dots_234678': 0x10028ee,
'braille_dots_1234678': 0x10028ef,
'braille_dots_5678': 0x10028f0,
'braille_dots_15678': 0x10028f1,
'braille_dots_25678': 0x10028f2,
'braille_dots_125678': 0x10028f3,
'braille_dots_35678': 0x10028f4,
'braille_dots_135678': 0x10028f5,
'braille_dots_235678': 0x10028f6,
'braille_dots_1235678': 0x10028f7,
'braille_dots_45678': 0x10028f8,
'braille_dots_145678': 0x10028f9,
'braille_dots_245678': 0x10028fa,
'braille_dots_1245678': 0x10028fb,
'braille_dots_345678': 0x10028fc,
'braille_dots_1345678': 0x10028fd,
'braille_dots_2345678': 0x10028fe,
'braille_dots_12345678': 0x10028ff,
}
|
|
# -*- coding: utf-8 -*-
"""
Role
====
The ``PluginFileLocator`` locates plugins when they are accessible via the filesystem.
Its default behaviour is to look for text files with the
'.yapsy-plugin' extension and to read the plugin's description in
them.
Customization
-------------
The behaviour of a ``PluginFileLocator`` can be customized by instantiating it with a specific 'analyzer'.
Two analyzers are already implemented and provided here:
``PluginFileAnalyzerWithInfoFile``
the default 'analyzer', which looks for plugin 'info files' as
text files with a predefined extension. This implements the way
yapsy has looked for plugins since version 1.
``PluginFileAnalyzerMathingRegex``
looks for files matching a given regular expression and considers
each matching file as being the plugin itself.
The ``PluginFileLocator`` also enforces the ``plugin locator`` policy as defined by ``IPluginLocator`` and used by ``PluginManager``.
``info_ext``
expects a plugin to be discovered through its *plugin info file*.
The user just needs to provide an extension (without the '.') to look
for the *plugin_info_file*.
``regexp``
looks for files matching the given regular expression pattern.
The user just needs to provide the regular expression pattern.
All analyzers must enforce the policy represented by the ``IPluginFileAnalyzer`` interface.
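For example, a locator can be built around the regex-based analyzer
instead of the default one (a minimal sketch, assuming this module is
importable as ``yapsy.PluginFileLocator``; the regex and the plugin
directory below are only illustrative)::
    from yapsy.PluginFileLocator import PluginFileLocator, PluginFileAnalyzerMathingRegex
    # consider every file ending in '_plugin.py' as a plugin
    analyzer = PluginFileAnalyzerMathingRegex("regexp", r".*_plugin\.py$")
    locator = PluginFileLocator(analyzers=[analyzer])
    locator.setPluginPlaces(["path/to/plugins"])
    candidates, num_found = locator.locatePlugins()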
"""
import os
import re
from . import log
from .compat import ConfigParser, is_py2
from .PluginInfo import PluginInfo
from . import PLUGIN_NAME_FORBIDEN_STRING
from .IPluginLocator import IPluginLocator
class IPluginFileAnalyzer(object):
"""
Define the methods expected by PluginFileLocator for its 'analyzer'.
"""
def __init__(self,name):
self.name = name
def isValidPlugin(self, filename):
"""
Check if the resource found at filename is a valid plugin.
"""
raise NotImplementedError("'isValidPlugin' must be reimplemented by %s" % self)
def getInfosDictFromPlugin(self, dirpath, filename):
"""
Returns the extracted plugin information as a dictionary.
This function ensures that "name" and "path" are provided.
*dirpath* is the full path to the directory where the plugin file is.
*filename* is the name (i.e. the basename) of the plugin file.
If no *callback* function has been provided for this strategy,
the filename alone is used to extract minimal information.
"""
raise NotImplementedError("'getInfosDictFromPlugin' must be reimplemented by %s" % self)
class PluginFileAnalyzerWithInfoFile(IPluginFileAnalyzer):
"""
Consider plugins described by a textual description file.
A plugin is expected to be described by a text file ('ini' format) with a specific extension (.yapsy-plugin by default).
This file must contain at least the following information::
[Core]
Name = name of the module
Module = relative_path/to/python_file_or_directory
Optionally the description file may also contain the following section (in addition to the above one)::
[Documentation]
Author = Author Name
Version = Major.minor
Website = url_for_plugin
Description = A simple one-sentence description
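A complete plugin info file could therefore look like this (all values
are only illustrative)::
    [Core]
    Name = My Plugin
    Module = my_plugin
    [Documentation]
    Author = Jane Doe
    Version = 0.1
    Website = http://example.com
    Description = A plugin that does something useful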
"""
def __init__(self, name, extensions="yapsy-plugin"):
"""
Creates a new analyzer named *name* and dedicated to check and analyze plugins described by a textual "info file".
*name* is the name of the analyzer.
*extensions* is the expected extension (or extensions) of the plugin info file. May be a string or a tuple of strings if several extensions are expected.
"""
IPluginFileAnalyzer.__init__(self,name)
self.setPluginInfoExtension(extensions)
def setPluginInfoExtension(self,extensions):
"""
Set the extension that will identify a plugin info file.
*extensions* May be a string or a tuple of strings if several extensions are expected.
"""
# Make sure extension is a tuple
if not isinstance(extensions, tuple):
extensions = (extensions, )
self.expectedExtensions = extensions
def isValidPlugin(self, filename):
"""
Check if the given filename is a valid plugin info file, based on the expected extension(s).
If several extensions are provided, the first one that matches causes
the function to return successfully.
"""
res = False
for ext in self.expectedExtensions:
if filename.endswith(".%s" % ext):
res = True
break
return res
def getPluginNameAndModuleFromStream(self, infoFileObject, candidate_infofile=None):
"""
Extract the name and module of a plugin from the
content of the info file that describes it and which
is stored in ``infoFileObject``.
.. note:: Prefer using ``_extractCorePluginInfo``
instead, whenever possible...
.. warning:: ``infoFileObject`` must be a file-like object:
either an opened file for instance or a string
buffer wrapped in a StringIO instance as another
example.
.. note:: ``candidate_infofile`` must be provided
whenever possible to get better error messages.
Return a 3-tuple with the name of the plugin, its
module and the config_parser used to gather the core
data, if the required info could be located; else
return ``(None, None, None)``.
.. note:: This is supposed to be used internally by subclasses
and decorators.
"""
# parse the information buffer to get info about the plugin
config_parser = ConfigParser()
try:
if is_py2:
config_parser.readfp(infoFileObject)
else:
config_parser.read_file(infoFileObject)
except Exception as e:
log.debug("Could not parse the plugin file '%s' (exception raised was '%s')" % (candidate_infofile,e))
return (None, None, None)
# check if the basic info is available
if not config_parser.has_section("Core"):
log.debug("Plugin info file has no 'Core' section (in '%s')" % candidate_infofile)
return (None, None, None)
if not config_parser.has_option("Core","Name") or not config_parser.has_option("Core","Module"):
log.debug("Plugin info file has no 'Name' or 'Module' section (in '%s')" % candidate_infofile)
return (None, None, None)
# check that the given name is valid
name = config_parser.get("Core", "Name")
name = name.strip()
if PLUGIN_NAME_FORBIDEN_STRING in name:
log.debug("Plugin name contains forbiden character: %s (in '%s')" % (PLUGIN_NAME_FORBIDEN_STRING,
candidate_infofile))
return (None, None, None)
return (name, config_parser.get("Core", "Module"), config_parser)
def _extractCorePluginInfo(self,directory, filename):
"""
Gather the core information (name, and module to be loaded)
about a plugin described by its info file (found at
'directory/filename').
Return a dictionary with name and path of the plugin as well
as the ConfigParser instance used to collect these info.
.. note:: This is supposed to be used internally by subclasses
and decorators.
"""
# now we can consider the file as a serious candidate
if not isinstance(filename, str):
# filename is a file object: use it
name, moduleName, config_parser = self.getPluginNameAndModuleFromStream(filename)
else:
candidate_infofile_path = os.path.join(directory, filename)
# parse the information file to get info about the plugin
with open(candidate_infofile_path) as candidate_infofile:
name, moduleName, config_parser = self.getPluginNameAndModuleFromStream(candidate_infofile,candidate_infofile_path)
if (name, moduleName, config_parser) == (None, None, None):
return (None,None)
infos = {"name":name, "path":os.path.join(directory, moduleName)}
return infos, config_parser
def _extractBasicPluginInfo(self,directory, filename):
"""
Gather some basic documentation about the plugin described by
its info file (found at 'directory/filename').
Return a dictionary containing the core information (name and
path) as well as the 'documentation' info (version, author,
description etc).
See also:
``self._extractCorePluginInfo``
"""
infos, config_parser = self._extractCorePluginInfo(directory, filename)
# collect additional (but usually quite useful) information
if infos and config_parser and config_parser.has_section("Documentation"):
if config_parser.has_option("Documentation","Author"):
infos["author"] = config_parser.get("Documentation", "Author")
if config_parser.has_option("Documentation","Version"):
infos["version"] = config_parser.get("Documentation", "Version")
if config_parser.has_option("Documentation","Website"):
infos["website"] = config_parser.get("Documentation", "Website")
if config_parser.has_option("Documentation","Copyright"):
infos["copyright"] = config_parser.get("Documentation", "Copyright")
if config_parser.has_option("Documentation","Description"):
infos["description"] = config_parser.get("Documentation", "Description")
return infos, config_parser
def getInfosDictFromPlugin(self, dirpath, filename):
"""
Returns the extracted plugin information as a dictionary.
This function ensures that "name" and "path" are provided.
If no *callback* function has been provided for this strategy,
the filename alone is used to extract minimal information.
"""
infos, config_parser = self._extractBasicPluginInfo(dirpath, filename)
if not infos or infos.get("name", None) is None:
raise ValueError("Missing *name* of the plugin in extracted infos.")
if not infos or infos.get("path", None) is None:
raise ValueError("Missing *path* of the plugin in extracted infos.")
return infos, config_parser
class PluginFileAnalyzerMathingRegex(IPluginFileAnalyzer):
"""
An analyzer that targets plugins described by files whose names match a given regex.
"""
def __init__(self, name, regexp):
IPluginFileAnalyzer.__init__(self,name)
self.regexp = regexp
def isValidPlugin(self, filename):
"""
Checks if the given filename is a valid plugin for this strategy.
"""
reg = re.compile(self.regexp)
if reg.match(filename) is not None:
return True
return False
def getInfosDictFromPlugin(self, dirpath, filename):
"""
Returns the extracted plugin information as a dictionary.
This function ensures that "name" and "path" are provided.
"""
# use the filename alone to extract minimal information.
infos = {}
module_name = os.path.splitext(filename)[0]
plugin_filename = os.path.join(dirpath,filename)
if module_name == "__init__":
module_name = os.path.basename(dirpath)
plugin_filename = dirpath
infos["name"] = "%s" % module_name
infos["path"] = plugin_filename
cf_parser = ConfigParser()
cf_parser.add_section("Core")
cf_parser.set("Core","Name",infos["name"])
cf_parser.set("Core","Module",infos["path"])
return infos,cf_parser
class PluginFileLocator(IPluginLocator):
"""
Locates plugins on the file system using a set of analyzers to
determine which files actually correspond to plugins.
If more than one analyzer is in use, the first one to discover a
new plugin prevents the other strategies from finding it too.
By default each directory set as a "plugin place" is scanned
recursively. You can change that by a call to
``disableRecursiveScan``.
"""
def __init__(self, analyzers=None, plugin_info_cls=PluginInfo):
"""
Defines the analysis strategies and the places in which to look for plugins.
"""
IPluginLocator.__init__(self)
self._discovered_plugins = {}
self.setPluginPlaces(None)
self._analyzers = analyzers # analyzers used to locate plugins
if self._analyzers is None:
self._analyzers = [PluginFileAnalyzerWithInfoFile("info_ext")]
self._default_plugin_info_cls = PluginInfo
self._plugin_info_cls_map = {}
self._max_size = 1e3*1024 # maximum info file size, in bytes (roughly 1 MB by default)
self.recursive = True
def disableRecursiveScan(self):
"""
Disable recursive scan of the directories given as plugin places.
"""
self.recursive = False
def setAnalyzers(self, analyzers):
"""
Sets a new set of analyzers.
.. warning:: the new analyzers won't be aware of the plugin
info class that may have been set via a previous
call to ``setPluginInfoClass``.
"""
self._analyzers = analyzers
def removeAnalyzers(self, name):
"""
Removes analyzers of a given name.
"""
analyzersListCopy = self._analyzers[:]
foundAndRemoved = False
for obj in analyzersListCopy:
if obj.name == name:
self._analyzers.remove(obj)
foundAndRemoved = True
if not foundAndRemoved:
log.debug("'%s' is not a known strategy name: can't remove it." % name)
def removeAllAnalyzer(self):
"""
Remove all analyzers.
"""
self._analyzers = []
def appendAnalyzer(self, analyzer):
"""
Append an analyzer to the existing list.
"""
self._analyzers.append(analyzer)
def _getInfoForPluginFromAnalyzer(self,analyzer,dirpath, filename):
"""
Return an instance of plugin_info_cls filled with data extracted by the analyzer.
May return None if the analyzer fails to extract any info.
"""
plugin_info_dict,config_parser = analyzer.getInfosDictFromPlugin(dirpath, filename)
if plugin_info_dict is None:
return None
plugin_info_cls = self._plugin_info_cls_map.get(analyzer.name,self._default_plugin_info_cls)
plugin_info = plugin_info_cls(plugin_info_dict["name"],plugin_info_dict["path"])
plugin_info.details = config_parser
return plugin_info
def locatePlugins(self):
"""
Walk through the plugins' places and look for plugins.
Return the candidates and number of plugins found.
"""
_candidates = []
_discovered = {}
for directory in map(os.path.abspath, self.plugins_places):
# first of all, is it a directory :)
if not os.path.isdir(directory):
log.debug("%s skips %s (not a directory)" % (self.__class__.__name__, directory))
continue
if self.recursive:
debug_txt_mode = "recursively"
walk_iter = os.walk(directory, followlinks=True)
else:
debug_txt_mode = "non-recursively"
walk_iter = [(directory,[],os.listdir(directory))]
# iteratively walks through the directory
log.debug("%s walks (%s) into directory: %s" % (self.__class__.__name__, debug_txt_mode, directory))
for item in walk_iter:
dirpath = item[0]
for filename in item[2]:
for analyzer in self._analyzers:
# eliminate the obvious non plugin files
if not analyzer.isValidPlugin(filename):
log.debug("%s is not a valid plugin for strategy %s" % (filename, analyzer.name))
continue
candidate_infofile = os.path.join(dirpath, filename)
if candidate_infofile in _discovered:
log.debug("%s (with strategy %s) rejected because already discovered" % (candidate_infofile, analyzer.name))
continue
log.debug("%s found a candidate:\n %s" % (self.__class__.__name__, candidate_infofile))
plugin_info = self._getInfoForPluginFromAnalyzer(analyzer, dirpath, filename)
if plugin_info is None:
log.warning("Plugin candidate '%s' rejected by strategy '%s'" % (candidate_infofile, analyzer.name))
break # we consider this was the right strategy to use: it failed -> not a plugin -> don't try another strategy
# now determine the path of the file to execute,
# depending on whether the path indicated is a
# directory or a file
# Remember all the files belonging to a discovered
# plugin, so that strategies (if several in use) won't
# collide
if os.path.isdir(plugin_info.path):
candidate_filepath = os.path.join(plugin_info.path, "__init__")
# it is a package: add all the files concerned
for _file in os.listdir(plugin_info.path):
if _file.endswith(".py"):
self._discovered_plugins[os.path.join(plugin_info.path, _file)] = candidate_filepath
_discovered[os.path.join(plugin_info.path, _file)] = candidate_filepath
elif (plugin_info.path.endswith(".py") and os.path.isfile(plugin_info.path)) or os.path.isfile(plugin_info.path+".py"):
candidate_filepath = plugin_info.path
if candidate_filepath.endswith(".py"):
candidate_filepath = candidate_filepath[:-3]
# it is a single file: add it
self._discovered_plugins[".".join((plugin_info.path, "py"))] = candidate_filepath
_discovered[".".join((plugin_info.path, "py"))] = candidate_filepath
else:
log.error("Plugin candidate rejected: cannot find the file or directory module for '%s'" % (candidate_infofile))
break
_candidates.append((candidate_infofile, candidate_filepath, plugin_info))
# finally the candidate_infofile must not be discovered again
_discovered[candidate_infofile] = candidate_filepath
self._discovered_plugins[candidate_infofile] = candidate_filepath
return _candidates, len(_candidates)
def gatherCorePluginInfo(self, directory, filename):
"""
Return a ``PluginInfo`` as well as the ``ConfigParser`` used to build it,
if *filename* is a valid plugin discovered by any of the known
strategies in use; return ``(None, None)`` otherwise.
"""
for analyzer in self._analyzers:
# eliminate the obvious non plugin files
if not analyzer.isValidPlugin(filename):
continue
plugin_info = self._getInfoForPluginFromAnalyzer(analyzer,directory, filename)
return plugin_info,plugin_info.details
return None,None
# -----------------------------------------------
# Backward compatible methods
# Note: their implementation must conform to their
# counterparts in yapsy<1.10
# -----------------------------------------------
def getPluginNameAndModuleFromStream(self, infoFileObject, candidate_infofile=None):
for analyzer in self._analyzers:
if analyzer.name == "info_ext":
return analyzer.getPluginNameAndModuleFromStream(infoFileObject)
else:
raise RuntimeError("No current file analyzer is able to provide plugin information from stream")
def setPluginInfoClass(self, picls, name=None):
"""
Set the class that holds PluginInfo. The class should inherit
from ``PluginInfo``.
If name is given, then the class will be used only by the corresponding analyzer.
If name is None, the class will be set for all analyzers.
"""
if name is None:
self._default_plugin_info_cls = picls
self._plugin_info_cls_map = {}
else:
self._plugin_info_cls_map[name] = picls
def setPluginPlaces(self, directories_list):
"""
Set the list of directories in which to look for plugins (the 'plugin places').
"""
if directories_list is None:
directories_list = [os.path.dirname(__file__)]
self.plugins_places = directories_list
def updatePluginPlaces(self, directories_list):
"""
Update the list of directories in which to look for plugins (the 'plugin places').
"""
self.plugins_places = list(set.union(set(directories_list), set(self.plugins_places)))
def setPluginInfoExtension(self, ext):
"""
DEPRECATED(>1.9): kept for backward compatibility. Directly configure the
IPluginLocator instance instead!
This will only work if the strategy "info_ext" is active
for locating plugins.
"""
for analyzer in self._analyzers:
if analyzer.name == "info_ext":
analyzer.setPluginInfoExtension(ext)
|
|
#!/usr/bin/env python
"""
Copyright (c) 2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import xgmii_ep
import baser_serdes_ep
module = 'eth_phy_10g'
testbench = 'test_%s_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/eth_phy_10g_rx.v")
srcs.append("../rtl/eth_phy_10g_rx_if.v")
srcs.append("../rtl/eth_phy_10g_rx_ber_mon.v")
srcs.append("../rtl/eth_phy_10g_rx_frame_sync.v")
srcs.append("../rtl/eth_phy_10g_tx.v")
srcs.append("../rtl/eth_phy_10g_tx_if.v")
srcs.append("../rtl/xgmii_baser_dec_64.v")
srcs.append("../rtl/xgmii_baser_enc_64.v")
srcs.append("../rtl/lfsr.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
DATA_WIDTH = 64
CTRL_WIDTH = (DATA_WIDTH//8)  # integer division so the intbv slice width stays an int
HDR_WIDTH = 2
BIT_REVERSE = 0
SCRAMBLER_DISABLE = 0
PRBS31_ENABLE = 1
TX_SERDES_PIPELINE = 2
RX_SERDES_PIPELINE = 2
BITSLIP_HIGH_CYCLES = 1
BITSLIP_LOW_CYCLES = 8
COUNT_125US = 1250/6.4
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
rx_clk = Signal(bool(0))
rx_rst = Signal(bool(0))
tx_clk = Signal(bool(0))
tx_rst = Signal(bool(0))
xgmii_txd = Signal(intbv(0)[DATA_WIDTH:])
xgmii_txc = Signal(intbv(0)[CTRL_WIDTH:])
serdes_rx_data = Signal(intbv(0)[DATA_WIDTH:])
serdes_rx_hdr = Signal(intbv(1)[HDR_WIDTH:])
tx_prbs31_enable = Signal(bool(0))
rx_prbs31_enable = Signal(bool(0))
serdes_rx_data_int = Signal(intbv(0)[DATA_WIDTH:])
serdes_rx_hdr_int = Signal(intbv(1)[HDR_WIDTH:])
# Outputs
xgmii_rxd = Signal(intbv(0)[DATA_WIDTH:])
xgmii_rxc = Signal(intbv(0)[CTRL_WIDTH:])
serdes_tx_data = Signal(intbv(0)[DATA_WIDTH:])
serdes_tx_hdr = Signal(intbv(0)[HDR_WIDTH:])
serdes_rx_bitslip = Signal(bool(0))
rx_error_count = Signal(intbv(0)[7:])
rx_bad_block = Signal(bool(0))
rx_block_lock = Signal(bool(0))
rx_high_ber = Signal(bool(0))
# sources and sinks
xgmii_source = xgmii_ep.XGMIISource()
xgmii_source_logic = xgmii_source.create_logic(
tx_clk,
tx_rst,
txd=xgmii_txd,
txc=xgmii_txc,
name='xgmii_source'
)
xgmii_sink = xgmii_ep.XGMIISink()
xgmii_sink_logic = xgmii_sink.create_logic(
rx_clk,
rx_rst,
rxd=xgmii_rxd,
rxc=xgmii_rxc,
name='xgmii_sink'
)
serdes_source = baser_serdes_ep.BaseRSerdesSource()
serdes_source_logic = serdes_source.create_logic(
rx_clk,
tx_data=serdes_rx_data_int,
tx_header=serdes_rx_hdr_int,
name='serdes_source'
)
serdes_sink = baser_serdes_ep.BaseRSerdesSink()
serdes_sink_logic = serdes_sink.create_logic(
tx_clk,
rx_data=serdes_tx_data,
rx_header=serdes_tx_hdr,
name='serdes_sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
rx_clk=rx_clk,
rx_rst=rx_rst,
tx_clk=tx_clk,
tx_rst=tx_rst,
xgmii_txd=xgmii_txd,
xgmii_txc=xgmii_txc,
xgmii_rxd=xgmii_rxd,
xgmii_rxc=xgmii_rxc,
serdes_tx_data=serdes_tx_data,
serdes_tx_hdr=serdes_tx_hdr,
serdes_rx_data=serdes_rx_data,
serdes_rx_hdr=serdes_rx_hdr,
serdes_rx_bitslip=serdes_rx_bitslip,
rx_error_count=rx_error_count,
rx_bad_block=rx_bad_block,
rx_block_lock=rx_block_lock,
rx_high_ber=rx_high_ber,
tx_prbs31_enable=tx_prbs31_enable,
rx_prbs31_enable=rx_prbs31_enable
)
@always(delay(4))
def clkgen():
clk.next = not clk
rx_clk.next = not rx_clk
tx_clk.next = not tx_clk
load_bit_offset = []
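# Emulate the SERDES bit alignment: 'shift_bits' re-serializes the 66-bit
# blocks (64 data bits plus 2 header bits) coming from the source and applies
# a variable bit offset across block boundaries. The offset increments
# whenever the DUT asserts serdes_rx_bitslip (modulo 66), which exercises the
# receiver's block/frame synchronization logic.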
@instance
def shift_bits():
bit_offset = 0
last_data = 0
while True:
yield clk.posedge
if load_bit_offset:
bit_offset = load_bit_offset.pop(0)
if serdes_rx_bitslip:
bit_offset += 1
bit_offset = bit_offset % 66
data = int(serdes_rx_data_int) << 2 | int(serdes_rx_hdr_int)
out_data = ((last_data | data << 66) >> 66-bit_offset) & 0x3ffffffffffffffff
last_data = data
serdes_rx_data.next = out_data >> 2
serdes_rx_hdr.next = out_data & 3
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
tx_rst.next = 1
rx_rst.next = 1
yield clk.posedge
rst.next = 0
tx_rst.next = 0
rx_rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
# wait for block lock
while not rx_block_lock:
yield clk.posedge
# dump garbage
while not xgmii_sink.empty():
xgmii_sink.recv()
yield clk.posedge
print("test 1: test RX packet")
current_test.next = 1
test_frame = bytearray(range(128))
xgmii_frame = xgmii_ep.XGMIIFrame(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame)
xgmii_source.send(xgmii_frame)
yield serdes_sink.wait()
rx_frame = serdes_sink.recv()
assert rx_frame.data == xgmii_frame.data
assert xgmii_sink.empty()
assert serdes_sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: test TX packet")
current_test.next = 2
test_frame = bytearray(range(128))
xgmii_frame = xgmii_ep.XGMIIFrame(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+test_frame)
serdes_source.send(xgmii_frame)
yield xgmii_sink.wait()
rx_frame = xgmii_sink.recv()
assert rx_frame.data == xgmii_frame.data
assert xgmii_sink.empty()
assert serdes_sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
"""
:class:`.Bing` geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_FORMAT_STRING, \
DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.location import Location
from geopy.exc import (
GeocoderAuthenticationFailure,
GeocoderQuotaExceeded,
GeocoderInsufficientPrivileges,
GeocoderUnavailable,
GeocoderServiceError,
)
from geopy.util import logger, join_filter
__all__ = ("Bing", )
class Bing(Geocoder):
"""
Geocoder using the Bing Maps Locations API. Documentation at:
https://msdn.microsoft.com/en-us/library/ff701715.aspx
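Example usage (a minimal sketch; the API key below is a placeholder)::
    from geopy.geocoders import Bing
    geocoder = Bing(api_key="YOUR_BING_MAPS_KEY")
    location = geocoder.geocode("175 5th Avenue NYC")
    print(location.latitude, location.longitude)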
"""
structured_query_params = [
'addressLine',
'locality',
'adminDistrict',
'countryRegion',
'postalCode'
]
def __init__(
self,
api_key,
format_string=DEFAULT_FORMAT_STRING,
scheme=DEFAULT_SCHEME,
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None,
): # pylint: disable=R0913
"""Initialize a customized Bing geocoder with location-specific
address information and your Bing Maps API key.
:param string api_key: Should be a valid Bing Maps API key.
:param string format_string: String containing '%s' where the
string to geocode should be interpolated before querying the
geocoder. For example: '%s, Mountain View, CA'. The default
is just '%s'.
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
.. versionadded:: 0.97
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception.
.. versionadded:: 0.97
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
.. versionadded:: 0.96
"""
super(Bing, self).__init__(format_string, scheme, timeout, proxies, user_agent=user_agent)
self.api_key = api_key
self.api = "%s://dev.virtualearth.net/REST/v1/Locations" % self.scheme
def geocode(
self,
query,
exactly_one=True,
user_location=None,
timeout=None,
culture=None,
include_neighborhood=None,
include_country_code=False
): # pylint: disable=W0221
"""
Geocode an address.
:param string query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `addressLine`, `locality` (city), `adminDistrict` (state), `countryRegion`, or
`postalCode`.
:param bool exactly_one: Return one result or a list of results, if
available.
:param user_location: Prioritize results closer to
this location.
.. versionadded:: 0.96
:type user_location: :class:`geopy.point.Point`
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
.. versionadded:: 0.97
:param string culture: Affects the language of the response,
must be a two-letter country code.
.. versionadded:: 1.4.0
:param boolean include_neighborhood: Sets whether to include the
neighborhood field in the response.
.. versionadded:: 1.4.0
:param boolean include_country_code: Sets whether to include the
two-letter ISO code of the country in the response (field name
'countryRegionIso2').
.. versionadded:: 1.4.0
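For example, a structured query can be passed as a dictionary (a
minimal sketch; the API key and address values are only
illustrative)::
    geocoder = Bing(api_key="YOUR_BING_MAPS_KEY")
    geocoder.geocode({
        'addressLine': '1 Microsoft Way',
        'locality': 'Redmond',
        'adminDistrict': 'WA',
        'countryRegion': 'US',
    })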
"""
if isinstance(query, dict):
params = {}
for key, val in query.items():
if key in self.structured_query_params:
params[key] = val
params['key'] = self.api_key
else:
params = {
'query': self.format_string % query,
'key': self.api_key
}
if user_location:
params['userLocation'] = ",".join(
(str(user_location.latitude), str(user_location.longitude))
)
if exactly_one is True:
params['maxResults'] = 1
if culture:
params['culture'] = culture
if include_neighborhood is not None:
params['includeNeighborhood'] = include_neighborhood
if include_country_code:
params['include'] = 'ciso2' # the only acceptable value
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
def reverse(self, query, exactly_one=True, timeout=None):
"""
Reverse geocode a point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s".
:param bool exactly_one: Return one result, or a list?
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
.. versionadded:: 0.97
"""
point = self._coerce_point_to_string(query)
params = {'key': self.api_key}
url = "%s/%s?%s" % (
self.api, point, urlencode(params))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
@staticmethod
def _parse_json(doc, exactly_one=True): # pylint: disable=W0221
"""
Parse a location name, latitude, and longitude from a JSON response.
"""
status_code = doc.get("statusCode", 200)
if status_code != 200:
err = doc.get("errorDetails", "")
if status_code == 401:
raise GeocoderAuthenticationFailure(err)
elif status_code == 403:
raise GeocoderInsufficientPrivileges(err)
elif status_code == 429:
raise GeocoderQuotaExceeded(err)
elif status_code == 503:
raise GeocoderUnavailable(err)
else:
raise GeocoderServiceError(err)
resources = doc['resourceSets'][0]['resources']
if resources is None or not len(resources): # pragma: no cover
return None
def parse_resource(resource):
"""
Parse each return object.
"""
stripchars = ", \n"
addr = resource['address']
address = addr.get('addressLine', '').strip(stripchars)
city = addr.get('locality', '').strip(stripchars)
state = addr.get('adminDistrict', '').strip(stripchars)
zipcode = addr.get('postalCode', '').strip(stripchars)
country = addr.get('countryRegion', '').strip(stripchars)
city_state = join_filter(", ", [city, state])
place = join_filter(" ", [city_state, zipcode])
location = join_filter(", ", [address, place, country])
latitude = resource['point']['coordinates'][0] or None
longitude = resource['point']['coordinates'][1] or None
if latitude and longitude:
latitude = float(latitude)
longitude = float(longitude)
return Location(location, (latitude, longitude), resource)
if exactly_one:
return parse_resource(resources[0])
else:
return [parse_resource(resource) for resource in resources]
|
|
import pygame, sys, picamera, time, pyImgShow
from pygame.locals import *
from pyImgShow import *
from png_overlay import *
def text_screencenter( show_text, text_color,screen_info, DISPLAYSURF):
if len(show_text) < 2:
varfontObj= pygame.font.Font('freesansbold.ttf', int(screen_info.current_w / 2 ))
else:
varfontObj= pygame.font.Font('freesansbold.ttf', int(screen_info.current_w/ (len(show_text))))
textSurfaceObj = varfontObj.render(show_text, True, text_color)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = ( screen_info.current_w/2,
screen_info.current_h/2)
DISPLAYSURF.blit(textSurfaceObj, textRectObj)
def text_size_screencenter( show_text, text_color,screen_info, DISPLAYSURF, font_size):
varfontObj= pygame.font.Font('freesansbold.ttf', font_size)
textSurfaceObj = varfontObj.render(show_text, True, text_color)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = ( screen_info.current_w/2,
screen_info.current_h/2)
DISPLAYSURF.blit(textSurfaceObj, textRectObj)
def text_size_screencenter_bottom( show_text, text_color,screen_info, DISPLAYSURF, font_size):
varfontObj= pygame.font.Font('freesansbold.ttf', font_size)
textSurfaceObj = varfontObj.render(show_text, True, text_color)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = ( screen_info.current_w/2,
screen_info.current_h - textRectObj.height/2)
DISPLAYSURF.blit(textSurfaceObj, textRectObj)
def text_size_screencenter_top( show_text, text_color,screen_info, DISPLAYSURF, font_size):
varfontObj= pygame.font.Font('freesansbold.ttf', font_size)
textSurfaceObj = varfontObj.render(show_text, True, text_color)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = ( screen_info.current_w/2,
0 + textRectObj.height/2)
DISPLAYSURF.blit(textSurfaceObj, textRectObj)
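# Take a single photo: show a full-screen text countdown over the camera
# preview, flash the capture background colour, capture the image to
# '<file_name>.<img_file_type>' and then display the captured picture.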
def picam_booth_1shot(file_name, resolution, transparency, capture_bgcolor,
countdown_list, text_color, bg_color, img_file_type,
screen_info, DISPLAYSURF):
screen_info = pygame.display.Info()
DISPLAYSURF = pygame.display.set_mode( (screen_info.current_w,
screen_info.current_h),
FULLSCREEN)
countdown_len = len(countdown_list)
print(countdown_len)
with picamera.PiCamera(sensor_mode=2) as camera:
camera.resolution = resolution
camera.hflip = True
camera.vflip = False
camera.brightness = 65
camera.awb_mode = 'off'
camera.awb_gains = (1.4, 1.8)
camera.start_preview()
camera.preview_alpha = transparency
image_file = []
image_file.append(file_name)
image_file.append(".")
image_file.append(img_file_type)
image_file_name = ''.join(image_file)
print(image_file_name)
time.sleep(2)
for i in xrange(0, countdown_len, +1):
DISPLAYSURF.fill(bg_color)
text_screencenter( countdown_list[i], text_color,screen_info, DISPLAYSURF)
pygame.display.update()
pygame.time.wait(1000)
DISPLAYSURF.fill(capture_bgcolor)
pygame.display.update()
camera.capture(image_file_name)
Img_w_bgcolor( image_file_name, capture_bgcolor ,screen_info, DISPLAYSURF)
camera.stop_preview()
pygame.time.wait(1000)
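# Same as picam_booth_1shot, but overlays a PNG mask on the camera preview,
# uses PNG images for the countdown and reads brightness and white-balance
# gains from the 'camara_param' file instead of using fixed values.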
def picam_booth_1shot_overlay(file_name, resolution, capture_bgcolor,
mask_img, countdown_img_list, bg_color, img_file_type,
screen_info, DISPLAYSURF):
screen_info = pygame.display.Info()
DISPLAYSURF = pygame.display.set_mode( (screen_info.current_w,
screen_info.current_h),
FULLSCREEN)
countdown_len = len(countdown_img_list)
print(countdown_len)
with picamera.PiCamera(sensor_mode=2) as camera:
camera.resolution = resolution
camera.hflip = True
camera.vflip = False
camera.awb_mode = 'off'
file = open("camara_param", "r")
line = file.readline()
file.close()
params = line.split(" ")
camera.brightness = int(params[2])
camera.awb_gains = (float(params[0]), float(params[1]))
camera.start_preview()
image_file = []
image_file.append(file_name)
image_file.append(".")
image_file.append(img_file_type)
image_file_name = ''.join(image_file)
print(image_file_name)
time.sleep(2)
preview_mask = PngOverlay(mask_img) #default =4, 1 layer higher than PiCamera Preview
preview_mask.show()
time.sleep(2)
cd_num = PngOverlay(countdown_img_list[0], 5) # layer 5, 1 layer higher than Preview mask
time.sleep(1)
for cd_img in countdown_img_list:
DISPLAYSURF.fill(bg_color)
cd_num.setImage(cd_img)
cd_num.show()
pygame.display.update()
pygame.time.wait(1000)
cd_num.hide()
camera.capture(image_file_name)
preview_mask.hide()
camera.stop_preview()
Img_w_bgcolor( image_file_name, capture_bgcolor ,screen_info, DISPLAYSURF)
pygame.time.wait(500)
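# Multi-shot capture: announce each entry in nshot_list, then delegate shot j to
# picam_booth_1shot using "<file_name>_<j>" as the file name.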
def picam_booth_nshot(file_name, resolution, transparency, capture_bgcolor,
countdown_list, text_color, bg_color,
nshot_list, nshot_txt_color, nshot_bg_color, img_file_type,
screen_info, DISPLAYSURF):
nshot_len = len(nshot_list)
print("start nshot sub")
    for j in xrange(nshot_len):
DISPLAYSURF.fill(nshot_bg_color)
        text_screencenter(nshot_list[j], nshot_txt_color, screen_info, DISPLAYSURF)
pygame.display.update()
pygame.time.wait(1000)
nshot_filename = "".join((file_name,"_", str(j)))
print(nshot_filename)
picam_booth_1shot(nshot_filename, resolution, transparency, capture_bgcolor,
countdown_list, text_color, bg_color,img_file_type,
screen_info, DISPLAYSURF)
print("exit multi shot")
def picam_booth_nshot_overlay(file_name, resolution, capture_bgcolor,
mask_img_list, countdown_img_list, bg_color,
nshot_list, nshot_txt_color, nshot_bg_color,img_file_type,
screen_info, DISPLAYSURF):
nshot_len = len(nshot_list)
print("start nshot sub")
    for j in xrange(nshot_len):
DISPLAYSURF.fill(nshot_bg_color)
text_screencenter( nshot_list[j], nshot_txt_color,screen_info, DISPLAYSURF)
pygame.display.update()
pygame.time.wait(1000)
nshot_filename = "".join((file_name,"_", str(j)))
print(nshot_filename)
picam_booth_1shot_overlay(nshot_filename, resolution, capture_bgcolor,
mask_img_list[j], countdown_img_list, bg_color,img_file_type,
screen_info, DISPLAYSURF)
print("exit multi shot")
# Start Test Code (guarded by 'if 0:' so it does not run by default)
if 0:
#Set Count-down Display String
nshot_list=[]
nshot_list.append("1st SHOT")
nshot_list.append("2nd SHOT")
nshot_list.append("3rd SHOT")
nshot_list.append("4th SHOT")
countdown_list=[]
countdown_list.append("READY")
countdown_list.append("3")
countdown_list.append("2")
countdown_list.append("1")
countdown_list.append("SMILE")
    print(countdown_list)
    print(countdown_list[1])
WHITE = (255,255,255)
RED = (200,24,24)
BLACK = (0,0,0)
img_file_type ="png"
pygame.init()
screen_info = pygame.display.Info()
DISPLAYSURF = pygame.display.set_mode( (screen_info.current_w,
screen_info.current_h),
FULLSCREEN)
count_down_img_list = ("count_down_img/num3.png","count_down_img/num2.png","count_down_img/num1.png")
mask_list =("Mask/2_0_hor.png", "Mask/overlay.png", "Mask/overlay.png", "Mask/2_0_hor.png")
picam_booth_1shot_overlay("test_pic", (640,480), WHITE,
"Mask/2_0_hor.png", count_down_img_list, BLACK,img_file_type,
screen_info, DISPLAYSURF)
picam_booth_nshot_overlay("test4in1", (640,480), WHITE,
mask_list, count_down_img_list, BLACK,
nshot_list, RED, BLACK,img_file_type,
screen_info, DISPLAYSURF)
exit()
text_size_screencenter( "test for center", RED,screen_info, DISPLAYSURF,150)
pygame.display.update()
pygame.time.wait(1000)
text_size_screencenter( "Left Click for Next Frame", RED,screen_info, DISPLAYSURF,150)
pygame.display.update()
pygame.time.wait(1000)
if 0:
text_size_screencenter_bottom( "test for bottom", RED,screen_info, DISPLAYSURF, 100)
pygame.display.update()
pygame.time.wait(1000)
text_size_screencenter_top( "test for top", RED,screen_info, DISPLAYSURF, 100)
pygame.display.update()
pygame.time.wait(1000)
if 0:
picam_booth_1shot("Test_pic", (640,480), 220, WHITE,
countdown_list, RED, BLACK,img_file_type,
screen_info, DISPLAYSURF)
pygame.time.wait(1000)
picam_booth_nshot("Test_pic", (640,480), 220, WHITE,
countdown_list, RED, BLACK,
nshot_list, RED, BLACK,img_file_type,
screen_info, DISPLAYSURF)
pygame.time.wait(1000)
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Country'
db.create_table('website_country', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal('website', ['Country'])
# Adding model 'Organization'
db.create_table('website_organization', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Country'], null=True)),
('members_role', self.gf('django.db.models.fields.CharField')(max_length=30)),
))
db.send_create_signal('website', ['Organization'])
# Adding model 'Field'
db.create_table('website_field', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('website', ['Field'])
# Adding model 'Training'
db.create_table('website_training', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('type', self.gf('django.db.models.fields.CharField')(max_length=30)),
('resource_id', self.gf('django.db.models.fields.CharField')(max_length=100)),
('is_live', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_displayed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_featured', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name='creator', to=orm['auth.User'])),
))
db.send_create_signal('website', ['Training'])
# Adding M2M table for field cowriters on 'Training'
db.create_table('website_training_cowriters', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('training', models.ForeignKey(orm['website.training'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('website_training_cowriters', ['training_id', 'user_id'])
# Adding M2M table for field participants on 'Training'
db.create_table('website_training_participants', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('training', models.ForeignKey(orm['website.training'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('website_training_participants', ['training_id', 'user_id'])
# Adding model 'TrainingTempShare'
db.create_table('website_trainingtempshare', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('training', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Training'])),
('facebook_id', self.gf('django.db.models.fields.BigIntegerField')(null=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True)),
))
db.send_create_signal('website', ['TrainingTempShare'])
# Adding model 'TrainingSchedule'
db.create_table('website_trainingschedule', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('training', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Training'])),
('start_time', self.gf('django.db.models.fields.DateTimeField')()),
('is_scheduled', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('website', ['TrainingSchedule'])
# Adding model 'TrainingParticipation'
db.create_table('website_trainingparticipation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('training', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Training'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('count', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('website', ['TrainingParticipation'])
# Adding model 'UserProfile'
db.create_table('website_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
('field', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Field'], null=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Organization'], null=True)),
('is_organization_verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
('isUniStar', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_student', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('about_me', self.gf('django.db.models.fields.TextField')(blank=True)),
('facebook_id', self.gf('django.db.models.fields.BigIntegerField')(unique=True, null=True, blank=True)),
('access_token', self.gf('django.db.models.fields.TextField')(blank=True)),
('facebook_name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('facebook_profile_url', self.gf('django.db.models.fields.TextField')(blank=True)),
('twitter_profile_url', self.gf('django.db.models.fields.TextField')(blank=True)),
('linkedin_profile_url', self.gf('django.db.models.fields.TextField')(blank=True)),
('website_url', self.gf('django.db.models.fields.TextField')(blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
('date_of_birth', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('gender', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('raw_data', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('website', ['UserProfile'])
# Adding model 'PublicProfilePermissions'
db.create_table('website_publicprofilepermissions', (
('public_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='public_user', primary_key=True, to=orm['auth.User'])),
('allowed_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='allowed_user', unique=True, to=orm['auth.User'])),
))
db.send_create_signal('website', ['PublicProfilePermissions'])
def backwards(self, orm):
# Deleting model 'Country'
db.delete_table('website_country')
# Deleting model 'Organization'
db.delete_table('website_organization')
# Deleting model 'Field'
db.delete_table('website_field')
# Deleting model 'Training'
db.delete_table('website_training')
# Removing M2M table for field cowriters on 'Training'
db.delete_table('website_training_cowriters')
# Removing M2M table for field participants on 'Training'
db.delete_table('website_training_participants')
# Deleting model 'TrainingTempShare'
db.delete_table('website_trainingtempshare')
# Deleting model 'TrainingSchedule'
db.delete_table('website_trainingschedule')
# Deleting model 'TrainingParticipation'
db.delete_table('website_trainingparticipation')
# Deleting model 'UserProfile'
db.delete_table('website_userprofile')
# Deleting model 'PublicProfilePermissions'
db.delete_table('website_publicprofilepermissions')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.country': {
'Meta': {'object_name': 'Country'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'website.field': {
'Meta': {'object_name': 'Field'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Country']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members_role': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.publicprofilepermissions': {
'Meta': {'object_name': 'PublicProfilePermissions'},
'allowed_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allowed_user'", 'unique': 'True', 'to': "orm['auth.User']"}),
'public_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'public_user'", 'primary_key': 'True', 'to': "orm['auth.User']"})
},
'website.training': {
'Meta': {'object_name': 'Training'},
'cowriters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cowriters'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creator'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participants'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'website.trainingparticipation': {
'Meta': {'object_name': 'TrainingParticipation'},
'count': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'website.trainingschedule': {
'Meta': {'object_name': 'TrainingSchedule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_scheduled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"})
},
'website.trainingtempshare': {
'Meta': {'object_name': 'TrainingTempShare'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"})
},
'website.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'about_me': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Field']", 'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'isUniStar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_organization_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_student': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'linkedin_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'twitter_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website_url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['website']
|
|
import re
from django.contrib.auth.models import User
from django.test import TestCase
from ...core import models, testhelper
from ....simplified import PermissionDenied, FilterValidationError, InvalidNumberOfResults
from ....simplified.utils import modelinstance_to_dict, fix_expected_data_missing_database_fields
from ..simplified import (SimplifiedAssignment, SimplifiedAssignmentGroup, SimplifiedPeriod,
SimplifiedSubject, SimplifiedDeadline, SimplifiedStaticFeedback)
from datetime import timedelta
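# keep deliveries in an in-memory store so the tests never write to disk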
testhelper.TestHelper.set_memory_deliverystore()
class SimplifiedExaminerTestBase(TestCase, testhelper.TestHelper):
def setUp(self):
self.maxDiff = None # Show entire diffs
# create a base structure
self.add(nodes='uni:admin(admin)',
subjects=['inf101', 'inf110'],
periods=['firstsem', 'secondsem'],
assignments=['a1', 'a2'])
# add firstStud to the first and secondsem assignments
self.add_to_path('uni;inf101.firstsem.a1.g1:candidate(firstStud):examiner(firstExam).d1')
self.add_to_path('uni;inf101.firstsem.a2.g1:candidate(firstStud):examiner(firstExam).d1')
self.add_to_path('uni;inf110.secondsem.a1.g1:candidate(firstStud):examiner(firstExam).d1')
self.add_to_path('uni;inf110.secondsem.a2.g1:candidate(firstStud):examiner(firstExam).d1')
self.add_to_path('uni;inf110.secondsem.a3:anon(true).g1:candidate(firstStud):examiner(firstExam).d1')
# secondStud began secondsem
        self.add_to_path('uni;inf101.secondsem.a1.g2:candidate(secondStud):examiner(secondExam).d1')
self.add_to_path('uni;inf101.secondsem.a2.g2:candidate(secondStud):examiner(secondExam).d1')
class TestSimplifiedExaminerSubject(SimplifiedExaminerTestBase):
def setUp(self):
super(TestSimplifiedExaminerSubject, self).setUp()
def test_search_filters(self):
qrywrap = SimplifiedSubject.search(self.firstExam)
self.assertEquals(len(qrywrap), 2)
qrywrap = SimplifiedSubject.search(self.firstExam,
filters=[dict(field='parentnode__short_name', comp='exact', value='uni')])
self.assertEquals(len(qrywrap), 2)
qrywrap = SimplifiedSubject.search(self.firstExam,
filters=[dict(field='short_name', comp='exact', value='inf110')])
self.assertEquals(len(qrywrap), 1)
with self.assertRaises(FilterValidationError):
SimplifiedSubject.search(self.firstExam,
filters=[dict(field='parentnode__INVALID__short_name', comp='exact', value='uni')])
with self.assertRaises(FilterValidationError):
SimplifiedSubject.search(self.firstExam,
filters=[dict(field='INVALIDparentnode__short_name', comp='exact', value='uni')])
with self.assertRaises(FilterValidationError):
SimplifiedSubject.search(self.firstExam,
filters=[dict(field='parentnode__short_nameINVALID', comp='exact', value='uni')])
def test_search_exact_number_of_results(self):
qrywrap = SimplifiedSubject.search(self.firstExam, exact_number_of_results=2)
self.assertEquals(len(qrywrap), 2)
qrywrap = SimplifiedSubject.search(self.firstExam, exact_number_of_results=None)
self.assertEquals(len(qrywrap), 2)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedSubject.search(self.firstExam, exact_number_of_results=1)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedSubject.search(self.firstExam, exact_number_of_results=3)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedSubject.search(self.firstExam, exact_number_of_results=0)
def test_search(self):
# do an empty search to get all subjects firstExam examines
search_res = SimplifiedSubject.search(self.firstExam)
expected_res = [modelinstance_to_dict(self.inf101, SimplifiedSubject._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110, SimplifiedSubject._meta.resultfields.aslist())]
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
# do a search with query inf101
search_res = SimplifiedSubject.search(self.firstExam, query='inf101')
expected_res = modelinstance_to_dict(self.inf101, SimplifiedSubject._meta.resultfields.aslist())
self.assertEquals(search_res.count(), 1)
self.assertEquals(search_res[0], expected_res)
# do a search with partial query
search_res = SimplifiedSubject.search(self.firstExam, query='inf10')
expected_res = modelinstance_to_dict(self.inf101, SimplifiedSubject._meta.resultfields.aslist())
self.assertEquals(search_res.count(), 1)
self.assertEquals(search_res[0], expected_res)
def test_search_security_asstudent(self):
search_res = SimplifiedSubject.search(self.firstStud)
self.assertEquals(len(search_res), 0)
def test_search_security_asadmin(self):
search_res = SimplifiedSubject.search(self.admin)
self.assertEquals(len(search_res), 0)
def test_search_security_wrongsubject(self):
search_res = SimplifiedSubject.search(self.secondExam, query='inf110')
self.assertEquals(len(search_res), 0)
def test_read(self):
# read firstsem without extra fields
read_res = SimplifiedSubject.read(self.firstExam, self.inf101.id)
expected_res = modelinstance_to_dict(self.inf101, SimplifiedSubject._meta.resultfields.aslist())
self.assertEquals(read_res, expected_res)
def test_read_security(self):
# check that an examiner can't read a subject he's not signed
# up for
with self.assertRaises(PermissionDenied):
SimplifiedSubject.read(self.admin, self.inf101.id)
# add another student, to inf110, but not inf101
self.add_to_path('uni;inf110.firstsem.a1.g3:candidate(thirdStud)')
with self.assertRaises(PermissionDenied):
SimplifiedSubject.read(self.thirdStud, self.inf101.id)
with self.assertRaises(PermissionDenied):
SimplifiedSubject.read(self.secondExam, self.inf110.id)
# TODO: Examiner history?
# self.add_to_path('uni;inf102.oldSem:begins(-10).a1.g2:candidate(fourthStud):examiner(thirdExam)')
# print self.inf102_oldSem.end_time
# with self.assertRaises(PermissionDenied):
# SimplifiedSubject.read(self.thirdExam, self.inf102.id)
class TestSimplifiedExaminerPeriod(SimplifiedExaminerTestBase):
allExtras = SimplifiedPeriod._meta.resultfields.additional_aslist()
def setUp(self):
super(TestSimplifiedExaminerPeriod, self).setUp()
def test_search(self):
# search with no query and no extra fields
search_res = SimplifiedPeriod.search(self.firstExam)
expected_res = [modelinstance_to_dict(self.inf101_firstsem, SimplifiedPeriod._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem, SimplifiedPeriod._meta.resultfields.aslist())]
# assert that all search results are as expected
self.assertEquals(search_res.count(), 2)
for s in search_res:
self.assertTrue(s in expected_res)
# search with no query and with extra fields
search_res = SimplifiedPeriod.search(self.firstExam, result_fieldgroups=self.allExtras)
expected_res = [modelinstance_to_dict(self.inf101_firstsem, SimplifiedPeriod._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem, SimplifiedPeriod._meta.resultfields.aslist(self.allExtras))]
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
# search with query
search_res = SimplifiedPeriod.search(self.firstExam, query='inf101')
expected_res = modelinstance_to_dict(self.inf101_firstsem, SimplifiedPeriod._meta.resultfields.aslist())
self.assertEquals(search_res.count(), 1)
self.assertEquals(search_res[0], expected_res)
# with query and extra fields
search_res = SimplifiedPeriod.search(self.firstExam, query='inf101', result_fieldgroups=self.allExtras)
expected_res = [modelinstance_to_dict(self.inf101_firstsem, SimplifiedPeriod._meta.resultfields.aslist(self.allExtras))]
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
def test_search_security_asstudent(self):
search_res = SimplifiedPeriod.search(self.firstStud)
self.assertEquals(len(search_res), 0)
def test_search_security_asadmin(self):
search_res = SimplifiedPeriod.search(self.admin)
self.assertEquals(len(search_res), 0)
def test_search_security_wrongsubject(self):
search_res = SimplifiedPeriod.search(self.secondExam, query='inf110')
self.assertEquals(len(search_res), 0)
def test_search_filters(self):
qrywrap = SimplifiedPeriod.search(self.firstExam)
self.assertEquals(len(qrywrap), 2)
qrywrap = SimplifiedPeriod.search(self.firstExam,
filters=[dict(field='parentnode__short_name', comp='exact', value='inf110')])
self.assertEquals(len(qrywrap), 1)
with self.assertRaises(FilterValidationError):
SimplifiedPeriod.search(self.firstExam,
filters=[dict(field='parentnode__INVALID__short_name', comp='exact', value='inf110')])
with self.assertRaises(FilterValidationError):
SimplifiedPeriod.search(self.firstExam,
filters=[dict(field='INVALIDparentnode__short_name', comp='exact', value='inf110')])
with self.assertRaises(FilterValidationError):
SimplifiedPeriod.search(self.firstExam,
filters=[dict(field='parentnode__short_nameINVALID', comp='exact', value='inf110')])
def test_search_exact_number_of_results(self):
qrywrap = SimplifiedPeriod.search(self.firstExam, exact_number_of_results=2)
self.assertEquals(len(qrywrap), 2)
qrywrap = SimplifiedPeriod.search(self.firstExam, exact_number_of_results=None)
self.assertEquals(len(qrywrap), 2)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedPeriod.search(self.firstExam, exact_number_of_results=1)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedPeriod.search(self.firstExam, exact_number_of_results=3)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedPeriod.search(self.firstExam, exact_number_of_results=0)
def test_read(self):
# read firstsem without extra fields
read_res = SimplifiedPeriod.read(self.firstExam, self.inf101_firstsem.id)
expected_res = modelinstance_to_dict(self.inf101_firstsem, SimplifiedPeriod._meta.resultfields.aslist())
self.assertEquals(read_res, expected_res)
# read firstsem with extras fields
read_res = SimplifiedPeriod.read(self.firstExam, self.inf101_firstsem.id, result_fieldgroups=self.allExtras)
expected_res = modelinstance_to_dict(self.inf101_firstsem, SimplifiedPeriod._meta.resultfields.aslist(self.allExtras))
self.assertEquals(read_res, expected_res)
def test_read_security(self):
# check that reading a non-existing id gives permission denied
with self.assertRaises(PermissionDenied):
SimplifiedPeriod.read(self.firstStud, -1)
# that secondStud can't read a period he's not in
with self.assertRaises(PermissionDenied):
SimplifiedPeriod.read(self.secondStud, self.inf101_firstsem.id)
class TestSimplifiedExaminerAssignment(SimplifiedExaminerTestBase):
allExtras = SimplifiedAssignment._meta.resultfields.additional_aslist()
def setUp(self):
super(TestSimplifiedExaminerAssignment, self).setUp()
def test_search(self):
# search with no query and no extra fields
search_res = SimplifiedAssignment.search(self.firstExam)
expected_res = [modelinstance_to_dict(self.inf101_firstsem_a1, SimplifiedAssignment._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf101_firstsem_a2, SimplifiedAssignment._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem_a1, SimplifiedAssignment._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem_a2, SimplifiedAssignment._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem_a3, SimplifiedAssignment._meta.resultfields.aslist())]
# assert that all search results are as expected
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
# search with no query and with extra fields
search_res = SimplifiedAssignment.search(self.firstExam, result_fieldgroups=self.allExtras)
expected_res = [modelinstance_to_dict(self.inf101_firstsem_a1,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf101_firstsem_a2,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a1,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a2,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a3,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras))]
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
# search with query
search_res = SimplifiedAssignment.search(self.firstExam, query='a1')
expected_res = [modelinstance_to_dict(self.inf101_firstsem_a1, SimplifiedAssignment._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem_a1, SimplifiedAssignment._meta.resultfields.aslist())]
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
# with query and extra fields
search_res = SimplifiedAssignment.search(self.firstExam, query='inf110', result_fieldgroups=self.allExtras)
expected_res = [modelinstance_to_dict(self.inf110_secondsem_a1,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a2,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a3,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras))]
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
def test_search_security_asstudent(self):
search_res = SimplifiedAssignment.search(self.firstStud)
self.assertEquals(len(search_res), 0)
def test_search_security_asadmin(self):
search_res = SimplifiedAssignment.search(self.admin)
self.assertEquals(len(search_res), 0)
def test_search_security_wrongsubject(self):
search_res = SimplifiedAssignment.search(self.secondExam, query='inf110')
self.assertEquals(len(search_res), 0)
def test_search_filters(self):
qrywrap = SimplifiedAssignment.search(self.firstExam)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedAssignment.search(self.firstExam,
#result_fieldgroups=['subject'], # has no effect on filters but nice for debugging
filters=[dict(field='parentnode__short_name', comp='exact', value='firstsem')])
self.assertEquals(len(qrywrap), 2)
qrywrap = SimplifiedAssignment.search(self.firstExam,
#result_fieldgroups=['subject'], # has no effect on filters but nice for debugging
filters=[dict(field='parentnode__short_name', comp='exact', value='firstsem'),
dict(field='parentnode__parentnode__short_name', comp='endswith', value='101')])
self.assertEquals(len(qrywrap), 2)
def test_search_exact_number_of_results(self):
qrywrap = SimplifiedAssignment.search(self.firstExam, exact_number_of_results=5)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedAssignment.search(self.firstExam, exact_number_of_results=None)
self.assertEquals(len(qrywrap), 5)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedAssignment.search(self.firstExam, exact_number_of_results=6)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedAssignment.search(self.firstExam, exact_number_of_results=4)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedAssignment.search(self.firstExam, exact_number_of_results=0)
def test_read(self):
# do a read with no extra fields
read_res = SimplifiedAssignment.read(self.firstExam, self.inf101_firstsem_a1.id)
expected_res = modelinstance_to_dict(self.inf101_firstsem_a1,
SimplifiedAssignment._meta.resultfields.aslist())
self.assertEquals(read_res, expected_res)
# do a read with all extras
read_res = SimplifiedAssignment.read(self.firstExam, self.inf101_firstsem_a1.id, result_fieldgroups=self.allExtras)
expected_res = modelinstance_to_dict(self.inf101_firstsem_a1,
SimplifiedAssignment._meta.resultfields.aslist(self.allExtras))
self.assertEquals(read_res, expected_res)
def test_read_security(self):
        # We know secondStud hasn't signed up for inf110. Assert that
        # he can't read an inf110 assignment by id
with self.assertRaises(PermissionDenied):
SimplifiedAssignment.read(self.secondStud, self.inf110_firstsem_a1.id)
with self.assertRaises(PermissionDenied):
SimplifiedAssignment.read(self.admin, self.inf101_firstsem_a1.id)
class TestSimplifiedExaminerAssignmentGroup(SimplifiedExaminerTestBase):
allExtras = SimplifiedAssignmentGroup._meta.resultfields.additional_aslist()
def setUp(self):
super(TestSimplifiedExaminerAssignmentGroup, self).setUp()
def test_search_filters(self):
qrywrap = SimplifiedAssignment.search(self.firstExam)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedAssignmentGroup.search(self.firstExam,
#result_fieldgroups=['subject'], # has no effect on filters but nice for debugging
filters=[dict(field='parentnode__short_name', comp='exact', value='a1')])
self.assertEquals(len(qrywrap), 2)
qrywrap = SimplifiedAssignmentGroup.search(self.firstExam,
#result_fieldgroups=['subject'], # has no effect on filters but nice for debugging
filters=[dict(field='parentnode__short_name', comp='exact', value='a2'),
dict(field='parentnode__parentnode__short_name', comp='endswith', value='sem'),
dict(field='parentnode__parentnode__parentnode__short_name', comp='endswith', value='101')])
self.assertEquals(len(qrywrap), 1)
qrywrap = SimplifiedAssignmentGroup.search(self.firstExam,
#result_fieldgroups=['subject'], # has no effect on filters but nice for debugging
filters=[dict(field='parentnode__short_name', comp='exact', value='a2'),
dict(field='parentnode__parentnode__short_name', comp='endswith', value='sem'),
dict(field='parentnode__parentnode__parentnode__short_name', comp='startswith', value='inf1')])
self.assertEquals(len(qrywrap), 2)
def test_search_exact_number_of_results(self):
qrywrap = SimplifiedAssignmentGroup.search(self.firstExam, exact_number_of_results=5)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedAssignmentGroup.search(self.firstExam, exact_number_of_results=None)
self.assertEquals(len(qrywrap), 5)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedAssignmentGroup.search(self.firstExam, exact_number_of_results=6)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedAssignmentGroup.search(self.firstExam, exact_number_of_results=4)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedAssignmentGroup.search(self.firstExam, exact_number_of_results=0)
def test_search(self):
self.firstExam = User.objects.get(id=self.firstExam.id)
# search with no query and no extra fields
search_res = SimplifiedAssignmentGroup.search(self.firstExam, query='firstStud')
test_groups = [ self.inf101_firstsem_a1_g1,
self.inf101_firstsem_a2_g1,
self.inf110_secondsem_a1_g1,
self.inf110_secondsem_a2_g1,]
expected_res = map(lambda group: modelinstance_to_dict(group, SimplifiedAssignmentGroup._meta.resultfields.aslist()),
test_groups)
# Fix missing database fields by adding data from the test_groups
fix_expected_data_missing_database_fields(test_groups, expected_res)
# assert that all search results are as expected
self.assertEquals(search_res.count(), len(expected_res))
for i in xrange(len(search_res)):
self.assertEquals(search_res[i], expected_res[i])
# search with no query and with extra fields
search_res = SimplifiedAssignmentGroup.search(self.firstExam, result_fieldgroups=self.allExtras)
test_groups = [self.inf101_firstsem_a1_g1,
self.inf101_firstsem_a2_g1,
self.inf110_secondsem_a1_g1,
self.inf110_secondsem_a2_g1,
self.inf110_secondsem_a3_g1,]
expected_res = map(lambda group: modelinstance_to_dict(group,
SimplifiedAssignmentGroup._meta.resultfields.aslist(self.allExtras)),
test_groups)
# Fix missing database fields by adding data from the test_groups
fix_expected_data_missing_database_fields(test_groups, expected_res)
self.assertEquals(search_res.count(), len(expected_res))
for i in xrange(len(search_res)):
self.assertEquals(search_res[i], expected_res[i])
# search with query
search_res = SimplifiedAssignmentGroup.search(self.firstExam, query='a1')
test_groups = [self.inf101_firstsem_a1_g1,
self.inf110_secondsem_a1_g1,]
expected_res = map(lambda group: modelinstance_to_dict(group,
SimplifiedAssignmentGroup._meta.resultfields.aslist()),
test_groups)
# Fix missing database fields by adding data from the test_groups
fix_expected_data_missing_database_fields(test_groups, expected_res)
self.assertEquals(search_res.count(), len(expected_res))
for i in xrange(len(search_res)):
self.assertEquals(search_res[i], expected_res[i])
# with query and extra fields
search_res = SimplifiedAssignmentGroup.search(self.firstExam, query='inf110', result_fieldgroups=self.allExtras)
test_groups = [self.inf110_secondsem_a1_g1,
self.inf110_secondsem_a2_g1,
self.inf110_secondsem_a3_g1,]
expected_res = map(lambda group: modelinstance_to_dict(group,
SimplifiedAssignmentGroup._meta.resultfields.aslist(self.allExtras)),
test_groups)
# Fix missing database fields by adding data from the test_groups
fix_expected_data_missing_database_fields(test_groups, expected_res)
self.assertEquals(search_res.count(), len(expected_res))
for i in xrange(len(search_res)):
self.assertEquals(search_res[i], expected_res[i])
def test_search_security_asstudent(self):
search_res = SimplifiedAssignmentGroup.search(self.firstStud)
self.assertEquals(len(search_res), 0)
def test_search_security_asadmin(self):
search_res = SimplifiedAssignmentGroup.search(self.admin)
self.assertEquals(len(search_res), 0)
def test_search_security_wrongsubject(self):
search_res = SimplifiedAssignmentGroup.search(self.secondExam, query='inf110')
self.assertEquals(len(search_res), 0)
def test_read(self):
# do a read with no extra fields
read_res = SimplifiedAssignmentGroup.read(self.firstExam, self.inf101_firstsem_a1.id)
expected_res = modelinstance_to_dict(self.inf101_firstsem_a1_g1,
SimplifiedAssignmentGroup._meta.resultfields.aslist())
self.assertEquals(read_res, expected_res)
# do a read with all extras
read_res = SimplifiedAssignmentGroup.read(self.firstExam, self.inf101_firstsem_a1.id, result_fieldgroups=self.allExtras)
expected_res = modelinstance_to_dict(self.inf101_firstsem_a1_g1,
SimplifiedAssignmentGroup._meta.resultfields.aslist(self.allExtras))
self.assertEquals(read_res, expected_res)
def test_read_security(self):
# We know secondStud hasn't signed up for firstsem.inf101.
with self.assertRaises(PermissionDenied):
SimplifiedAssignmentGroup.read(self.secondStud, self.inf101_firstsem_a1_g1.id)
with self.assertRaises(PermissionDenied):
SimplifiedAssignmentGroup.read(self.admin, self.inf101_firstsem_a1_g1.id)
class TestSimplifiedExaminerDeadline(SimplifiedExaminerTestBase):
allExtras = SimplifiedAssignmentGroup._meta.resultfields.additional_aslist()
baseFields = SimplifiedAssignmentGroup._meta.resultfields.aslist()
allFields = SimplifiedAssignmentGroup._meta.resultfields.aslist(allExtras)
    def setUp(self):
super(TestSimplifiedExaminerDeadline, self).setUp()
def test_search_filters(self):
qrywrap = SimplifiedDeadline.search(self.firstExam)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedDeadline.search(self.firstExam,
#result_fieldgroups=['subject'], # has no effect on filters but nice for debugging
filters=[dict(field='assignment_group__parentnode__short_name', comp='exact', value='a1'),
dict(field='assignment_group__parentnode__parentnode__short_name', comp='endswith', value='sem'),
dict(field='assignment_group__parentnode__parentnode__parentnode__short_name', comp='endswith', value='101')])
self.assertEquals(len(qrywrap), 1)
def test_search_exact_number_of_results(self):
qrywrap = SimplifiedDeadline.search(self.firstExam, exact_number_of_results=5)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedDeadline.search(self.firstExam, exact_number_of_results=None)
self.assertEquals(len(qrywrap), 5)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedDeadline.search(self.firstExam, exact_number_of_results=6)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedDeadline.search(self.firstExam, exact_number_of_results=4)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedDeadline.search(self.firstExam, exact_number_of_results=0)
def test_search_all(self):
search_res = SimplifiedDeadline.search(self.firstExam, result_fieldgroups=self.allExtras)
expected_res = [modelinstance_to_dict(self.inf101_firstsem_a1_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf101_firstsem_a2_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a1_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a2_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf110_secondsem_a3_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras))]
for expected in expected_res: # Set annotated fields
expected['number_of_deliveries'] = 0
self.assertEquals(len(search_res), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
def test_search_query(self):
search_res = SimplifiedDeadline.search(self.firstExam, query='101', result_fieldgroups=self.allExtras)
expected_res = [modelinstance_to_dict(self.inf101_firstsem_a1_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras)),
modelinstance_to_dict(self.inf101_firstsem_a2_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras))]
for expected in expected_res: # Set annotated fields
expected['number_of_deliveries'] = 0
self.assertEquals(len(search_res), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
def test_search_security_asstudent(self):
search_res = SimplifiedDeadline.search(self.firstStud)
self.assertEquals(len(search_res), 0)
def test_search_security_asadmin(self):
search_res = SimplifiedDeadline.search(self.admin)
self.assertEquals(len(search_res), 0)
def test_search_security_wrongsubject(self):
search_res = SimplifiedDeadline.search(self.secondExam, query='inf110')
self.assertEquals(len(search_res), 0)
def test_read_base(self):
# do a read with no extra fields
read_res = SimplifiedDeadline.read(self.firstExam, self.inf101_firstsem_a1.id)
expected_res = modelinstance_to_dict(self.inf101_firstsem_a1_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist())
self.assertEquals(read_res, expected_res)
def test_read_all(self):
# do a read with all extras
read_res = SimplifiedDeadline.read(self.firstExam, self.inf101_firstsem_a1.id, result_fieldgroups=self.allExtras)
expected_res = modelinstance_to_dict(self.inf101_firstsem_a1_g1.deadlines.all()[0],
SimplifiedDeadline._meta.resultfields.aslist(self.allExtras))
self.assertEquals(read_res, expected_res)
def test_read_security(self):
# We know secondStud hasn't signed up for firstsem.inf101.
with self.assertRaises(PermissionDenied):
SimplifiedDeadline.read(self.secondStud, self.inf101_firstsem_a1_g1.id)
with self.assertRaises(PermissionDenied):
SimplifiedDeadline.read(self.admin, self.inf101_firstsem_a1_g1.id)
def test_create(self):
kw = dict(text='test',
assignment_group=self.inf101_firstsem_a1_g1,
deadline=self.inf101_firstsem_a1.publishing_time + timedelta(days=12))
created_pk = SimplifiedDeadline.create(self.firstExam, **kw)
create_res = models.Deadline.objects.get(pk=created_pk)
self.assertEquals(create_res.text, 'test')
self.assertEquals(create_res.assignment_group,
self.inf101_firstsem_a1_g1)
self.assertEquals(create_res.deadline,
self.inf101_firstsem_a1_g1.get_active_deadline().deadline)
def test_create_security(self):
kw = dict(text='test',
assignment_group=self.inf101_firstsem_a1_g1,
deadline=self.inf101_firstsem_a1_g1.deadlines.all()[0])
with self.assertRaises(PermissionDenied):
SimplifiedDeadline.create(self.firstStud, **kw)
with self.assertRaises(PermissionDenied):
SimplifiedDeadline.create(self.admin, **kw)
def test_delete(self):
SimplifiedDeadline.delete(self.firstExam,
self.inf101_firstsem_a1_g1.deadlines.all()[0].id)
with self.assertRaises(IndexError): # TODO: this should probably be PermissionDenied, but atm it gets an IndexError..
SimplifiedDeadline.delete(self.firstExam,
self.inf101_firstsem_a1_g1.deadlines.all()[0].id)
def test_delete_as_student(self):
with self.assertRaises(PermissionDenied):
SimplifiedDeadline.delete(self.firstStud,
self.inf101_firstsem_a1_g1.deadlines.all()[0].id)
def test_delete_wrong_assignment_group(self):
with self.assertRaises(PermissionDenied):
SimplifiedDeadline.delete(self.secondExam,
self.inf101_firstsem_a1_g1.deadlines.all()[0].id)
class TestSimplifiedExaminerStaticFeedback(SimplifiedExaminerTestBase):
allExtras = SimplifiedStaticFeedback._meta.resultfields.additional_aslist()
baseFields = SimplifiedStaticFeedback._meta.resultfields.aslist()
allFields = SimplifiedStaticFeedback._meta.resultfields.aslist(allExtras)
def setUp(self):
super(TestSimplifiedExaminerStaticFeedback, self).setUp()
# we need to add some deliveries here! Use the admin of uni as
# an examiner
# add deliveries and feedbacks to every group that was
# created. Default values are good enough
for var in dir(self):
# find any variable that ends with '_gN' where N is a
# number
            if re.search(r'_g\d$', var):
group = getattr(self, var)
group.examiners.create(user=self.admin)
self.add_delivery(group)
self.add_feedback(group)
def test_search_filters(self):
qrywrap = SimplifiedStaticFeedback.search(self.firstExam)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedStaticFeedback.search(self.firstExam,
filters=[dict(field='delivery', comp='exact', value='1')])
self.assertEquals(len(qrywrap), 1)
def test_search_exact_number_of_results(self):
qrywrap = SimplifiedStaticFeedback.search(self.firstExam, exact_number_of_results=5)
self.assertEquals(len(qrywrap), 5)
qrywrap = SimplifiedStaticFeedback.search(self.firstExam, exact_number_of_results=None)
self.assertEquals(len(qrywrap), 5)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedStaticFeedback.search(self.firstExam, exact_number_of_results=6)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedStaticFeedback.search(self.firstExam, exact_number_of_results=4)
with self.assertRaises(InvalidNumberOfResults):
SimplifiedStaticFeedback.search(self.firstExam, exact_number_of_results=0)
def test_search(self):
search_res = SimplifiedStaticFeedback.search(self.firstExam)
expected_res = [modelinstance_to_dict(self.inf101_firstsem_a1_g1_feedbacks[0],
SimplifiedStaticFeedback._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf101_firstsem_a2_g1_feedbacks[0],
SimplifiedStaticFeedback._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem_a1_g1_feedbacks[0],
SimplifiedStaticFeedback._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem_a2_g1_feedbacks[0],
SimplifiedStaticFeedback._meta.resultfields.aslist()),
modelinstance_to_dict(self.inf110_secondsem_a3_g1_feedbacks[0],
SimplifiedStaticFeedback._meta.resultfields.aslist()),
]
# assert that all search results are as expected
self.assertEquals(search_res.count(), len(expected_res))
for s in search_res:
self.assertTrue(s in expected_res)
def test_search_security_asstudent(self):
search_res = SimplifiedStaticFeedback.search(self.firstStud)
self.assertEquals(len(search_res), 0)
def test_search_security_asadmin(self):
self.create_superuser('superadminuser')
search_res = SimplifiedStaticFeedback.search(self.superadminuser)
self.assertEquals(len(search_res), 0)
def test_search_security_wrongsubject(self):
search_res = SimplifiedStaticFeedback.search(self.secondExam, query='inf110')
self.assertEquals(len(search_res), 0)
def test_read(self):
read_res = SimplifiedStaticFeedback.read(self.firstExam, self.inf101_firstsem_a1_g1_feedbacks[0].id)
expected_res = modelinstance_to_dict(self.inf101_firstsem_a1_g1_feedbacks[0],
SimplifiedStaticFeedback._meta.resultfields.aslist())
self.assertEquals(read_res, expected_res)
def test_read_security(self):
# try to read one of secondExam's feedbacks
with self.assertRaises(PermissionDenied):
SimplifiedStaticFeedback.read(self.firstExam, self.inf101_secondsem_a1_g2_feedbacks[0].id)
def test_create(self):
created_pk = SimplifiedStaticFeedback.create(self.firstExam,
delivery=self.inf101_firstsem_a2_g1_deliveries[0],
grade='B',
points=80,
is_passing_grade=True,
rendered_view='<html></html>')
# TODO: test results
|
|
# -*- coding: utf-8 -*-
"""
This is an example of a VAE whose prior p(z) is a mixture of Gaussians:
.. math::
p(z) = \\sum_{k=1}^K \\pi(k) p_{k}(z)
"""
import functools
import sys
from argparse import ArgumentParser
import tensorflow as tf
from pprint import pformat
from tensorflow.contrib.framework import arg_scope, add_arg_scope
import tfsnippet as spt
from tfsnippet.examples.utils import (MLResults,
save_images_collection,
bernoulli_as_pixel,
bernoulli_flow,
print_with_title)
class ExpConfig(spt.Config):
# model parameters
z_dim = 40
x_dim = 784
z_logstd_min = -1.
n_mixture_components = 3
# training parameters
result_dir = None
write_summary = False
max_epoch = 3000
max_step = None
batch_size = 128
l2_reg = 0.0001
initial_lr = 0.001
lr_anneal_factor = 0.5
lr_anneal_epoch_freq = 300
lr_anneal_step_freq = None
# evaluation parameters
test_n_z = 500
test_batch_size = 128
config = ExpConfig()
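# Inference network q(z|x): maps an observed x to the approximate posterior over z.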
@spt.global_reuse
@add_arg_scope
def q_net(x, observed=None, n_z=None, is_initializing=False):
net = spt.BayesianNet(observed=observed)
normalizer_fn = functools.partial(
spt.layers.act_norm, initializing=is_initializing)
# compute the hidden features
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
normalizer_fn=normalizer_fn,
weight_norm=True,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_x = tf.to_float(x)
h_x = spt.layers.dense(h_x, 500)
h_x = spt.layers.dense(h_x, 500)
# sample z ~ q(z|x)
z_mean = spt.layers.dense(h_x, config.z_dim, name='z_mean')
z_logstd = spt.layers.dense(h_x, config.z_dim, name='z_logstd')
z = net.add('z', spt.Normal(mean=z_mean, logstd=z_logstd), n_samples=n_z,
group_ndims=1)
return net
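# Generative network p(x|z) with the mixture-of-Gaussians prior p(z) described above.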
@spt.global_reuse
@add_arg_scope
def p_net(observed=None, n_z=None, is_initializing=False):
net = spt.BayesianNet(observed=observed)
normalizer_fn = functools.partial(
spt.layers.act_norm, initializing=is_initializing)
# sample z ~ p(z)
def make_component(i):
normal = spt.Normal(
mean=tf.get_variable('mean_{}'.format(i), shape=[1, config.z_dim],
dtype=tf.float32, trainable=True),
logstd=tf.maximum(
tf.get_variable('logstd_{}'.format(i), shape=[1, config.z_dim],
dtype=tf.float32, trainable=True),
config.z_logstd_min
)
)
return normal.expand_value_ndims(1)
components = [make_component(i) for i in range(config.n_mixture_components)]
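    # the mixture weights pi(k) are uniform here: the categorical logits are all zero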
mixture = spt.Mixture(
categorical=spt.Categorical(
logits=tf.zeros([1, config.n_mixture_components])),
components=components,
is_reparameterized=True
)
z = net.add('z', mixture, n_samples=n_z)
# compute the hidden features
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
normalizer_fn=normalizer_fn,
weight_norm=True,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_z = z
h_z = spt.layers.dense(h_z, 500)
h_z = spt.layers.dense(h_z, 500)
# sample x ~ p(x|z)
x_logits = spt.layers.dense(h_z, config.x_dim, name='x_logits')
x = net.add('x', spt.Bernoulli(logits=x_logits), group_ndims=1)
return net
def main():
# parse the arguments
arg_parser = ArgumentParser()
spt.register_config_arguments(config, arg_parser, title='Model options')
spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet',
title='TFSnippet options')
arg_parser.parse_args(sys.argv[1:])
# print the config
print_with_title('Configurations', pformat(config.to_dict()), after='\n')
# open the result object and prepare for result directories
results = MLResults(config.result_dir)
results.save_config(config) # save experiment settings for review
results.make_dirs('plotting', exist_ok=True)
results.make_dirs('train_summary', exist_ok=True)
# input placeholders
input_x = tf.placeholder(
dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
learning_rate = spt.AnnealingVariable(
'learning_rate', config.initial_lr, config.lr_anneal_factor)
# derive the output for initialization
with tf.name_scope('initialization'), \
spt.utils.scoped_set_config(spt.settings, auto_histogram=False):
init_q_net = q_net(input_x, is_initializing=True)
init_chain = init_q_net.chain(
p_net, observed={'x': input_x}, is_initializing=True)
init_lb = tf.reduce_mean(init_chain.vi.lower_bound.elbo())
# derive the loss and lower-bound for training
with tf.name_scope('training'):
train_q_net = q_net(input_x)
train_chain = train_q_net.chain(p_net, observed={'x': input_x})
vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
loss = vae_loss + tf.losses.get_regularization_loss()
# derive the nll and logits output for testing
with tf.name_scope('testing'):
test_q_net = q_net(input_x, n_z=config.test_n_z)
test_chain = test_q_net.chain(
p_net, latent_axis=0, observed={'x': input_x})
test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())
# derive the optimizer
with tf.name_scope('optimizing'):
optimizer = tf.train.AdamOptimizer(learning_rate)
params = tf.trainable_variables()
grads = optimizer.compute_gradients(loss, var_list=params)
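        # make sure any update ops collected in the graph (e.g. by normalization
        # layers) run before the gradients are applied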
with tf.control_dependencies(
tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.apply_gradients(grads)
# derive the plotting function
with tf.name_scope('plotting'):
plot_p_net = p_net(n_z=100)
x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))
def plot_samples(loop):
with loop.timeit('plot_time'):
images = session.run(x_plots)
save_images_collection(
images=images,
filename='plotting/{}.png'.format(loop.epoch),
grid_size=(10, 10),
results=results
)
# prepare for training and testing data
(x_train, y_train), (x_test, y_test) = \
spt.datasets.load_mnist(x_shape=[784])
train_flow = bernoulli_flow(
x_train, config.batch_size, shuffle=True, skip_incomplete=True)
test_flow = bernoulli_flow(
x_test, config.test_batch_size, sample_now=True)
with spt.utils.create_session().as_default() as session, \
train_flow.threaded(5) as train_flow:
spt.utils.ensure_variables_initialized()
# initialize the network
for [x] in train_flow:
print('Network initialized, first-batch loss is {:.6g}.\n'.
format(session.run(init_lb, feed_dict={input_x: x})))
break
# train the network
with spt.TrainLoop(params,
var_groups=['q_net', 'p_net'],
max_epoch=config.max_epoch,
max_step=config.max_step,
summary_dir=(results.system_path('train_summary')
if config.write_summary else None),
summary_graph=tf.get_default_graph(),
early_stopping=False) as loop:
trainer = spt.Trainer(
loop, train_op, [input_x], train_flow,
metrics={'loss': loss},
summaries=tf.summary.merge_all(spt.GraphKeys.AUTO_HISTOGRAM)
)
trainer.anneal_after(
learning_rate,
epochs=config.lr_anneal_epoch_freq,
steps=config.lr_anneal_step_freq
)
evaluator = spt.Evaluator(
loop,
metrics={'test_nll': test_nll, 'test_lb': test_lb},
inputs=[input_x],
data_flow=test_flow,
time_metric_name='test_time'
)
evaluator.events.on(
spt.EventKeys.AFTER_EXECUTION,
lambda e: results.update_metrics(evaluator.last_metrics_dict)
)
trainer.evaluate_after_epochs(evaluator, freq=10)
trainer.evaluate_after_epochs(
functools.partial(plot_samples, loop), freq=10)
trainer.log_after_epochs(freq=1)
trainer.run()
# print the final metrics and close the results object
print_with_title('Results', results.format_metrics(), before='\n')
results.close()
if __name__ == '__main__':
main()
|
|
# coding: utf-8
# # Accessing ERDDAP from Python
#
# ERDDAP's rich set of responses and RESTful API make it **THE** most convenient way to serve data.
#
# One can build URLs manually or programmatically, for example (a manual sketch follows the component breakdown below):
#
# <small>`https://erddap-uncabled.oceanobservatories.org/uncabled/erddap/tabledap/CP05MOAS-GL336-02-FLORTM000-flort_m_glider_instrument-telemetered-deployment0005-tabledap.csv?ctdgv_m_glider_instrument_sci_water_temp,time&time>=2017-02-10T00:00:00Z`</small>
# - server: `https://erddap-uncabled.oceanobservatories.org/uncabled/erddap/`
# - protocol: `tabledap`
# - dataset_id: `CP05MOAS-GL336-02-FLORTM000-flort_m_glider_instrument-telemetered-deployment0005-tabledap`
# - variables: `ctdgv_m_glider_instrument_sci_water_temp,latitude,longitude,temperature,time`
# - constraints:
# - `time>=2017-10-11T00:00:00Z`
# - `time<=2017-10-18T00:00:00Z`
# - `latitude>=38.0`
# - `latitude<=41.0`
# - `longitude>=-72.0`
# - `longitude<=-69.0`
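# A minimal, hedged sketch of assembling that first URL by hand from the pieces
# above (the erddapy object below builds it for us):
base = 'https://erddap-uncabled.oceanobservatories.org/uncabled/erddap'
ds_id = ('CP05MOAS-GL336-02-FLORTM000-flort_m_glider_instrument-'
         'telemetered-deployment0005-tabledap')
query = 'ctdgv_m_glider_instrument_sci_water_temp,time&time>=2017-02-10T00:00:00Z'
manual_url = '{}/tabledap/{}.csv?{}'.format(base, ds_id, query)
print(manual_url)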
# In[1]:
from erddapy import ERDDAP
server = 'https://erddap-uncabled.oceanobservatories.org/uncabled/erddap'
dataset_id = 'CP05MOAS-GL336-02-FLORTM000-flort_m_glider_instrument-telemetered-deployment0005-tabledap'
constraints = {
'time>=': '2017-10-11T00:00:00Z',
'time<=': '2017-10-18T08:16:57Z',
'latitude>=': 38.0,
'latitude<=': 41.0,
'longitude>=': -72.0,
'longitude<=': -69.0,
}
depth = 'ctdgv_m_glider_instrument_sci_water_pressure_dbar'
salinity = 'ctdgv_m_glider_instrument_practical_salinity'
temperature = 'ctdgv_m_glider_instrument_sci_water_temp'
variables = [
depth,
'latitude',
'longitude',
salinity,
temperature,
'time',
]
# In[2]:
e = ERDDAP(
server=server,
dataset_id=dataset_id,
constraints=constraints,
variables=variables,
protocol='tabledap',
response='mat',
)
print(e.get_download_url())
# # Obtaining the data
#
# There are a few methods to obtain the data, e.g. *to_pandas()* and *to_xarray()*:
# In[3]:
df = e.to_pandas(
index_col='time',
parse_dates=True,
skiprows=(1,) # units information can be dropped.
).dropna()
# In[4]:
df.head()
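# The other accessor mentioned above is *to_xarray()*; a minimal sketch,
# assuming the same `e` object (these lines are not in the original notebook):
ds = e.to_xarray()
ds[temperature]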
# # Let's plot the data
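# A quick, hedged sketch (not in the original notebook): temperature against
# time for the transect we just downloaded.
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(11, 3))
ax.plot(df.index, df[temperature], '.', markersize=2)
ax.set_xlabel('time')
ax.set_ylabel('sea water temperature')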
# # Exploring an ERDDAP server
# In[5]:
from erddapy import ERDDAP
e = ERDDAP(server='https://erddap-uncabled.oceanobservatories.org/uncabled/erddap')
# In[6]:
import pandas as pd
df = pd.read_csv(e.get_search_url(response='csv', search_for='all'))
# In[7]:
'We have {} tabledap, {} griddap, and {} wms endpoints.'.format(
len(set(df['tabledap'].dropna())),
len(set(df['griddap'].dropna())),
len(set(df['wms'].dropna()))
)
# # ERDDAP Advanced Search
#
# Let's narrow the search area, time span, and look for *sea_water_temperature* only.
# In[8]:
bbox = [-72.0, -69.0, 38.0, 41.0]
min_time = '2018-02-01T00:00:00Z'
max_time = '2018-02-08T00:00:00Z'
kw = {
'standard_name': 'sea_water_temperature',
'search_for': 'glider',
'min_lon': bbox[0],
'max_lon': bbox[1],
'min_lat': bbox[2],
'max_lat': bbox[3],
'min_time': min_time,
'max_time': max_time,
'cdm_data_type': 'trajectory'
}
# In[9]:
search_url = e.get_search_url(response='csv', **kw)
search = pd.read_csv(search_url)
gliders = search['Dataset ID'].values
msg = 'Found {} Glider Datasets:\n\n{}'.format
print(msg(len(gliders), '\n'.join(gliders)))
# With the Dataset IDs we can explore the metadata using the *get_info_url* method.
# In[10]:
print(gliders[0])
info_url = e.get_info_url(dataset_id=gliders[0], response='csv')
info = pd.read_csv(info_url)
info.head()
# In[11]:
cdm_profile_variables = info.loc[
info['Attribute Name'] == 'cdm_profile_variables', 'Value'
]
print(''.join(cdm_profile_variables))
# # Selecting variables by attributes
# In[12]:
e.get_var_by_attr(
dataset_id='CP02PMCI-WFP01-03-CTDPFK000-ctdpf_ckl_wfp_instrument-telemetered-deployment0008-tabledap',
standard_name='sea_water_temperature'
)
# # Easy to use CF convention standard names
# In[13]:
t_vars = [
e.get_var_by_attr(
dataset_id=glider, standard_name='sea_water_temperature'
)[0] for glider in gliders
]
t_vars
# In[14]:
s_vars = [
e.get_var_by_attr(
dataset_id=glider, standard_name='sea_water_practical_salinity'
)[0] for glider in gliders
]
s_vars
# In[15]:
d_vars = [
e.get_var_by_attr(
dataset_id=glider, standard_name='sea_water_pressure'
)[0] for glider in gliders
]
d_vars
# In[16]:
# FIX: should not really assume that variables are the same for each dataset
depth = d_vars[0]
salinity = s_vars[0]
temperature = t_vars[0]
# # Putting everything together
# In[17]:
from requests.exceptions import HTTPError
constraints = {
'time>=': min_time,
'time<=': max_time,
'longitude>=': bbox[0],
'longitude<=': bbox[1],
'latitude>=': bbox[2],
'latitude<=': bbox[3]
}
def download_csv(url):
return pd.read_csv(
url, index_col='time', parse_dates=True, skiprows=[1]
)
dfs = {}
for glider in gliders:
try:
download_url = e.get_download_url(
dataset_id=glider,
protocol='tabledap',
variables=['time', 'latitude', 'longitude', depth, salinity, temperature],
response='csv',
constraints=constraints
)
except HTTPError:
print('Failed to download {}'.format(glider))
continue
dfs.update({glider: download_csv(download_url)})
# In[18]:
import numpy as np
for glider in dfs.keys():
dfs[glider].loc[dfs[glider][salinity] <= .1, salinity] = np.NaN
dfs[glider].loc[dfs[glider][temperature] <= .1, temperature] = np.NaN
# In[19]:
import folium
zoom_start = 7
lon = (bbox[0] + bbox[1]) / 2
lat = (bbox[2] + bbox[3]) / 2
m = folium.Map(width='100%', height='100%',
location=[lat, lon], zoom_start=zoom_start)
url = 'https://gis.ngdc.noaa.gov/arcgis/services/gebco08_hillshade/MapServer/WMSServer'
w = folium.WmsTileLayer(
url,
name='GEBCO Bathymetry',
fmt='image/png',
layers='GEBCO_08 Hillshade',
attr='GEBCO',
overlay=True,
transparent=True)
w.add_to(m)
colors = ['orange', 'pink', 'yellow']
for k, (glider, df) in enumerate(dfs.items()):
    line = folium.PolyLine(
        locations=list(zip(df['latitude'], df['longitude'])),
        color=colors[k % len(colors)],  # cycle colors if there are more gliders than colors
        weight=8,
        opacity=0.6,
        popup=glider[:22],
    ).add_to(m)
m
# In[20]:
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
def glider_scatter(df, ax, glider):
ax.scatter(df[temperature], df[salinity],
s=10, alpha=0.5, label=glider)
fig, ax = plt.subplots(figsize=(12, 7))
ax.set_ylabel('salinity')
ax.set_xlabel('temperature')
ax.grid(True)
for glider, df in dfs.items():
glider_scatter(df, ax, glider)
leg = ax.legend()
# ## Plot one of the glider transects
# In[21]:
df = next(iter(dfs.values()))
# In[22]:
import matplotlib.dates as mdates
fig, ax = plt.subplots(figsize=(17, 2))
cs = ax.scatter(df.index, df[depth], s=15, c=df[temperature], marker='o', edgecolor='none')
ax.invert_yaxis()
ax.set_xlim(df.index[0], df.index[-1])
xfmt = mdates.DateFormatter('%H:%Mh\n%d-%b')
ax.xaxis.set_major_formatter(xfmt)
cbar = fig.colorbar(cs, orientation='vertical', extend='both')
cbar.ax.set_ylabel('Temperature ($^\circ$C)')
ax.set_ylabel('Depth (m)');
# In[ ]:
|
|
import argparse
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the 3d projection used below
from aod_cells.schemata import *
# plot_params = dict(cmap=plt.cm.gray, vmin=0, vmax=1)
plot_params = dict(cmap=plt.cm.gray)
plot_paramsP = dict(cmap=sns.blend_palette(['yellow', 'deeppink'], as_cmap=True), zorder=5)
class CellLabeler:
def __init__(self, X, cells=None, P=None):
self.X = X
self.cells = cells
self.cell_idx = 0 if cells is not None else None
self.cut = OrderedDict(zip(['row', 'col', 'depth'], [0, 0, 0]))
self.P = 0 * self.X
if P is not None:
i, j, k = [(i - j + 1) // 2 for i, j in zip(self.X.shape, P.shape)]
self.P[i:-i, j:-j, k:-k] = P
fig = plt.figure(facecolor='w')
gs = plt.GridSpec(3, 5)
ax = dict()
ax['depth'] = fig.add_subplot(gs[1:3, :2])
ax['row'] = fig.add_subplot(gs[0, :2], sharex=ax['depth'])
ax['col'] = fig.add_subplot(gs[1:3, 2], sharey=ax['depth'])
ax['3d'] = fig.add_subplot(gs[1:3, 3:], projection='3d')
self.fig, self.ax = fig, ax
self.fig.canvas.mpl_connect('scroll_event', self.on_scroll)
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('key_press_event', self.on_key)
self.replot()
plt.show()
def replot(self):
X0 = self.X
P0 = np.asarray(self.P)
P0[P0 < 0.005] = np.nan
row, col, depth = self.cut.values()
nr, nc, nd = self.X.shape[:3]
fig, ax = self.fig, self.ax
for a in ax.values():
a.clear()
color = 'red'
if self.cells is not None and len(self.cells) > 0:
out = np.asarray(list(self.cut.values()), dtype=int)
d = np.sqrt(((self.cells - out) ** 2).sum(axis=1))
if np.any(d <= 5):
color = 'dodgerblue'
ax['row'].imshow(X0[row, :, :].T, **plot_params)
ax['row'].imshow(P0[row, :, :].T, **plot_paramsP)
ax['row'].plot([0, nc], [depth, depth], '-', lw=.5, zorder=10, color=color)
ax['row'].plot([col, col], [0, nd], '-', lw=.5, zorder=10, color=color)
ax['row'].axis('tight')
ax['row'].set_aspect('equal')
ax['row'].axis('off')
ax['row'].set_xlim((0, nc))
ax['row'].set_title('col-depth plane')
ax['col'].imshow(X0[:, col, :], **plot_params)
ax['col'].imshow(P0[:, col, :], **plot_paramsP)
ax['col'].plot([depth, depth], [0, nr], '-', lw=.5, zorder=10, color=color)
ax['col'].plot([0, nd], [row, row], '-', lw=.5, zorder=10, color=color)
ax['col'].axis('tight')
ax['col'].set_aspect('equal')
ax['col'].axis('off')
ax['col'].set_ylim((0, nr))
ax['col'].set_title('row-depth plane')
ax['depth'].imshow(X0[:, :, depth], **plot_params)
ax['depth'].imshow(P0[:, :, depth], **plot_paramsP)
ax['depth'].plot([col, col], [0, nr], '-', lw=.5, zorder=10, color=color)
ax['depth'].plot([0, nc], [row, row], '-', lw=.5, zorder=10, color=color)
ax['depth'].axis('tight')
ax['depth'].set_xlim((0, nc))
ax['depth'].set_ylim((0, nr))
ax['depth'].set_aspect('equal')
ax['depth'].axis('off')
ax['depth'].set_title('row-col plane')
if self.cells is not None and len(self.cells) > 0:
c = self.cells
dz = np.abs(c[:, 2] - out[2]) / 5
dz = dz * (dz <= 1)
for cc, alpha in zip(c[dz > 0], 1 - dz[dz > 0]):
ax['depth'].plot(cc[1], cc[0], 'ok', mfc='dodgerblue', alpha=alpha)
idx = c[:, 2] == depth
if np.any(idx):
ax['depth'].plot(c[idx, 1], c[idx, 0], 'ok', mfc='deeppink', alpha=0.5)
idx = c[:, 0] == row
if np.any(idx):
ax['row'].plot(c[idx, 1], c[idx, 2], 'ok', mfc='deeppink', alpha=0.5)
idx = c[:, 1] == col
if np.any(idx):
ax['col'].plot(c[idx, 2], c[idx, 0], 'ok', mfc='deeppink', alpha=0.5)
ax['3d'].plot(c[:, 0], c[:, 1], c[:, 2], 'ok', mfc='deeppink')
ax['3d'].plot([row, row], [0, nc], [depth, depth], '--', lw=2, color=color)
ax['3d'].plot([row, row], [col, col], [0, nd], '--', lw=2, color=color)
ax['3d'].plot([0, nr], [col, col], [depth, depth], '--', lw=2, color=color)
plt.draw()
def _determine_axes(self, event):
for k, v in self.ax.items():
if event.inaxes == v:
return k
def on_scroll(self, event):
what = self._determine_axes(event)
dimensions = list(self.cut.keys())
if what in dimensions:
i = dimensions.index(what)
k = self.cut[what] + event.step
            k = min(self.X.shape[i] - 1, max(k, 0))  # clamp to a valid index along this axis
self.cut[what] = k
self.replot()
def on_key(self, event):
if event.key in ['t', 'r', 'e']:
if event.key == 'e':
self.cell_idx = max(0, self.cell_idx - 1)
elif event.key == 't':
self.cell_idx = min(len(self.cells) - 1, self.cell_idx + 1)
for k, i in zip(self.cut, self.cells[self.cell_idx, :]):
self.cut[k] = i
# if event.key == 's':
# fname = input('Please enter filename:')
# print('Saving')
# self.stack.cells = self.cells
# self.stack.save(fname)
# self.fig.suptitle('File saved to %s' % (fname,))
if event.key == 'a':
new_cell = np.asarray(list(self.cut.values()), dtype=int)
print('Adding new cell at', new_cell)
self.cells = np.vstack((self.cells, new_cell))
self.fig.suptitle('New cell added')
self.replot()
def on_press(self, event):
what = self._determine_axes(event)
if what == 'depth':
self.cut['row'], self.cut['col'] = int(event.ydata), int(event.xdata)
elif what == 'row':
self.cut['depth'], self.cut['col'] = int(event.ydata), int(event.xdata)
elif what == 'col':
self.cut['depth'], self.cut['row'] = int(event.xdata), int(event.ydata)
if what is not None:
if event.button == 1:
new_cell = np.asarray(list(self.cut.values()), dtype=int)
print('Adding new cell at', new_cell)
if self.cells is None:
self.cells = new_cell[None, :]
else:
self.cells = np.vstack((self.cells, new_cell))
if event.button == 3:
out = np.asarray(list(self.cut.values()), dtype=int)
d = abs(self.cells - out).sum(axis=1)
self.cells = self.cells[d > 3, :]
self.replot()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Manually label cells in a stack.')
# parser.add_argument('file', type=str, help='hdf5 file containing the stack (dims row, col, depth, 1, channels)')
# parser.add_argument('--probability', type=str, help='numpy file containing the probability map for file')
#
# args = parser.parse_args()
# s = Stack(args.file,
# preprocessor=lambda x: average_channels(whiten(unsharp_masking(medianfilter(center(x.squeeze()))))))
# if args.probability:
# P = np.load(args.probability)
# else:
# P = None
stacks = Stacks().project().fetch.as_dict()
for i, key in enumerate(stacks):
print(i, '\t'.join(key.values()))
key = stacks[int(input('Please select dataset: '))]
cells = (CellLocations() & key).project().fetch.as_dict()
if len(cells) > 0:
for i, ckey in enumerate(cells):
print(i, '\t'.join(ckey.values()))
selection = input('Do you want to load a set of locations? [press enter for no] ')
if len(selection) > 0:
key = cells[int(selection)]
cells = (CellLocations() & key).fetch1['cells']
else:
cells = None
prep = list(preprocessors.keys())
for i, name in enumerate(prep):
print(i, name)
key['preprocessing'] = prep[int(input('Please select the preprocessing. '))]
X = Stacks().load(key)
labeler = CellLabeler(X, cells)
|
|
#!/usr/bin/env python
"""Registry for filters and abstract classes for basic filter functionality."""
import collections
import glob
import itertools
import os
import yaml
import logging
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib.aff4_objects import collections as collections_aff4
from grr.lib.checks import filters
from grr.lib.checks import hints
from grr.lib.checks import triggers
from grr.lib.rdfvalues import anomaly as anomaly_rdf
from grr.lib.rdfvalues import structs as structs_rdf
from grr.proto import checks_pb2
class Error(Exception):
"""Base error class."""
class DefinitionError(Error):
"""A check was defined badly."""
class ProcessingError(Error):
"""A check generated bad results."""
def ValidateMultiple(component, hint):
errors = []
for item in component:
try:
item.Validate()
except (DefinitionError) as e:
errors.append(str(e))
if errors:
raise DefinitionError("%s:\n %s" % (hint, "\n ".join(errors)))
def MatchStrToList(match=None):
# Set a default match type of ANY, if unset.
# Allow multiple match types, either as a list or as a string.
if match is None:
match = ["ANY"]
elif isinstance(match, basestring):
match = match.split()
return match
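# Illustrative examples for the helper above (not part of the original module):
#   MatchStrToList()            -> ["ANY"]
#   MatchStrToList("NONE ALL")  -> ["NONE", "ALL"]
#   MatchStrToList(["ONE"])     -> ["ONE"]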
class CheckResult(structs_rdf.RDFProtoStruct):
"""Results of a single check performed on a host."""
protobuf = checks_pb2.CheckResult
def __nonzero__(self):
return bool(self.anomaly)
def ExtendAnomalies(self, other):
"""Merge anomalies from another CheckResult."""
for o in other:
if o is not None:
self.anomaly.Extend(list(o.anomaly))
class CheckResultsCollection(collections_aff4.RDFValueCollection):
"""A collection of check results."""
_rdf_type = CheckResult
class CheckResults(structs_rdf.RDFProtoStruct):
"""All results for a single host."""
protobuf = checks_pb2.CheckResults
def __nonzero__(self):
return bool(self.result)
class Target(structs_rdf.RDFProtoStruct):
"""Definitions of hosts to target."""
protobuf = checks_pb2.Target
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Target, self).__init__(initializer=initializer, age=age, **conf)
def __nonzero__(self):
return any([self.cpe, self.os, self.label])
def Validate(self):
if self.cpe:
# TODO(user): Add CPE library to GRR.
pass
if self.os:
pass
if self.label:
pass
class Check(structs_rdf.RDFProtoStruct):
"""A definition of a problem, and ways to detect it.
Checks contain an identifier of a problem (check_id) that is a reference to an
externally or internally defined vulnerability.
Checks use one or more Methods to determine if an issue exists. Methods define
data collection and processing, and return an Anomaly if the conditions tested
by the method weren't met.
Checks can define a default platform, OS or environment to target. This
is passed to each Method, but can be overridden by more specific definitions.
"""
protobuf = checks_pb2.Check
def __init__(self, initializer=None, age=None, check_id=None, target=None,
match=None, method=None, hint=None):
super(Check, self).__init__(initializer=initializer, age=age)
self.check_id = check_id
self.match = MatchStrToList(match)
self.hint = Hint(hint, reformat=False)
self.target = target
if method is None:
method = []
self.triggers = triggers.Triggers()
self.matcher = Matcher(self.match, self.hint)
for cfg in method:
# Use the value of "target" as a default for each method, if defined.
# Targets defined in methods or probes override this default value.
if hint:
cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
if target:
cfg.setdefault("target", target)
# Create the method and add its triggers to the check.
m = Method(**cfg)
self.method.append(m)
self.triggers.Update(m.triggers, callback=m)
self.artifacts = set([t.artifact for t in self.triggers.conditions])
def SelectChecks(self, conditions):
"""Identifies which check methods to use based on host attributes.
Queries the trigger map for any check methods that apply to a combination of
OS, CPE and/or label.
Args:
conditions: A list of Condition objects.
Returns:
A list of method callbacks that should perform checks.
"""
return self.triggers.Calls(conditions)
def UsesArtifact(self, artifacts):
"""Determines if the check uses the specified artifact.
Args:
artifacts: Either a single artifact name, or a list of artifact names
Returns:
True if the check uses a specific artifact.
"""
# If artifact is a single string, see if it is in the list of artifacts
# as-is. Otherwise, test whether any of the artifacts passed in to this
# function exist in the list of artifacts.
if isinstance(artifacts, basestring):
return artifacts in self.artifacts
else:
return any(True for artifact in artifacts if artifact in self.artifacts)
def Parse(self, conditions, host_data):
"""Runs methods that evaluate whether collected host_data has an issue.
Args:
conditions: A list of conditions to determine which Methods to trigger.
host_data: A map of artifacts and rdf data.
Returns:
A CheckResult populated with Anomalies if an issue exists.
"""
result = CheckResult(check_id=self.check_id)
methods = self.SelectChecks(conditions)
result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])
return result
def Validate(self):
"""Check the method is well constructed."""
if not self.check_id:
raise DefinitionError("Check has missing check_id value")
cls_name = self.check_id
if not self.method:
raise DefinitionError("Check %s has no methods" % cls_name)
ValidateMultiple(self.method,
"Check %s has invalid method definitions" % cls_name)
class Method(structs_rdf.RDFProtoStruct):
"""A specific test method using 0 or more filters to process data."""
protobuf = checks_pb2.Method
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Method, self).__init__(initializer=initializer, age=age)
probe = conf.get("probe", {})
resource = conf.get("resource", {})
hint = conf.get("hint", {})
target = conf.get("target", {})
if hint:
# Add the hint to children.
for cfg in probe:
cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
self.probe = [Probe(**cfg) for cfg in probe]
self.hint = Hint(hint, reformat=False)
self.match = MatchStrToList(kwargs.get("match"))
self.matcher = Matcher(self.match, self.hint)
self.resource = [rdfvalue.Dict(**r) for r in resource]
self.target = Target(**target)
self.triggers = triggers.Triggers()
for p in self.probe:
# If the probe has a target, use it. Otherwise, use the method's target.
target = p.target or self.target
self.triggers.Add(p.artifact, target, p)
def Parse(self, conditions, host_data):
"""Runs probes that evaluate whether collected data has an issue.
Args:
conditions: The trigger conditions.
host_data: A map of artifacts and rdf data.
Returns:
Anomalies if an issue exists.
"""
processed = []
probes = self.triggers.Calls(conditions)
for p in probes:
# Get the data required for the probe.
rdf_data = host_data.get(p.artifact)
try:
result = p.Parse(rdf_data)
except ProcessingError as e:
raise ProcessingError("Bad artifact %s: %s" % (p.artifact, e))
if result:
processed.append(result)
# Matcher compares the number of probes that triggered with results.
return self.matcher.Detect(probes, processed)
def Validate(self):
"""Check the Method is well constructed."""
ValidateMultiple(self.probe, "Method has invalid probes")
ValidateMultiple(self.target, "Method has invalid target")
ValidateMultiple(self.hint, "Method has invalid hint")
class Probe(structs_rdf.RDFProtoStruct):
"""The suite of filters applied to host data."""
protobuf = checks_pb2.Probe
def __init__(self, initializer=None, age=None, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
conf["match"] = MatchStrToList(kwargs.get("match"))
super(Probe, self).__init__(initializer=initializer, age=age, **conf)
if self.filters:
handler = filters.GetHandler(mode=self.mode)
else:
handler = filters.GetHandler()
self.baseliner = handler(artifact=self.artifact, filters=self.baseline)
self.handler = handler(artifact=self.artifact, filters=self.filters)
hinter = Hint(conf.get("hint", {}), reformat=False)
self.matcher = Matcher(conf["match"], hinter)
def Parse(self, rdf_data):
"""Process rdf data through filters. Test if results match expectations.
Processing of rdf data is staged by a filter handler, which manages the
processing of host data. The output of the filters are compared against
expected results.
Args:
      rdf_data: A list containing 0 or more rdf values.
Returns:
An anomaly if data didn't match expectations.
Raises:
ProcessingError: If rdf_data is not a handled type.
"""
if not isinstance(rdf_data, (list, set)):
raise ProcessingError("Bad host data format: %s" % type(rdf_data))
if self.baseline:
comparison = self.baseliner.Parse(rdf_data)
else:
comparison = rdf_data
found = self.handler.Parse(comparison)
results = self.hint.Render(found)
return self.matcher.Detect(comparison, results)
def Validate(self):
"""Check the test set is well constructed."""
ValidateMultiple(self.target, "Probe has invalid target")
self.baseliner.Validate()
self.handler.Validate()
self.hint.Validate()
class Filter(structs_rdf.RDFProtoStruct):
"""Generic filter to provide an interface for different types of filter."""
protobuf = checks_pb2.Filter
def __init__(self, initializer=None, age=None, **kwargs):
# FIXME(sebastianw): Probe seems to pass in the configuration for filters
# as a dict in initializer, rather than as kwargs.
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Filter, self).__init__(initializer=initializer, age=age, **conf)
filter_name = self.type or "Filter"
self._filter = filters.Filter.GetFilter(filter_name)
def Parse(self, rdf_data):
"""Process rdf data through the filter.
Filters sift data according to filter rules. Data that passes the filter
rule is kept, other data is dropped.
If no filter method is provided, the data is returned as a list.
    Otherwise, the items that meet the filter conditions are returned in a list.
Args:
rdf_data: Host data that has already been processed by a Parser into RDF.
Returns:
A list containing data items that matched the filter rules.
"""
if self._filter:
return list(self._filter.Parse(rdf_data, self.expression))
return rdf_data
def Validate(self):
"""The filter exists, and has valid filter and hint expressions."""
if self.type not in filters.Filter.classes:
raise DefinitionError("Undefined filter type %s" % self.type)
self._filter.Validate(self.expression)
ValidateMultiple(self.hint, "Filter has invalid hint")
class Hint(structs_rdf.RDFProtoStruct):
"""Human-formatted descriptions of problems, fixes and findings."""
protobuf = checks_pb2.Hint
def __init__(self, initializer=None, age=None, reformat=True, **kwargs):
if isinstance(initializer, dict):
conf = initializer
initializer = None
else:
conf = kwargs
super(Hint, self).__init__(initializer=initializer, age=age, **conf)
if not self.max_results:
self.max_results = config_lib.CONFIG.Get("Checks.max_results")
if reformat:
self.hinter = hints.Hinter(self.format)
else:
self.hinter = hints.Hinter()
def Render(self, rdf_data):
"""Processes data according to formatting rules."""
report_data = rdf_data[:self.max_results]
results = [self.hinter.Render(rdf) for rdf in report_data]
extra = len(rdf_data) - len(report_data)
if extra > 0:
results.append("...plus another %d issues." % extra)
return results
def Explanation(self, state):
"""Creates an anomaly explanation string."""
if self.problem:
return "%s: %s" % (state, self.problem)
def Validate(self):
"""Ensures that required values are set and formatting rules compile."""
# TODO(user): Default format string.
if self.problem:
pass
class Matcher(object):
"""Performs comparisons between baseline and result data."""
def __init__(self, matches, hint):
method_map = {"NONE": self.GotNone,
"ONE": self.GotSingle,
"SOME": self.GotMultiple,
"ANY": self.GotAny,
"ALL": self.GotAll}
    try:
      self.detectors = [method_map[str(match)] for match in matches]
    except KeyError as e:
      raise DefinitionError("Match uses undefined check condition: %s" % e)
self.hint = hint
def Detect(self, baseline, host_data):
"""Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
"""
result = CheckResult()
for detector in self.detectors:
for finding in detector(baseline, host_data):
if finding:
result.ExtendAnomalies(finding)
if result:
return result
def Issue(self, state, results):
"""Collect anomalous findings into a CheckResult.
Comparisons with anomalous conditions collect anomalies into a single
CheckResult message. The contents of the result varies depending on whether
the method making the comparison is a Check, Method or Probe.
- Probes evaluate raw host data and generate Anomalies. These are condensed
into a new CheckResult.
- Checks and Methods evaluate the results of probes (i.e. CheckResults). If
there are multiple probe results, all probe anomalies are aggregated into
a single new CheckResult for the Check or Method.
Args:
state: A text description of what combination of results were anomalous
(e.g. some condition was missing or present.)
results: Anomalies or CheckResult messages.
Returns:
A CheckResult message.
"""
result = CheckResult()
anomaly = anomaly_rdf.Anomaly(type="ANALYSIS_ANOMALY",
explanation=self.hint.Explanation(state))
# If there are CheckResults we're aggregating methods or probes.
# Merge all current results into one CheckResult.
# Otherwise, the results are raw host data.
# Generate a new CheckResult and add the specific findings.
if results and all(isinstance(r, rdfvalue.CheckResult) for r in results):
result.ExtendAnomalies(results)
else:
anomaly.finding = self.hint.Render(results)
result.anomaly = anomaly
return result
def GotNone(self, _, results):
"""Anomaly for no results, an empty list otherwise."""
if not results:
return self.Issue("Missing attribute", [])
return []
def GotSingle(self, _, results):
"""Anomaly for exactly one result, an empty list otherwise."""
if len(results) == 1:
return self.Issue("Found one", results)
return []
def GotMultiple(self, _, results):
"""Anomaly for >1 result, an empty list otherwise."""
if len(results) > 1:
return self.Issue("Found multiple", results)
return []
def GotAny(self, _, results):
"""Anomaly for 1+ results, an empty list otherwise."""
if results:
return self.Issue("Found", results)
return []
def GotAll(self, baseline, results):
"""Anomaly if baseline vs result counts differ, an empty list otherwise."""
num_base = len(baseline)
num_rslt = len(results)
if num_rslt > num_base:
raise ProcessingError("Filter generated more results than base data: "
"%s > %s" % (num_rslt, num_base))
if num_rslt == num_base and num_base > 0:
return self.Issue("Found all", results)
return []
class CheckRegistry(object):
"""A class to register the mapping between checks and host data.
This is used to trigger all relevant checks when we collect the data.
The method registry maps the combination of platform, environment and host
data required by a given method.
"""
checks = {}
triggers = triggers.Triggers()
@classmethod
def Clear(cls):
"""Remove all checks and triggers from the registry."""
cls.checks = {}
cls.triggers = triggers.Triggers()
@classmethod
def RegisterCheck(cls, check, source="unknown", overwrite_if_exists=False):
"""Adds a check to the registry, refresh the trigger to check map."""
if not overwrite_if_exists and check.check_id in cls.checks:
raise DefinitionError("Check named %s already exists and "
"overwrite_if_exists is set to False." %
check.check_id)
check.loaded_from = source
cls.checks[check.check_id] = check
cls.triggers.Update(check.triggers, check)
@staticmethod
def _AsList(arg):
"""Encapsulates an argument in a list, if it's not already iterable."""
if isinstance(arg, basestring) or not isinstance(arg, collections.Iterable):
return [arg]
else:
return list(arg)
@classmethod
def Conditions(cls, artifact=None, os_name=None, cpe=None, labels=None):
"""Provide a series of condition tuples.
A Target can specify multiple artifact, os_name, cpe or label entries. These
are expanded to all distinct tuples. When an entry is undefined or None, it
is treated as a single definition of None, meaning that the condition does
not apply.
Args:
artifact: Names of artifacts that should trigger an action.
os_name: Names of OS' that should trigger an action.
cpe: CPE strings that should trigger an action.
labels: Host labels that should trigger an action.
Yields:
a permuted series of (artifact, os_name, cpe, label) tuples.
"""
artifact = cls._AsList(artifact)
os_name = cls._AsList(os_name)
cpe = cls._AsList(cpe)
labels = cls._AsList(labels)
for condition in itertools.product(artifact, os_name, cpe, labels):
yield condition
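    # For example (illustrative values, not from the original module):
    #   Conditions(artifact=["SshdConfig"], os_name=["Linux", "Darwin"]) yields
    #   ("SshdConfig", "Linux", None, None) and ("SshdConfig", "Darwin", None, None).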
@classmethod
def FindChecks(cls, artifact=None, os_name=None, cpe=None, labels=None):
"""Takes targeting info, identifies relevant checks.
FindChecks will return results when a host has the conditions necessary for
    a check to occur. Conditions with partial results are not returned. For
    example, if a check targets os_name=["Linux"], labels=["foo"] and a host
    only has the os_name=["Linux"] attribute, FindChecks will not return that
    check.
Args:
artifact: 0+ artifact names.
os_name: 0+ OS names.
cpe: 0+ CPE identifiers.
labels: 0+ GRR labels.
Returns:
the check_ids that apply.
"""
check_ids = set()
conditions = list(cls.Conditions(artifact, os_name, cpe, labels))
for chk_id, chk in cls.checks.iteritems():
# A quick test to determine whether to dive into the checks.
if chk.UsesArtifact(artifact):
for condition in conditions:
if chk.triggers.Match(*condition):
check_ids.add(chk_id)
break # No need to keep checking other conditions.
return check_ids
@classmethod
def SelectArtifacts(cls, os_name=None, cpe=None, labels=None):
"""Takes targeting info, identifies artifacts to fetch.
Args:
os_name: 0+ OS names.
cpe: 0+ CPE identifiers.
labels: 0+ GRR labels.
Returns:
the artifacts that should be collected.
"""
results = set()
for condition in cls.Conditions(None, os_name, cpe, labels):
trigger = condition[1:]
for chk in cls.checks.values():
results.update(chk.triggers.Artifacts(*trigger))
return results
@classmethod
def Process(cls, host_data, os_name=None, cpe=None, labels=None):
"""Runs checks over all host data.
Args:
host_data: The data collected from a host, mapped to artifact name.
os_name: 0+ OS names.
cpe: 0+ CPE identifiers.
labels: 0+ GRR labels.
Yields:
A CheckResult message for each check that was performed.
"""
# All the conditions that apply to this host.
artifacts = host_data.keys()
check_ids = cls.FindChecks(artifacts, os_name, cpe, labels)
conditions = list(cls.Conditions(artifacts, os_name, cpe, labels))
for check_id in check_ids:
try:
chk = cls.checks[check_id]
yield chk.Parse(conditions, host_data)
except ProcessingError as e:
logging.warn("Check ID %s raised: %s" % (check_id, e))
def CheckHost(host_data, os_name=None, cpe=None, labels=None):
"""Perform all checks on a host using acquired artifacts.
Checks are selected based on the artifacts available and the host attributes
(e.g. os_name/cpe/labels) provided as either parameters, or in the
knowledgebase artifact.
A KnowledgeBase artifact should be provided that contains, at a minimum:
- OS
- Hostname or IP
  Other knowledgebase attributes may be required for specific checks.
CPE is currently unused, pending addition of a CPE module in the GRR client.
Labels are arbitrary string labels attached to a client.
Args:
host_data: A dictionary with artifact names as keys, and rdf data as values.
os_name: An OS name (optional).
cpe: A CPE string (optional).
labels: An iterable of labels (optional).
Returns:
A CheckResults object that contains results for all checks that were
performed on the host.
"""
# Get knowledgebase, os_name from hostdata
kb = host_data.get("KnowledgeBase")
if os_name is None:
os_name = kb.os
if cpe is None:
# TODO(user): Get CPE (requires new artifact/parser)
pass
if labels is None:
# TODO(user): Get labels (see grr/lib/export.py for acquisition
# from client)
pass
return CheckRegistry.Process(host_data, os_name=os_name, cpe=cpe,
labels=labels)
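# A hedged usage sketch (host_data is keyed by artifact name, values are rdf data):
#   for check_result in CheckHost(host_data, os_name="Linux"):
#     if check_result:
#       logging.info("Check %s flagged %d anomalies.",
#                    check_result.check_id, len(check_result.anomaly))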
def LoadConfigsFromFile(file_path):
"""Loads check definitions from a file."""
with open(file_path) as data:
return {d["check_id"]: d for d in yaml.safe_load_all(data)}
def LoadCheckFromFile(file_path, check_id, overwrite_if_exists=True):
"""Load a single check from a file."""
configs = LoadConfigsFromFile(file_path)
conf = configs.get(check_id)
check = rdfvalue.Check(**conf)
check.Validate()
CheckRegistry.RegisterCheck(check, source="file:%s" % file_path,
overwrite_if_exists=overwrite_if_exists)
logging.debug("Loaded check %s from %s", check.check_id, file_path)
return check
def LoadChecksFromFiles(file_paths, overwrite_if_exists=True):
"""Load the checks defined in the specified files."""
loaded = []
for file_path in file_paths:
configs = LoadConfigsFromFile(file_path)
for conf in configs.values():
check = rdfvalue.Check(**conf)
# Validate will raise if the check doesn't load.
check.Validate()
loaded.append(check)
CheckRegistry.RegisterCheck(check, source="file:%s" % file_path,
overwrite_if_exists=overwrite_if_exists)
logging.debug("Loaded check %s from %s", check.check_id, file_path)
return loaded
def LoadChecksFromDirs(dir_paths, overwrite_if_exists=True):
"""Load checks from all yaml files in the specified directories."""
loaded = []
for dir_path in dir_paths:
cfg_files = glob.glob(os.path.join(dir_path, "*.yaml"))
loaded.extend(LoadChecksFromFiles(cfg_files, overwrite_if_exists))
return loaded
class CheckLoader(registry.InitHook):
"""Loads checks from the filesystem."""
# TODO(user): Add check loading from datastore.
def RunOnce(self):
LoadChecksFromDirs(config_lib.CONFIG["Checks.config_dir"])
LoadChecksFromFiles(config_lib.CONFIG["Checks.config_files"])
logging.debug("Loaded checks: %s", ",".join(sorted(CheckRegistry.checks)))
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'LinkedInfo.user_pk'
db.add_column(u'accounting_tools_linkedinfo', 'user_pk',
self.gf('django.db.models.fields.PositiveIntegerField')(default=1),
keep_default=False)
def backwards(self, orm):
# Deleting field 'LinkedInfo.user_pk'
db.delete_column(u'accounting_tools_linkedinfo', 'user_pk')
models = {
u'accounting_core.account': {
'Meta': {'unique_together': "(('name', 'accounting_year'), ('account_number', 'accounting_year'))", 'object_name': 'Account'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountCategory']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'visibility': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'accounting_core.accountcategory': {
'Meta': {'unique_together': "(('name', 'accounting_year'),)", 'object_name': 'AccountCategory'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountCategory']", 'null': 'True', 'blank': 'True'})
},
u'accounting_core.accountingyear': {
'Meta': {'object_name': 'AccountingYear'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_preparing'", 'max_length': '255'}),
'subvention_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting_core.costcenter': {
'Meta': {'unique_together': "(('name', 'accounting_year'), ('account_number', 'accounting_year'))", 'object_name': 'CostCenter'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_tools.expenseclaim': {
'Meta': {'object_name': 'ExpenseClaim'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'costcenter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.CostCenter']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'nb_proofs': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.expenseclaimfile': {
'Meta': {'object_name': 'ExpenseClaimFile'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': u"orm['accounting_tools.ExpenseClaim']"}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploader': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.expenseclaimline': {
'Meta': {'object_name': 'ExpenseClaimLine'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.Account']"}),
'expense_claim': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['accounting_tools.ExpenseClaim']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'proof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tva': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value_ttc': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
u'accounting_tools.expenseclaimlogging': {
'Meta': {'object_name': 'ExpenseClaimLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.ExpenseClaim']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.internaltransfer': {
'Meta': {'object_name': 'InternalTransfer'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.Account']"}),
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'cost_center_from': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'internal_transfer_from'", 'to': u"orm['accounting_core.CostCenter']"}),
'cost_center_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'internal_transfer_to'", 'to': u"orm['accounting_core.CostCenter']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'})
},
u'accounting_tools.internaltransferlogging': {
'Meta': {'object_name': 'InternalTransferLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.InternalTransfer']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.internaltransfertag': {
'Meta': {'object_name': 'InternalTransferTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'to': u"orm['accounting_tools.InternalTransfer']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'accounting_tools.invoice': {
'Meta': {'object_name': 'Invoice'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'annex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'costcenter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.CostCenter']"}),
'custom_bvr_number': ('django.db.models.fields.CharField', [], {'max_length': '59', 'null': 'True', 'blank': 'True'}),
'date_and_place': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'display_account': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_bvr': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'ending': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'greetings': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preface': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sign': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_preparing'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'accounting_tools.invoiceline': {
'Meta': {'object_name': 'InvoiceLine'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': u"orm['accounting_tools.Invoice']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'quantity': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '20', 'decimal_places': '0'}),
'tva': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value_ttc': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
u'accounting_tools.invoicelogging': {
'Meta': {'object_name': 'InvoiceLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.Invoice']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.invoicetag': {
'Meta': {'object_name': 'InvoiceTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'to': u"orm['accounting_tools.Invoice']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'accounting_tools.linkedinfo': {
'Meta': {'object_name': 'LinkedInfo'},
'address': ('django.db.models.fields.TextField', [], {}),
'bank': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'iban_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user_pk': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'accounting_tools.subvention': {
'Meta': {'unique_together': "(('unit', 'unit_blank_name', 'accounting_year'),)", 'object_name': 'Subvention'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'amount_asked': ('django.db.models.fields.IntegerField', [], {}),
'amount_given': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment_root': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'mobility_asked': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mobility_given': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'unit_blank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'unit_blank_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']", 'null': 'True', 'blank': 'True'})
},
u'accounting_tools.subventionfile': {
'Meta': {'object_name': 'SubventionFile'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': u"orm['accounting_tools.Subvention']"}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploader': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.subventionline': {
'Meta': {'object_name': 'SubventionLine'},
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nb_spec': ('django.db.models.fields.SmallIntegerField', [], {}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'subvention': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['accounting_tools.Subvention']"})
},
u'accounting_tools.subventionlogging': {
'Meta': {'object_name': 'SubventionLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.Subvention']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.withdrawal': {
'Meta': {'object_name': 'Withdrawal'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'costcenter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.CostCenter']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desired_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"}),
'withdrawn_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting_tools.withdrawalfile': {
'Meta': {'object_name': 'WithdrawalFile'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': u"orm['accounting_tools.Withdrawal']"}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploader': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.withdrawallogging': {
'Meta': {'object_name': 'WithdrawalLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.Withdrawal']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'accounting_tools.withdrawaltag': {
'Meta': {'object_name': 'WithdrawalTag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'to': u"orm['accounting_tools.Withdrawal']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'units.unit': {
'Meta': {'object_name': 'Unit'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['accounting_tools']
|
|
import os
import pytest
from unittest.mock import Mock
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.test.utils import override_settings
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo import models as amo_models
from olympia.amo.urlresolvers import reverse
from olympia.amo.tests import TestCase
from olympia.core.tests.m2m_testapp.models import Artist, Singer, Song
from olympia.users.models import UserProfile
from olympia.zadmin.models import Config
pytestmark = pytest.mark.django_db
class ManualOrderTest(TestCase):
fixtures = ('base/addon_3615', 'base/addon_5299_gcal', 'base/addon_40')
def test_ordering(self):
"""Given a specific set of primary keys, assure that we return addons
in that order."""
semi_arbitrary_order = [40, 5299, 3615]
addons = amo_models.manual_order(
Addon.objects.all(), semi_arbitrary_order)
assert semi_arbitrary_order == [addon.id for addon in addons]
def test_use_primary_db():
local = amo_models.multidb.pinning._locals
assert not getattr(local, 'pinned', False)
with amo_models.use_primary_db():
assert local.pinned
with amo_models.use_primary_db():
assert local.pinned
assert local.pinned
assert not local.pinned
class TestModelBase(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestModelBase, self).setUp()
self.saved_cb = amo_models._on_change_callbacks.copy()
amo_models._on_change_callbacks.clear()
self.cb = Mock()
self.cb.__name__ = 'testing_mock_callback'
Addon.on_change(self.cb)
def tearDown(self):
amo_models._on_change_callbacks = self.saved_cb
super(TestModelBase, self).tearDown()
def test_multiple_ignored(self):
cb = Mock()
cb.__name__ = 'something'
old = len(amo_models._on_change_callbacks[Addon])
Addon.on_change(cb)
assert len(amo_models._on_change_callbacks[Addon]) == old + 1
Addon.on_change(cb)
assert len(amo_models._on_change_callbacks[Addon]) == old + 1
def test_change_called_on_new_instance_save(self):
for create_addon in (Addon, Addon.objects.create):
addon = create_addon(disabled_by_user=False,
type=amo.ADDON_EXTENSION)
addon.disabled_by_user = True
addon.save()
assert self.cb.called
kw = self.cb.call_args[1]
assert not kw['old_attr']['disabled_by_user']
assert kw['new_attr']['disabled_by_user']
assert kw['instance'].id == addon.id
assert kw['sender'] == Addon
def test_change_called_on_update(self):
addon = Addon.objects.get(pk=3615)
addon.update(disabled_by_user=True)
assert self.cb.called
kw = self.cb.call_args[1]
assert not kw['old_attr']['disabled_by_user']
assert kw['new_attr']['disabled_by_user']
assert kw['instance'].id == addon.id
assert kw['sender'] == Addon
def test_change_called_on_save(self):
addon = Addon.objects.get(pk=3615)
addon.disabled_by_user = True
addon.save()
assert self.cb.called
kw = self.cb.call_args[1]
assert not kw['old_attr']['disabled_by_user']
assert kw['new_attr']['disabled_by_user']
assert kw['instance'].id == addon.id
assert kw['sender'] == Addon
def test_change_is_not_recursive(self):
class fn:
called = False
def callback(old_attr=None, new_attr=None, instance=None,
sender=None, **kw):
fn.called = True
# Both save and update should be protected:
instance.update(disabled_by_user=True)
instance.save()
Addon.on_change(callback)
addon = Addon.objects.get(pk=3615)
assert not addon.disabled_by_user
addon.save()
assert fn.called
# No exception = pass
def test_get_or_create_read_committed(self):
"""Test get_or_create behavior.
This test originally tested our own `safer_get_or_create` method
but since we switched to using 'read committed' isolation level
Django's builtin `get_or_create` works perfectly for us now.
"""
data = {'guid': '123', 'type': amo.ADDON_EXTENSION}
a, c = Addon.objects.get_or_create(**data)
assert c
b, c = Addon.objects.get_or_create(**data)
assert not c
assert a == b
def test_reload(self):
# Make it an extension.
addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
addon.save()
# Make it a theme.
Addon.objects.get(id=addon.id).update(type=amo.ADDON_STATICTHEME)
# Still an extension.
assert addon.type == amo.ADDON_EXTENSION
# Reload. And it's magically now a theme.
assert addon.reload().type == amo.ADDON_STATICTHEME
assert addon.type == amo.ADDON_STATICTHEME
def test_get_unfiltered_manager(self):
assert Addon.get_unfiltered_manager() == Addon.unfiltered
assert UserProfile.get_unfiltered_manager() == UserProfile.objects
def test_get_url_path(self):
addon = Addon.objects.get(pk=3615)
assert addon.get_url_path() == reverse(
'addons.detail', args=[addon.slug], add_prefix=True)
def test_get_absolute_url_with_frontend_view(self):
addon = Addon.objects.get(pk=3615)
relative = reverse('addons.detail', args=[addon.slug], add_prefix=True)
with override_settings(EXTERNAL_SITE_URL=settings.SITE_URL):
# The normal case
assert addon.get_absolute_url() == settings.SITE_URL + relative
with override_settings(EXTERNAL_SITE_URL='https://example.com'):
# When an external site url has been set
assert addon.get_absolute_url() == (
'https://example.com' + relative)
def test_get_absolute_url_with_django_view(self):
file = Addon.objects.get(pk=3615).current_version.all_files[0]
relative = os.path.join(
reverse('downloads.file', args=[file.id]), file.filename)
with override_settings(EXTERNAL_SITE_URL=settings.SITE_URL):
# The normal case
assert file.get_absolute_url() == settings.SITE_URL + relative
with override_settings(EXTERNAL_SITE_URL='https://example.com'):
# downloads.file is a django served view so the same.
assert file.get_absolute_url() == settings.SITE_URL + relative
def test_get_admin_url_path(self):
addon = Addon.objects.get(pk=3615)
expected_url_path = reverse(
'admin:addons_addon_change', args=(addon.pk,))
assert addon.get_admin_url_path() == expected_url_path
def test_get_admin_absolute_url(self):
addon = Addon.objects.get(pk=3615)
expected_url_path = reverse(
'admin:addons_addon_change', args=(addon.pk,))
with override_settings(EXTERNAL_SITE_URL=settings.SITE_URL):
# The normal case
assert addon.get_admin_absolute_url() == (
settings.SITE_URL + expected_url_path)
with override_settings(EXTERNAL_SITE_URL='https://example.com'):
# When an external site url has been set, it shouldn't matter since
# admin must not live there.
assert addon.get_admin_absolute_url() == (
settings.SITE_URL + expected_url_path)
class BasePreviewMixin(object):
def get_object(self):
raise NotImplementedError
def test_filename(self):
preview = self.get_object()
assert preview.thumbnail_path.endswith('.png')
assert preview.image_path.endswith('.png')
assert preview.original_path.endswith('.png')
def test_filename_in_url(self):
preview = self.get_object()
assert '.png?modified=' in preview.thumbnail_url
assert '.png?modified=' in preview.image_url
def check_delete(self, preview, filename):
"""
Test that when the Preview object is deleted, its image, thumb, and
original are deleted from the filesystem.
"""
try:
with storage.open(filename, 'w') as f:
f.write('sample data\n')
assert storage.exists(filename)
preview.delete()
assert not storage.exists(filename)
finally:
if storage.exists(filename):
storage.delete(filename)
def test_delete_image(self):
preview = self.get_object()
self.check_delete(preview, preview.image_path)
def test_delete_thumbnail(self):
preview = self.get_object()
self.check_delete(preview, preview.thumbnail_path)
def test_delete_original(self):
preview = self.get_object()
self.check_delete(preview, preview.original_path)
class BaseQuerysetTestCase(TestCase):
def test_queryset_transform(self):
# We test with the Config model because it's a simple model
# with no translated fields, no caching or other fancy features.
Config.objects.create(key='a', value='Zero')
first = Config.objects.create(key='b', value='First')
second = Config.objects.create(key='c', value='Second')
Config.objects.create(key='d', value='Third')
Config.objects.create(key='e', value='')
seen_by_first_transform = []
seen_by_second_transform = []
with self.assertNumQueries(0):
# No database hit yet, everything is still lazy.
qs = amo_models.BaseQuerySet(Config)
qs = qs.exclude(value='').order_by('key')[1:3]
qs = qs.transform(
lambda items: seen_by_first_transform.extend(list(items)))
qs = qs.transform(
lambda items: seen_by_second_transform.extend(
list(reversed(items))))
with self.assertNumQueries(1):
assert list(qs) == [first, second]
# Check that each transform function was hit correctly, once.
assert seen_by_first_transform == [first, second]
assert seen_by_second_transform == [second, first]
class TestFilterableManyToManyField(TestCase):
def setUp(self):
self.bob = Artist.objects.create()
self.sue = Artist.objects.create()
self.joe = Artist.objects.create()
self.twinkle_twinkle = Song.objects.create()
self.humpty_dumpty = Song.objects.create()
self.twinkle_twinkle.performers.add(self.bob)
self.twinkle_twinkle.performers.add(self.joe)
self.sue.songs.add(self.humpty_dumpty)
self.humpty_dumpty.performers.add(self.joe)
def test_basic(self):
assert Singer.objects.count() == 4
assert list(self.bob.songs.all()) == [self.twinkle_twinkle]
assert list(self.sue.songs.all()) == [self.humpty_dumpty]
assert list(self.joe.songs.all()) == [
self.twinkle_twinkle, self.humpty_dumpty]
assert list(self.twinkle_twinkle.performers.all()) == [
self.bob, self.joe]
assert list(self.humpty_dumpty.performers.all()) == [
self.sue, self.joe]
def test_through_filtered_out(self):
twinkle_joe_collab = Singer.objects.get(
song=self.twinkle_twinkle, artist=self.joe)
twinkle_joe_collab.credited = False
twinkle_joe_collab.save()
# the relation still exists
assert Singer.objects.count() == 4
# but now doesn't show up in the field querysets - on the Song side
assert list(self.joe.songs.all()) == [
self.humpty_dumpty]
# and the reverse too
assert list(self.twinkle_twinkle.performers.all()) == [
self.bob]
# But Joe is still on the other song
assert list(self.humpty_dumpty.performers.all()) == [
self.sue, self.joe]
|
|
#/u/GoldenSights
import traceback
from dateutil.parser import parse as dateparse
import string
import datetime
import time
import praw
import sqlite3
import re
""" USER CONFIG """
USERAGENT = ""
#Describe the bot and what it does. Include your username
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
SUBREDDIT = "Goldtesting"
#This is the subreddit where the bot finds the schedules
#It should be private with only the team of moderators
TITLESEPARATOR = "||"
#This separates the timestamp, the target subreddit, and the title
#This should not be a naturally occurring part of any title
#Example: "15 December 2014 || GoldTesting || Welcome to the subreddit"
#          ^Time to post       ^Sub to post    ^Title of post
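#The subreddit segment of the title may also carry optional flags that
#processpost() recognizes: [d] distinguishes the post, [s] stickies it,
#[f:text] sets flair text, and [fc:css] sets the flair CSS class.
#Hypothetical example (flag values are illustrative):
#"15 December 2014 || GoldTesting [d][s][f:News] || Welcome to the subreddit"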
IGNORE_FLAG = "#"
#If this character is THE FIRST CHARACTER IN THE TITLE,
#The bot will ignore that post. Used for meta / discussion.
SCHEDULEDFLAIR_TEXT = "Scheduled!"
SCHEDULEDFLAIR_CSS = "scheduled"
#This flair will be assigned to the source when the source is scheduled
POSTEDFLAIR_TEXT = "Post made!"
POSTEDFLAIR_CSS = "posted"
#This flair will be assigned to the source when the post is made
MAXPOSTS = 3
#The number of items you want to get from /new. Recommended 100
ALLOWOTHEREDITS = False
#Are users allowed to edit other peoples' post schedules?
WAIT = 30
#How many seconds in between loop cycles. Completely inactive during this time.
ADMINS = ["ApexRedditr", "GoldenSights"]
#These are the people who will get tracebacks when the bot has problems.
TRACEBACK_SUBJECT = "SchedulizerM Error traceback"
POSTEDCOMMENT = "Your post to /r/%s has been created. %s"
#Made in the source when the post is made
FOOTER = """
_____
If any information is incorrect, reply to this comment with the incorrect key,
a colon, and new value. See the
[Bot code](https://github.com/voussoir/reddit/tree/master/Schedulizer-ModTeam)
page for examples. Only make 1 edit per line.
A foolproof time format is
"DD Monthname YYYY HH:MM". All times are in UTC
([Timezone map](http://www.timeanddate.com/time/map/))
Deleting your post will cause it to be removed from the schedule.
If you think the bot is down, send it
[this message](http://www.reddit.com/message/compose?to=??????&subject=Ping&message=Ping).
""" # Don't forget to put your username in this message
SCHEDULECOMMENT = """
Your post has been scheduled. Please check that this information is correct:
"""
#Made on the source post when its schedule is first created
ERRORCOMMENT = """
Encountered the following errors:
%s
The post will use placeholder values until you correct the information
_______
"""
ERRORDISTINGUISHFAIL = "Attempted to distinguish post and failed."
ERRORSTICKYFAIL = "Attempted to sticky post and failed."
ERRORDATETIME = '!! DateTime: Could not understand time format, or date is invalid. You entered: `%s`'
ERRORTOOEARLY = '!! DateTime: The time you have entered is before present time. You entered `%s`'
ERRORTITLEFORM = '!! Title: Title expected 3 attributes separated by `' + TITLESEPARATOR + '`'
ERRORLONGTITLE = "!! Title: Your title is too long. Max 300 characters, you have %d"
ERRORSUBREDDIT = '!! Reddit: Subreddit /r/%s could not be found'
ERRORNOTALLOWED = "!! Reddit: Bot is not allowed to submit to /r/%s."
ERRORUNKNOWNCOMMAND = "Did not understand the command: `%s`"
ERRORCRITICAL = '\n\nBecause of a critical post error, your chosen timestamp has been forfeited. You will need to edit it along with the other keys.\n\n'
IMPOSSIBLETIME = 2147483646  # near the 32-bit signed maximum; acts as a "never" placeholder
""" All done! """
try:
import bot
USERAGENT = bot.aG
except ImportError:
pass
print('Loading database')
sql = sqlite3.connect('sql.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS schedules(ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT)')
# 0 1 2 3 4 5 6 7 *
sql.commit()
print('Logging in')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def getTime(bool):
timeNow = datetime.datetime.now(datetime.timezone.utc)
timeUnix = timeNow.timestamp()
if bool == False:
return timeNow
else:
return timeUnix
def processpost(inputpost):
if isinstance(inputpost, str):
if 't3_' not in inputpost:
inputpost = 't3_' + inputpost
inputpost = r.get_info(thing_id=inputpost)
sourceid = inputpost.id
print('Schedulizing post ' + sourceid)
nowstamp = getTime(True)
sourcetitle = inputpost.title
sourcesplit = sourcetitle.split(TITLESEPARATOR)
errors = []
critical = False
dosticky = 0
dodist = 0
try:
posttime = "?"
postsub = "?"
posttitle = "?"
postflair = ""
postflcss = ""
posttime = sourcesplit[0]
postsub = sourcesplit[1]
postsub = postsub.replace('/r/', '')
if '[d]' in postsub.lower():
dodist = 1
if '[s]' in postsub.lower():
dosticky = 1
regex = re.search("\[f:[^\]]*\]", postsub, re.IGNORECASE)
if regex:
postflair = regex.group(0)
postflair = postflair[3:-1]
regex = re.search("\[fc:[^\]]*\]", postsub, re.IGNORECASE)
if regex:
postflcss = regex.group(0)
postflcss = postflcss[4:-1]
elif postflair != "":
postflcss = removespecial(postflair)
postsubsplit = postsub.split(' ')
while '' in postsubsplit:
postsubsplit.remove('')
postsub = postsubsplit[0]
posttitle = '||'.join(sourcesplit[2:])
except IndexError:
errors.append(ERRORTITLEFORM)
critical = True
try:
posttimerender = dateparse(posttime)
posttimerender = posttimerender.replace(tzinfo=datetime.timezone.utc)
posttimestamp = posttimerender.timestamp()
if posttimestamp < nowstamp:
errors.append(ERRORTOOEARLY % posttime)
critical = True
except:
#Placeholder timestamp near the 32-bit maximum (January 2038)
posttimestamp = IMPOSSIBLETIME
errors.append(ERRORDATETIME % posttime)
critical = True
try:
validatesubreddit(postsub)
except:
errors.append(ERRORSUBREDDIT % postsub)
critical = True
#ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT
# 0 1 2 3 4 5 6 7 8
if critical:
posttimestamp = IMPOSSIBLETIME
datalist = [sourceid, posttimestamp, postsub, posttitle, dodist, dosticky, postflair, postflcss, "None"]
cur.execute('SELECT * FROM schedules WHERE ID=?', [sourceid])
fetch = cur.fetchone()
if not fetch:
cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', datalist)
sql.commit()
schedulecomment = buildcomment(datalist, errors, critical)
print('Writing comment')
inputpost.add_comment(schedulecomment)
inputpost.set_flair(flair_text=SCHEDULEDFLAIR_TEXT, flair_css_class=SCHEDULEDFLAIR_CSS)
def updatepost(comment):
source = comment.submission
print('Updating schedule for ' + source.id + ' via comment ' + comment.id)
pauthor = source.author.name
cauthor = comment.author.name
if ALLOWOTHEREDITS or (pauthor == cauthor) or any(pauthor.lower() == admin.lower() for admin in ADMINS):
cur.execute('SELECT * FROM schedules WHERE ID=?', [source.id])
data=cur.fetchone()
if data:
data= list(data)
errors = []
commentsplit = comment.body.split('\n')
while '' in commentsplit:
commentsplit.remove('')
for line in commentsplit:
line = line.split(':')
line[0] = line[0].replace(' ', '')
command = line[0].lower()
arg = ':'.join(line[1:])
if command in ['time', 'timestamp']:
try:
posttimerender = dateparse(arg)
posttimerender = posttimerender.replace(tzinfo=datetime.timezone.utc)
posttimestamp = posttimerender.timestamp()
except:
#Placeholder timestamp near the 32-bit maximum (January 2038)
posttimestamp = IMPOSSIBLETIME
errors.append(ERRORDATETIME % arg)
data[1] = posttimestamp
elif command in ['reddit', 'subreddit', 'sr']:
try:
arg = arg.replace(' ', '')
arg=arg.replace('/r/', '')
validatesubreddit(arg)
except:
#Any error here will be reported by the upcoming `ispostvalid` check
pass
data[2] = arg
elif command in ['title']:
data[3] = arg
elif command in ['distinguish', 'dist', 'd']:
if arg.lower() in ['0', 'no', 'false', 'off']:
arg = 0
if arg.lower() in ['1', 'yes', 'true', 'on']:
arg = 1
data[4] = arg
elif command in ['sticky', 's']:
if arg.lower() in ['0', 'no', 'false', 'off']:
arg = 0
if arg.lower() in ['1', 'yes', 'true', 'on']:
arg = 1
data[5] = arg
elif command in ['flair-text', 'flairtext', 'flair_text']:
data[6] = arg
elif command in ['flair-css', 'flaircss', 'flair_css']:
data[7] = removespecial(arg)
else:
errors.append(ERRORUNKNOWNCOMMAND % command)
print('\tChecking schedule validity')
status = ispostvalid(data, errors)
if status[0] == False:
data[1] = IMPOSSIBLETIME
critical = True
else:
critical = False
schedulecomment = buildcomment(data[:], errors, critical)
print('\tWriting comment')
comment.reply(schedulecomment)
cur.execute('DELETE FROM schedules WHERE ID=?', [source.id])
cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
sql.commit()
print('\tDone.')
else:
print(cauthor + ' may not edit ' + pauthor + "'s post")
def validatesubreddit(sr):
#This will intentionally crash if /r/sr does not exist
sr = sr.replace('/r/', '')
sr = sr.replace('r/', '')
sr = sr.replace('/', '')
r.get_subreddit(sr, fetch=True)
def ispostvalid(inputdata, errors):
nowstamp = getTime(True)
status = True
if inputdata[1] < nowstamp:
n = datetime.datetime.utcfromtimestamp(inputdata[1])
n = datetime.datetime.strftime(n, "%B %d %Y %H:%M")
errors.append(ERRORTOOEARLY % n)
status = False
try:
validatesubreddit(inputdata[2])
except:
print('\tBad subreddit: ' + inputdata[2])
errors.append(ERRORSUBREDDIT % inputdata[2])
status = False
if len(inputdata[3]) > 300:
errors.append(ERRORLONGTITLE % len(inputdata[3]))
status = False
return [status, errors]
def buildcomment(datalist, errors, critical=False):
schedulecomment = SCHEDULECOMMENT
if len(errors) > 0:
errors = "\n\n".join(errors)
schedulecomment = ERRORCOMMENT % errors
if critical:
schedulecomment += ERRORCRITICAL
schedulecomment += buildtable(datalist)
schedulecomment += FOOTER
return schedulecomment
#ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT
# 0 1 2 3 4 5 6 7 8
def buildtable(inputdata):
print(inputdata[1], type(inputdata[1])) #Troubleshooting with Apex
timeobj = datetime.datetime.utcfromtimestamp(inputdata[1])
inputdata[1] = datetime.datetime.strftime(timeobj, "%B %d %Y %H:%M UTC")
inputdata[2] = '/r/' + inputdata[2]
inputdata[3] = '`' + inputdata[3] + '`'
inputdata[4] = "True" if inputdata[4] == 1 else "False"
inputdata[5] = "True" if inputdata[5] == 1 else "False"
inputdata = inputdata[1:-1]
table = """
Key | Value
:- | :-
Time | {0}
Subreddit | {1}
Title | {2}
Distinguish | {3}
Sticky | {4}
Flair-text | {5}
Flair-CSS | {6}
""".format(*inputdata)
return table
def removespecial(inputstr):
ok = string.ascii_letters + string.digits
outstr = "".join([x for x in inputstr if x in ok])
return outstr
def manage_new():
print('Managing ' + SUBREDDIT + '/new')
subreddit = r.get_subreddit(SUBREDDIT)
new = list(subreddit.get_new(limit=MAXPOSTS))
for post in new:
pid = post.id
cur.execute('SELECT * FROM schedules WHERE ID=?', [pid])
if not cur.fetchone():
if post.title[0] != IGNORE_FLAG:
processpost(post)
else:
data = [post.id, 1, "", "", 0, 0, "", "", "meta"]
cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
sql.commit()
def manage_unread():
print('Managing inbox')
inbox = list(r.get_unread(limit=100))
for message in inbox:
if isinstance(message, praw.objects.Message):
if "ping" in message.subject.lower():
message.reply("Pong")
print('Responding to ping')
try:
mauthor = message.author.name
if any(mauthor.lower() == admin.lower() for admin in ADMINS):
if "kill" in message.subject.lower():
alertadmins("Hard shutdown", "The bot is being killed by " + mauthor)
quit()
except AttributeError:
pass
elif isinstance(message, praw.objects.Comment):
commentsub = message.subreddit.display_name
if commentsub.lower() == SUBREDDIT.lower():
updatepost(message)
message.mark_as_read()
def manage_schedule():
print('Managing schedules')
cur.execute('SELECT * FROM schedules WHERE POST =?', ['None'])
fetch = cur.fetchall()
fetch = list(fetch)
fetch.sort(key=lambda x: x[1])
reread = False
idlist = ['t3_'+i[0] for i in fetch]
submissionlist = []
print('Checking for deletions')
while len(idlist) > 0:
submissionlist += r.get_info(thing_id=idlist[:100])
idlist = idlist[100:]
for item in submissionlist:
if (not item.author) or (item.banned_by):
print('\t' + item.id + ' has been deleted')
cur.execute('DELETE FROM schedules WHERE ID=?', [item.id])
sql.commit()
reread = True
if reread:
cur.execute('SELECT * FROM schedules WHERE POST =?', ['None'])
fetch = cur.fetchall()
fetch = list(fetch)
fetch.sort(key=lambda x: x[1])
nowstamp = getTime(True)
for schedule in fetch:
postid = schedule[0]
print('Checking schedule ' + postid, end="")
posttime = int(schedule[1])
if posttime < nowstamp:
print()
print('\tPreparing to post')
post = r.get_info(thing_id="t3_" + postid)
ptitle = schedule[3]
psub = schedule[2]
print('\tSubmitting post')
try:
if post.is_self:
pbody = post.selftext
newpost = r.submit(psub, ptitle, text=pbody)
else:
purl = post.url
newpost = r.submit(psub, ptitle, url=purl, resubmit=True)
errors = []
if schedule[4] == 1:
try:
print('\tDistinguishing')
newpost.distinguish()
except:
print('\tDistinguish failed')
errors.append(ERRORDISTINGUISHFAIL)
if schedule[5] == 1:
try:
print('\tStickying')
newpost.sticky()
except:
print('\tSticky failed')
errors.append(ERRORSTICKYFAIL)
if schedule[6] != "" or schedule[7] != "":
try:
print('\tFlairing')
newpost.set_flair(flair_text=schedule[6], flair_css_class=schedule[7])
except:
print('\tFlair failed')
newsub = newpost.subreddit.display_name
newlink = newpost.short_link
newid = newpost.id
newcomment = POSTEDCOMMENT % (newsub, newlink)
newcomment += '\n\n'.join(errors)
cur.execute('UPDATE schedules SET POST=? WHERE ID=?', [newid, postid])
sql.commit()
print('Flairing source.')
post.add_comment(newcomment)
post.set_flair(flair_text=POSTEDFLAIR_TEXT, flair_css_class=POSTEDFLAIR_CSS)
except praw.errors.APIException as error:
if error.error_type == "SUBREDDIT_NOTALLOWED":
print("\tNOT ALLOWED IN SUBREDDIT!")
cur.execute('UPDATE schedules SET TIME=? WHERE ID=?', [IMPOSSIBLETIME, postid])
sql.commit()
scheduledata = list(schedule)
scheduledata[1] = IMPOSSIBLETIME
comment=buildcomment(scheduledata, [ERRORNOTALLOWED%psub], critical=True)
post.add_comment(comment)
else:
print(" : T-" + str(round(posttime - nowstamp)))
def alertadmins(messagesubject, messagetext):
for admin in ADMINS:
print('Messaging ' + admin)
try:
r.send_message(admin, messagesubject, messagetext)
except:
print('COULD NOT MESSAGE ADMIN')
while True:
try:
manage_new()
manage_unread()
manage_schedule()
except Exception as e:
error_message = traceback.format_exc()
print(error_message)
now = getTime(False)
now = datetime.datetime.strftime(now, "%B %d %H:%M:%S UTC")
error_message = ' ' + error_message
error_message = error_message.replace('\n', '\n ')
error_message += '\n' + str(now)
alertadmins(TRACEBACK_SUBJECT, error_message)
print("Sleeping\n")
time.sleep(WAIT)
|
|
import os
import re
import glob
import logging
import pattern.web
import pages_scrape
import mongo_connection
from goose import Goose
from pymongo import MongoClient
from ConfigParser import ConfigParser
from multiprocessing import Pool
# Initialize Logger
logger = None
def scrape_func(website, lang, address, COLL, db_auth, db_user, db_pass, db_host=None):
"""
Function to scrape various RSS feeds.
Parameters
----------
website: String.
Nickname for the RSS feed being scraped.
lang: String.
Language of the feed; selects the appropriate text extractor.
address: String.
Address for the RSS feed to scrape.
COLL: String.
Collection within MongoDB that holds the scraped data.
db_auth: String.
MongoDB database that should be used for user authentication.
db_user: String.
Username for MongoDB authentication.
db_pass: String.
Password for MongoDB authentication.
db_host: String, optional.
Hostname of the MongoDB instance to connect to.
"""
# Setup the database
if db_host:
connection = MongoClient(host=db_host)
else:
connection = MongoClient()
if db_auth:
connection[db_auth].authenticate(db_user, db_pass)
db = connection.event_scrape
collection = db[COLL]
# Scrape the RSS feed
results = get_rss(address, website)
# Pursue each link in the feed
if results:
parse_results(results, website, lang, collection)
logger.info('Scrape of {} finished'.format(website))
def get_rss(address, website):
"""
Function to parse an RSS feed and extract the relevant links.
Parameters
----------
address: String.
Address for the RSS feed to scrape.
website: String.
Nickname for the RSS feed being scraped.
Returns
-------
results : pattern.web.Results.
Object containing data on the parsed RSS feed. Each item
represents a unique entry in the RSS feed and contains relevant
information such as the URL and title of the story.
"""
try:
results = pattern.web.Newsfeed().search(address, count=100,
cached=False, timeout=30)
logger.debug('There are {} results from {}'.format(len(results),
website))
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem fetching RSS feed for {}. {}'.format(address,
e))
results = None
return results
def parse_results(rss_results, website, lang, db_collection):
"""
Function to parse the links drawn from an RSS feed.
Parameters
----------
rss_results: pattern.web.Results.
Object containing data on the parsed RSS feed. Each item
represents a unique entry in the RSS feed and contains
relevant information such as the URL and title of the
story.
website: String.
Nickname for the RSS feed being scraped.
db_collection: pymongo Collection.
Collection within MongoDB in which results are stored.
"""
if lang == 'english':
goose_extractor = Goose({'use_meta_language': False,
'target_language': 'en',
'enable_image_fetching': False})
elif lang == 'arabic':
from goose.text import StopWordsArabic
goose_extractor = Goose({'stopwords_class': StopWordsArabic,
'enable_image_fetching': False})
else:
print(lang)
for result in rss_results:
page_url = _convert_url(result.url, website)
in_database = _check_mongo(page_url, db_collection)
if not in_database:
try:
text, meta = pages_scrape.scrape(page_url, goose_extractor)
text = text.encode('utf-8')
except TypeError:
logger.warning('Problem obtaining text from URL: {}'.format(page_url))
text = ''
else:
logger.debug('Result from {} already in database'.format(page_url))
text = ''
if text:
cleaned_text = _clean_text(text, website)
entry_id = mongo_connection.add_entry(db_collection, cleaned_text,
result.title, result.url,
result.date, website, lang)
if entry_id:
try:
logger.info('Added entry from {} with id {}'.format(page_url,
entry_id))
except UnicodeDecodeError:
logger.info('Added entry from {}. Unicode error for id'.format(result.url))
def _check_mongo(url, db_collection):
"""
Private function to check if a URL appears in the database.
Parameters
----------
url: String.
URL for the news stories to be scraped.
db_collection: pymongo Collection.
Collection within MongoDB in which results are stored.
Returns
-------
found: Boolean.
Indicates whether or not a URL was found in the database.
"""
if db_collection.find_one({"url": url}):
found = True
else:
found = False
return found
def _convert_url(url, website):
"""
Private function to clean a given page URL.
Parameters
----------
url: String.
URL for the news stories to be scraped.
website: String.
Nickname for the RSS feed being scraped.
Returns
-------
page_url: String.
Cleaned and unicode converted page URL.
"""
if website == 'xinhua':
page_url = url.replace('"', '')
page_url = page_url.encode('ascii')
elif website == 'upi':
page_url = url.encode('ascii')
elif website == 'zaman':
# Find the weird thing. They tend to be ap or reuters, but generalized
# just in case
com = url.find('.com')
slash = url[com + 4:].find('/')
replaced_url = url.replace(url[com + 4:com + slash + 4], '')
split = replaced_url.split('/')
# This is nasty and hackish but it gets the job done.
page_url = '/'.join(['/'.join(split[0:3]), 'world_' + split[-1]])
else:
page_url = url.encode('utf-8')
return page_url
def _clean_text(text, website):
"""
Private function to clean some of the cruft from the content pulled from
various sources.
Parameters
----------
text: String.
Dirty text.
website: String.
Nickname for the RSS feed being scraped.
Returns
-------
text: String.
Less dirty text.
"""
site_list = ['menafn_algeria', 'menafn_bahrain', 'menafn_egypt',
'menafn_iraq', 'menafn_jordan', 'menafn_kuwait',
'menafn_lebanon', 'menafn_morocco', 'menafn_oman',
'menafn_palestine', 'menafn_qatar', 'menafn_saudi',
'menafn_syria', 'menafn_tunisia', 'menafn_turkey',
'menafn_uae', 'menafn_yemen']
if website == 'bbc':
text = text.replace(
"This page is best viewed in an up-to-date web browser with style sheets (CSS) "
"enabled. While you will be able to view the content of this page in your current "
"browser, you will not be able to get the full visual experience. Please consider "
"upgrading your browser software or enabling style sheets (CSS) if you are able to do "
"so.",
'')
if website == 'almonitor':
text = re.sub("^.*?\(photo by REUTERS.*?\)", "", text)
if website in site_list:
text = re.sub("^\(.*?MENAFN.*?\)", "", text)
elif website == 'upi':
text = text.replace(
"Since 1907, United Press International (UPI) has been a leading provider of critical "
"information to media outlets, businesses, governments and researchers worldwide. UPI "
"is a global operation with offices in Beirut, Hong Kong, London, Santiago, Seoul and "
"Tokyo. Our headquarters is located in downtown Washington, DC, surrounded by major "
"international policy-making governmental and non-governmental organizations. UPI "
"licenses content directly to print outlets, online media and institutions of all "
"types. In addition, UPI's distribution partners provide our content to thousands of "
"businesses, policy groups and academic institutions worldwide. Our audience consists "
"of millions of decision-makers who depend on UPI's insightful and analytical stories "
"to make better business or policy decisions. In the year of our 107th anniversary, "
"our company strives to continue being a leading and trusted source for news, "
"analysis and insight for readers around the world.",
'')
text = text.replace('\n', ' ')
return text
def call_scrape_func(siteList, db_collection, pool_size, db_auth, db_user,
db_pass, db_host=None):
"""
Helper function to iterate over a list of RSS feeds and scrape each.
Parameters
----------
siteList: dictionary
Dictionary of sites, with a nickname as the key and an
[RSS URL, language] pair as the value.
db_collection : collection
Mongo collection to put stories
pool_size : int
Number of processes to distribute work
"""
pool = Pool(pool_size)
results = [pool.apply_async(scrape_func, (website, lang, address,
db_collection, db_auth, db_user,
db_pass, db_host))
for website, (address, lang) in siteList.iteritems()]
[r.get(9999999) for r in results]
pool.terminate()
logger.info('Completed full scrape.')
def _parse_config(parser):
try:
if 'Auth' in parser.sections():
auth_db = parser.get('Auth', 'auth_db')
auth_user = parser.get('Auth', 'auth_user')
auth_pass = parser.get('Auth', 'auth_pass')
db_host = parser.get('Auth', 'db_host')
else:
# Try env vars too
auth_db = os.getenv('MONGO_AUTH_DB') or ''
auth_user = os.getenv('MONGO_AUTH_USER') or ''
auth_pass = os.getenv('MONGO_AUTH_PASS') or ''
db_host = os.getenv('MONGO_HOST') or ''
log_dir = parser.get('Logging', 'log_file')
log_level = parser.get('Logging', 'level')
collection = parser.get('Database', 'collection_list')
whitelist = parser.get('URLS', 'file')
sources = parser.get('URLS', 'sources').split(',')
pool_size = int(parser.get('Processes', 'pool_size'))
return collection, whitelist, sources, pool_size, log_dir, log_level, auth_db, auth_user, \
auth_pass, db_host
except Exception, e:
print 'Problem parsing config file. {}'.format(e)
def parse_config():
"""Function to parse the config file."""
config_file = glob.glob('config.ini')
parser = ConfigParser()
if config_file:
parser.read(config_file)
else:
cwd = os.path.abspath(os.path.dirname(__file__))
config_file = os.path.join(cwd, 'default_config.ini')
parser.read(config_file)
return _parse_config(parser)
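# A minimal config.ini sketch, assuming only the section and option names
# read by _parse_config above (all values shown are placeholders):
#
#   [Auth]
#   auth_db =
#   auth_user =
#   auth_pass =
#   db_host =
#
#   [Logging]
#   log_file = scraping.log
#   level = info
#
#   [Database]
#   collection_list = stories
#
#   [URLS]
#   file = whitelist_urls.csv
#   sources = bbc,upi
#
#   [Processes]
#   pool_size = 4
#
# run_scraper() below appears to expect the whitelist file to be a CSV with
# one "nickname,rss_url,source_tag,language" line per feed.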
def run_scraper():
global logger
# Get the info from the config
db_collection, whitelist_file, sources, pool_size, log_dir, log_level, auth_db, auth_user, \
auth_pass, db_host = parse_config()
# Setup the logging
logger = logging.getLogger('scraper_log')
if log_level == 'info':
logger.setLevel(logging.INFO)
elif log_level == 'warning':
logger.setLevel(logging.WARNING)
elif log_level == 'debug':
logger.setLevel(logging.DEBUG)
if log_dir:
fh = logging.FileHandler(log_dir, 'a')
else:
fh = logging.FileHandler('scraping.log', 'a')
formatter = logging.Formatter('%(levelname)s %(asctime)s: %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('Running in scheduled hourly mode')
print 'Running. See log file for further information.'
# Convert from CSV of URLs to a dictionary
try:
url_whitelist = open(whitelist_file, 'r').readlines()
url_whitelist = [line.replace('\n', '').split(',') for line in
url_whitelist if line]
# Filtering based on list of sources from the config file
to_scrape = {listing[0]: [listing[1], listing[3]] for listing in
url_whitelist if listing[2] in sources}
except IOError:
print 'There was an error. Check the log file for more information.'
logger.warning('Could not open URL whitelist file.')
raise
call_scrape_func(to_scrape, db_collection, pool_size, auth_db, auth_user,
auth_pass, db_host=db_host)
logger.info('All done.')
if __name__ == '__main__':
run_scraper()
|
|
from construct import *
from construct import adapters
from sflow.protocol import utils
class IPv4Header(object):
def __init__(self, data):
ip = Struct("ip_header",
EmbeddedBitStruct(
Const(Nibble("version"), 4),
Nibble("header_length"),
),
BitStruct("tos",
Bits("precedence", 3),
Flag("minimize_delay"),
Flag("high_throuput"),
Flag("high_reliability"),
Flag("minimize_cost"),
Padding(1),
),
UBInt16("total_length"),
UBInt16("id"),
UBInt16("flags"),
UBInt8("ttl"),
Enum(UBInt8("proto"),
UDP=0x11,
TCP=0x06
),
UBInt16("checksum"),
UBInt32("src"),
UBInt32("dst"),
)
self.ip = ip.parse(data[:ip.sizeof()])
self.ip.src = utils.IPv4Address(self.ip.src)
self.ip.dst = utils.IPv4Address(self.ip.dst)
data = data[ip.sizeof():]
if self.ip.proto == 'TCP':
self.tcp = Struct("tcp",
UBInt16("sport"),
UBInt16("dport"),
).parse(data)
self.ip_sport = self.tcp.sport
self.ip_dport = self.tcp.dport
if self.ip.proto == 'UDP':
self.udp = Struct("tcp",
UBInt16("sport"),
UBInt16("dport"),
).parse(data)
self.ip_sport = self.udp.sport
self.ip_dport = self.udp.dport
class ISO8023Header(object):
def __init__(self, data):
frame = Struct("Frame",
Bytes("destination", 6),
Bytes("source", 6),
Enum(UBInt16("type"),
IPv4=0x0800,
ARP=0x0806,
RARP=0x8035,
X25=0x0805,
IPX=0x8137,
IPv6=0x86DD,
VLAN=0x8100
)
)
try:
ethernet = frame.parse(data[:14])
except adapters.MappingError:
print "Broken ethernet header"
self.frame = None
print repr(data)
return
data = data[14:]
self.src_mac = ethernet.source
self.dst_mac = ethernet.destination
if ethernet.type == 'VLAN':
# The 802.1Q tag control field is 16 bits: 3-bit priority, 1-bit DEI, 12-bit VLAN ID
d = (ord(data[0]) << 8) | ord(data[1])
self.vlan = d & 0x0fff
self.vlan_priority = d >> 13
elif ethernet.type == 'IPv4':
ipv4 = IPv4Header(data)
self.ip = ipv4.ip
self.ip_sport = ipv4.ip_sport
self.ip_dport = ipv4.ip_dport
else:
print ethernet.type, repr(data)
class IPv6Header(object):
def __init__(self, u):
pass
class IEEE80211MACHeader(object):
def __init__(self, u):
pass
class PPPHeader(object):
def __init__(self, u):
pass
class HeaderSample(object):
def __init__(self, u):
self.protocol = u.unpack_uint()
self.frame_len = u.unpack_uint()
self.payload_removed = u.unpack_uint()
self.sample_header = u.unpack_string()
self.samplers = {
1: ISO8023Header,
7: PPPHeader,
11: IPv4Header,
12: IPv6Header
}
if self.samplers.get(self.protocol):
self.frame = self.samplers[self.protocol](
self.sample_header
)
else:
print "Unknown protocol:", self.protocol
self.frame = None
class EthernetSample(object):
def __init__(self, u):
self.length = u.unpack_uint()
self.src_mac = u.unpack_fopaque(6)
self.dst_mac = u.unpack_fopaque(6)
self.type = u.unpack_uint()
class IPV4Sample(object):
def __init__(self, u):
self.length = u.unpack_uint()
self.protocol = u.unpack_uint()
self.src_ip = u.unpack_fstring(4)
self.dst_ip = u.unpack_fstring(4)
self.src_port = u.unpack_uint()
self.dst_port = u.unpack_uint()
self.tcp_flags = u.unpack_uint()
self.tos = u.unpack_uint()
class IPV6Sample(object):
def __init__(self, u):
self.length = u.unpack_uint()
self.protocol = u.unpack_uint()
self.src_ip = u.unpack_fstring(16)
self.dst_ip = u.unpack_fstring(16)
self.src_port = u.unpack_uint()
self.dst_port = u.unpack_uint()
self.tcp_flags = u.unpack_uint()
self.priority = u.unpack_uint()
class SwitchSample(object):
def __init__(self, u):
self.src_vlan = u.unpack_uint()
self.src_priority = u.unpack_uint()
self.dst_vlan = u.unpack_uint()
self.dst_priority = u.unpack_uint()
class RouterSample(object):
def __init__(self, u):
self.next_hop = utils.unpack_address(u)
self.src_mask_len = u.unpack_uint()
self.dst_mask_len = u.unpack_uint()
class GatewaySample(object):
def __init__(self, u):
self.next_hop = utils.unpack_address(u)
self.asn = u.unpack_uint()
self.src_as = u.unpack_uint()
self.src_peer_as = u.unpack_uint()
self.as_path_type = u.unpack_uint()
self.as_path = u.unpack_array(u.unpack_uint)
self.communities = u.unpack_array(u.unpack_uint)
self.localpref = u.unpack_uint()
class UserSample(object):
def __init__(self, u):
self.src_charset = u.unpack_uint()
self.src_user = u.unpack_string()
self.dst_charset = u.unpack_uint()
self.dst_user = u.unpack_string()
class URLSample(object):
def __init__(self, u):
self.url_direction = u.unpack_uint()
self.url = u.unpack_string()
self.host = u.unpack_string()
class MPLSSample(object):
def __init__(self, u):
self.next_hop = utils.unpack_address(u)
self.in_stack = u.unpack_array(u.unpack_uint)
self.out_stack = u.unpack_array(u.unpack_uint)
class NATSample(object):
def __init__(self, u):
self.src_address = utils.unpack_address(u)
self.dst_address = utils.unpack_address(u)
class MPLSTunnelSample(object):
def __init__(self, u):
self.tunnel_lsp_name = u.unpack_string()
self.tunnel_id = u.unpack_uint()
self.tunnel_cos = u.unpack_uint()
class MPLSVCSample(object):
def __init__(self, u):
self.vc_instance_name = u.unpack_string()
self.vc_id = u.unpack_uint()
self.vc_cos = u.unpack_uint()
class MPLSFTNSample(object):
def __init__(self, u):
self.mplsFTNDescr = u.unpack_string()
self.mplsFTNMask = u.unpack_uint()
class MPLSLDPFECSample(object):
def __init__(self, u):
self.mplsFecAddrPrefixLength = u.unpack_uint()
class VLANTunnelSample(object):
def __init__(self, u):
self.stack = u.unpack_array(u.unpack_uint)
def getDecoder(format):
decoders = {
1: HeaderSample,
2: EthernetSample,
3: IPV4Sample,
4: IPV6Sample,
1001: SwitchSample,
1002: RouterSample,
1003: GatewaySample,
1004: UserSample,
1005: URLSample,
1006: MPLSSample,
1007: NATSample,
1008: MPLSTunnelSample,
1009: MPLSVCSample,
1010: MPLSFTNSample,
1011: MPLSLDPFECSample,
1012: VLANTunnelSample
}
return decoders.get(format, None)
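# Hedged usage sketch: callers are assumed to hold the sample record's format
# number plus an XDR-style unpacker (the `u` objects above expose unpack_uint,
# unpack_string, and friends). `record_format` and `unpacker` are illustrative
# names, not part of this module:
#
#   decoder = getDecoder(record_format)
#   if decoder is not None:
#       sample = decoder(unpacker)
#   else:
#       print "Unknown record format:", record_format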
|
|
"""Allow to set up simple automation rules via the config file."""
import asyncio
import importlib
import logging
from typing import Any, Awaitable, Callable, List, Optional, Set
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_DEVICE_ID,
CONF_ENTITY_ID,
CONF_ID,
CONF_PLATFORM,
CONF_ZONE,
EVENT_HOMEASSISTANT_STARTED,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import Context, CoreState, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition, extract_domain_configs, script
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util.dt import parse_datetime, utcnow
# mypy: allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs, no-warn-return-any
DOMAIN = "automation"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
GROUP_NAME_ALL_AUTOMATIONS = "all automations"
CONF_ALIAS = "alias"
CONF_DESCRIPTION = "description"
CONF_HIDE_ENTITY = "hide_entity"
CONF_CONDITION = "condition"
CONF_ACTION = "action"
CONF_TRIGGER = "trigger"
CONF_CONDITION_TYPE = "condition_type"
CONF_INITIAL_STATE = "initial_state"
CONF_SKIP_CONDITION = "skip_condition"
CONDITION_USE_TRIGGER_VALUES = "use_trigger_values"
CONDITION_TYPE_AND = "and"
CONDITION_TYPE_NOT = "not"
CONDITION_TYPE_OR = "or"
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_INITIAL_STATE = True
EVENT_AUTOMATION_RELOADED = "automation_reloaded"
EVENT_AUTOMATION_TRIGGERED = "automation_triggered"
ATTR_LAST_TRIGGERED = "last_triggered"
ATTR_VARIABLES = "variables"
SERVICE_TRIGGER = "trigger"
_LOGGER = logging.getLogger(__name__)
AutomationActionType = Callable[[HomeAssistant, TemplateVarsType], Awaitable[None]]
def _platform_validator(config):
"""Validate it is a valid platform."""
try:
platform = importlib.import_module(f".{config[CONF_PLATFORM]}", __name__)
except ImportError:
raise vol.Invalid("Invalid platform specified") from None
return platform.TRIGGER_SCHEMA(config)
_TRIGGER_SCHEMA = vol.All(
cv.ensure_list,
[
vol.All(
vol.Schema({vol.Required(CONF_PLATFORM): str}, extra=vol.ALLOW_EXTRA),
_platform_validator,
)
],
)
_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HIDE_ENTITY, invalidation_version="0.110"),
vol.Schema(
{
# str on purpose
CONF_ID: str,
CONF_ALIAS: cv.string,
vol.Optional(CONF_DESCRIPTION): cv.string,
vol.Optional(CONF_INITIAL_STATE): cv.boolean,
vol.Optional(CONF_HIDE_ENTITY): cv.boolean,
vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
}
),
)
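# Illustrative only: a minimal config block that would satisfy PLATFORM_SCHEMA
# above. CONF_ID/CONF_ALIAS/CONF_TRIGGER/CONF_ACTION resolve to the plain keys
# used here; the trigger and action payloads depend on the referenced platform
# and script schemas, so their contents are assumptions:
#
#   {
#       "id": "1585074000",
#       "alias": "Example automation",
#       "trigger": [{"platform": "sun", "event": "sunset"}],
#       "action": [{"service": "light.turn_on"}],
#   }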
@bind_hass
def is_on(hass, entity_id):
"""
Return true if specified automation entity_id is on.
Async friendly.
"""
return hass.states.is_state(entity_id, STATE_ON)
@callback
def automations_with_entity(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all automations that reference the entity."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
automation_entity.entity_id
for automation_entity in component.entities
if entity_id in automation_entity.referenced_entities
]
@callback
def entities_in_automation(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all entities in a scene."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
automation_entity = component.get_entity(entity_id)
if automation_entity is None:
return []
return list(automation_entity.referenced_entities)
@callback
def automations_with_device(hass: HomeAssistant, device_id: str) -> List[str]:
"""Return all automations that reference the device."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
return [
automation_entity.entity_id
for automation_entity in component.entities
if device_id in automation_entity.referenced_devices
]
@callback
def devices_in_automation(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all devices in a scene."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
automation_entity = component.get_entity(entity_id)
if automation_entity is None:
return []
return list(automation_entity.referenced_devices)
async def async_setup(hass, config):
"""Set up the automation."""
hass.data[DOMAIN] = component = EntityComponent(_LOGGER, DOMAIN, hass)
await _async_process_config(hass, config, component)
async def trigger_service_handler(entity, service_call):
"""Handle automation triggers."""
await entity.async_trigger(
service_call.data[ATTR_VARIABLES],
skip_condition=service_call.data[CONF_SKIP_CONDITION],
context=service_call.context,
)
component.async_register_entity_service(
SERVICE_TRIGGER,
{
vol.Optional(ATTR_VARIABLES, default={}): dict,
vol.Optional(CONF_SKIP_CONDITION, default=True): bool,
},
trigger_service_handler,
)
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
async def reload_service_handler(service_call):
"""Remove all automations and load new ones from config."""
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
hass.bus.async_fire(EVENT_AUTOMATION_RELOADED, context=service_call.context)
async_register_admin_service(
hass, DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
)
return True
class AutomationEntity(ToggleEntity, RestoreEntity):
"""Entity to show status of entity."""
def __init__(
self,
automation_id,
name,
trigger_config,
cond_func,
action_script,
initial_state,
):
"""Initialize an automation entity."""
self._id = automation_id
self._name = name
self._trigger_config = trigger_config
self._async_detach_triggers = None
self._cond_func = cond_func
self.action_script = action_script
self._last_triggered = None
self._initial_state = initial_state
self._is_enabled = False
self._referenced_entities: Optional[Set[str]] = None
self._referenced_devices: Optional[Set[str]] = None
@property
def name(self):
"""Name of the automation."""
return self._name
@property
def unique_id(self):
"""Return unique ID."""
return self._id
@property
def should_poll(self):
"""No polling needed for automation entities."""
return False
@property
def state_attributes(self):
"""Return the entity state attributes."""
return {ATTR_LAST_TRIGGERED: self._last_triggered}
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._async_detach_triggers is not None or self._is_enabled
@property
def referenced_devices(self):
"""Return a set of referenced devices."""
if self._referenced_devices is not None:
return self._referenced_devices
referenced = self.action_script.referenced_devices
if self._cond_func is not None:
for conf in self._cond_func.config:
referenced |= condition.async_extract_devices(conf)
for conf in self._trigger_config:
device = _trigger_extract_device(conf)
if device is not None:
referenced.add(device)
self._referenced_devices = referenced
return referenced
@property
def referenced_entities(self):
"""Return a set of referenced entities."""
if self._referenced_entities is not None:
return self._referenced_entities
referenced = self.action_script.referenced_entities
if self._cond_func is not None:
for conf in self._cond_func.config:
referenced |= condition.async_extract_entities(conf)
for conf in self._trigger_config:
for entity_id in _trigger_extract_entities(conf):
referenced.add(entity_id)
self._referenced_entities = referenced
return referenced
async def async_added_to_hass(self) -> None:
"""Startup with initial state or previous state."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
enable_automation = state.state == STATE_ON
last_triggered = state.attributes.get("last_triggered")
if last_triggered is not None:
self._last_triggered = parse_datetime(last_triggered)
_LOGGER.debug(
"Loaded automation %s with state %s from state "
" storage last state %s",
self.entity_id,
enable_automation,
state,
)
else:
enable_automation = DEFAULT_INITIAL_STATE
_LOGGER.debug(
"Automation %s not in state storage, state %s from default is used.",
self.entity_id,
enable_automation,
)
if self._initial_state is not None:
enable_automation = self._initial_state
_LOGGER.debug(
"Automation %s initial state %s overridden from "
"config initial_state",
self.entity_id,
enable_automation,
)
if enable_automation:
await self.async_enable()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on and update the state."""
await self.async_enable()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
await self.async_disable()
async def async_trigger(self, variables, skip_condition=False, context=None):
"""Trigger automation.
This method is a coroutine.
"""
if (
not skip_condition
and self._cond_func is not None
and not self._cond_func(variables)
):
return
# Create a new context referring to the old context.
parent_id = None if context is None else context.id
trigger_context = Context(parent_id=parent_id)
self.async_set_context(trigger_context)
self._last_triggered = utcnow()
self.async_write_ha_state()
self.hass.bus.async_fire(
EVENT_AUTOMATION_TRIGGERED,
{ATTR_NAME: self._name, ATTR_ENTITY_ID: self.entity_id},
context=trigger_context,
)
_LOGGER.info("Executing %s", self._name)
try:
await self.action_script.async_run(variables, trigger_context)
except Exception: # pylint: disable=broad-except
pass
async def async_will_remove_from_hass(self):
"""Remove listeners when removing automation from Home Assistant."""
await super().async_will_remove_from_hass()
await self.async_disable()
async def async_enable(self):
"""Enable this automation entity.
This method is a coroutine.
"""
if self._is_enabled:
return
self._is_enabled = True
# HomeAssistant is starting up
if self.hass.state != CoreState.not_running:
self._async_detach_triggers = await self._async_attach_triggers(False)
self.async_write_ha_state()
return
async def async_enable_automation(event):
"""Start automation on startup."""
# Don't do anything if no longer enabled or already attached
if not self._is_enabled or self._async_detach_triggers is not None:
return
self._async_detach_triggers = await self._async_attach_triggers(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, async_enable_automation
)
self.async_write_ha_state()
async def async_disable(self):
"""Disable the automation entity."""
if not self._is_enabled:
return
self._is_enabled = False
if self._async_detach_triggers is not None:
self._async_detach_triggers()
self._async_detach_triggers = None
self.async_write_ha_state()
async def _async_attach_triggers(
self, home_assistant_start: bool
) -> Optional[Callable[[], None]]:
"""Set up the triggers."""
info = {"name": self._name, "home_assistant_start": home_assistant_start}
triggers = []
for conf in self._trigger_config:
platform = importlib.import_module(f".{conf[CONF_PLATFORM]}", __name__)
triggers.append(
platform.async_attach_trigger( # type: ignore
self.hass, conf, self.async_trigger, info
)
)
results = await asyncio.gather(*triggers)
if None in results:
_LOGGER.error("Error setting up trigger %s", self._name)
removes = [remove for remove in results if remove is not None]
if not removes:
return None
_LOGGER.info("Initialized trigger %s", self._name)
@callback
def remove_triggers():
"""Remove attached triggers."""
for remove in removes:
remove()
return remove_triggers
@property
def device_state_attributes(self):
"""Return automation attributes."""
if self._id is None:
return None
return {CONF_ID: self._id}
async def _async_process_config(hass, config, component):
"""Process config and add automations.
This method is a coroutine.
"""
entities = []
for config_key in extract_domain_configs(config, DOMAIN):
conf = config[config_key]
for list_no, config_block in enumerate(conf):
automation_id = config_block.get(CONF_ID)
name = config_block.get(CONF_ALIAS) or f"{config_key} {list_no}"
initial_state = config_block.get(CONF_INITIAL_STATE)
action_script = script.Script(
hass, config_block.get(CONF_ACTION, {}), name, logger=_LOGGER
)
if CONF_CONDITION in config_block:
cond_func = await _async_process_if(hass, config, config_block)
if cond_func is None:
continue
else:
cond_func = None
entity = AutomationEntity(
automation_id,
name,
config_block[CONF_TRIGGER],
cond_func,
action_script,
initial_state,
)
entities.append(entity)
if entities:
await component.async_add_entities(entities)
async def _async_process_if(hass, config, p_config):
"""Process if checks."""
if_configs = p_config[CONF_CONDITION]
checks = []
for if_config in if_configs:
try:
checks.append(await condition.async_from_config(hass, if_config, False))
except HomeAssistantError as ex:
_LOGGER.warning("Invalid condition: %s", ex)
return None
def if_action(variables=None):
"""AND all conditions."""
return all(check(hass, variables) for check in checks)
if_action.config = if_configs
return if_action
@callback
def _trigger_extract_device(trigger_conf: dict) -> Optional[str]:
"""Extract devices from a trigger config."""
if trigger_conf[CONF_PLATFORM] != "device":
return None
return trigger_conf[CONF_DEVICE_ID]
@callback
def _trigger_extract_entities(trigger_conf: dict) -> List[str]:
"""Extract entities from a trigger config."""
if trigger_conf[CONF_PLATFORM] in ("state", "numeric_state"):
return trigger_conf[CONF_ENTITY_ID]
if trigger_conf[CONF_PLATFORM] == "zone":
return trigger_conf[CONF_ENTITY_ID] + [trigger_conf[CONF_ZONE]]
if trigger_conf[CONF_PLATFORM] == "geo_location":
return [trigger_conf[CONF_ZONE]]
if trigger_conf[CONF_PLATFORM] == "sun":
return ["sun.sun"]
return []
|
|
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
import os
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova import objects
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt import osinfo
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
cfg.BoolOpt('use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')
# vhostuser queues support
MIN_LIBVIRT_VHOSTUSER_MQ = (1, 2, 17)
def is_vif_model_valid_for_virt(virt_type, vif_model):
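    """Check whether the requested NIC model is valid for the given virt type."""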
valid_models = {
'qemu': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'kvm': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'xen': [network_model.VIF_MODEL_NETFRONT,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000],
'lxc': [],
'uml': [],
}
if vif_model is None:
return True
if virt_type not in valid_models:
raise exception.UnsupportedVirtType(virt=virt_type)
return vif_model in valid_models[virt_type]
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
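        # No explicit device name was supplied; derive one from the port id,
        # truncated to the kernel interface-name length limit.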
return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
def get_base_config(self, instance, vif, image_meta,
inst_type, virt_type):
conf = vconfig.LibvirtConfigGuestInterface()
# Default to letting libvirt / the hypervisor choose the model
model = None
driver = None
vhost_queues = None
# If the user has specified a 'vif_model' against the
# image then honour that model
if image_meta:
model = osinfo.HardwareProperties(image_meta).network_model
# Else if the virt type is KVM/QEMU, use virtio according
# to the global config parameter
if (model is None and
virt_type in ('kvm', 'qemu') and
CONF.libvirt.use_virtio_for_bridges):
model = network_model.VIF_MODEL_VIRTIO
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if (model == network_model.VIF_MODEL_VIRTIO and
virt_type == "qemu"):
driver = "qemu"
if not is_vif_model_valid_for_virt(virt_type,
model):
raise exception.UnsupportedHardware(model=model,
virt=virt_type)
if (virt_type == 'kvm' and
model == network_model.VIF_MODEL_VIRTIO):
vhost_drv, vhost_queues = self._get_virtio_mq_settings(image_meta,
inst_type)
driver = vhost_drv or driver
designer.set_vif_guest_frontend_config(
conf, vif['address'], model, driver, vhost_queues)
return conf
def _get_virtio_mq_settings(self, image_meta, flavor):
"""A methods to set the number of virtio queues,
if it has been requested in extra specs.
"""
driver = None
vhost_queues = None
if not isinstance(image_meta, objects.ImageMeta):
image_meta = objects.ImageMeta.from_dict(image_meta)
img_props = image_meta.properties
if img_props.get('hw_vif_multiqueue_enabled'):
driver = 'vhost'
vhost_queues = flavor.vcpus
return (driver, vhost_queues)
def get_bridge_name(self, vif):
return vif['network']['bridge']
def get_ovs_interfaceid(self, vif):
return vif.get('ovs_interfaceid') or vif['id']
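    # Naming convention used by the hybrid plug strategy: "qbr" is the per-VIF
    # Linux bridge, "qvb" the bridge-side veth end and "qvo" the switch-side
    # veth end, each truncated to the kernel interface-name length limit.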
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_firewall_required(self, vif):
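        # Nova only needs to wire in its own firewall (and hence a hybrid plug)
        # when the Neutron port does not already do its own filtering and a
        # non-noop firewall driver is configured.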
if vif.is_neutron_filtering_enabled():
return False
if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver":
return True
return False
def get_config_bridge(self, instance, vif, image_meta,
inst_type, virt_type, host):
"""Get VIF configurations for bridge type."""
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(vif),
self.get_vif_devname(vif))
mac_id = vif['address'].replace(':', '')
name = "nova-instance-" + instance.name + "-" + mac_id
if self.get_firewall_required(vif):
conf.filtername = name
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_bridge(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_ovs_config(
conf, self.get_bridge_name(vif),
self.get_ovs_interfaceid(vif),
self.get_vif_devname(vif))
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type, host):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance, newvif, image_meta,
inst_type, virt_type, host)
def get_config_ovs(self, instance, vif, image_meta,
inst_type, virt_type, host):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ovs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type,
host)
else:
return self.get_config_ovs_bridge(instance, vif,
image_meta,
inst_type,
virt_type,
host)
def get_config_ivs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type, host):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance,
newvif,
image_meta,
inst_type,
virt_type,
host)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance,
vif,
image_meta,
inst_type,
virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_ivs(self, instance, vif, image_meta,
inst_type, virt_type, host):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ivs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type,
host)
else:
return self.get_config_ivs_ethernet(instance, vif,
image_meta,
inst_type,
virt_type,
host)
def get_config_802qbg(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
params = vif["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
conf, vif['network'].get_meta('interface'),
params['managerid'],
params['typeid'],
params['typeidversion'],
params['instanceid'])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_802qbh(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_802qbh_config(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_PROFILEID])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hw_veb(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_hw_veb(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_VLAN])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_macvtap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
vif_details = vif['details']
macvtap_src = vif_details.get(network_model.VIF_DETAILS_MACVTAP_SOURCE)
macvtap_mode = vif_details.get(network_model.VIF_DETAILS_MACVTAP_MODE)
phys_interface = vif_details.get(
network_model.VIF_DETAILS_PHYS_INTERFACE)
missing_params = []
if macvtap_src is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_SOURCE)
if macvtap_mode is None:
missing_params.append(network_model.VIF_DETAILS_MACVTAP_MODE)
if phys_interface is None:
missing_params.append(network_model.VIF_DETAILS_PHYS_INTERFACE)
if len(missing_params) > 0:
raise exception.VifDetailsMissingMacvtapParameters(
vif_id=vif['id'],
missing_params=missing_params)
designer.set_vif_host_backend_direct_config(
conf, macvtap_src, macvtap_mode)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_iovisor(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_midonet(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_tap(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def _get_vhostuser_settings(self, vif):
vif_details = vif['details']
mode = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_MODE,
'server')
sock_path = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_SOCKET)
if sock_path is None:
raise exception.VifDetailsMissingVhostuserSockPath(
vif_id=vif['id'])
return mode, sock_path
def get_config_vhostuser(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
mode, sock_path = self._get_vhostuser_settings(vif)
designer.set_vif_host_backend_vhostuser_config(conf, mode, sock_path)
# (vladikr) Not setting up driver and queues for vhostuser
# as queues are not supported in Libvirt until version 1.2.17
if not host.has_min_version(MIN_LIBVIRT_VHOSTUSER_MQ):
            LOG.debug('vhost-user multiqueue is not supported by this '
                      'libvirt version.')
conf.driver_name = None
conf.vhost_queues = None
return conf
def get_config_ib_hostdev(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = vconfig.LibvirtConfigGuestHostdevPCI()
pci_slot = vif['profile']['pci_slot']
designer.set_vif_host_backend_ib_hostdev_config(conf, pci_slot)
return conf
def get_config_vrouter(self, instance, vif, image_meta,
inst_type, virt_type, host):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config(self, instance, vif, image_meta,
inst_type, virt_type, host):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
                  'vif=%(vif)s virt_type=%(virt_type)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'get_config_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
return func(instance, vif, image_meta,
inst_type, virt_type, host)
def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
network = vif['network']
if (not network.get_meta('multi_host', False) and
network.get_meta('should_create_bridge', False)):
if network.get_meta('should_create_vlan', False):
iface = CONF.vlan_interface or \
network.get_meta('bridge_interface')
LOG.debug('Ensuring vlan %(vlan)s and bridge %(bridge)s',
{'vlan': network.get_meta('vlan'),
'bridge': self.get_bridge_name(vif)},
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network.get_meta('vlan'),
self.get_bridge_name(vif),
iface)
else:
iface = CONF.flat_interface or \
network.get_meta('bridge_interface')
LOG.debug("Ensuring bridge %s",
self.get_bridge_name(vif), instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
self.get_bridge_name(vif),
iface)
def plug_ovs_bridge(self, instance, vif):
"""No manual plugging required."""
pass
def _plug_bridge_with_port(self, instance, vif, port):
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
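            # Disable multicast snooping and (if available) IPv6 on the new
            # per-VIF bridge before bringing it up.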
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
disv6 = '/proc/sys/net/ipv6/conf/%s/disable_ipv6' % br_name
if os.path.exists(disv6):
utils.execute('tee',
disv6,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
mtu = vif['network'].get_meta('mtu')
linux_net._create_veth_pair(v1_name, v2_name, mtu)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
if port == 'ovs':
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
v2_name, iface_id,
vif['address'], instance.uuid,
mtu)
elif port == 'ivs':
linux_net.create_ivs_vif_port(v2_name, iface_id,
vif['address'], instance.uuid)
def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
self._plug_bridge_with_port(instance, vif, port='ovs')
def plug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ovs_hybrid(instance, vif)
else:
self.plug_ovs_bridge(instance, vif)
def plug_ivs_ethernet(self, instance, vif):
iface_id = self.get_ovs_interfaceid(vif)
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
instance.uuid)
def plug_ivs_hybrid(self, instance, vif):
"""Plug using hybrid strategy (same as OVS)
        Create a per-VIF linux bridge, then link that bridge to the IVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal IVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
self._plug_bridge_with_port(instance, vif, port='ivs')
def plug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ivs_hybrid(instance, vif)
else:
self.plug_ivs_ethernet(instance, vif)
def plug_ib_hostdev(self, instance, vif):
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id']
)
pci_slot = vif['profile']['pci_slot']
device_id = instance['uuid']
vnic_mac = vif['address']
try:
utils.execute('ebrctl', 'add-port', vnic_mac, device_id,
fabric, network_model.VIF_TYPE_IB_HOSTDEV,
pci_slot, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while plugging ib hostdev vif"),
instance=instance
)
def plug_802qbg(self, instance, vif):
pass
def plug_802qbh(self, instance, vif):
pass
def plug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
linux_net.set_vf_interface_vlan(
vif['profile']['pci_slot'],
mac_addr=vif['address'],
vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
def plug_macvtap(self, instance, vif):
vif_details = vif['details']
vlan = vif_details.get(network_model.VIF_DETAILS_VLAN)
if vlan:
vlan_name = vif_details.get(
network_model.VIF_DETAILS_MACVTAP_SOURCE)
phys_if = vif_details.get(network_model.VIF_DETAILS_PHYS_INTERFACE)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan(
vlan, phys_if, interface=vlan_name)
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
Bind the vif to a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
linux_net.create_tap_dev(dev)
utils.execute('mm-ctl', '--bind-port', port_id, dev,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
        Connect the network device to its Virtual Domain
        in the PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
iface_id = vif['id']
linux_net.create_tap_dev(dev)
net_id = vif['network']['id']
tenant_id = instance.project_id
try:
utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
'access_vm', iface_id, vif['address'],
'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_tap(self, instance, vif):
"""Plug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
mac = vif['details'].get(network_model.VIF_DETAILS_TAP_MAC_ADDRESS)
linux_net.create_tap_dev(dev, mac)
network = vif.get('network')
mtu = network.get_meta('mtu') if network else None
linux_net._set_device_mtu(dev, mtu)
def plug_vhostuser_fp(self, instance, vif):
"""Create a fp netdevice interface with a vhostuser socket"""
dev = self.get_vif_devname(vif)
if linux_net.device_exists(dev):
return
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
sockmode_qemu, sockpath = self._get_vhostuser_settings(vif)
sockmode_port = 'client' if sockmode_qemu == 'server' else 'server'
try:
linux_net.create_fp_dev(dev, sockpath, sockmode_port)
if ovs_plug:
if vif.is_hybrid_plug_enabled():
self.plug_ovs_hybrid(instance, vif)
utils.execute('brctl', 'addif',
self.get_br_name(vif['id']),
dev, run_as_root=True)
else:
iface_id = self.get_ovs_interfaceid(vif)
mtu = vif['network'].get_meta('mtu')
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
dev, iface_id,
vif['address'],
instance.uuid, mtu)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_vhostuser_ovs(self, instance, vif):
"""Plug a VIF_TYPE_VHOSTUSER into an ovs bridge"""
iface_id = self.get_ovs_interfaceid(vif)
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
mtu = vif['network'].get_meta('mtu')
linux_net.create_ovs_vif_port(
self.get_bridge_name(vif),
port_name, iface_id, vif['address'],
instance.uuid, mtu,
interface_type=network_model.OVS_VHOSTUSER_INTERFACE_TYPE)
def plug_vhostuser(self, instance, vif):
fp_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
False)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if fp_plug:
self.plug_vhostuser_fp(instance, vif)
elif ovs_plug:
self.plug_vhostuser_ovs(instance, vif)
def plug_vrouter(self, instance, vif):
"""Plug into Contrail's network port
Bind the vif to a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
ip_addr = '0.0.0.0'
ip6_addr = None
subnets = vif['network']['subnets']
for subnet in subnets:
if not subnet['ips']:
continue
ips = subnet['ips'][0]
if not ips['address']:
continue
if (ips['version'] == 4):
if ips['address'] is not None:
ip_addr = ips['address']
if (ips['version'] == 6):
if ips['address'] is not None:
ip6_addr = ips['address']
ptype = 'NovaVMPort'
if (cfg.CONF.libvirt.virt_type == 'lxc'):
ptype = 'NameSpacePort'
cmd_args = ("--oper=add --uuid=%s --instance_uuid=%s --vn_uuid=%s "
"--vm_project_uuid=%s --ip_address=%s --ipv6_address=%s"
" --vm_name=%s --mac=%s --tap_name=%s --port_type=%s "
"--tx_vlan_id=%d --rx_vlan_id=%d" % (vif['id'],
instance.uuid, vif['network']['id'],
instance.project_id, ip_addr, ip6_addr,
instance.display_name, vif['address'],
vif['devname'], ptype, -1, -1))
try:
linux_net.create_tap_dev(dev)
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
def unplug_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if linux_net.device_exists(br_name):
utils.execute('brctl', 'delif', br_name, v1_name,
run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name,
run_as_root=True)
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
else:
self.unplug_ovs_bridge(instance, vif)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy (same as OVS)
Unhook port from IVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ivs_vif_port(v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ivs_hybrid(instance, vif)
else:
self.unplug_ivs_ethernet(instance, vif)
def unplug_ib_hostdev(self, instance, vif):
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id']
)
vnic_mac = vif['address']
try:
utils.execute('ebrctl', 'del-port', fabric, vnic_mac,
run_as_root=True)
except Exception:
LOG.exception(_LE("Failed while unplugging ib hostdev vif"))
def unplug_802qbg(self, instance, vif):
pass
def unplug_802qbh(self, instance, vif):
pass
def unplug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
# The ip utility doesn't accept the MAC 00:00:00:00:00:00.
# Therefore, keep the MAC unchanged. Later operations on
# the same VF will not be affected by the existing MAC.
linux_net.set_vf_interface_vlan(vif['profile']['pci_slot'],
mac_addr=vif['address'])
def unplug_macvtap(self, instance, vif):
pass
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
utils.execute('mm-ctl', '--unbind-port', port_id,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_tap(self, instance, vif):
"""Unplug a VIF_TYPE_TAP virtual interface."""
dev = self.get_vif_devname(vif)
try:
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
        Delete the network device and its connection
        to the Virtual Domain in the PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
try:
utils.execute('ifc_ctl', 'gateway', 'ifdown',
dev, run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_vhostuser_fp(self, instance, vif):
"""Delete a fp netdevice interface with a vhostuser socket"""
dev = self.get_vif_devname(vif)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
try:
if ovs_plug:
if vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
else:
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
dev, False)
linux_net.delete_fp_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_vhostuser_ovs(self, instance, vif):
"""Unplug a VIF_TYPE_VHOSTUSER into an ovs bridge"""
port_name = os.path.basename(
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
port_name)
def unplug_vhostuser(self, instance, vif):
fp_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
False)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
if fp_plug:
self.unplug_vhostuser_fp(instance, vif)
elif ovs_plug:
self.unplug_vhostuser_ovs(instance, vif)
def unplug_vrouter(self, instance, vif):
"""Unplug Contrail's network port
Unbind the vif from a Contrail virtual port.
"""
dev = self.get_vif_devname(vif)
cmd_args = ("--oper=delete --uuid=%s" % (vif['id']))
try:
utils.execute('vrouter-port-control', cmd_args, run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(
_LE("Failed while unplugging vif"), instance=instance)
def unplug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
func(instance, vif)
|
|
import numpy as np
from multiprocessing import Process
import logging_utils
import tensorflow as tf
import ctypes
import pyximport; pyximport.install()
from hogupdatemv import copy, apply_grads_mom_rmsprop, apply_grads_adam
import time
import utils
CHECKPOINT_INTERVAL = 500000
logger = logging_utils.getLogger('actor_learner')
def generate_final_epsilon():
""" Generate lower limit for decaying epsilon. """
epsilon = {'limits': [0.1, 0.01, 0.5], 'probs': [0.4, 0.3, 0.3]}
return np.random.choice(epsilon['limits'], p=epsilon['probs'])
class ActorLearner(Process):
def __init__(self, args):
super(ActorLearner, self).__init__()
self.summ_base_dir = args.summ_base_dir
self.local_step = 0
self.global_step = args.global_step
self.actor_id = args.actor_id
self.alg_type = args.alg_type
self.max_local_steps = args.max_local_steps
self.optimizer_type = args.opt_type
self.optimizer_mode = args.opt_mode
self.num_actions = args.num_actions
self.initial_lr = args.initial_lr
self.lr_annealing_steps = args.lr_annealing_steps
# Shared mem vars
self.learning_vars = args.learning_vars
size = self.learning_vars.size
self.flat_grads = np.empty(size, dtype = ctypes.c_float)
if (self.optimizer_mode == "local"):
if (self.optimizer_type == "rmsprop"):
self.opt_st = np.ones(size, dtype = ctypes.c_float)
else:
self.opt_st = np.zeros(size, dtype = ctypes.c_float)
elif (self.optimizer_mode == "shared"):
self.opt_st = args.opt_state
# rmsprop/momentum
self.alpha = args.alpha
# adam
self.b1 = args.b1
self.b2 = args.b2
self.e = args.e
if args.env == "GYM":
from atari_environment import AtariEnvironment
self.emulator = AtariEnvironment(args.game, args.visualize)
else:
from emulator import Emulator
self.emulator = Emulator(
args.rom_path,
args.game,
args.visualize,
self.actor_id,
args.random_seed,
args.single_life_episodes)
self.grads_update_steps = args.grads_update_steps
self.max_global_steps = args.max_global_steps
self.gamma = args.gamma
# Exploration epsilons
self.epsilon = 1.0
self.initial_epsilon = 1.0
self.final_epsilon = generate_final_epsilon()
self.epsilon_annealing_steps = args.epsilon_annealing_steps
self.rescale_rewards = args.rescale_rewards
self.max_achieved_reward = -1000000
if self.rescale_rewards:
self.thread_max_reward = 1.0
# Barrier to synchronize all actors after initialization is done
self.barrier = args.barrier
self.summary_ph, self.update_ops, self.summary_ops = self.setup_summaries()
self.game = args.game
def run(self):
self.session = tf.Session()
# self.session = tf.Session(config=tf.ConfigProto(
# inter_op_parallelism_threads=1,
# intra_op_parallelism_threads=1))
if (self.actor_id==0):
            # Initialize TensorBoard summaries
self.summary_op = tf.merge_all_summaries()
self.summary_writer = tf.train.SummaryWriter(
"{}/{}".format(self.summ_base_dir, self.actor_id), self.session.graph_def)
# Initialize network parameters
g_step = utils.restore_vars(self.saver, self.session, self.game, self.alg_type, self.max_local_steps)
self.global_step.val.value = g_step
self.last_saving_step = g_step
logger.debug("T{}: Initializing shared memory...".format(self.actor_id))
self.init_shared_memory()
# Wait until actor 0 finishes initializing shared memory
self.barrier.wait()
if self.actor_id > 0:
logger.debug("T{}: Syncing with shared memory...".format(self.actor_id))
self.sync_net_with_shared_memory(self.local_network, self.learning_vars)
if self.alg_type <> "a3c":
self.sync_net_with_shared_memory(self.target_network, self.target_vars)
# Wait until all actors are ready to start
self.barrier.wait()
        # Introduce a different start delay for each actor so that they do not run in lockstep.
# This is to avoid concurrent updates of parameters as much as possible
time.sleep(0.1877 * self.actor_id)
def save_vars(self):
if (self.actor_id == 0 and
(self.global_step.value() - self.last_saving_step >= CHECKPOINT_INTERVAL)):
self.last_saving_step = self.global_step.value()
utils.save_vars(self.saver, self.session, self.game, self.alg_type, self.max_local_steps, self.last_saving_step)
def init_shared_memory(self):
# Initialize shared memory with tensorflow var values
params = self.session.run(self.local_network.params)
# Merge all param matrices into a single 1-D array
params = np.hstack([p.reshape(-1) for p in params])
np.frombuffer(self.learning_vars.vars, ctypes.c_float)[:] = params
if self.alg_type <> "a3c":
np.frombuffer(self.target_vars.vars, ctypes.c_float)[:] = params
#memoryview(self.learning_vars.vars)[:] = params
#memoryview(self.target_vars.vars)[:] = memoryview(self.learning_vars.vars)
def reduce_thread_epsilon(self):
""" Linear annealing """
if self.epsilon > self.final_epsilon:
self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.epsilon_annealing_steps
def apply_gradients_to_shared_memory_vars(self, grads):
#Flatten grads
offset = 0
for g in grads:
self.flat_grads[offset:offset + g.size] = g.reshape(-1)
offset += g.size
g = self.flat_grads
if self.optimizer_type == "adam" and self.optimizer_mode == "shared":
p = np.frombuffer(self.learning_vars.vars, ctypes.c_float)
p_size = self.learning_vars.size
m = np.frombuffer(self.opt_st.ms, ctypes.c_float)
v = np.frombuffer(self.opt_st.vs, ctypes.c_float)
T = self.global_step.value()
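            # Adam bias-corrected step size: lr_t = lr * sqrt(1 - b2**T) / (1 - b1**T).
            # Note that this overwrites the shared learning rate in place.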
self.opt_st.lr.value = 1.0 * self.opt_st.lr.value * (1 - self.b2**T)**0.5 / (1 - self.b1**T)
apply_grads_adam(m, v, g, p, p_size, self.opt_st.lr.value, self.b1, self.b2, self.e)
else: #local or shared rmsprop/momentum
lr = self.decay_lr()
if (self.optimizer_mode == "local"):
m = self.opt_st
else: #shared
m = np.frombuffer(self.opt_st.vars, ctypes.c_float)
p = np.frombuffer(self.learning_vars.vars, ctypes.c_float)
p_size = self.learning_vars.size
_type = 0 if self.optimizer_type == "momentum" else 1
#print "BEFORE", "RMSPROP m", m[0], "GRAD", g[0], self.flat_grads[0], self.flat_grads2[0]
apply_grads_mom_rmsprop(m, g, p, p_size, _type, lr, self.alpha, self.e)
#print "AFTER", "RMSPROP m", m[0], "GRAD", g[0], self.flat_grads[0], self.flat_grads2[0]
def rescale_reward(self, reward):
if self.rescale_rewards:
""" Rescale immediate reward by max reward encountered thus far. """
if reward > self.thread_max_reward:
self.thread_max_reward = reward
return reward/self.thread_max_reward
else:
""" Clip immediate reward """
if reward > 1.0:
reward = 1.0
elif reward < -1.0:
reward = -1.0
return reward
def sync_net_with_shared_memory(self, dest_net, shared_mem_vars):
feed_dict = {}
offset = 0
params = np.frombuffer(shared_mem_vars.vars,
ctypes.c_float)
for i in xrange(len(dest_net.params)):
shape = shared_mem_vars.var_shapes[i]
size = np.prod(shape)
feed_dict[dest_net.params_ph[i]] = \
params[offset:offset+size].reshape(shape)
offset += size
self.session.run(dest_net.sync_with_shared_memory,
feed_dict=feed_dict)
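    # Linearly anneal the learning rate from initial_lr down to zero over
    # lr_annealing_steps global steps.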
def decay_lr(self):
if self.global_step.value() <= self.lr_annealing_steps:
return self.initial_lr - (self.global_step.value() * self.initial_lr / self.lr_annealing_steps)
else:
return 0.0
def setup_summaries(self):
episode_reward = tf.Variable(0.)
s1 = tf.scalar_summary("Episode Reward " + str(self.actor_id), episode_reward)
if self.alg_type == "a3c":
summary_vars = [episode_reward]
else:
episode_ave_max_q = tf.Variable(0.)
s2 = tf.scalar_summary("Max Q Value " + str(self.actor_id), episode_ave_max_q)
logged_epsilon = tf.Variable(0.)
s3 = tf.scalar_summary("Epsilon " + str(self.actor_id), logged_epsilon)
summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
summary_placeholders = [tf.placeholder("float") for _ in range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
with tf.control_dependencies(update_ops):
summary_ops = tf.merge_all_summaries()
return summary_placeholders, update_ops, summary_ops
|
|
import collections
import contextlib
import cProfile
import inspect
import gc
import multiprocessing
import os
import random
import sys
import time
import unittest
import warnings
from io import StringIO
from unittest import result, runner, signals, suite, loader, case
from .loader import TestLoader
from numba.core import config
try:
from multiprocessing import TimeoutError
except ImportError:
from Queue import Empty as TimeoutError
def make_tag_decorator(known_tags):
"""
Create a decorator allowing tests to be tagged with the *known_tags*.
"""
def tag(*tags):
"""
Tag a test method with the given tags.
Can be used in conjunction with the --tags command-line argument
for runtests.py.
"""
for t in tags:
if t not in known_tags:
raise ValueError("unknown tag: %r" % (t,))
def decorate(func):
if (not callable(func) or isinstance(func, type)
or not func.__name__.startswith('test_')):
raise TypeError("@tag(...) should be used on test methods")
try:
s = func.tags
except AttributeError:
s = func.tags = set()
s.update(tags)
return func
return decorate
return tag
def cuda_sensitive_mtime(x):
"""
    Return a key for sorting tests based on mtime and test name. For CUDA
tests, interleaving tests from different classes is dangerous as the CUDA
context might get reset unexpectedly between methods of a class, so for
CUDA tests the key prioritises the test module and class ahead of the
mtime.
"""
cls = x.__class__
key = str(os.path.getmtime(inspect.getfile(cls))) + str(x)
from numba.cuda.testing import CUDATestCase
if CUDATestCase in cls.mro():
key = "%s.%s %s" % (str(cls.__module__), str(cls.__name__), key)
return key
def parse_slice(useslice):
"""Parses the argument string "useslice" as the arguments to the `slice()`
constructor and returns a slice object that's been instantiated with those
arguments. i.e. input useslice="1,20,2" leads to output `slice(1, 20, 2)`.
"""
try:
l = {}
exec("sl = slice(%s)" % useslice, l)
return l['sl']
except Exception:
msg = ("Expected arguments consumable by 'slice' to follow "
"option `-j`, found '%s'" % useslice)
raise ValueError(msg)
class TestLister(object):
"""Simply list available tests rather than running them."""
def __init__(self, useslice):
self.useslice = parse_slice(useslice)
def run(self, test):
result = runner.TextTestResult(sys.stderr, descriptions=True, verbosity=1)
self._test_list = _flatten_suite(test)
masked_list = self._test_list[self.useslice]
self._test_list.sort(key=cuda_sensitive_mtime)
for t in masked_list:
print(t.id())
print('%d tests found. %s selected' % (len(self._test_list), len(masked_list)))
return result
class SerialSuite(unittest.TestSuite):
"""A simple marker to make sure tests in this suite are run serially.
Note: As the suite is going through internals of unittest,
it may get unpacked and stuffed into a plain TestSuite.
We need to set an attribute on the TestCase objects to
remember they should not be run in parallel.
"""
def addTest(self, test):
if not isinstance(test, unittest.TestCase):
# It's a sub-suite, recurse
for t in test:
self.addTest(t)
else:
# It's a test case, mark it serial
test._numba_parallel_test_ = False
super(SerialSuite, self).addTest(test)
class BasicTestRunner(runner.TextTestRunner):
def __init__(self, useslice, **kwargs):
runner.TextTestRunner.__init__(self, **kwargs)
self.useslice = parse_slice(useslice)
def run(self, test):
run = _flatten_suite(test)[self.useslice]
run.sort(key=cuda_sensitive_mtime)
wrapped = unittest.TestSuite(run)
return super(BasicTestRunner, self).run(wrapped)
# "unittest.main" is really the TestProgram class!
# (defined in a module itself named "unittest.main"...)
class NumbaTestProgram(unittest.main):
"""
A TestProgram subclass adding the following options:
* a -R option to enable reference leak detection
* a --profile option to enable profiling of the test run
* a -m option for parallel execution
* a -l option to (only) list tests
Currently the options are only added in 3.4+.
"""
refleak = False
profile = False
multiprocess = False
useslice = None
list = False
tags = None
exclude_tags = None
random_select = None
random_seed = 42
def __init__(self, *args, **kwargs):
# Disable interpreter fallback if we are running the test suite
if config.COMPATIBILITY_MODE:
warnings.warn("Unset INTERPRETER_FALLBACK")
config.COMPATIBILITY_MODE = False
topleveldir = kwargs.pop('topleveldir', None)
kwargs['testLoader'] = TestLoader(topleveldir)
# HACK to force unittest not to change warning display options
# (so that NumbaWarnings don't appear all over the place)
sys.warnoptions.append(':x')
self.nomultiproc = kwargs.pop('nomultiproc', False)
super(NumbaTestProgram, self).__init__(*args, **kwargs)
def _getParentArgParser(self):
# NOTE: this hook only exists on Python 3.4+. The options won't be
# added in earlier versions (which use optparse - 3.3 - or getopt()
# - 2.x).
parser = super(NumbaTestProgram, self)._getParentArgParser()
if self.testRunner is None:
parser.add_argument('-R', '--refleak', dest='refleak',
action='store_true',
help='Detect reference / memory leaks')
parser.add_argument('-m', '--multiprocess', dest='multiprocess',
nargs='?',
type=int,
const=multiprocessing.cpu_count(),
help='Parallelize tests')
parser.add_argument('-l', '--list', dest='list',
action='store_true',
help='List tests without running them')
parser.add_argument('--tags', dest='tags', type=str,
help='Comma-separated list of tags to select '
'a subset of the test suite')
parser.add_argument('--exclude-tags', dest='exclude_tags', type=str,
help='Comma-separated list of tags to de-select '
'a subset of the test suite')
parser.add_argument('--random', dest='random_select', type=float,
help='Random proportion of tests to select')
parser.add_argument('--profile', dest='profile',
action='store_true',
help='Profile the test run')
parser.add_argument('-j', '--slice', dest='useslice', nargs='?',
type=str, const="None",
help='Slice the test sequence')
parser.add_argument('-g', '--gitdiff', dest='gitdiff',
action='store_true',
help=('Run tests from changes made against '
'origin/master as identified by `git diff`'))
return parser
def _handle_tags(self, argv, tagstr):
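        # Pull a "--tags <arg>" or "--tags=<arg>" style option (and likewise
        # --exclude-tags) out of argv by hand, validating the argument as we go.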
found = None
for x in argv:
if tagstr in x:
if found is None:
found = x
else:
raise ValueError("argument %s supplied repeatedly" % tagstr)
if found is not None:
posn = argv.index(found)
try:
if found == tagstr: # --tagstr <arg>
tag_args = argv[posn + 1].strip()
argv.remove(tag_args)
else: # --tagstr=<arg>
if '=' in found:
tag_args = found.split('=')[1].strip()
else:
raise AssertionError('unreachable')
except IndexError:
# at end of arg list, raise
msg = "%s requires at least one tag to be specified"
raise ValueError(msg % tagstr)
# see if next arg is "end options" or some other flag
if tag_args.startswith('-'):
raise ValueError("tag starts with '-', probably a syntax error")
# see if tag is something like "=<tagname>" which is likely a syntax
# error of form `--tags =<tagname>`, note the space prior to `=`.
if '=' in tag_args:
msg = "%s argument contains '=', probably a syntax error"
raise ValueError(msg % tagstr)
attr = tagstr[2:].replace('-', '_')
setattr(self, attr, tag_args)
argv.remove(found)
def parseArgs(self, argv):
if '-l' in argv:
argv.remove('-l')
self.list = True
super(NumbaTestProgram, self).parseArgs(argv)
# If at this point self.test doesn't exist, it is because
# no test ID was given in argv. Use the default instead.
if not hasattr(self, 'test') or not self.test.countTestCases():
self.testNames = (self.defaultTest,)
self.createTests()
if self.tags:
tags = [s.strip() for s in self.tags.split(',')]
self.test = _choose_tagged_tests(self.test, tags, mode='include')
if self.exclude_tags:
tags = [s.strip() for s in self.exclude_tags.split(',')]
self.test = _choose_tagged_tests(self.test, tags, mode='exclude')
if self.random_select:
self.test = _choose_random_tests(self.test, self.random_select,
self.random_seed)
if self.gitdiff:
self.test = _choose_gitdiff_tests(self.test)
if self.verbosity <= 0:
# We aren't interested in informational messages / warnings when
# running with '-q'.
self.buffer = True
def _do_discovery(self, argv, Loader=None):
# Disable unittest's implicit test discovery when parsing
# CLI arguments, as it can select other tests than Numba's
# (e.g. some test_xxx module that may happen to be directly
# reachable from sys.path)
return
def runTests(self):
if self.refleak:
self.testRunner = RefleakTestRunner
if not hasattr(sys, "gettotalrefcount"):
warnings.warn("detecting reference leaks requires a debug build "
"of Python, only memory leaks will be detected")
elif self.list:
self.testRunner = TestLister(self.useslice)
elif self.testRunner is None:
self.testRunner = BasicTestRunner(self.useslice,
verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
if self.multiprocess and not self.nomultiproc:
if self.multiprocess < 1:
msg = ("Value specified for the number of processes to use in "
"running the suite must be > 0")
raise ValueError(msg)
self.testRunner = ParallelTestRunner(runner.TextTestRunner,
self.multiprocess,
self.useslice,
verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
def run_tests_real():
super(NumbaTestProgram, self).runTests()
if self.profile:
filename = os.path.splitext(
os.path.basename(sys.modules['__main__'].__file__)
)[0] + '.prof'
p = cProfile.Profile(timer=time.perf_counter) # 3.3+
p.enable()
try:
p.runcall(run_tests_real)
finally:
p.disable()
print("Writing test profile data into %r" % (filename,))
p.dump_stats(filename)
else:
run_tests_real()
# These are tests which are generated and injected into the test suite; what
# gets injected depends on features of the test environment, e.g. TBB presence.
# It's important for the CI "slice tests" that these are run at the end.
# See notes in `_flatten_suite` for why. Simple substring matching is used to
# determine a match.
_GENERATED = ("numba.tests.test_num_threads",
"numba.tests.test_parallel_backend",
"numba.tests.test_svml",
"numba.tests.test_ufuncs",)
def _flatten_suite_inner(test):
"""
Workhorse for _flatten_suite
"""
tests = []
if isinstance(test, (unittest.TestSuite, list, tuple)):
for x in test:
tests.extend(_flatten_suite_inner(x))
else:
tests.append(test)
return tests
def _flatten_suite(test):
"""
Expand nested suite into list of test cases.
"""
tests = _flatten_suite_inner(test)
# Strip out generated tests and stick them at the end, this is to make sure
# that tests appear in a consistent order regardless of features available.
# This is so that a slice through the test suite e.g. (1::N) would likely be
# consistent up to the point of the generated tests, which rely on specific
# features.
generated = set()
for t in tests:
for g in _GENERATED:
if g in str(t):
generated.add(t)
normal = set(tests) - generated
def key(x):
return x.__module__, type(x).__name__, x._testMethodName
tests = sorted(normal, key=key)
tests.extend(sorted(list(generated), key=key))
return tests
def _choose_gitdiff_tests(tests):
try:
from git import Repo
except ImportError:
raise ValueError("gitpython needed for git functionality")
repo = Repo('.')
path = os.path.join('numba', 'tests')
target = 'origin/master..HEAD'
gdiff_paths = repo.git.diff(target, path, name_only=True).split()
# normalise the paths as they are unix style from repo.git.diff
gdiff_paths = [os.path.normpath(x) for x in gdiff_paths]
selected = []
gdiff_paths = [os.path.join(repo.working_dir, x) for x in gdiff_paths]
for test in _flatten_suite(tests):
assert isinstance(test, unittest.TestCase)
fname = inspect.getsourcefile(test.__class__)
if fname in gdiff_paths:
selected.append(test)
print("Git diff identified %s tests" % len(selected))
return unittest.TestSuite(selected)
def _choose_tagged_tests(tests, tags, mode='include'):
"""
Select tests that are tagged/not tagged with at least one of the given tags.
Set mode to 'include' to include the tests with tags, or 'exclude' to
exclude the tests with the tags.
"""
selected = []
tags = set(tags)
for test in _flatten_suite(tests):
assert isinstance(test, unittest.TestCase)
func = getattr(test, test._testMethodName)
try:
# Look up the method's underlying function (Python 2)
func = func.im_func
except AttributeError:
pass
found_tags = getattr(func, 'tags', None)
# only include the test if the tags *are* present
if mode == 'include':
if found_tags is not None and found_tags & tags:
selected.append(test)
elif mode == 'exclude':
# only include the test if the tags *are not* present
if found_tags is None or not (found_tags & tags):
selected.append(test)
else:
raise ValueError("Invalid 'mode' supplied: %s." % mode)
return unittest.TestSuite(selected)
def _choose_random_tests(tests, ratio, seed):
"""
Choose a given proportion of tests at random.
"""
rnd = random.Random()
rnd.seed(seed)
if isinstance(tests, unittest.TestSuite):
tests = _flatten_suite(tests)
tests = rnd.sample(tests, int(len(tests) * ratio))
tests = sorted(tests, key=lambda case: case.id())
return unittest.TestSuite(tests)
# The reference leak detection code is liberally taken and adapted from
# Python's own Lib/test/regrtest.py.
def _refleak_cleanup():
# Collect cyclic trash and read memory statistics immediately after.
func1 = sys.getallocatedblocks
try:
func2 = sys.gettotalrefcount
except AttributeError:
func2 = lambda: 42
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
sys._clear_type_cache()
# This also clears the various internal CPython freelists.
gc.collect()
return func1(), func2()
class ReferenceLeakError(RuntimeError):
pass
class IntPool(collections.defaultdict):
def __missing__(self, key):
return key
class RefleakTestResult(runner.TextTestResult):
warmup = 3
repetitions = 6
def _huntLeaks(self, test):
self.stream.flush()
repcount = self.repetitions
nwarmup = self.warmup
rc_deltas = [0] * (repcount - nwarmup)
alloc_deltas = [0] * (repcount - nwarmup)
# Preallocate ints likely to be stored in rc_deltas and alloc_deltas,
# to make sys.getallocatedblocks() less flaky.
_int_pool = IntPool()
for i in range(-200, 200):
_int_pool[i]
for i in range(repcount):
# Use a pristine, silent result object to avoid recursion
res = result.TestResult()
test.run(res)
# Poorly-written tests may fail when run several times.
# In this case, abort the refleak run and report the failure.
if not res.wasSuccessful():
self.failures.extend(res.failures)
self.errors.extend(res.errors)
raise AssertionError
del res
alloc_after, rc_after = _refleak_cleanup()
if i >= nwarmup:
rc_deltas[i - nwarmup] = _int_pool[rc_after - rc_before]
alloc_deltas[i - nwarmup] = _int_pool[alloc_after - alloc_before]
alloc_before, rc_before = alloc_after, rc_after
return rc_deltas, alloc_deltas
def addSuccess(self, test):
try:
rc_deltas, alloc_deltas = self._huntLeaks(test)
except AssertionError:
# Test failed when repeated
assert not self.wasSuccessful()
return
# These checkers return False on success, True on failure
def check_rc_deltas(deltas):
return any(deltas)
def check_alloc_deltas(deltas):
# At least 1/3rd of 0s
if 3 * deltas.count(0) < len(deltas):
return True
# Nothing else than 1s, 0s and -1s
if not set(deltas) <= set((1, 0, -1)):
return True
return False
failed = False
for deltas, item_name, checker in [
(rc_deltas, 'references', check_rc_deltas),
(alloc_deltas, 'memory blocks', check_alloc_deltas)]:
if checker(deltas):
msg = '%s leaked %s %s, sum=%s' % (
test, deltas, item_name, sum(deltas))
failed = True
try:
raise ReferenceLeakError(msg)
except Exception:
exc_info = sys.exc_info()
if self.showAll:
self.stream.write("%s = %r " % (item_name, deltas))
self.addFailure(test, exc_info)
if not failed:
super(RefleakTestResult, self).addSuccess(test)
class RefleakTestRunner(runner.TextTestRunner):
resultclass = RefleakTestResult
class ParallelTestResult(runner.TextTestResult):
"""
A TestResult able to inject results from other results.
"""
def add_results(self, result):
"""
Add the results from the other *result* to this result.
"""
self.stream.write(result.stream.getvalue())
self.stream.flush()
self.testsRun += result.testsRun
self.failures.extend(result.failures)
self.errors.extend(result.errors)
self.skipped.extend(result.skipped)
self.expectedFailures.extend(result.expectedFailures)
self.unexpectedSuccesses.extend(result.unexpectedSuccesses)
class _MinimalResult(object):
"""
A minimal, picklable TestResult-alike object.
"""
__slots__ = (
'failures', 'errors', 'skipped', 'expectedFailures',
'unexpectedSuccesses', 'stream', 'shouldStop', 'testsRun',
'test_id')
def fixup_case(self, case):
"""
Remove any unpicklable attributes from TestCase instance *case*.
"""
# Python 3.3 doesn't reset this one.
case._outcomeForDoCleanups = None
def __init__(self, original_result, test_id=None):
for attr in self.__slots__:
setattr(self, attr, getattr(original_result, attr, None))
for case, _ in self.expectedFailures:
self.fixup_case(case)
for case, _ in self.errors:
self.fixup_case(case)
for case, _ in self.failures:
self.fixup_case(case)
self.test_id = test_id
class _FakeStringIO(object):
"""
A trivial picklable StringIO-alike for Python 2.
"""
def __init__(self, value):
self._value = value
def getvalue(self):
return self._value
class _MinimalRunner(object):
"""
A minimal picklable object able to instantiate a runner in a
child process and run a test case with it.
"""
def __init__(self, runner_cls, runner_args):
self.runner_cls = runner_cls
self.runner_args = runner_args
# Python 2 doesn't know how to pickle instance methods, so we use __call__
# instead.
def __call__(self, test):
# Executed in child process
kwargs = self.runner_args
# Force recording of output in a buffer (it will be printed out
# by the parent).
kwargs['stream'] = StringIO()
runner = self.runner_cls(**kwargs)
result = runner._makeResult()
# Avoid child tracebacks when Ctrl-C is pressed.
signals.installHandler()
signals.registerResult(result)
result.failfast = runner.failfast
result.buffer = runner.buffer
with self.cleanup_object(test):
test(result)
# HACK as cStringIO.StringIO isn't picklable in 2.x
result.stream = _FakeStringIO(result.stream.getvalue())
return _MinimalResult(result, test.id())
@contextlib.contextmanager
def cleanup_object(self, test):
"""
A context manager which cleans up unwanted attributes on a test case
(or any other object).
"""
vanilla_attrs = set(test.__dict__)
try:
yield test
finally:
spurious_attrs = set(test.__dict__) - vanilla_attrs
for name in spurious_attrs:
del test.__dict__[name]
def _split_nonparallel_tests(test, sliced=slice(None)):
"""
Split test suite into parallel and serial tests.
"""
ptests = []
stests = []
flat = _flatten_suite(test)[sliced]
def is_parallelizable_test_case(test):
# Guard for the fake test case created by unittest when test
# discovery fails, as it isn't picklable (e.g. "LoadTestsFailure")
method_name = test._testMethodName
method = getattr(test, method_name)
if method.__name__ != method_name and method.__name__ == "testFailure":
return False
# Was parallel execution explicitly disabled?
return getattr(test, "_numba_parallel_test_", True)
for t in flat:
if is_parallelizable_test_case(t):
ptests.append(t)
else:
stests.append(t)
return ptests, stests
# A test can't run longer than 10 minutes
_TIMEOUT = 600
class ParallelTestRunner(runner.TextTestRunner):
"""
A test runner which delegates the actual running to a pool of child
processes.
"""
resultclass = ParallelTestResult
timeout = _TIMEOUT
def __init__(self, runner_cls, nprocs, useslice, **kwargs):
runner.TextTestRunner.__init__(self, **kwargs)
self.runner_cls = runner_cls
self.nprocs = nprocs
self.useslice = parse_slice(useslice)
self.runner_args = kwargs
def _run_inner(self, result):
# We hijack TextTestRunner.run()'s inner logic by passing this
# method as if it were a test case.
child_runner = _MinimalRunner(self.runner_cls, self.runner_args)
# Split the tests and recycle the worker process to tame memory usage.
chunk_size = 100
splitted_tests = [self._ptests[i:i + chunk_size]
for i in range(0, len(self._ptests), chunk_size)]
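        # For example, with chunk_size = 100, 250 parallel tests are run as three
        # chunks of 100, 100 and 50 tests; each chunk gets a fresh Pool below, so
        # worker memory is released between chunks.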
for tests in splitted_tests:
pool = multiprocessing.Pool(self.nprocs)
try:
self._run_parallel_tests(result, pool, child_runner, tests)
except:
# On exception, kill still active workers immediately
pool.terminate()
# Make sure exception is reported and not ignored
raise
else:
# Close the pool cleanly unless asked to early out
if result.shouldStop:
pool.terminate()
break
else:
pool.close()
finally:
# Always join the pool (this is necessary for coverage.py)
pool.join()
if not result.shouldStop:
stests = SerialSuite(self._stests)
stests.run(result)
return result
def _run_parallel_tests(self, result, pool, child_runner, tests):
remaining_ids = set(t.id() for t in tests)
tests.sort(key=cuda_sensitive_mtime)
it = pool.imap_unordered(child_runner, tests)
while True:
try:
child_result = it.__next__(self.timeout)
except StopIteration:
return
except TimeoutError as e:
# Diagnose the names of unfinished tests
msg = ("Tests didn't finish before timeout (or crashed):\n%s"
% "".join("- %r\n" % tid for tid in sorted(remaining_ids))
)
e.args = (msg,) + e.args[1:]
raise e
else:
result.add_results(child_result)
remaining_ids.discard(child_result.test_id)
if child_result.shouldStop:
result.shouldStop = True
return
def run(self, test):
        self._ptests, self._stests = _split_nonparallel_tests(
            test, sliced=self.useslice)
print("Parallel: %s. Serial: %s" % (len(self._ptests),
len(self._stests)))
# This will call self._run_inner() on the created result object,
# and print out the detailed test results at the end.
return super(ParallelTestRunner, self).run(self._run_inner)
|
|
import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from scipy.spatial.distance import cdist
from sklearn.neighbors import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_raises_regex
from sklearn.utils.fixes import sp_version, parse_version
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
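# dist_func is a Minkowski-style p-norm of the difference vector; with p=2 it is
# the Euclidean distance, and test_pyfunc_metric below registers it as a custom
# "pyfunc" metric with p=2 and checks it against the built-in euclidean metric.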
rng = check_random_state(0)
d = 4
n1 = 20
n2 = 25
X1 = rng.random_sample((n1, d)).astype('float64', copy=False)
X2 = rng.random_sample((n2, d)).astype('float64', copy=False)
# make boolean arrays: ones and zeros
X1_bool = X1.round(0)
X2_bool = X2.round(0)
V = rng.random_sample((d, d))
VI = np.dot(V, V.T)
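# V.dot(V.T) is symmetric positive semi-definite, which makes VI a valid value
# for the mahalanobis metric's VI (inverse covariance) parameter used below.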
BOOL_METRICS = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
METRICS_DEFAULT_PARAMS = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(rng.random_sample(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(rng.random_sample(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
@pytest.mark.parametrize('metric', METRICS_DEFAULT_PARAMS)
def test_cdist(metric):
argdict = METRICS_DEFAULT_PARAMS[metric]
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(X1, X2, metric, **kwargs)
check_cdist(metric, kwargs, D_true)
@pytest.mark.parametrize('metric', BOOL_METRICS)
def test_cdist_bool_metric(metric):
D_true = cdist(X1_bool, X2_bool, metric)
check_cdist_bool(metric, D_true)
def check_cdist(metric, kwargs, D_true):
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(X1, X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(X1_bool, X2_bool)
assert_array_almost_equal(D12, D_true)
@pytest.mark.parametrize('metric', METRICS_DEFAULT_PARAMS)
def test_pdist(metric):
argdict = METRICS_DEFAULT_PARAMS[metric]
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(X1, X1, metric, **kwargs)
check_pdist(metric, kwargs, D_true)
@pytest.mark.parametrize('metric', BOOL_METRICS)
def test_pdist_bool_metrics(metric):
D_true = cdist(X1_bool, X1_bool, metric)
check_pdist_bool(metric, D_true)
def check_pdist(metric, kwargs, D_true):
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(X1_bool)
# Based on https://github.com/scipy/scipy/pull/7373
# When comparing two all-zero vectors, scipy>=1.2.0 jaccard metric
# was changed to return 0, instead of nan.
if metric == 'jaccard' and sp_version < parse_version('1.2.0'):
D_true[np.isnan(D_true)] = 0
assert_array_almost_equal(D12, D_true)
@pytest.mark.parametrize('metric', METRICS_DEFAULT_PARAMS)
def test_pickle(metric):
argdict = METRICS_DEFAULT_PARAMS[metric]
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
check_pickle(metric, kwargs)
@pytest.mark.parametrize('metric', BOOL_METRICS)
def test_pickle_bool_metrics(metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
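    # haversine_slow is the standard haversine great-circle formula for points
    # given as (latitude, longitude) in radians; the "reduced" distance checked
    # at the end of this test is sin(d / 2) ** 2 of the true distance d.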
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
    # Check that DistanceMetric objects built from a user-supplied callable and
    # from a predefined metric are both picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
# Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = check_random_state(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=custom_metric)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X) ** 2)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import numpy as np
import pickle as pkl
from mxnet.test_utils import *
from numpy.testing import assert_allclose
def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
"""check function consistency with uniform random numbers"""
if isinstance(arg_shapes, int):
assert dim
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
arg_shapes = [shape] * arg_shapes
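    # When arg_shapes is an int it is treated as a count: `dim` must be given and
    # that many arguments are generated with one shared random shape of `dim`
    # dimensions, e.g. check_with_uniform(lambda x, y: x + y, 2, dim=3).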
for dtype in type_list:
ndarray_arg = []
numpy_arg = []
for s in arg_shapes:
npy = np.random.uniform(rmin, 10, s).astype(dtype)
narr = mx.nd.array(npy, dtype=dtype)
ndarray_arg.append(narr)
numpy_arg.append(npy)
out1 = uf(*ndarray_arg)
if npuf is None:
out2 = uf(*numpy_arg).astype(dtype)
else:
out2 = npuf(*numpy_arg).astype(dtype)
assert out1.shape == out2.shape
if isinstance(out1, mx.nd.NDArray):
out1 = out1.asnumpy()
if dtype == np.float16:
assert_almost_equal(out1, out2, rtol=2e-3)
else:
assert_almost_equal(out1, out2)
def random_ndarray(dim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
data = mx.nd.array(np.random.uniform(-10, 10, shape))
return data
def test_ndarray_setitem():
shape = (3, 4, 2)
# scalar assignment
x = mx.nd.zeros(shape)
x[:] = 1
x_np = np.ones(shape, dtype=x.dtype)
assert same(x.asnumpy(), x_np)
# ndarray assignment
x = mx.nd.zeros(shape)
x[:] = mx.nd.ones(shape)
x_np = np.ones(shape, dtype=x.dtype)
assert same(x.asnumpy(), x_np)
# numpy assignment
x = mx.nd.zeros(shape)
x[:] = np.ones(shape)
x_np = np.ones(shape, dtype=x.dtype)
assert same(x.asnumpy(), x_np)
# indexing sub-arrays
x = mx.nd.zeros(shape)
x[1] = 1
x_np = np.zeros(shape, dtype=x.dtype)
x_np[1] = 1
assert same(x.asnumpy(), x_np)
# all-dim indexing
x = mx.nd.zeros(shape)
val = mx.nd.ones((3, 2, 1))
x[:, 1:3, 1] = val
x_np = np.zeros(shape, dtype=x.dtype)
x_np[:, 1:3, 1:2] = val.asnumpy()
assert same(x.asnumpy(), x_np)
# short all-dim indexing
x = mx.nd.zeros(shape)
val = mx.nd.ones((3, 2))
x[:, 1:3, 1] = val
x_np = np.zeros(shape, dtype=x.dtype)
x_np[:, 1:3, 1] = val.asnumpy()
assert same(x.asnumpy(), x_np)
x = mx.nd.zeros(shape)
x[:, 1:3, 1] = 1
x_np = np.zeros(shape, dtype=x.dtype)
x_np[:, 1:3, 1:2] = 1
assert same(x.asnumpy(), x_np)
def test_ndarray_elementwise():
np.random.seed(0)
nrepeat = 10
maxdim = 4
all_type = [np.float32, np.float64, np.float16, np.uint8, np.int32]
real_type = [np.float32, np.float64, np.float16]
for repeat in range(nrepeat):
for dim in range(1, maxdim):
check_with_uniform(lambda x, y: x + y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x - y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x * y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x / y, 2, dim, type_list=real_type)
check_with_uniform(lambda x, y: x / y, 2, dim, rmin=1, type_list=all_type)
check_with_uniform(mx.nd.sqrt, 1, dim, np.sqrt, rmin=0)
check_with_uniform(mx.nd.square, 1, dim, np.square, rmin=0)
check_with_uniform(lambda x: mx.nd.norm(x).asscalar(), 1, dim, np.linalg.norm)
def test_ndarray_elementwisesum():
ones = mx.nd.ones((10,), dtype=np.int32)
res = mx.nd.ElementWiseSum(ones, ones*2, ones*4, ones*8)
assert same(res.asnumpy(), ones.asnumpy()*15)
def test_ndarray_negate():
npy = np.random.uniform(-10, 10, (2,3,4))
arr = mx.nd.array(npy)
assert_almost_equal(npy, arr.asnumpy())
assert_almost_equal(-npy, (-arr).asnumpy())
    # a final check to make sure the negation (-) is not implemented as an
    # in-place operation, so the contents of arr do not change after we
    # compute (-arr)
assert_almost_equal(npy, arr.asnumpy())
def test_ndarray_reshape():
tensor = mx.nd.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]])
true_res = mx.nd.arange(8) + 1
assert same(tensor.reshape((-1, )).asnumpy(), true_res.asnumpy())
true_res = mx.nd.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
assert same(tensor.reshape((2, -1)).asnumpy(), true_res.asnumpy())
assert same(tensor.reshape((0, -1)).asnumpy(), true_res.asnumpy())
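    # Per the assertion above, a 0 in the target shape keeps the corresponding
    # input dimension, so reshape((0, -1)) behaves like reshape((2, -1)) here.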
true_res = mx.nd.array([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
assert same(tensor.reshape((-1, 2)).asnumpy(), true_res.asnumpy())
def test_ndarray_choose():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
assert same(npy[np.arange(shape[0]), indices],
mx.nd.choose_element_0index(arr, mx.nd.array(indices)).asnumpy())
def test_ndarray_fill():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
new_npy = npy.copy()
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
val = np.random.randint(shape[1], size=shape[0])
new_npy[:] = npy
new_npy[np.arange(shape[0]), indices] = val
assert same(new_npy,
mx.nd.fill_element_0index(arr, mx.nd.array(val), mx.nd.array(indices)).asnumpy())
def test_ndarray_onehot():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
npy[:] = 0.0
npy[np.arange(shape[0]), indices] = 1.0
mx.nd.onehot_encode(mx.nd.array(indices), out=arr)
assert same(npy, arr.asnumpy())
def test_ndarray_copy():
c = mx.nd.array(np.random.uniform(-10, 10, (10, 10)))
d = c.copyto(mx.Context('cpu', 0))
assert np.sum(np.abs(c.asnumpy() != d.asnumpy())) == 0.0
def test_ndarray_scalar():
c = mx.nd.empty((10,10))
d = mx.nd.empty((10,10))
c[:] = 0.5
d[:] = 1.0
d -= c * 2 / 3 * 6.0
c += 0.5
assert(np.sum(c.asnumpy()) - 100 < 1e-5)
assert(np.sum(d.asnumpy()) + 100 < 1e-5)
c[:] = 2
assert(np.sum(c.asnumpy()) - 200 < 1e-5)
d = -c + 2
assert(np.sum(d.asnumpy()) < 1e-5)
def test_ndarray_pickle():
np.random.seed(0)
maxdim = 5
nrepeat = 10
for repeat in range(nrepeat):
for dim in range(1, maxdim):
a = random_ndarray(dim)
b = mx.nd.empty(a.shape)
a[:] = np.random.uniform(-10, 10, a.shape)
b[:] = np.random.uniform(-10, 10, a.shape)
a = a + b
data = pkl.dumps(a)
a2 = pkl.loads(data)
assert np.sum(a.asnumpy() != a2.asnumpy()) == 0
def test_ndarray_saveload():
np.random.seed(0)
nrepeat = 10
fname = 'tmp_list.bin'
for repeat in range(nrepeat):
data = []
# test save/load as list
for i in range(10):
data.append(random_ndarray(np.random.randint(1, 5)))
mx.nd.save(fname, data)
data2 = mx.nd.load(fname)
assert len(data) == len(data2)
for x, y in zip(data, data2):
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
# test save/load as dict
dmap = {'ndarray xx %s' % i : x for i, x in enumerate(data)}
mx.nd.save(fname, dmap)
dmap2 = mx.nd.load(fname)
assert len(dmap2) == len(dmap)
for k, x in dmap.items():
y = dmap2[k]
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
# test save/load as ndarray
# we expect the single ndarray to be converted into a list containing the ndarray
single_ndarray = data[0]
mx.nd.save(fname, single_ndarray)
single_ndarray_loaded = mx.nd.load(fname)
assert len(single_ndarray_loaded) == 1
single_ndarray_loaded = single_ndarray_loaded[0]
assert np.sum(single_ndarray.asnumpy() != single_ndarray_loaded.asnumpy()) == 0
os.remove(fname)
def test_ndarray_legacy_load():
data = []
for i in range(6):
data.append(mx.nd.arange(128))
path = os.path.dirname(os.path.realpath(__file__))
legacy_data = mx.nd.load(os.path.join(path, 'legacy_ndarray.v0'))
assert len(data) == len(legacy_data)
for i in range(len(data)):
assert same(data[i].asnumpy(), legacy_data[i].asnumpy())
def test_ndarray_slice():
shape = (10,)
A = mx.nd.array(np.random.uniform(-10, 10, shape))
A2 = A.asnumpy()
assert same(A[3:8].asnumpy(), A2[3:8])
    A2[3:8] *= 10
A[3:8] = A2[3:8]
assert same(A[3:8].asnumpy(), A2[3:8])
shape = (3,4,5,6,7)
A = mx.nd.random_uniform(shape=shape)
A2 = A.asnumpy()
assert same(A[1,3:4,:,1:5].asnumpy(), A2[1,3:4,:,1:5])
assert A[1,2,3,4,5].asscalar() == A2[1,2,3,4,5]
def test_ndarray_crop():
# get crop
x = mx.nd.ones((2, 3, 4))
y = mx.nd.crop(x, begin=(0, 0, 0), end=(2, 1, 3))
assert same(y.asnumpy(), np.ones((2, 1, 3), dtype=y.dtype))
# crop assign
z = mx.nd.zeros((2, 1, 3))
mx.nd._internal._crop_assign(x, z, begin=(0, 0, 0),
end=(2, 1, 3), out=x)
np_x = np.ones(x.shape, dtype=x.dtype)
np_x[0:2, 0:1, 0:3] = 0
assert same(x.asnumpy(), np_x)
# crop assign with scalar
x = mx.nd.ones((2, 3, 4))
mx.nd._internal._crop_assign_scalar(x, scalar=5,
begin=(0, 0, 0),
end=(2, 1, 3), out=x)
np_x = np.ones(x.shape, dtype=x.dtype)
np_x[0:2, 0:1, 0:3] = 5
assert same(x.asnumpy(), np_x)
def test_ndarray_concatenate():
axis = 1
shapes = [(2, 3, 4, 2), (2, 2, 4, 2), (2, 1, 4, 2)]
arrays_np = [np.random.uniform(-10, 10, s).astype(np.float32) for s in shapes]
arrays_nd = [mx.nd.array(x) for x in arrays_np]
array_nd = mx.nd.concatenate(arrays_nd, axis=axis)
array_np = np.concatenate(arrays_np, axis=axis)
assert same(array_np, array_nd.asnumpy())
def test_clip():
shape = (10,)
A = mx.random.uniform(-10, 10, shape)
B = mx.nd.clip(A, -2, 2)
B1 = B.asnumpy()
for i in range(shape[0]):
assert B1[i] >= -2
assert B1[i] <= 2
def test_dot():
# Test normal dot
a = np.random.uniform(-3, 3, (3, 4))
b = np.random.uniform(-3, 3, (4, 5))
c = np.dot(a, b)
A = mx.nd.array(a)
B = mx.nd.array(b)
C = mx.nd.dot(A, B)
assert_almost_equal(c, C.asnumpy())
    # Test dot with the transpose_a kwarg
a = np.random.uniform(-3, 3, (3, 4))
b = np.random.uniform(-3, 3, (3, 5))
c = np.dot(a.T, b)
A = mx.nd.array(a)
B = mx.nd.array(b)
C = mx.nd.dot(A, B, transpose_a=True)
assert_almost_equal(c, C.asnumpy())
    # Test dot with the transpose_b kwarg
a = np.random.uniform(-3, 3, (3, 4))
b = np.random.uniform(-3, 3, (5, 4))
c = np.dot(a, b.T)
A = mx.nd.array(a)
B = mx.nd.array(b)
C = mx.nd.dot(A, B, transpose_b=True)
assert_almost_equal(c, C.asnumpy())
    # Test dot with both transpose kwargs
a = np.random.uniform(-3, 3, (4, 3))
b = np.random.uniform(-3, 3, (5, 4))
c = np.dot(a.T, b.T)
A = mx.nd.array(a)
B = mx.nd.array(b)
C = mx.nd.dot(A, B, transpose_a=True, transpose_b=True)
assert_almost_equal(c, C.asnumpy())
def test_reduce():
sample_num = 200
def test_reduce_inner(numpy_reduce_func, nd_reduce_func, multi_axes):
for i in range(sample_num):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 11, size=ndim)
dat = np.random.rand(*shape) - 0.5
keepdims = np.random.randint(0, 2)
if multi_axes:
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = tuple(range(ndim))
else:
axes = tuple(axes)
else:
axes = np.random.randint(0, ndim)
numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)
ndarray_ret = nd_reduce_func(mx.nd.array(dat), axis=axes, keepdims=keepdims)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == numpy_ret.shape) or \
(ndarray_ret.shape == (1,) and numpy_ret.shape == ()), "nd:%s, numpy:%s" \
%(ndarray_ret.shape, numpy_ret.shape)
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-4
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
mx.nd.sum, True)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.max),
mx.nd.max, True)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.min),
mx.nd.min, True)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.argmax),
mx.nd.argmax, False)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.argmin),
mx.nd.argmin, False)
def test_broadcast():
sample_num = 1000
def test_broadcast_to():
for i in range(sample_num):
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 11, size=ndim)
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray_ret = mx.nd.array(dat).broadcast_to(shape=target_shape)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
test_broadcast_to()
def test_broadcast_binary():
N = 100
def check_broadcast_binary(fn):
for _ in range(N):
ndim = np.random.randint(1, 6)
oshape = np.random.randint(1, 6, size=(ndim,))
bdim = np.random.randint(1, ndim+1)
lshape = list(oshape)
rshape = list(oshape[ndim-bdim:])
for i in range(bdim):
sep = np.random.uniform(0, 1)
if sep < 0.33:
lshape[ndim-i-1] = 1
elif sep < 0.66:
rshape[bdim-i-1] = 1
lhs = np.random.normal(0, 1, size=lshape)
rhs = np.random.normal(0, 1, size=rshape)
assert_allclose(fn(lhs, rhs),
fn(mx.nd.array(lhs), mx.nd.array(rhs)).asnumpy(),
rtol=1e-4, atol=1e-4)
check_broadcast_binary(lambda x, y: x + y)
check_broadcast_binary(lambda x, y: x - y)
check_broadcast_binary(lambda x, y: x * y)
check_broadcast_binary(lambda x, y: x / y)
check_broadcast_binary(lambda x, y: x > y)
check_broadcast_binary(lambda x, y: x < y)
check_broadcast_binary(lambda x, y: x >= y)
check_broadcast_binary(lambda x, y: x <= y)
check_broadcast_binary(lambda x, y: x == y)
def test_moveaxis():
X = mx.nd.array([[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]])
res = mx.nd.moveaxis(X, 0, 3).asnumpy()
true_res = mx.nd.array([[[ 1., 7.],
[ 2., 8.],
[ 3., 9.]],
[[ 4., 10.],
[ 5., 11.],
[ 6., 12.]]])
assert same(res, true_res.asnumpy())
assert mx.nd.moveaxis(X, 2, 0).shape == (3, 2, 2)
def test_arange():
for i in range(5):
start = np.random.rand() * 10
stop = start + np.random.rand() * 100
step = np.random.rand() * 4
repeat = int(np.random.rand() * 5) + 1
gt = np.arange(start=start, stop=stop, step=step)
gt = np.broadcast_to(gt.reshape((gt.shape[0], 1)), shape=(gt.shape[0], repeat)).ravel()
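        # The numpy reference above repeats each element `repeat` times, e.g.
        # start=0, stop=3, step=1, repeat=2 gives [0., 0., 1., 1., 2., 2.], which
        # is what mx.nd.arange(..., repeat=repeat) is expected to produce.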
pred = mx.nd.arange(start=start, stop=stop, step=step, repeat=repeat).asnumpy()
assert_almost_equal(pred, gt)
def test_order(ctx=default_context()):
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
            assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
a_npy = np.random.normal(size=(5, 5, 5, 5))
a_nd = mx.nd.array(a_npy, ctx=ctx)
# test for ret_typ=indices
nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="indices", k=3, is_ascend=True).asnumpy()
gt = gt_topk(a_npy, axis=1, ret_typ="indices", k=3, is_ascend=True)
assert_almost_equal(nd_ret_topk, gt)
nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="indices", k=2, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=3, ret_typ="indices", k=2, is_ascend=False)
assert_almost_equal(nd_ret_topk, gt)
nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="indices", k=21, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=None, ret_typ="indices", k=21, is_ascend=False)
assert_almost_equal(nd_ret_topk, gt)
# test for ret_typ=value
nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="value", k=3, is_ascend=True).asnumpy()
gt = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True)
assert_almost_equal(nd_ret_topk, gt)
nd_ret_topk = mx.nd.topk(a_nd, axis=3, ret_typ="value", k=2, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=3, ret_typ="value", k=2, is_ascend=False)
assert_almost_equal(nd_ret_topk, gt)
nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="value", k=21, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=None, ret_typ="value", k=21, is_ascend=False)
assert_almost_equal(nd_ret_topk, gt)
# test for ret_typ=mask
nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="mask", k=3, is_ascend=True).asnumpy()
gt = gt_topk(a_npy, axis=1, ret_typ="mask", k=3, is_ascend=True)
assert_almost_equal(nd_ret_topk, gt)
nd_ret_topk = mx.nd.topk(a_nd, axis=1, ret_typ="mask", k=2, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=1, ret_typ="mask", k=2, is_ascend=False)
assert_almost_equal(nd_ret_topk, gt)
nd_ret_topk = mx.nd.topk(a_nd, axis=None, ret_typ="mask", k=21, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=None, ret_typ="mask", k=21, is_ascend=False)
assert_almost_equal(nd_ret_topk, gt)
# test for ret_typ=both
nd_ret_topk_val, nd_ret_topk_ind = mx.nd.topk(a_nd, axis=1, ret_typ="both", k=3, is_ascend=True)
nd_ret_topk_val = nd_ret_topk_val.asnumpy()
nd_ret_topk_ind = nd_ret_topk_ind.asnumpy()
gt_val = gt_topk(a_npy, axis=1, ret_typ="value", k=3, is_ascend=True)
gt_ind = gt_topk(a_npy, axis=1, ret_typ="indices", k=3, is_ascend=True)
assert_almost_equal(nd_ret_topk_val, gt_val)
assert_almost_equal(nd_ret_topk_ind, gt_ind)
# test for sort
nd_ret_sort = mx.nd.sort(a_nd, axis=1, is_ascend=True).asnumpy()
gt = gt_topk(a_npy, axis=1, ret_typ="value", k=5, is_ascend=True)
assert_almost_equal(nd_ret_sort, gt)
nd_ret_sort = mx.nd.sort(a_nd, axis=None, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=None, ret_typ="value", k=5*5*5*5, is_ascend=False)
assert_almost_equal(nd_ret_sort, gt)
# test for argsort
nd_ret_argsort = mx.nd.argsort(a_nd, axis=3, is_ascend=True).asnumpy()
gt = gt_topk(a_npy, axis=3, ret_typ="indices", k=5, is_ascend=True)
assert_almost_equal(nd_ret_argsort, gt)
nd_ret_argsort = mx.nd.argsort(a_nd, axis=None, is_ascend=False).asnumpy()
gt = gt_topk(a_npy, axis=None, ret_typ="indices", k=5*5*5*5, is_ascend=False)
assert_almost_equal(nd_ret_argsort, gt)
def test_ndarray_equal():
x = mx.nd.zeros((2, 3))
y = mx.nd.ones((2, 3))
z = x == y
assert (z.asnumpy() == np.zeros((2, 3))).all()
z = 0 == x
assert (z.asnumpy() == np.ones((2, 3))).all()
def test_ndarray_not_equal():
x = mx.nd.zeros((2, 3))
y = mx.nd.ones((2, 3))
z = x != y
assert (z.asnumpy() == np.ones((2, 3))).all()
z = 0 != x
assert (z.asnumpy() == np.zeros((2, 3))).all()
def test_ndarray_greater():
x = mx.nd.zeros((2, 3))
y = mx.nd.ones((2, 3))
z = x > y
assert (z.asnumpy() == np.zeros((2, 3))).all()
z = y > 0
assert (z.asnumpy() == np.ones((2, 3))).all()
z = 0 > y
assert (z.asnumpy() == np.zeros((2, 3))).all()
def test_ndarray_greater_equal():
x = mx.nd.zeros((2, 3))
y = mx.nd.ones((2, 3))
z = x >= y
assert (z.asnumpy() == np.zeros((2, 3))).all()
z = y >= 0
assert (z.asnumpy() == np.ones((2, 3))).all()
z = 0 >= y
assert (z.asnumpy() == np.zeros((2, 3))).all()
z = y >= 1
assert (z.asnumpy() == np.ones((2, 3))).all()
def test_ndarray_lesser():
x = mx.nd.zeros((2, 3))
y = mx.nd.ones((2, 3))
z = y < x
assert (z.asnumpy() == np.zeros((2, 3))).all()
z = 0 < y
assert (z.asnumpy() == np.ones((2, 3))).all()
z = y < 0
assert (z.asnumpy() == np.zeros((2, 3))).all()
def test_ndarray_lesser_equal():
x = mx.nd.zeros((2, 3))
y = mx.nd.ones((2, 3))
z = y <= x
assert (z.asnumpy() == np.zeros((2, 3))).all()
z = 0 <= y
assert (z.asnumpy() == np.ones((2, 3))).all()
z = y <= 0
assert (z.asnumpy() == np.zeros((2, 3))).all()
z = 1 <= y
assert (z.asnumpy() == np.ones((2, 3))).all()
def test_take():
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
data_real = np.random.normal(size=data_shape).astype('float32')
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
idx_real = np.random.randint(low=0, high=data_shape[0], size=idx_shape)
data_real_mx = mx.nd.array(data_real)
idx_real_mx = mx.nd.array(idx_real)
result = mx.nd.take(data_real_mx, idx_real_mx)
assert_almost_equal(result.asnumpy(), data_real[idx_real])
def test_iter():
x = mx.nd.array([1, 2, 3])
y = []
for a in x:
y.append(a)
for i in range(x.size):
assert same(y[i].asnumpy(), x[i].asnumpy())
def test_cached():
sym = mx.sym.Convolution(kernel=(3, 3), num_filter=10) + 2
op = mx.nd.CachedOp(sym)
data = mx.nd.ones((3, 4, 10, 10))
weight = mx.nd.ones((10, 4, 3, 3))
bias = mx.nd.ones((10,))
o1 = op(data, weight, bias)
bias[:] = 2
o2 = op(data, weight, bias)
assert_almost_equal(o2.asnumpy(), o1.asnumpy()+1)
o2[:] = 0
op(data, weight, bias, out=o2)
assert_almost_equal(o2.asnumpy(), o1.asnumpy()+1)
def test_output():
shape = (2,2)
ones = mx.nd.ones(shape)
zeros = mx.nd.zeros(shape)
out = mx.nd.zeros(shape)
mx.nd.ones(shape, out=out)
assert_almost_equal(out.asnumpy(), ones.asnumpy())
mx.nd.zeros(shape, out=out)
assert_almost_equal(out.asnumpy(), zeros.asnumpy())
mx.nd.full(shape, 2, out=out)
assert_almost_equal(out.asnumpy(), ones.asnumpy() * 2)
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
"""The tests for the Tasmota fan platform."""
import copy
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_tele_state,
get_topic_tele_will,
)
import pytest
from voluptuous import MultipleInvalid
from homeassistant.components import fan
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_OFF, STATE_ON, Platform
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message
from tests.components.fan import common
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["if"] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("fan.tasmota")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("fan.tasmota")
assert state.state == STATE_OFF
assert state.attributes["percentage"] is None
assert state.attributes["supported_features"] == fan.SUPPORT_SET_SPEED
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"FanSpeed":1}')
state = hass.states.get("fan.tasmota")
assert state.state == STATE_ON
assert state.attributes["percentage"] == 33
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"FanSpeed":2}')
state = hass.states.get("fan.tasmota")
assert state.state == STATE_ON
assert state.attributes["percentage"] == 66
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"FanSpeed":3}')
state = hass.states.get("fan.tasmota")
assert state.state == STATE_ON
assert state.attributes["percentage"] == 100
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/STATE", '{"FanSpeed":0}')
state = hass.states.get("fan.tasmota")
assert state.state == STATE_OFF
assert state.attributes["percentage"] == 0
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"FanSpeed":1}')
state = hass.states.get("fan.tasmota")
assert state.state == STATE_ON
assert state.attributes["percentage"] == 33
async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/RESULT", '{"FanSpeed":0}')
state = hass.states.get("fan.tasmota")
assert state.state == STATE_OFF
assert state.attributes["percentage"] == 0
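    # The assertions above cover the full Tasmota FanSpeed range on both the
    # tele/STATE and stat/RESULT topics: 0 -> off/0%, 1 -> 33%, 2 -> 66%,
    # 3 -> 100%.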
async def test_sending_mqtt_commands(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["if"] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("fan.tasmota")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Turn the fan on and verify MQTT message is sent
await common.async_turn_on(hass, "fan.tasmota")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/FanSpeed", "2", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Tasmota is not optimistic, the state should still be off
state = hass.states.get("fan.tasmota")
assert state.state == STATE_OFF
# Turn the fan off and verify MQTT message is sent
await common.async_turn_off(hass, "fan.tasmota")
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/FanSpeed", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set speed percentage and verify MQTT message is sent
await common.async_set_percentage(hass, "fan.tasmota", 0)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/FanSpeed", "0", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set speed percentage and verify MQTT message is sent
await common.async_set_percentage(hass, "fan.tasmota", 15)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/FanSpeed", "1", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set speed percentage and verify MQTT message is sent
await common.async_set_percentage(hass, "fan.tasmota", 50)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/FanSpeed", "2", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set speed percentage and verify MQTT message is sent
await common.async_set_percentage(hass, "fan.tasmota", 90)
mqtt_mock.async_publish.assert_called_once_with(
"tasmota_49A3BC/cmnd/FanSpeed", "3", 0, False
)
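    # Summary of the publishes above: turn_on defaults to FanSpeed 2, turn_off
    # sends FanSpeed 0, and percentages are bucketed to the nearest speed
    # (0% -> 0, 15% -> 1, 50% -> 2, 90% -> 3).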
async def test_invalid_fan_speed_percentage(hass, mqtt_mock, setup_tasmota):
"""Test the sending MQTT commands."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["if"] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("fan.tasmota")
assert state.state == STATE_OFF
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Set an unsupported speed and verify MQTT message is not sent
with pytest.raises(MultipleInvalid) as excinfo:
await common.async_set_percentage(hass, "fan.tasmota", 101)
assert "value must be at most 100" in str(excinfo.value)
mqtt_mock.async_publish.assert_not_called()
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["if"] = 1
await help_test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, Platform.FAN, config
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["if"] = 1
await help_test_availability(hass, mqtt_mock, Platform.FAN, config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["if"] = 1
await help_test_availability_discovery_update(hass, mqtt_mock, Platform.FAN, config)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["if"] = 1
poll_topic = "tasmota_49A3BC/cmnd/STATE"
await help_test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, Platform.FAN, config, poll_topic, ""
)
async def test_discovery_removal_fan(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered fan."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["dn"] = "Test"
config1["if"] = 1
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["dn"] = "Test"
config2["if"] = 0
await help_test_discovery_removal(
hass, mqtt_mock, caplog, Platform.FAN, config1, config2
)
async def test_discovery_update_unchanged_fan(hass, mqtt_mock, caplog, setup_tasmota):
"""Test update of discovered fan."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["if"] = 1
with patch(
"homeassistant.components.tasmota.fan.TasmotaFan.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, Platform.FAN, config, discovery_update
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["if"] = 1
unique_id = f"{DEFAULT_CONFIG['mac']}_fan_fan_ifan"
await help_test_discovery_device_remove(
hass, mqtt_mock, Platform.FAN, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["if"] = 1
topics = [
get_topic_stat_result(config),
get_topic_tele_state(config),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, Platform.FAN, config, topics
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["dn"] = "Test"
config["if"] = 1
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, Platform.FAN, config
)
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Client configuration management.
This module holds the code for detecting and configuring the current client and
its output directories.
It is responsible for writing out the client specific plugins that tell the
rest of the cr tool what the client is capable of.
"""
import os
import pprint
import sys
import cr
import cr.auto.build
import cr.auto.client
# The config version currently supported.
VERSION = 0.5
# The default directory name to store configs inside.
CONFIG_PATH = '.cr'
# The filename of the config file inside a config directory.
CONFIG_FILE = 'config.py'
# The directory inside the config directory which contains the client config.
CLIENT_CONFIG_DIR = 'client'
# The directory inside the config directory which contains build configs.
BUILD_CONFIG_DIR = 'builds'
# The format string for the header of a config file.
CONFIG_FILE_PREFIX = """
# This is an autogenerated file
# it *will* be overwritten, and changes may be lost
# The system will autoload any other python file in the same folder.
import cr
OVERRIDES = cr.Config.From("""
# The format string for each value in a config file.
CONFIG_VAR_LINE = '\n {0} = {1!r},'
# The format string for the tail of a config file.
CONFIG_FILE_SUFFIX = '\n)\n'
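# Stitched together by _WriteConfig() below, a generated config file looks like
# this (the value shown is illustrative only):
#
#   # This is an autogenerated file
#   # it *will* be overwritten, and changes may be lost
#   # The system will autoload any other python file in the same folder.
#   import cr
#   OVERRIDES = cr.Config.From(
#     CR_OUT_FULL = 'out_linux/Debug',
#   )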
# The name of the gclient config file
GCLIENT_FILENAME = '.gclient'
# The default config values installed by this module.
DEFAULT = cr.Config.From(
CR_ROOT_PATH=os.path.join('{GOOGLE_CODE}'),
CR_CLIENT_NAME='chromium',
CR_CLIENT_PATH=os.path.join('{CR_ROOT_PATH}', '{CR_CLIENT_NAME}'),
CR_SRC=os.path.join('{CR_CLIENT_PATH}', 'src'),
CR_BUILD_DIR=os.path.join('{CR_SRC}', '{CR_OUT_FULL}'),
)
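# With the brace placeholders substituted (GOOGLE_CODE and CR_OUT_FULL are defined
# elsewhere in cr), these defaults expand along the lines of
#   CR_CLIENT_PATH = <GOOGLE_CODE>/chromium
#   CR_SRC         = <GOOGLE_CODE>/chromium/src
#   CR_BUILD_DIR   = <GOOGLE_CODE>/chromium/src/<CR_OUT_FULL>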
def DetectClient():
# Attempt to detect the current client from the cwd
# See if we can detect the source tree root
client_path = os.getcwd()
while (client_path and
not os.path.exists(os.path.join(client_path, GCLIENT_FILENAME))):
old = client_path
client_path = os.path.dirname(client_path)
if client_path == old:
client_path = None
if client_path is not None:
dirname, basename = os.path.split(client_path)
if basename == 'src':
# we have the src path, base is one level up
client_path = dirname
if client_path is not None:
cr.context.derived['CR_CLIENT_PATH'] = client_path
  # now read the value back from the context; it may differ from what was set
client_path = cr.context.Get('CR_CLIENT_PATH')
if client_path is not None:
cr.context.derived['CR_CLIENT_NAME'] = os.path.basename(client_path)
def _GetConfigDir(use_build_dir):
base_path = os.path.join(cr.context.Get('CR_CLIENT_PATH'), CONFIG_PATH)
if use_build_dir:
path_suffix = os.path.join(BUILD_CONFIG_DIR, cr.context.Get('CR_OUT_FULL'))
else:
path_suffix = CLIENT_CONFIG_DIR
return os.path.realpath(os.path.join(base_path, path_suffix))
def _GetDeprecatedConfigDir(use_build_dir):
if use_build_dir:
path = cr.context.Get('CR_BUILD_DIR')
else:
path = cr.context.Get('CR_CLIENT_PATH')
return os.path.realpath(os.path.join(path, CONFIG_PATH))
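# Resulting layout: current configs live at <CR_CLIENT_PATH>/.cr/client/config.py
# and <CR_CLIENT_PATH>/.cr/builds/<CR_OUT_FULL>/config.py, while the deprecated
# locations were <CR_CLIENT_PATH>/.cr/config.py and <CR_BUILD_DIR>/.cr/config.py;
# _MigrateAndGetConfigDir() below moves old files to the new locations.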
def _GetConfigFile(config_dir):
return os.path.join(config_dir, CONFIG_FILE)
def _MigrateAndGetConfigDir(use_build_dir):
new_config_dir = _GetConfigDir(use_build_dir)
new_config_file = _GetConfigFile(new_config_dir)
new_config_exists = os.path.exists(new_config_file)
old_config_dir = _GetDeprecatedConfigDir(use_build_dir)
old_config_file = _GetConfigFile(old_config_dir)
old_config_exists = os.path.exists(old_config_file)
if old_config_exists:
if new_config_exists:
print 'Warning: Old config file %s superseded by new config file %s' % (
old_config_file, new_config_file)
else:
print 'Migrating config file from %s to %s...' % (
old_config_file, new_config_file)
if not cr.context.dry_run:
# Make the new config directory (if necessary).
try:
os.makedirs(new_config_dir)
except OSError:
if not os.path.isdir(new_config_dir):
raise
# Move the config file.
os.rename(old_config_file, new_config_file)
# Delete the old config directory (only applies to the build config).
if use_build_dir:
try:
os.removedirs(old_config_dir)
except OSError:
print 'Warning: Old config directory %s could not be removed' % (
old_config_dir)
return new_config_dir
def _WriteConfig(writer, data):
writer.write(CONFIG_FILE_PREFIX)
for key, value in data.items():
writer.write(CONFIG_VAR_LINE.format(key, value))
writer.write(CONFIG_FILE_SUFFIX)
def AddArguments(parser):
parser.add_argument(
'-o', '--out', dest='_out', metavar='name',
default=None,
help='The name of the out directory to use. Overrides CR_OUT.'
)
def GetOutArgument():
return getattr(cr.context.args, '_out', None)
def ApplyOutArgument():
# TODO(iancottrell): be flexible, allow out to do approximate match...
out = GetOutArgument()
if out:
cr.context.derived.Set(CR_OUT_FULL=out)
def ReadGClient():
"""Loads the .gclient configuration for the current client.
This will load from CR_CLIENT_PATH.
Returns:
The dict of values set in the .gclient file.
"""
# Now attempt to load and parse the .gclient file
result = {}
try:
gclient_file = cr.context.Substitute(
os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME))
with open(gclient_file, 'r') as spec_file:
# matching the behaviour of gclient, so pylint: disable=exec-used
exec(spec_file.read(), {}, result)
except IOError:
# no .gclient file, skip it
pass
return result
def WriteGClient():
"""Writes the .gclient configuration for the current client.
This will write to CR_CLIENT_PATH.
"""
gclient_file = cr.context.Substitute(
os.path.join('{CR_CLIENT_PATH}', GCLIENT_FILENAME))
spec = '\n'.join('%s = %s' % (key, pprint.pformat(value))
for key,value in cr.context.gclient.items())
if cr.context.dry_run:
print 'Write the following spec to', gclient_file
print spec
else:
with open(gclient_file, 'w') as spec_file:
spec_file.write(spec)
def LoadConfig():
"""Loads the client configuration for the given context.
This will load configuration if present from CR_CLIENT_PATH and then
CR_BUILD_DIR.
Returns:
True if configuration was fully loaded.
"""
# Load the root config, will help set default build dir
client_config_dir = _MigrateAndGetConfigDir(use_build_dir=False)
cr.auto.client.__path__.append(client_config_dir)
cr.loader.Scan()
# Now load build dir config
build_config_dir = _MigrateAndGetConfigDir(use_build_dir=True)
cr.auto.build.__path__.append(build_config_dir)
cr.loader.Scan()
if not hasattr(cr.auto.build, 'config'):
return False
cr.context.derived.Set(CR_BUILD_CONFIG_PATH=_GetConfigFile(build_config_dir))
return True
def WriteConfig(use_build_dir, data):
"""Writes a configuration out to a file.
This writes all the key value pairs in data out to a config file.
Args:
use_build_dir: True if the config file should be written to the build
directory. Otherwise it will be written to the root config directory.
data: The key value pairs to write.
"""
config_dir = _GetConfigDir(use_build_dir)
filename = _GetConfigFile(config_dir)
if cr.context.dry_run:
print 'makedirs', config_dir
print 'Write config to', filename
_WriteConfig(sys.stdout, data)
else:
try:
os.makedirs(config_dir)
except OSError:
if not os.path.isdir(config_dir):
raise
with open(filename, 'w') as writer:
_WriteConfig(writer, data)
def PrintInfo():
print 'Selected output directory is', cr.context.Find('CR_BUILD_DIR')
print 'Build config file is', _GetConfigFile(_GetConfigDir(
use_build_dir=True))
try:
for name in cr.auto.build.config.OVERRIDES.exported.keys():
print ' ', name, '=', cr.context.Get(name)
except AttributeError:
pass
class InitHook(cr.Plugin, cr.Plugin.Type):
"""Base class for output directory initialization hooks.
  Implementations that migrate old versions to new ones live in the
cr.fixups package.
"""
def Run(self, old_version, config):
"""Run the initialization hook.
This is invoked once per init invocation.
Args:
old_version: The old version,
0.0 if the old version was bad or missing,
        None if building a new output directory.
config: The mutable config that will be written.
"""
raise NotImplementedError('Must be overridden.')
|
|
#!/usr/bin/python
import getopt
import sys
from Bio import SeqIO
import time
import os
import shutil
import pandas
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
__author__ = "Andriy Sheremet"
#Helper functions definitions
def genome_shredder(input_dct, shear_val):
shredded = {}
for key, value in input_dct.items():
#print input_dct[i].seq
#print i
dic_name = key
rec_name = value.name
for j in range(0, len(str(value.seq)), int(shear_val)):
# print j
record = str(value.seq)[0+j:int(shear_val)+j]
shredded[dic_name+"_"+str(j)] = SeqRecord(Seq(record),rec_name+"_"+str(j),'','')
#record = SeqRecord(input_ref_records[i].seq[0+i:int(shear_val)+i],input_ref_records[i].name+"_%i"%i,"","")
return shredded
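# For example, shear_val=1000 splits a 2500 bp record stored under key "contigA"
# into three records keyed "contigA_0", "contigA_1000" and "contigA_2000" of
# lengths 1000, 1000 and 500.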
def parse_contigs_ind(f_name):
"""
    Returns a sequence index for the input file(s).
    Remember to close the index object after use.
"""
    record_dict = SeqIO.index(f_name, "fasta")
return record_dict
# returning specific sequences and the overall list
def retrive_sequence(contig_lst, rec_dic):
"""
Returns list of sequence elements from dictionary/index of SeqIO objects specific to the contig_lst parameter
"""
contig_seqs = list()
#record_dict = rec_dic
#handle.close()
for contig in contig_lst:
contig_seqs.append(str(rec_dic[contig].seq))#fixing BiopythonDeprecationWarning
return contig_seqs
def filter_seq_dict(key_lst, rec_dic):
"""
Returns filtered dictionary element from rec_dic according to sequence names passed in key_lst
"""
return { key: rec_dic[key] for key in key_lst }
def unique_scaffold_topEval(dataframe):
    # returns a pandas DataFrame with one top-scoring (lowest e-value) row per scaffold
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[11]<scaffolds[row[1]][11]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def unique_scaffold_topBits(dataframe):
    # returns a pandas DataFrame with one top-scoring (highest bit score) row per scaffold
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[12]>scaffolds[row[1]][12]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def close_ind_lst(ind_lst):
"""
Closes index objects supplied in input parameter list
"""
for index in ind_lst:
index.close()
def usage():
print "\nThis is the usage function\n"
# print 'Usage: '+sys.argv[0]+' -i <input_file> [-o <output>] [-l <minimum length>]'
# print 'Example: '+sys.argv[0]+' -i input.fasta -o output.fasta -l 100'
def main(argv):
#default parameters
mg_lst = []
ref_lst = []
e_val = 1e-5
alen = 50.0
alen_percent = True
alen_bp = False
iden = 95.0
name= "output"
fmt_lst = ["fasta"]
supported_formats =["fasta", "csv"]
iterations = 1
alen_increment = 5.0
iden_increment = 0.0
blast_db_Dir = ""
results_Dir = ""
input_files_Dir = ""
ref_out_0 = ""
blasted_lst = []
continue_from_previous = False #poorly supported, just keeping the directories
skip_blasting = False
debugging = False
sheared = False
shear_val = None
try:
opts, args = getopt.getopt(argv, "r:m:n:e:a:i:s:f:h", ["reference=", "metagenome=", "name=", "e_value=", "alignment_length=", "identity=","shear=","format=", "iterations=", "alen_increment=", "iden_increment=","continue_from_previous","skip_blasting","debugging", "help"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
# elif opt in ("--recover_after_failure"):
# recover_after_failure = True
# print "Recover after failure:", recover_after_failure
elif opt in ("--continue_from_previous"):
continue_from_previous = True
if debugging:
print "Continue after failure:", continue_from_previous
elif opt in ("--debugging"):
debugging = True
if debugging:
print "Debugging messages:", debugging
elif opt in ("-r", "--reference"):
if arg:
ref_lst=arg.split(',')
#infiles = arg
if debugging:
print "Reference file(s)", ref_lst
elif opt in ("-m", "--metagenome"):
if arg:
mg_lst=arg.split(',')
#infiles = arg
if debugging:
print "Metagenome file(s)", mg_lst
elif opt in ("-f", "--format"):
if arg:
fmt_lst=arg.split(',')
#infiles = arg
if debugging:
print "Output format(s)", fmt_lst
elif opt in ("-n", "--name"):
if arg.strip():
name = arg
if debugging:
print "Project name", name
elif opt in ("-e", "--e_value"):
try:
e_val = float(arg)
except:
print "\nERROR: Please enter numerical value as -e parameter (default: 1e-5)"
usage()
sys.exit(1)
if debugging:
print "E value", e_val
elif opt in ("-a", "--alignment_length"):
if arg.strip()[-1]=="%":
alen_bp = False
alen_percent = True
else:
alen_bp = True
alen_percent = False
try:
alen = float(arg.split("%")[0])
except:
print "\nERROR: Please enter a numerical value as -a parameter (default: 50.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", alen
elif opt in ("-i", "--identity"):
try:
iden = float(arg)
except:
print "\nERROR: Please enter a numerical value as -i parameter (default: 95.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("-s", "--shear"):
sheared = True
try:
shear_val = int(arg)
except:
print "\nERROR: Please enter an integer value as -s parameter"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("--iterations"):
try:
iterations = int(arg)
except:
print "\nWARNING: Please enter integer value as --iterations parameter (using default: 1)"
if debugging:
print "Iterations: ", iterations
elif opt in ("--alen_increment"):
try:
alen_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --alen_increment parameter (using default: )", alen_increment
if debugging:
print "Alignment length increment: ", alen_increment
elif opt in ("--iden_increment"):
try:
iden_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --iden_increment parameter (using default: )", iden_increment
if debugging:
print "Alignment length increment: ", iden_increment
elif opt in ("--skip_blasting"):
skip_blasting = True
if debugging:
print "Blasting step omitted; Using previous blast output."
for ref_file in [x for x in ref_lst if x]:
try:
#
with open(ref_file, "rU") as hand_ref:
pass
except:
print "\nERROR: Reference File(s) ["+ref_file+"] doesn't exist"
usage()
sys.exit(1)
for mg_file in [x for x in mg_lst if x]:
try:
#
with open(mg_file, "rU") as hand_mg:
pass
except:
print "\nERROR: Metagenome File(s) ["+mg_file+"] doesn't exist"
usage()
sys.exit(1)
for fmt in [x for x in fmt_lst if x]:
if fmt not in supported_formats:
print "\nWARNING: Output format [",fmt,"] is not supported"
print "\tUse -h(--help) option for the list of supported formats"
fmt_lst=["fasta"]
print "\tUsing default output format: ", fmt_lst[0]
project_dir = name
if not continue_from_previous:
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
try:
os.mkdir(project_dir)
except OSError:
print "ERROR: Cannot create project directory: " + name
raise
print "\n\t Initial Parameters:"
print "\nProject Name: ", name,'\n'
print "Project Directory: ", os.path.abspath(name),'\n'
print "Reference File(s): ", ref_lst,'\n'
print "Metagenome File(s): ", mg_lst,'\n'
print "E Value: ", e_val, "\n"
if alen_percent:
print "Alignment Length: "+str(alen)+'%\n'
if alen_bp:
print "Alignment Length: "+str(alen)+'bp\n'
print "Sequence Identity: "+str(iden)+'%\n'
print "Output Format(s):", fmt_lst,'\n'
if iterations > 1:
print "Iterations: ", iterations, '\n'
print "Alignment Length Increment: ", alen_increment, '\n'
print "Sequence identity Increment: ", iden_increment, '\n'
#Initializing directories
blast_db_Dir = name+"/blast_db"
if not continue_from_previous:
if os.path.exists(blast_db_Dir):
shutil.rmtree(blast_db_Dir)
try:
os.mkdir(blast_db_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + blast_db_Dir
raise
results_Dir = name+"/results"
if not continue_from_previous:
if os.path.exists(results_Dir):
shutil.rmtree(results_Dir)
try:
os.mkdir(results_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + results_Dir
raise
input_files_Dir = name+"/input_files"
if not continue_from_previous:
if os.path.exists(input_files_Dir):
shutil.rmtree(input_files_Dir)
try:
os.mkdir(input_files_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + input_files_Dir
raise
# Writing raw reference files into a specific input filename
input_ref_records = {}
for reference in ref_lst:
ref_records_ind = parse_contigs_ind(reference)
#ref_records = dict(ref_records_ind)
input_ref_records.update(ref_records_ind)
ref_records_ind.close()
#input_ref_records.update(ref_records)
ref_out_0 = input_files_Dir+"/reference0.fna"
if (sheared & bool(shear_val)):
with open(ref_out_0, "w") as handle:
SeqIO.write(genome_shredder(input_ref_records, shear_val).values(), handle, "fasta")
#NO NEED TO CLOSE with statement will automatically close the file
else:
with open(ref_out_0, "w") as handle:
SeqIO.write(input_ref_records.values(), handle, "fasta")
# Making BLAST databases
#output fname from before used as input for blast database creation
input_ref_0 = ref_out_0
title_db = name+"_db"#add iteration functionality
outfile_db = blast_db_Dir+"/iteration"+str(iterations)+"/"+name+"_db"#change into for loop
os.system("makeblastdb -in "+input_ref_0+" -dbtype nucl -title "+title_db+" -out "+outfile_db+" -parse_seqids")
# BLASTing query contigs
if not skip_blasting:
print "\nBLASTing query file(s):"
for i in range(len(mg_lst)):
database = outfile_db # adjust for iterations
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
start = time.time()
os_string = 'blastn -db '+database+' -query \"'+mg_lst[i]+'\" -out '+blasted_lst[i]+" -evalue "+str(e_val)+" -outfmt 6 -num_threads 8"
#print os_string
os.system(os_string)
print "\t"+mg_lst[i]+"; Time elapsed: "+str(time.time()-start)+" seconds."
else:
for i in range(len(mg_lst)):
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
# Parsing BLAST outputs
blast_cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
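# These names follow the 12 default columns of BLAST tabular output (-outfmt 6):
# query id, subject id, percent identity, alignment length, mismatches, gap opens,
# query start/end, subject start/end, e-value and bit score.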
recruited_mg=[]
for i in range(len(mg_lst)):
df = pandas.read_csv(blasted_lst[i], sep="\t", header=None)
df.columns=blast_cols
recruited_mg.append(df)
# print len(recruited_mg[0])
# print len(recruited_mg[1])
#creating all_records entry
#! Remember to close index objects after they are no longer needed
#! Use helper function close_ind_lst()
all_records = []
all_input_recs = parse_contigs_ind(ref_out_0)
# _ = 0
# for key, value in all_input_recs.items():
# _ +=1
# if _ < 20:
# print key, len(value)
print "\nIndexing metagenome file(s):"
for i in range(len(mg_lst)):
start = time.time()
all_records.append(parse_contigs_ind(mg_lst[i]))
print "\t"+mg_lst[i]+" Indexed in : "+str(time.time()-start)+" seconds."
# Transforming data
for i in range(len(mg_lst)):
#cutoff_contigs[dataframe]=evalue_filter(cutoff_contigs[dataframe])
recruited_mg[i]=unique_scaffold_topBits(recruited_mg[i])
contig_list = recruited_mg[i]['quid'].tolist()
recruited_mg[i]['Seq_nt']=retrive_sequence(contig_list, all_records[i])
recruited_mg[i]['Seq_size']=recruited_mg[i]['Seq_nt'].apply(lambda x: len(x))
recruited_mg[i]['Ref_size']=recruited_mg[i]['suid'].apply(lambda x: len(all_input_recs[str(x)]))
#recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/min(recruited_mg[i]['Seq_size'].apply(lambda y: y),recruited_mg[i]['Ref_size'].apply(lambda z: z))
#df.loc[:, ['B0', 'B1', 'B2']].min(axis=1)
recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/recruited_mg[i].loc[:,["Seq_size", "Ref_size"]].min(axis=1)
recruited_mg[i]['Metric']=recruited_mg[i]['Coverage']*recruited_mg[i]['iden']/100.0
recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Seq_size','Seq_nt']]
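# Worked example with illustrative numbers: a hit with alen=400, iden=97,
# Seq_size=500 and Ref_size=2000000 gives
# Coverage = 100.0*400/min(500, 2000000) = 80.0 and Metric = 80.0*97/100.0 = 77.6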
# Here would go statistics functions and producing plots
#
#
#
#
#
# Quality filtering before outputting
if alen_percent:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['Coverage']>=alen)&(recruited_mg[i]['eval']<=e_val)]
if alen_bp:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['alen']>=alen)&(recruited_mg[i]['eval']<=e_val)]
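# i.e. hits are kept only when identity, alignment length (as coverage of the shorter
# of query and reference when alen is given in percent, or as raw aligned bp otherwise)
# and e-value all pass the user-supplied thresholds.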
# print len(recruited_mg[0])
# print len(recruited_mg[1])
# Batch export to outfmt (csv and/or multiple FASTA)
alen_str = ""
iden_str = "_iden_"+str(iden)+"%"
if alen_percent:
alen_str = "_alen_"+str(alen)+"%"
if alen_bp:
alen_str = "_alen_"+str(alen)+"bp"
if iterations > 1:
prefix=results_Dir+"/"+name+"_iter_e_"+str(e_val)+iden_str+alen_str+"_recruited_mg_"
else:
prefix=results_Dir+"/"+name+"_e_"+str(e_val)+iden_str+alen_str+"_recruited_mg_"
print "\nWriting files:"
for i in range(len(mg_lst)):
records= []
# try:
# os.remove(outfile1)
# except OSError:
# pass
if "csv" in fmt_lst:
outfile1 = prefix+str(i)+".csv"
recruited_mg[i].to_csv(outfile1, sep='\t')
print str(len(recruited_mg[i]))+" sequences written to "+outfile1
if "fasta" in fmt_lst:
ids = recruited_mg[i]['quid'].tolist()
#if len(ids)==len(sequences):
for j in range(len(ids)):
records.append(all_records[i][ids[j]])
outfile2 = prefix+str(i)+".fasta"
with open(outfile2, "w") as output_handle:
SeqIO.write(records, output_handle, "fasta")
print str(len(ids))+" sequences written to "+outfile2
close_ind_lst(all_records)
close_ind_lst([all_input_recs])
#all_records[i].close()# keep open if multiple iterations
#recruited_mg_1 = pandas.read_csv(out_name1 ,sep="\t", header=None)
#recruited_mg_1.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg_2 = pandas.read_csv(out_name2 ,sep="\t", header=None)
#recruited_mg_2.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg = [recruited_mg_1, recruited_mg_2]
# blast_db_Dir = ""
# results_Dir = ""
# input_files_Dir = ""
# parsed = SeqIO.parse(handle, "fasta")
#
# records = list()
#
#
# total = 0
# processed = 0
# for record in parsed:
# total += 1
# #print(record.id), len(record.seq)
# if len(record.seq) >= length:
# processed += 1
# records.append(record)
# handle.close()
#
# print "%d sequences found"%(total)
#
# try:
# output_handle = open(outfile, "w")
# SeqIO.write(records, output_handle, "fasta")
# output_handle.close()
# print "%d sequences written"%(processed)
# except:
# print "ERROR: Illegal output filename"
# sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
|
|
from datetime import date
from pywps.Process import WPSProcess
from flyingpigeon.datafetch import _PRESSUREDATA_
from flyingpigeon.log import init_process_logger
import logging
logger = logging.getLogger(__name__)
class AnalogsProcess(WPSProcess):
def __init__(self):
# definition of this process
WPSProcess.__init__(
self,
identifier="analogs_detection",
title="Analogues -- Detection",
version="0.9",
metadata=[
{"title": "LSCE",
"href": "http://www.lsce.ipsl.fr/en/index.php"},
{"title": "Doc",
"href": "http://flyingpigeon.readthedocs.io/en/latest/\
descriptions/analogues.html#analogues-of-circulation"}
],
abstract="Search for days with analogue pressure pattern for reanalyses data sets",
statusSupported=True,
storeSupported=True
)
self.experiment = self.addLiteralInput(
identifier="experiment",
title="Data experiment",
abstract="Choose the experiment",
default="NCEP_slp",
type=type(''),
minOccurs=1,
maxOccurs=1,
allowedValues=_PRESSUREDATA_
)
self.BBox = self.addBBoxInput(
identifier="BBox",
title="Bounding Box",
abstract="coordinates to define the region to be analysed",
minOccurs=1,
maxOccurs=1,
crss=['EPSG:4326']
)
self.dateSt = self.addLiteralInput(
identifier="dateSt",
title="Start date of analysis period",
abstract="This is a Date: 2013-07-15",
default="2013-07-15",
type=type(date(2013, 7, 15)),
minOccurs=1,
maxOccurs=1,
)
self.dateEn = self.addLiteralInput(
identifier="dateEn",
title="End date of analysis period",
abstract="This is a Date: 2013-12-31",
default="2014-12-31",
type=type(date(2014, 12, 31)),
minOccurs=1,
maxOccurs=1,
)
self.refSt = self.addLiteralInput(
identifier="refSt",
title="Start reference period",
abstract="Start YEAR of reference period",
default="2013-01-01",
type=type(date(1955, 01, 01)),
minOccurs=1,
maxOccurs=1,
)
self.refEn = self.addLiteralInput(
identifier="refEn",
title="End reference period",
abstract="End YEAR of reference period",
default="2014-12-31",
type=type(date(1957, 12, 31)),
minOccurs=1,
maxOccurs=1,
)
self.normalize = self.addLiteralInput(
identifier="normalize",
title="normalization",
abstract="Normalize by subtraction of annual cycle",
default='base',
type=type(''),
minOccurs=1,
maxOccurs=1,
allowedValues=['None', 'base', 'sim', 'own']
)
self.seasonwin = self.addLiteralInput(
identifier="seasonwin",
title="Seasonal window",
abstract="Number of days befor and after the date to be analysed",
default=30,
type=type(1),
minOccurs=0,
maxOccurs=1,
)
self.nanalog = self.addLiteralInput(
identifier="nanalog",
title="Nr of analogues",
abstract="Number of analogues to be detected",
default=20,
type=type(1),
minOccurs=0,
maxOccurs=1,
)
self.distance = self.addLiteralInput(
identifier="dist",
title="Distance",
abstract="Distance function to define analogues",
default='euclidean',
type=type(''),
minOccurs=1,
maxOccurs=1,
allowedValues=['euclidean', 'mahalanobis', 'cosine', 'of']
)
self.outformat = self.addLiteralInput(
identifier="outformat",
title="output file format",
abstract="Choose the format for the analogue output file",
default="ascii",
type=type(''),
minOccurs=1,
maxOccurs=1,
allowedValues=['ascii', 'netCDF4']
)
self.timewin = self.addLiteralInput(
identifier="timewin",
title="Time window",
abstract="Number of days following the analogue day the distance will be averaged",
default=1,
type=type(1),
minOccurs=0,
maxOccurs=1,
)
######################
# define the outputs
######################
self.config = self.addLiteralOutput(
identifier="config",
title="Config File",
abstract="Config file used for the Fortran process",
default=None,
type=type(''),
# formats=[{"mimeType":"text/plain"}],
# asReference=True,
)
self.analogs = self.addComplexOutput(
identifier="analogs",
title="Analogues File",
abstract="mulit-column text file",
formats=[{"mimeType": "text/plain"}],
asReference=True,
)
self.output_netcdf = self.addComplexOutput(
title="prepared netCDF",
abstract="NetCDF file with subset and normaized values",
formats=[{"mimeType": "application/x-netcdf"}],
asReference=True,
identifier="ncout",
)
self.output_html = self.addComplexOutput(
identifier="output_html",
title="Analogues Viewer html page",
abstract="Interactive visualization of calculated analogues",
formats=[{"mimeType": "text/html"}],
asReference=True,
)
self.output_log = self.addComplexOutput(
identifier="output_log",
title="Logging information",
abstract="Collected logs during process run.",
formats=[{"mimeType": "text/plain"}],
asReference=True,
)
def execute(self):
init_process_logger('log.txt')
self.output_log.setValue('log.txt')
import time # performance test
process_start_time = time.time() # measure process execution time ...
from os import path
from tempfile import mkstemp
from datetime import datetime as dt
from flyingpigeon.ocgis_module import call
from flyingpigeon import analogs
from flyingpigeon.datafetch import reanalyses
self.status.set('execution started at : %s ' % dt.now(), 5)
start_time = time.time() # measure init ...
#######################
# read input parameters
#######################
try:
self.status.set('read input parameter : %s ' % dt.now(), 5)
refSt = self.getInputValues(identifier='refSt')
refEn = self.getInputValues(identifier='refEn')
dateSt = self.getInputValues(identifier='dateSt')
dateEn = self.getInputValues(identifier='dateEn')
seasonwin = int(self.getInputValues(identifier='seasonwin')[0])
nanalog = int(self.getInputValues(identifier='nanalog')[0])
bbox_obj = self.BBox.getValue()
normalize = self.getInputValues(identifier='normalize')[0]
distance = self.getInputValues(identifier='dist')[0]
outformat = self.getInputValues(identifier='outformat')[0]
timewin = int(self.getInputValues(identifier='timewin')[0])
experiment = self.getInputValues(identifier='experiment')[0]
logger.info('input parameters set')
self.status.set('Read in and convert the arguments', 5)
except Exception as e:
msg = 'failed to read input parameter %s ' % e
logger.error(msg)
raise Exception(msg)
######################################
# convert types and set environment
######################################
try:
self.status.set('Preparing environment and converting arguments', 7)
refSt = dt.strptime(refSt[0], '%Y-%m-%d')
refEn = dt.strptime(refEn[0], '%Y-%m-%d')
dateSt = dt.strptime(dateSt[0], '%Y-%m-%d')
dateEn = dt.strptime(dateEn[0], '%Y-%m-%d')
if normalize == 'None':
seacyc = False
else:
seacyc = True
if outformat == 'ascii':
outformat = '.txt'
elif outformat == 'netCDF4':
outformat = '.nc'
else:
logger.error('output format not valid')
start = min(refSt, dateSt)
end = max(refEn, dateEn)
if bbox_obj is not None:
logger.info("bbox_obj={0}".format(bbox_obj.coords))
bbox = [bbox_obj.coords[0][0],
bbox_obj.coords[0][1],
bbox_obj.coords[1][0],
bbox_obj.coords[1][1]]
logger.info("bbox={0}".format(bbox))
else:
bbox = None
# region = self.getInputValues(identifier='region')[0]
# bbox = [float(b) for b in region.split(',')]
dataset, var = experiment.split('_')
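# e.g. the default experiment 'NCEP_slp' gives dataset='NCEP' and var='slp';
# a geopotential-height experiment (hypothetical name 'NCEP_z500') would give var='z500'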
logger.info('environment set')
except Exception as e:
msg = 'failed to set environment %s ' % e
logger.error(msg)
raise Exception(msg)
try:
if dataset == 'NCEP':
if 'z' in var:
variable = 'hgt'
level = var.strip('z')
# conform_units_to=None
else:
variable = 'slp'
level = None
# conform_units_to='hPa'
elif '20CRV2' in dataset:
    if 'z' in var:
variable = 'hgt'
level = var.strip('z')
# conform_units_to=None
else:
variable = 'prmsl'
level = None
# conform_units_to='hPa'
else:
logger.error('Reanalyses dataset not known')
logger.info('environment set')
except Exception as e:
msg = 'failed to set environment %s ' % e
logger.error(msg)
raise Exception(msg)
logger.debug("init took %s seconds.", time.time() - start_time)
self.status.set('Read in and convert the arguments done', 8)
#################
# get input data
#################
start_time = time.time() # measure get_input_data ...
self.status.set('fetching input data', 7)
try:
input = reanalyses(start=start.year, end=end.year,
variable=var, dataset=dataset)
logger.info('input files %s' % input)
nc_subset = call(resource=input, variable=var,
geom=bbox, spatial_wrapping='wrap')
except Exception as e:
msg = 'failed to fetch or subset input files %s' % e
logger.error(msg)
raise Exception(msg)
logger.debug("get_input_subset_dataset took %s seconds.",
time.time() - start_time)
self.status.set('**** Input data fetched', 10)
########################
# input data preparation
########################
self.status.set('Start preparing input data', 12)
start_time = time.time()  # measure data preparation ...
try:
# Construct descriptive filenames for the three files
# listed in config file
refDatesString = dt.strftime(refSt, '%Y-%m-%d') + "_" + dt.strftime(refEn, '%Y-%m-%d')
simDatesString = dt.strftime(dateSt, '%Y-%m-%d') + "_" + dt.strftime(dateEn, '%Y-%m-%d')
archiveNameString = "base_" + var + "_" + refDatesString + '_%.1f_%.1f_%.1f_%.1f' \
% (bbox[0], bbox[2], bbox[1], bbox[3])
simNameString = "sim_" + var + "_" + simDatesString + '_%.1f_%.1f_%.1f_%.1f' \
% (bbox[0], bbox[2], bbox[1], bbox[3])
archive = call(resource=nc_subset,
time_range=[refSt, refEn],
prefix=archiveNameString)
simulation = call(resource=nc_subset, time_range=[dateSt, dateEn],
prefix=simNameString)
logger.info('archive and simulation files generated: %s, %s'
% (archive, simulation))
except Exception as e:
msg = 'failed to prepare archive and simulation files %s ' % e
logger.debug(msg)
raise Exception(msg)
try:
if seacyc is True:
logger.info('normalization function with method: %s '
% normalize)
seasoncyc_base, seasoncyc_sim = analogs.seacyc(
archive,
simulation,
method=normalize)
else:
seasoncyc_base = seasoncyc_sim = None
except Exception as e:
msg = 'failed to generate normalization files %s ' % e
logger.debug(msg)
raise Exception(msg)
ip, output_file = mkstemp(dir='.', suffix='.txt')
files = [path.abspath(archive), path.abspath(simulation), output_file]
logger.debug("Data preperation took %s seconds.",
time.time() - start_time)
############################
# generate the config file
############################
self.status.set('writing config file', 15)
start_time = time.time() # measure write config ...
try:
config_file = analogs.get_configfile(
files=files,
seasoncyc_base=seasoncyc_base,
seasoncyc_sim=seasoncyc_sim,
timewin=timewin,
varname=var,
seacyc=seacyc,
cycsmooth=91,
nanalog=nanalog,
seasonwin=seasonwin,
distfun=distance,
outformat=outformat,
calccor=True,
silent=False,
period=[dt.strftime(refSt, '%Y-%m-%d'),
dt.strftime(refEn, '%Y-%m-%d')],
bbox="%s,%s,%s,%s" % (bbox[0],
bbox[2],
bbox[1],
bbox[3]))
except Exception as e:
msg = 'failed to generate config file %s ' % e
logger.debug(msg)
raise Exception(msg)
logger.debug("write_config took %s seconds.", time.time() - start_time)
#######################
# CASTf90 call
#######################
import subprocess
import shlex
start_time = time.time() # measure call castf90
self.status.set('Start CASTf90 call', 20)
try:
# self.status.set('execution of CASTf90', 50)
cmd = 'analogue.out %s' % path.relpath(config_file)
# system(cmd)
args = shlex.split(cmd)
output, error = subprocess.Popen(
args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
logger.info('analogue.out info:\n %s ' % output)
logger.debug('analogue.out errors:\n %s ' % error)
self.status.set('**** CASTf90 succeeded', 90)
except Exception as e:
msg = 'CASTf90 failed %s ' % e
logger.error(msg)
raise Exception(msg)
logger.debug("castf90 took %s seconds.", time.time() - start_time)
########################
# generate analog viewer
########################
try:
f = analogs.reformat_analogs(output_file)
logger.info('analogs reformatted')
self.status.set('Successfully reformatted analog file', 50)
# put config file into output folder
config_output_path, config_output_url = analogs.copy_configfile(
config_file
)
output_av = analogs.get_viewer(
f,
path.basename(config_output_path))
logger.info('Viewer generated')
self.status.set('Successfully generated analogs viewer', 90)
logger.info('output_av: %s ' % output_av)
except Exception as e:
    msg = 'Failed to reformat analogs file or generate viewer %s ' % e
    logger.debug(msg)
    raise Exception(msg)
self.status.set('preparing output', 99)
self.config.setValue(config_output_url) # config_file )
self.analogs.setValue(output_file)
self.output_netcdf.setValue(simulation)
self.output_html.setValue(output_av)
self.status.set('execution ended', 100)
logger.debug("total execution took %s seconds.",
time.time() - process_start_time)
|
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a custodian policy across an organization's accounts
"""
from collections import Counter
import logging
import os
import time
import subprocess
import six
import sys
import multiprocessing
from concurrent.futures import (
ProcessPoolExecutor,
as_completed)
import yaml
import boto3
from botocore.compat import OrderedDict
from botocore.exceptions import ClientError
import click
import jsonschema
from c7n.credentials import assumed_session, SessionFactory
from c7n.executor import MainThreadExecutor
from c7n.config import Config
from c7n.policy import PolicyCollection
from c7n.provider import get_resource_class
from c7n.reports.csvout import Formatter, fs_record_set
from c7n.resources import load_resources
from c7n.utils import CONN_CACHE, dumps
from c7n_org.utils import environ, account_tags
from c7n.utils import UnicodeWriter
log = logging.getLogger('c7n_org')
# Workaround OSX issue, note this exists for py2 but there
# isn't anything we can do in that case.
# https://bugs.python.org/issue33725
if sys.platform == 'darwin' and (
sys.version_info.major >= 3 and sys.version_info.minor > 4):
multiprocessing.set_start_method('spawn')
WORKER_COUNT = int(
os.environ.get('C7N_ORG_PARALLEL', multiprocessing.cpu_count() * 4))
CONFIG_SCHEMA = {
'$schema': 'http://json-schema.org/draft-07/schema',
'id': 'http://schema.cloudcustodian.io/v0/orgrunner.json',
'definitions': {
'account': {
'type': 'object',
'additionalProperties': True,
'anyOf': [
{'required': ['role', 'account_id']},
{'required': ['profile', 'account_id']}
],
'properties': {
'name': {'type': 'string'},
'email': {'type': 'string'},
'account_id': {
'type': 'string',
'pattern': '^[0-9]{12}$',
'minLength': 12, 'maxLength': 12},
'profile': {'type': 'string', 'minLength': 3},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'regions': {'type': 'array', 'items': {'type': 'string'}},
'role': {'oneOf': [
{'type': 'array', 'items': {'type': 'string'}},
{'type': 'string', 'minLength': 3}]},
'external_id': {'type': 'string'},
'vars': {'type': 'object'},
}
},
'subscription': {
'type': 'object',
'additionalProperties': False,
'required': ['subscription_id'],
'properties': {
'subscription_id': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'name': {'type': 'string'},
'vars': {'type': 'object'},
}
},
'project': {
'type': 'object',
'additionalProperties': False,
'required': ['project_id'],
'properties': {
'project_id': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'name': {'type': 'string'},
'vars': {'type': 'object'},
}
},
},
'type': 'object',
'additionalProperties': False,
'oneOf': [
{'required': ['accounts']},
{'required': ['projects']},
{'required': ['subscriptions']}
],
'properties': {
'vars': {'type': 'object'},
'accounts': {
'type': 'array',
'items': {'$ref': '#/definitions/account'}
},
'subscriptions': {
'type': 'array',
'items': {'$ref': '#/definitions/subscription'}
},
'projects': {
'type': 'array',
'items': {'$ref': '#/definitions/project'}
}
}
}
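# A minimal accounts file satisfying this schema might look like the following
# (illustrative account id and role ARN, not real values):
#
# accounts:
#   - name: dev
#     account_id: "123456789012"
#     role: arn:aws:iam::123456789012:role/CustodianRole
#     regions:
#       - us-east-1
#     tags:
#       - team:platform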
@click.group()
def cli():
"""custodian organization multi-account runner."""
class LogFilter(object):
"""We want to keep the main c7n-org cli output to be readable.
We previously did so by squelching custodian's log output with a
level filter on the logger; however, that meant log output stored
to output locations was also squelched.
We effectively want differential handling at the top-level logger's
stream handler, i.e. we want `custodian` log messages to propagate
to the root logger based on level, but we also want them to go to the
custodian logger's directly attached handlers at debug level.
"""
def filter(self, r):
if not r.name.startswith('custodian'):
return 1
elif r.levelno >= logging.WARNING:
return 1
return 0
def init(config, use, debug, verbose, accounts, tags, policies, resource=None, policy_tags=()):
level = verbose and logging.DEBUG or logging.INFO
logging.basicConfig(
level=level,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
logging.getLogger().setLevel(level)
logging.getLogger('botocore').setLevel(logging.ERROR)
logging.getLogger('s3transfer').setLevel(logging.WARNING)
logging.getLogger('custodian.s3').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# Filter out custodian log messages on console output if not
# at warning level or higher, see LogFilter docs and #2674
for h in logging.getLogger().handlers:
if isinstance(h, logging.StreamHandler):
h.addFilter(LogFilter())
with open(config, 'rb') as fh:
accounts_config = yaml.safe_load(fh.read())
jsonschema.validate(accounts_config, CONFIG_SCHEMA)
if use:
with open(use) as fh:
custodian_config = yaml.safe_load(fh.read())
else:
custodian_config = {}
accounts_config['accounts'] = list(accounts_iterator(accounts_config))
filter_policies(custodian_config, policy_tags, policies, resource)
filter_accounts(accounts_config, tags, accounts)
load_resources()
MainThreadExecutor.c7n_async = False
executor = debug and MainThreadExecutor or ProcessPoolExecutor
return accounts_config, custodian_config, executor
def resolve_regions(regions):
if 'all' in regions:
client = boto3.client('ec2')
return [region['RegionName'] for region in client.describe_regions()['Regions']]
if not regions:
return ('us-east-1', 'us-west-2')
return regions
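# e.g. resolve_regions(('all',)) expands to every region returned by
# ec2 DescribeRegions, while resolve_regions(()) falls back to ('us-east-1', 'us-west-2')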
def get_session(account, session_name, region):
if account.get('role'):
roles = account['role']
if isinstance(roles, six.string_types):
roles = [roles]
s = None
for r in roles:
try:
s = assumed_session(
r, session_name, region=region,
external_id=account.get('external_id'),
session=s)
except ClientError as e:
log.error(
"unable to obtain credentials for account:%s role:%s error:%s",
account['name'], r, e)
raise
return s
elif account.get('profile'):
return SessionFactory(region, account['profile'])()
else:
raise ValueError(
"No profile or role assume specified for account %s" % account)
def filter_accounts(accounts_config, tags, accounts, not_accounts=None):
filtered_accounts = []
for a in accounts_config.get('accounts', ()):
if not_accounts and a['name'] in not_accounts:
continue
if accounts and a['name'] not in accounts:
continue
if tags:
found = set()
for t in tags:
if t in a.get('tags', ()):
found.add(t)
if not found == set(tags):
continue
filtered_accounts.append(a)
accounts_config['accounts'] = filtered_accounts
def filter_policies(policies_config, tags, policies, resource, not_policies=None):
filtered_policies = []
for p in policies_config.get('policies', ()):
if not_policies and p['name'] in not_policies:
continue
if policies and p['name'] not in policies:
continue
if resource and p['resource'] != resource:
continue
if tags:
found = set()
for t in tags:
if t in p.get('tags', ()):
found.add(t)
if not found == set(tags):
continue
filtered_policies.append(p)
policies_config['policies'] = filtered_policies
def report_account(account, region, policies_config, output_path, cache_path, debug):
output_path = os.path.join(output_path, account['name'], region)
cache_path = os.path.join(cache_path, "%s-%s.cache" % (account['name'], region))
config = Config.empty(
region=region,
output_dir=output_path,
account_id=account['account_id'], metrics_enabled=False,
cache=cache_path, log_group=None, profile=None, external_id=None)
if account.get('role'):
config['assume_role'] = account['role']
config['external_id'] = account.get('external_id')
elif account.get('profile'):
config['profile'] = account['profile']
policies = PolicyCollection.from_data(policies_config, config)
records = []
for p in policies:
# initialize policy execution context for output access
p.ctx.initialize()
log.debug(
"Report policy:%s account:%s region:%s path:%s",
p.name, account['name'], region, output_path)
policy_records = fs_record_set(p.ctx.log_dir, p.name)
for r in policy_records:
r['policy'] = p.name
r['region'] = p.options.region
r['account'] = account['name']
for t in account.get('tags', ()):
if ':' in t:
k, v = t.split(':', 1)
r[k] = v
records.extend(policy_records)
return records
@cli.command()
@click.option('-c', '--config', required=True, help="Accounts config file")
@click.option('-f', '--output', type=click.File('w'), default='-', help="Output File")
@click.option('-u', '--use', required=True)
@click.option('-s', '--output-dir', required=True, type=click.Path())
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('--field', multiple=True)
@click.option('--no-default-fields', default=False, is_flag=True)
@click.option('-t', '--tags', multiple=True, default=None, help="Account tag filter")
@click.option('-r', '--region', default=None, multiple=True)
@click.option('--debug', default=False, is_flag=True)
@click.option('-v', '--verbose', default=False, help="Verbose", is_flag=True)
@click.option('-p', '--policy', multiple=True)
@click.option('-l', '--policytags', 'policy_tags',
multiple=True, default=None, help="Policy tag filter")
@click.option('--format', default='csv', type=click.Choice(['csv', 'json']))
@click.option('--resource', default=None)
@click.option('--cache-path', required=False, type=click.Path(), default="~/.cache/c7n-org")
def report(config, output, use, output_dir, accounts,
field, no_default_fields, tags, region, debug, verbose,
policy, policy_tags, format, resource, cache_path):
"""report on a cross account policy execution."""
accounts_config, custodian_config, executor = init(
config, use, debug, verbose, accounts, tags, policy,
resource=resource, policy_tags=policy_tags)
resource_types = set()
for p in custodian_config.get('policies'):
resource_types.add(p['resource'])
if len(resource_types) > 1:
raise ValueError("can only report on one resource type at a time")
elif not len(custodian_config['policies']) > 0:
raise ValueError("no matching policies found")
records = []
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config.get('accounts', ()):
for r in resolve_regions(region or a.get('regions', ())):
futures[w.submit(
report_account,
a, r,
custodian_config,
output_dir,
cache_path,
debug)] = (a, r)
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
if debug:
raise
log.warning(
"Error running policy in %s @ %s exception: %s",
a['name'], r, f.exception())
records.extend(f.result())
log.debug(
"Found %d records across %d accounts and %d policies",
len(records), len(accounts_config['accounts']),
len(custodian_config['policies']))
if format == 'json':
dumps(records, output, indent=2)
return
prefix_fields = OrderedDict(
(('Account', 'account'), ('Region', 'region'), ('Policy', 'policy')))
config = Config.empty()
factory = get_resource_class(list(resource_types)[0])
formatter = Formatter(
factory.resource_type,
extra_fields=field,
include_default_fields=not(no_default_fields),
include_region=False,
include_policy=False,
fields=prefix_fields)
rows = formatter.to_csv(records, unique=False)
writer = UnicodeWriter(output, formatter.headers())
writer.writerow(formatter.headers())
writer.writerows(rows)
def _get_env_creds(session, region):
creds = session._session.get_credentials()
env = {}
env['AWS_ACCESS_KEY_ID'] = creds.access_key
env['AWS_SECRET_ACCESS_KEY'] = creds.secret_key
env['AWS_SESSION_TOKEN'] = creds.token
env['AWS_DEFAULT_REGION'] = region
return env
def run_account_script(account, region, output_dir, debug, script_args):
try:
session = get_session(account, "org-script", region)
except ClientError:
return 1
env = os.environ.copy()
env.update(_get_env_creds(session, region))
log.info("running script on account:%s region:%s script: `%s`",
account['name'], region, " ".join(script_args))
if debug:
subprocess.check_call(args=script_args, env=env)
return 0
output_dir = os.path.join(output_dir, account['name'], region)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(os.path.join(output_dir, 'stdout'), 'wb') as stdout:
with open(os.path.join(output_dir, 'stderr'), 'wb') as stderr:
return subprocess.call(
args=script_args, env=env, stdout=stdout, stderr=stderr)
@cli.command(name='run-script', context_settings=dict(ignore_unknown_options=True))
@click.option('-c', '--config', required=True, help="Accounts config file")
@click.option('-s', '--output-dir', required=True, type=click.Path())
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('-t', '--tags', multiple=True, default=None, help="Account tag filter")
@click.option('-r', '--region', default=None, multiple=True)
@click.option('--echo', default=False, is_flag=True)
@click.option('--serial', default=False, is_flag=True)
@click.argument('script_args', nargs=-1, type=click.UNPROCESSED)
def run_script(config, output_dir, accounts, tags, region, echo, serial, script_args):
"""run an aws script across accounts"""
# TODO count up on success / error / error list by account
accounts_config, custodian_config, executor = init(
config, None, serial, True, accounts, tags, (), ())
if echo:
print("command to run: `%s`" % (" ".join(script_args)))
return
# Support fully quoted scripts, which are common to avoid parameter
# overlap with c7n-org run-script.
if len(script_args) == 1 and " " in script_args[0]:
script_args = script_args[0].split()
success = True
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config.get('accounts', ()):
for r in resolve_regions(region or a.get('regions', ())):
futures[
w.submit(run_account_script, a, r, output_dir,
serial, script_args)] = (a, r)
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
if serial:
raise
log.warning(
"Error running script in %s @ %s exception: %s",
a['name'], r, f.exception())
success = False
exit_code = f.result()
if exit_code == 0:
log.info(
"ran script on account:%s region:%s script: `%s`",
a['name'], r, " ".join(script_args))
else:
log.info(
"error running script on account:%s region:%s script: `%s`",
a['name'], r, " ".join(script_args))
success = False
if not success:
sys.exit(1)
def accounts_iterator(config):
for a in config.get('accounts', ()):
yield a
for a in config.get('subscriptions', ()):
d = {'account_id': a['subscription_id'],
'name': a.get('name', a['subscription_id']),
'regions': ['global'],
'tags': a.get('tags', ()),
'vars': a.get('vars', {})}
yield d
for a in config.get('projects', ()):
d = {'account_id': a['project_id'],
'name': a.get('name', a['project_id']),
'regions': ['global'],
'tags': a.get('tags', ()),
'vars': a.get('vars', {})}
yield d
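# e.g. a subscription entry {'subscription_id': 'sub-1234', 'name': 'prod'} (illustrative
# values) is yielded as {'account_id': 'sub-1234', 'name': 'prod', 'regions': ['global'], ...}
# so the rest of the runner can treat subscriptions and projects like accounts.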
def run_account(account, region, policies_config, output_path,
cache_period, cache_path, metrics, dryrun, debug):
"""Execute a set of policies on an account.
"""
logging.getLogger('custodian.output').setLevel(logging.ERROR + 1)
CONN_CACHE.session = None
CONN_CACHE.time = None
# allow users to specify interpolated output paths
if '{' not in output_path:
output_path = os.path.join(output_path, account['name'], region)
cache_path = os.path.join(cache_path, "%s-%s.cache" % (account['account_id'], region))
config = Config.empty(
region=region, cache=cache_path,
cache_period=cache_period, dryrun=dryrun, output_dir=output_path,
account_id=account['account_id'], metrics_enabled=metrics,
log_group=None, profile=None, external_id=None)
env_vars = account_tags(account)
if account.get('role'):
if isinstance(account['role'], six.string_types):
config['assume_role'] = account['role']
config['external_id'] = account.get('external_id')
else:
env_vars.update(
_get_env_creds(get_session(account, 'custodian', region), region))
elif account.get('profile'):
config['profile'] = account['profile']
policies = PolicyCollection.from_data(policies_config, config)
policy_counts = {}
success = True
st = time.time()
with environ(**env_vars):
for p in policies:
# Variable expansion and non schema validation (not optional)
p.expand_variables(p.get_variables(account.get('vars', {})))
p.validate()
if p.region and p.region != region:
continue
log.debug(
"Running policy:%s account:%s region:%s",
p.name, account['name'], region)
try:
resources = p.run()
policy_counts[p.name] = resources and len(resources) or 0
if not resources:
continue
if not config.dryrun and p.execution_mode != 'pull':
log.info("Ran account:%s region:%s policy:%s provisioned time:%0.2f",
account['name'], region, p.name, time.time() - st)
continue
log.info(
"Ran account:%s region:%s policy:%s matched:%d time:%0.2f",
account['name'], region, p.name, len(resources),
time.time() - st)
except ClientError as e:
success = False
if e.response['Error']['Code'] == 'AccessDenied':
log.warning('Access denied api:%s policy:%s account:%s region:%s',
e.operation_name, p.name, account['name'], region)
return policy_counts, success
log.error(
"Exception running policy:%s account:%s region:%s error:%s",
p.name, account['name'], region, e)
continue
except Exception as e:
success = False
log.error(
"Exception running policy:%s account:%s region:%s error:%s",
p.name, account['name'], region, e)
if not debug:
continue
import traceback, pdb, sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
raise
return policy_counts, success
@cli.command(name='run')
@click.option('-c', '--config', required=True, help="Accounts config file")
@click.option("-u", "--use", required=True)
@click.option('-s', '--output-dir', required=True, type=click.Path())
@click.option('-a', '--accounts', multiple=True, default=None)
@click.option('-t', '--tags', multiple=True, default=None, help="Account tag filter")
@click.option('-r', '--region', default=None, multiple=True)
@click.option('-p', '--policy', multiple=True)
@click.option('-l', '--policytags', 'policy_tags',
multiple=True, default=None, help="Policy tag filter")
@click.option('--cache-period', default=15, type=int)
@click.option('--cache-path', required=False,
type=click.Path(
writable=True, readable=True, exists=True,
resolve_path=True, allow_dash=False,
file_okay=False, dir_okay=True),
default=None)
@click.option("--metrics", default=False, is_flag=True)
@click.option("--metrics-uri", default=None, help="Configure provider metrics target")
@click.option("--dryrun", default=False, is_flag=True)
@click.option('--debug', default=False, is_flag=True)
@click.option('-v', '--verbose', default=False, help="Verbose", is_flag=True)
def run(config, use, output_dir, accounts, tags, region,
policy, policy_tags, cache_period, cache_path, metrics,
dryrun, debug, verbose, metrics_uri):
"""run a custodian policy across accounts"""
accounts_config, custodian_config, executor = init(
config, use, debug, verbose, accounts, tags, policy, policy_tags=policy_tags)
policy_counts = Counter()
success = True
if metrics_uri:
metrics = metrics_uri
if not cache_path:
cache_path = os.path.expanduser("~/.cache/c7n-org")
if not os.path.exists(cache_path):
os.makedirs(cache_path)
with executor(max_workers=WORKER_COUNT) as w:
futures = {}
for a in accounts_config['accounts']:
for r in resolve_regions(region or a.get('regions', ())):
futures[w.submit(
run_account,
a, r,
custodian_config,
output_dir,
cache_period,
cache_path,
metrics,
dryrun,
debug)] = (a, r)
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
if debug:
raise
log.warning(
"Error running policy in %s @ %s exception: %s",
a['name'], r, f.exception())
account_region_pcounts, account_region_success = f.result()
for p in account_region_pcounts:
policy_counts[p] += account_region_pcounts[p]
if not account_region_success:
success = False
log.info("Policy resource counts %s" % policy_counts)
if not success:
sys.exit(1)
|
|
from __future__ import unicode_literals
import hashlib
from django.conf import settings
try:
from django.contrib.auth import get_user_model # Django 1.5
except ImportError:
from postman.future_1_5 import get_user_model
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
try:
from django.utils.text import Truncator # Django 1.4
except ImportError:
from postman.future_1_4 import Truncator
try:
from django.utils.timezone import now # Django 1.4 aware datetimes
except ImportError:
from datetime import datetime
now = datetime.now
from django.utils.translation import ugettext, ugettext_lazy as _
from . import OPTION_MESSAGES
from .query import PostmanQuery
from .utils import email_visitor, notify_user
# moderation constants
STATUS_PENDING = 'p'
STATUS_ACCEPTED = 'a'
STATUS_REJECTED = 'r'
STATUS_CHOICES = (
(STATUS_PENDING, _('Pending')),
(STATUS_ACCEPTED, _('Accepted')),
(STATUS_REJECTED, _('Rejected')),
)
# ordering constants
ORDER_BY_KEY = 'o' # as 'order'
ORDER_BY_FIELDS = {
'f': 'sender__' + get_user_model().USERNAME_FIELD, # as 'from'
't': 'recipient__' + get_user_model().USERNAME_FIELD, # as 'to'
's': 'subject', # as 'subject'
'd': 'sent_at', # as 'date'
}
ORDER_BY_MAPPER = {'sender': 'f', 'recipient': 't', 'subject': 's', 'date': 'd'} # for templatetags usage
def get_order_by(query_dict):
"""
Return a field name, optionally prefixed for descending order, or None if not found.
Argument:
``query_dict``: a dictionary to look for a key dedicated to ordering purpose
"""
if ORDER_BY_KEY in query_dict:
code = query_dict[ORDER_BY_KEY] # code may be uppercase or lowercase
order_by_field = ORDER_BY_FIELDS.get(code.lower())
if order_by_field:
if code.isupper():
order_by_field = '-' + order_by_field
return order_by_field
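# e.g. a query string such as ?o=f (illustrative) yields 'sender__<USERNAME_FIELD>'
# for ascending order, while the uppercase form ?o=F yields the same field
# prefixed with '-' for descending order.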
def get_user_representation(user):
"""
Return a User representation for display, configurable through an optional setting.
"""
show_user_as = getattr(settings, 'POSTMAN_SHOW_USER_AS', None)
if isinstance(show_user_as, six.string_types):
attr = getattr(user, show_user_as, None)
if callable(attr):
attr = attr()
if attr:
return force_text(attr)
elif callable(show_user_as):
try:
return force_text(show_user_as(user))
except Exception:
pass
return force_text(user) # default value, or in case of empty attribute or exception
class MessageManager(models.Manager):
"""The manager for Message."""
def _folder(self, related, filters, option=None, order_by=None):
"""Base code, in common to the folders."""
qs = self.all() if option == OPTION_MESSAGES else QuerySet(self.model, PostmanQuery(self.model), using=self._db)
if related:
qs = qs.select_related(*related)
if order_by:
qs = qs.order_by(order_by)
if isinstance(filters, (list, tuple)):
lookups = models.Q()
for filter in filters:
lookups |= models.Q(**filter)
else:
lookups = models.Q(**filters)
if option == OPTION_MESSAGES:
return qs.filter(lookups)
# Adding a 'count' attribute, to be similar to the by-conversation case,
# should not be necessary. Otherwise add:
# .extra(select={'count': 'SELECT 1'})
else:
qs = qs.extra(select={'count': '{0}.count'.format(qs.query.pm_alias_prefix)})
qs.query.pm_set_extra(table=(
# extra columns are always first in the SELECT query
self.filter(lookups, thread_id__isnull=True).extra(select={'count': 0})\
.values_list('id', 'count').order_by(),
# use separate annotate() to keep control of the necessary order
self.filter(lookups, thread_id__isnull=False).values('thread').annotate(count=models.Count('pk')).annotate(id=models.Max('pk'))\
.values_list('id', 'count').order_by(),
))
return qs
def inbox(self, user, related=True, **kwargs):
"""
Return accepted messages received by a user but not marked as archived or deleted.
"""
related = ('sender',) if related else None
filters = {
'recipient': user,
'recipient_archived': False,
'recipient_deleted_at__isnull': True,
'moderation_status': STATUS_ACCEPTED,
}
return self._folder(related, filters, **kwargs)
def inbox_unread_count(self, user):
"""
Return the number of unread messages for a user.
Designed for context_processors.py and templatetags/postman_tags.py
"""
return self.inbox(user, related=False, option=OPTION_MESSAGES).filter(read_at__isnull=True).count()
def inbox_message(self, user):
"""
Return the unread messages for a user.
Designed for context_processors.py and templatetags/postman_tags.py
"""
return self.inbox(user, related=False, option=OPTION_MESSAGES).filter(read_at__isnull=True)
def sent(self, user, **kwargs):
"""
Return all messages sent by a user but not marked as archived or deleted.
"""
related = ('recipient',)
filters = {
'sender': user,
'sender_archived': False,
'sender_deleted_at__isnull': True,
# allow to see pending and rejected messages as well
}
return self._folder(related, filters, **kwargs)
def archives(self, user, **kwargs):
"""
Return messages belonging to a user and marked as archived.
"""
related = ('sender', 'recipient')
filters = ({
'recipient': user,
'recipient_archived': True,
'recipient_deleted_at__isnull': True,
'moderation_status': STATUS_ACCEPTED,
}, {
'sender': user,
'sender_archived': True,
'sender_deleted_at__isnull': True,
})
return self._folder(related, filters, **kwargs)
def trash(self, user, **kwargs):
"""
Return messages belonging to a user and marked as deleted.
"""
related = ('sender', 'recipient')
filters = ({
'recipient': user,
'recipient_deleted_at__isnull': False,
'moderation_status': STATUS_ACCEPTED,
}, {
'sender': user,
'sender_deleted_at__isnull': False,
})
return self._folder(related, filters, **kwargs)
def thread(self, user, filter):
"""
Return message/conversation for display.
"""
return self.select_related('sender', 'recipient').filter(
filter,
(models.Q(recipient=user) & models.Q(moderation_status=STATUS_ACCEPTED)) | models.Q(sender=user),
).order_by('sent_at')
def as_recipient(self, user, filter):
"""
Return messages matching a filter AND being visible to a user as the recipient.
"""
return self.filter(filter, recipient=user, moderation_status=STATUS_ACCEPTED)
def as_sender(self, user, filter):
"""
Return messages matching a filter AND being visible to a user as the sender.
"""
return self.filter(filter, sender=user) # any status is fine
def perms(self, user):
"""
Return a field-lookups filter as a permission controller for a reply request.
The user must be the recipient of the accepted, non-deleted, message
"""
return models.Q(recipient=user) & models.Q(moderation_status=STATUS_ACCEPTED) & models.Q(recipient_deleted_at__isnull=True)
def set_read(self, user, filter):
"""
Set messages as read.
"""
return self.filter(
filter,
recipient=user,
moderation_status=STATUS_ACCEPTED,
read_at__isnull=True,
).update(read_at=now())
@python_2_unicode_compatible
class Message(models.Model):
"""
A message between a User and another User or an AnonymousUser.
"""
SUBJECT_MAX_LENGTH = 120
subject = models.CharField(_("subject"), max_length=SUBJECT_MAX_LENGTH)
body = models.TextField(_("body"), blank=True)
sender = models.ForeignKey(get_user_model(), related_name='sent_messages', null=True, blank=True, verbose_name=_("sender"))
recipient = models.ForeignKey(get_user_model(), related_name='received_messages', null=True, blank=True, verbose_name=_("recipient"))
email = models.EmailField(_("visitor"), blank=True) # instead of either sender or recipient, for an AnonymousUser
parent = models.ForeignKey('self', related_name='next_messages', null=True, blank=True, verbose_name=_("parent message"))
thread = models.ForeignKey('self', related_name='child_messages', null=True, blank=True, verbose_name=_("root message"))
sent_at = models.DateTimeField(_("sent at"), default=now)
read_at = models.DateTimeField(_("read at"), null=True, blank=True)
replied_at = models.DateTimeField(_("replied at"), null=True, blank=True)
sender_archived = models.BooleanField(_("archived by sender"), default=False)
recipient_archived = models.BooleanField(_("archived by recipient"), default=False)
sender_deleted_at = models.DateTimeField(_("deleted by sender at"), null=True, blank=True)
recipient_deleted_at = models.DateTimeField(_("deleted by recipient at"), null=True, blank=True)
# moderation fields
moderation_status = models.CharField(_("status"), max_length=1, choices=STATUS_CHOICES, default=STATUS_PENDING)
moderation_by = models.ForeignKey(get_user_model(), related_name='moderated_messages',
null=True, blank=True, verbose_name=_("moderator"))
moderation_date = models.DateTimeField(_("moderated at"), null=True, blank=True)
moderation_reason = models.CharField(_("rejection reason"), max_length=120, blank=True)
notify_via_sms = models.BooleanField(default=False)
objects = MessageManager()
class Meta:
verbose_name = _("message")
verbose_name_plural = _("messages")
ordering = ['-sent_at', '-id']
def __str__(self):
return "{0}>{1}:{2}".format(self.obfuscated_sender, self.obfuscated_recipient, Truncator(self.subject).words(5))
def get_absolute_url(self):
return reverse('postman_view', args=[self.pk])
def is_pending(self):
"""Tell if the message is in the pending state."""
return self.moderation_status == STATUS_PENDING
def is_rejected(self):
"""Tell if the message is in the rejected state."""
return self.moderation_status == STATUS_REJECTED
def is_accepted(self):
"""Tell if the message is in the accepted state."""
return self.moderation_status == STATUS_ACCEPTED
@property
def is_new(self):
"""Tell if the recipient has not yet read the message."""
return self.read_at is None
@property
def is_replied(self):
"""Tell if the recipient has written a reply to the message."""
return self.replied_at is not None
def _obfuscated_email(self):
"""
Return the email field as obfuscated, to keep it undisclosed.
Format is:
first 4 characters of the hash email + '..' + last 4 characters of the hash email + '@' + domain without TLD
Example:
foo@domain.com -> 1a2b..e8f9@domain
"""
email = self.email
data = email + settings.SECRET_KEY
digest = hashlib.md5(data.encode()).hexdigest() # encode(): py3 needs a buffer of bytes
shrunken_digest = '..'.join((digest[:4], digest[-4:])) # 32 characters is too long and is useless
bits = email.split('@')
if len(bits) != 2:
return ''
domain = bits[1]
return '@'.join((shrunken_digest, domain.rsplit('.', 1)[0])) # leave off the TLD to gain some space
def admin_sender(self):
"""
Return the sender either as a username or as a plain email.
Designed for the Admin site.
"""
if self.sender:
return str(self.sender)
else:
return '<{0}>'.format(self.email)
admin_sender.short_description = _("sender")
admin_sender.admin_order_field = 'sender'
# Give the sender either as a username or as a plain email.
clear_sender = property(admin_sender)
@property
def obfuscated_sender(self):
"""Return the sender either as a username or as an undisclosed email."""
if self.sender:
return get_user_representation(self.sender)
else:
return self._obfuscated_email()
def admin_recipient(self):
"""
Return the recipient either as a username or as a plain email.
Designed for the Admin site.
"""
if self.recipient:
return str(self.recipient)
else:
return '<{0}>'.format(self.email)
admin_recipient.short_description = _("recipient")
admin_recipient.admin_order_field = 'recipient'
# Give the recipient either as a username or as a plain email.
clear_recipient = property(admin_recipient)
@property
def obfuscated_recipient(self):
"""Return the recipient either as a username or as an undisclosed email."""
if self.recipient:
return get_user_representation(self.recipient)
else:
return self._obfuscated_email()
def get_replies_count(self):
"""Return the number of accepted responses."""
return self.next_messages.filter(moderation_status=STATUS_ACCEPTED).count()
def quote(self, format_subject, format_body):
"""Return a dictionary of quote values to initiate a reply."""
return {
'subject': format_subject(self.subject)[:self.SUBJECT_MAX_LENGTH],
'body': format_body(self.obfuscated_sender, self.body),
}
def clean(self):
"""Check some validity constraints."""
if not (self.sender_id or self.email):
raise ValidationError(ugettext("Undefined sender."))
def clean_moderation(self, initial_status, user=None):
"""Adjust automatically some fields, according to status workflow."""
if self.moderation_status != initial_status:
self.moderation_date = now()
self.moderation_by = user
if self.is_rejected():
# even if maybe previously deleted during a temporary 'accepted' stay
self.recipient_deleted_at = now()
elif initial_status == STATUS_REJECTED:
# rollback
self.recipient_deleted_at = None
def clean_for_visitor(self):
"""Do some auto-read and auto-delete, because there is no one to do it (no account)."""
if not self.sender_id:
# no need to wait for a final moderation status to mark as deleted
if not self.sender_deleted_at:
self.sender_deleted_at = now()
elif not self.recipient_id:
if self.is_accepted():
if not self.read_at:
self.read_at = now()
if not self.recipient_deleted_at:
self.recipient_deleted_at = now()
else:
# rollbacks
if self.read_at:
self.read_at = None
# but stay deleted if rejected
if self.is_pending() and self.recipient_deleted_at:
self.recipient_deleted_at = None
def update_parent(self, initial_status):
"""Update the parent to actualize its response state."""
if self.moderation_status != initial_status:
parent = self.parent
if self.is_accepted():
# keep the very first date; no need to do differently
if parent and (not parent.replied_at or self.sent_at < parent.replied_at):
parent.replied_at = self.sent_at
parent.save()
elif initial_status == STATUS_ACCEPTED:
if parent and parent.replied_at == self.sent_at:
# rollback, but there may be some other valid replies
try:
other_date = parent.next_messages\
.exclude(pk=self.pk).filter(moderation_status=STATUS_ACCEPTED)\
.values_list('sent_at', flat=True)\
.order_by('sent_at')[:1].get()
parent.replied_at = other_date
except Message.DoesNotExist:
parent.replied_at = None
parent.save()
def notify_users(self, initial_status, site, is_auto_moderated=True):
"""Notify the rejection (to sender) or the acceptance (to recipient) of the message."""
if initial_status == STATUS_PENDING:
if self.is_rejected():
# Bypass: for an online user, no need to notify when rejection is immediate.
# Only useful for a visitor as an archive copy of the message, otherwise lost.
if not (self.sender_id and is_auto_moderated):
(notify_user if self.sender_id else email_visitor)(self, 'rejection', site)
elif self.is_accepted():
(notify_user if self.recipient_id else email_visitor)(self, 'acceptance', site)
def get_dates(self):
"""Get some dates to restore later."""
return (self.sender_deleted_at, self.recipient_deleted_at, self.read_at)
def set_dates(self, sender_deleted_at, recipient_deleted_at, read_at):
"""Restore some dates."""
self.sender_deleted_at = sender_deleted_at
self.recipient_deleted_at = recipient_deleted_at
self.read_at = read_at
def get_moderation(self):
"""Get moderation information to restore later."""
return (self.moderation_status, self.moderation_by_id, self.moderation_date, self.moderation_reason)
def set_moderation(self, status, by_id, date, reason):
"""Restore moderation information."""
self.moderation_status = status
self.moderation_by_id = by_id
self.moderation_date = date
self.moderation_reason = reason
def auto_moderate(self, moderators):
"""Run a chain of auto-moderators."""
auto = None
final_reason = ''
percents = []
reasons = []
if not isinstance(moderators, (list, tuple)):
moderators = (moderators,)
for moderator in moderators:
rating = moderator(self)
if rating is None: continue
if isinstance(rating, tuple):
percent, reason = rating
else:
percent = rating
reason = getattr(moderator, 'default_reason', '')
if percent is False: percent = 0
if percent is True: percent = 100
if not 0 <= percent <= 100: continue
if percent == 0:
auto = False
final_reason = reason
break
elif percent == 100:
auto = True
break
percents.append(percent)
reasons.append(reason)
if auto is None and percents:
average = float(sum(percents)) / len(percents)
final_reason = ', '.join([r for i, r in enumerate(reasons) if r and not r.isspace() and percents[i] < 50])
auto = average >= 50
if auto is None:
auto = getattr(settings, 'POSTMAN_AUTO_MODERATE_AS', None)
if auto is True:
self.moderation_status = STATUS_ACCEPTED
elif auto is False:
self.moderation_status = STATUS_REJECTED
self.moderation_reason = final_reason
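# Illustrative sketch of an auto-moderator: auto_moderate() accepts callables that
# rate a message and return None to abstain, True/False for an immediate
# accept/reject, or a percentage (optionally paired with a reason) that is averaged
# with the other moderators. A minimal, hypothetical keyword-based example; the
# function name and word list are assumptions:
def example_keyword_moderator(message):
    """Reject outright on banned words, otherwise abstain."""
    banned = ('viagra', 'lottery')
    if any(word in message.body.lower() for word in banned):
        return (0, 'prohibited content')  # 0% triggers an immediate rejection with a reason
    return None  # abstain and let the other moderators decide
example_keyword_moderator.default_reason = 'prohibited content'
# Typical wiring: message.auto_moderate([example_keyword_moderator, ...])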
class PendingMessageManager(models.Manager):
"""The manager for PendingMessage."""
def get_query_set(self):
"""Filter to get only pending objects."""
return super(PendingMessageManager, self).get_query_set().filter(moderation_status=STATUS_PENDING)
class PendingMessage(Message):
"""
A proxy to Message, focused on pending objects to accept or reject.
"""
objects = PendingMessageManager()
class Meta:
verbose_name = _("pending message")
verbose_name_plural = _("pending messages")
proxy = True
def set_accepted(self):
"""Set the message as accepted."""
self.moderation_status = STATUS_ACCEPTED
def set_rejected(self):
"""Set the message as rejected."""
self.moderation_status = STATUS_REJECTED
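# Illustrative usage sketch of the folder API above, assuming ``request.user`` is an
# authenticated user; the helper name is hypothetical and nothing here is called at
# import time.
def _example_mark_inbox_read(request):
    """Hypothetical view helper: fetch unread inbox messages and mark them as read."""
    unread = Message.objects.inbox(request.user, related=False, option=OPTION_MESSAGES)\
        .filter(read_at__isnull=True)
    message_ids = [m.pk for m in unread]
    # set_read() stamps read_at on accepted, still-unread messages matching the filter.
    Message.objects.set_read(request.user, models.Q(pk__in=message_ids))
    return message_ids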
|
|
from canvas_sdk import client, utils
def delete_assignment(request_ctx, course_id, id, **request_kwargs):
"""
Delete the given assignment.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param id: (required) ID
:type id: string
:return: Delete an assignment
:rtype: requests.Response (with Assignment data)
"""
path = '/v1/courses/{course_id}/assignments/{id}'
url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
response = client.delete(request_ctx, url, **request_kwargs)
return response
def list_assignments(request_ctx, course_id, include, search_term=None, override_assignment_dates=None, per_page=None, **request_kwargs):
"""
Returns the list of assignments for the current context.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param include: (required) Associations to include with the assignment.
:type include: string
:param search_term: (optional) The partial title of the assignments to match and return.
:type search_term: string or None
:param override_assignment_dates: (optional) Apply assignment overrides for each assignment, defaults to true.
:type override_assignment_dates: boolean or None
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List assignments
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
include_types = ('submission',)  # single-element tuple; without the trailing comma this is just a string
utils.validate_attr_is_acceptable(include, include_types)
path = '/v1/courses/{course_id}/assignments'
payload = {
'include' : include,
'search_term' : search_term,
'override_assignment_dates' : override_assignment_dates,
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(course_id=course_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
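# Illustrative usage sketch, assuming ``request_ctx`` is a configured RequestContext
# for the target Canvas instance; the helper name and course id are hypothetical.
def _example_print_assignment_names(request_ctx, course_id='12345'):
    response = list_assignments(request_ctx, course_id, include='submission')
    for assignment in response.json():
        print(assignment.get('name'))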
def get_single_assignment(request_ctx, course_id, id, include, override_assignment_dates=None, **request_kwargs):
"""
Returns the assignment with the given id.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param id: (required) ID
:type id: string
:param include: (required) Associations to include with the assignment.
:type include: string
:param override_assignment_dates: (optional) Apply assignment overrides to the assignment, defaults to true.
:type override_assignment_dates: boolean or None
:return: Get a single assignment
:rtype: requests.Response (with Assignment data)
"""
include_types = ('submission',)  # single-element tuple; without the trailing comma this is just a string
utils.validate_attr_is_acceptable(include, include_types)
path = '/v1/courses/{course_id}/assignments/{id}'
payload = {
'include' : include,
'override_assignment_dates' : override_assignment_dates,
}
url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
def create_assignment(request_ctx, course_id, assignment_name, assignment_submission_types, assignment_position=None, assignment_allowed_extensions=None, assignment_turnitin_enabled=None, assignment_integration_data=None, assignment_integration_id=None, assignment_turnitin_settings=None, assignment_peer_reviews=None, assignment_automatic_peer_reviews=None, assignment_notify_of_update=None, assignment_group_category_id=None, assignment_grade_group_students_individually=None, assignment_external_tool_tag_attributes=None, assignment_points_possible=None, assignment_grading_type=None, assignment_due_at=None, assignment_lock_at=None, assignment_unlock_at=None, assignment_description=None, assignment_assignment_group_id=None, assignment_muted=None, assignment_assignment_overrides=None, assignment_only_visible_to_overrides=None, assignment_published=None, assignment_grading_standard_id=None, **request_kwargs):
"""
Create a new assignment for this course. The assignment is created in the
active state.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param assignment_name: (required) The assignment name.
:type assignment_name: string
:param assignment_submission_types: (required) List of supported submission types for the assignment. Unless the assignment is allowing online submissions, the array should only have one element. If not allowing online submissions, your options are: "online_quiz" "none" "on_paper" "online_quiz" "discussion_topic" "external_tool" If you are allowing online submissions, you can have one or many allowed submission types: "online_upload" "online_text_entry" "online_url" "media_recording" (Only valid when the Kaltura plugin is enabled)
:type assignment_submission_types: string
:param assignment_position: (optional) The position of this assignment in the group when displaying assignment lists.
:type assignment_position: integer or None
:param assignment_allowed_extensions: (optional) Allowed extensions if submission_types includes "online_upload" Example: allowed_extensions: ["docx","ppt"]
:type assignment_allowed_extensions: string or None
:param assignment_turnitin_enabled: (optional) Only applies when the Turnitin plugin is enabled for a course and the submission_types array includes "online_upload". Toggles Turnitin submissions for the assignment. Will be ignored if Turnitin is not available for the course.
:type assignment_turnitin_enabled: boolean or None
:param assignment_integration_data: (optional) Data related to third party integrations, JSON string required.
:type assignment_integration_data: string or None
:param assignment_integration_id: (optional) Unique ID from third party integrations
:type assignment_integration_id: string or None
:param assignment_turnitin_settings: (optional) Settings to send along to turnitin. See Assignment object definition for format.
:type assignment_turnitin_settings: string or None
:param assignment_peer_reviews: (optional) If submission_types does not include external_tool,discussion_topic, online_quiz, or on_paper, determines whether or not peer reviews will be turned on for the assignment.
:type assignment_peer_reviews: boolean or None
:param assignment_automatic_peer_reviews: (optional) Whether peer reviews will be assigned automatically by Canvas or if teachers must manually assign peer reviews. Does not apply if peer reviews are not enabled.
:type assignment_automatic_peer_reviews: boolean or None
:param assignment_notify_of_update: (optional) If true, Canvas will send a notification to students in the class notifying them that the content has changed.
:type assignment_notify_of_update: boolean or None
:param assignment_group_category_id: (optional) If present, the assignment will become a group assignment assigned to the group.
:type assignment_group_category_id: integer or None
:param assignment_grade_group_students_individually: (optional) If this is a group assignment, teachers have the options to grade students individually. If false, Canvas will apply the assignment's score to each member of the group. If true, the teacher can manually assign scores to each member of the group.
:type assignment_grade_group_students_individually: integer or None
:param assignment_external_tool_tag_attributes: (optional) Hash of attributes if submission_types is ["external_tool"] Example: external_tool_tag_attributes: { // url to the external tool url: "http://instructure.com", // create a new tab for the module, defaults to false. new_tab: false }
:type assignment_external_tool_tag_attributes: string or None
:param assignment_points_possible: (optional) The maximum points possible on the assignment.
:type assignment_points_possible: float or None
:param assignment_grading_type: (optional) The strategy used for grading the assignment. The assignment is ungraded if this field is omitted.
:type assignment_grading_type: string or None
:param assignment_due_at: (optional) The day/time the assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_due_at: timestamp or None
:param assignment_lock_at: (optional) The day/time the assignment is locked after. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_lock_at: timestamp or None
:param assignment_unlock_at: (optional) The day/time the assignment is unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_unlock_at: timestamp or None
:param assignment_description: (optional) The assignment's description, supports HTML.
:type assignment_description: string or None
:param assignment_assignment_group_id: (optional) The assignment group id to put the assignment in. Defaults to the top assignment group in the course.
:type assignment_assignment_group_id: integer or None
:param assignment_muted: (optional) Whether this assignment is muted. A muted assignment does not send change notifications and hides grades from students. Defaults to false.
:type assignment_muted: boolean or None
:param assignment_assignment_overrides: (optional) List of overrides for the assignment. NOTE: The assignment overrides feature is in beta.
:type assignment_assignment_overrides: assignmentoverride or None
:param assignment_only_visible_to_overrides: (optional) Whether this assignment is only visible to overrides (Only useful if 'differentiated assignments' account setting is on)
:type assignment_only_visible_to_overrides: boolean or None
:param assignment_published: (optional) Whether this assignment is published. (Only useful if 'draft state' account setting is on) Unpublished assignments are not visible to students.
:type assignment_published: boolean or None
:param assignment_grading_standard_id: (optional) The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course. This will update the grading_type for the course to 'letter_grade' unless it is already 'gpa_scale'.
:type assignment_grading_standard_id: integer or None
:return: Create an assignment
:rtype: requests.Response (with Assignment data)
"""
assignment_submission_types_types = ('online_quiz', 'none', 'on_paper', 'discussion_topic', 'external_tool', 'online_upload', 'online_text_entry', 'online_url', 'media_recording')
assignment_grading_type_types = ('pass_fail', 'percent', 'letter_grade', 'gpa_scale', 'points')
utils.validate_attr_is_acceptable(assignment_submission_types, assignment_submission_types_types)
utils.validate_attr_is_acceptable(assignment_grading_type, assignment_grading_type_types)
path = '/v1/courses/{course_id}/assignments'
payload = {
'assignment[name]' : assignment_name,
'assignment[position]' : assignment_position,
'assignment[submission_types][]' : assignment_submission_types,
'assignment[allowed_extensions]' : assignment_allowed_extensions,
'assignment[turnitin_enabled]' : assignment_turnitin_enabled,
'assignment[integration_data]' : assignment_integration_data,
'assignment[integration_id]' : assignment_integration_id,
'assignment[turnitin_settings]' : assignment_turnitin_settings,
'assignment[peer_reviews]' : assignment_peer_reviews,
'assignment[automatic_peer_reviews]' : assignment_automatic_peer_reviews,
'assignment[notify_of_update]' : assignment_notify_of_update,
'assignment[group_category_id]' : assignment_group_category_id,
'assignment[grade_group_students_individually]' : assignment_grade_group_students_individually,
'assignment[points_possible]' : assignment_points_possible,
'assignment[grading_type]' : assignment_grading_type,
'assignment[due_at]' : assignment_due_at,
'assignment[lock_at]' : assignment_lock_at,
'assignment[unlock_at]' : assignment_unlock_at,
'assignment[description]' : assignment_description,
'assignment[assignment_group_id]' : assignment_assignment_group_id,
'assignment[muted]' : assignment_muted,
'assignment[assignment_overrides]' : assignment_assignment_overrides,
'assignment[only_visible_to_overrides]' : assignment_only_visible_to_overrides,
'assignment[published]' : assignment_published,
'assignment[grading_standard_id]' : assignment_grading_standard_id,
}
for attribute, value in list((assignment_external_tool_tag_attributes or {}).items()):
payload['assignment[external_tool_tag_attributes][{}]'.format(attribute)] = value
url = request_ctx.base_api_url + path.format(course_id=course_id)
response = client.post(request_ctx, url, payload=payload, **request_kwargs)
return response
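# Illustrative usage sketch: creating a published, online-upload assignment, assuming
# a valid ``request_ctx``; the course id, name and point value are hypothetical.
def _example_create_essay_assignment(request_ctx, course_id='12345'):
    return create_assignment(
        request_ctx,
        course_id,
        assignment_name='Essay 1',
        assignment_submission_types='online_upload',
        assignment_points_possible=100.0,
        assignment_published=True,
    )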
def edit_assignment(request_ctx, course_id, id, assignment_name=None, assignment_position=None, assignment_submission_types=None, assignment_allowed_extensions=None, assignment_turnitin_enabled=None, assignment_turnitin_settings=None, assignment_peer_reviews=None, assignment_automatic_peer_reviews=None, assignment_notify_of_update=None, assignment_group_category_id=None, assignment_grade_group_students_individually=None, assignment_external_tool_tag_attributes=None, assignment_points_possible=None, assignment_grading_type=None, assignment_due_at=None, assignment_lock_at=None, assignment_unlock_at=None, assignment_description=None, assignment_assignment_group_id=None, assignment_muted=None, assignment_assignment_overrides=None, assignment_only_visible_to_overrides=None, assignment_published=None, assignment_grading_standard_id=None, **request_kwargs):
"""
Modify an existing assignment.
If the assignment[assignment_overrides] key is absent, any existing
overrides are kept as is. If the assignment[assignment_overrides] key is
present, existing overrides are updated or deleted (and new ones created,
as necessary) to match the provided list.
NOTE: The assignment overrides feature is in beta.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param id: (required) ID
:type id: string
:param assignment_name: (optional) The assignment name.
:type assignment_name: string or None
:param assignment_position: (optional) The position of this assignment in the group when displaying assignment lists.
:type assignment_position: integer or None
:param assignment_submission_types: (optional) List of supported submission types for the assignment. Unless the assignment is allowing online submissions, the array should only have one element. If not allowing online submissions, your options are: "online_quiz" "none" "on_paper" "online_quiz" "discussion_topic" "external_tool" If you are allowing online submissions, you can have one or many allowed submission types: "online_upload" "online_text_entry" "online_url" "media_recording" (Only valid when the Kaltura plugin is enabled)
:type assignment_submission_types: string or None
:param assignment_allowed_extensions: (optional) Allowed extensions if submission_types includes "online_upload" Example: allowed_extensions: ["docx","ppt"]
:type assignment_allowed_extensions: string or None
:param assignment_turnitin_enabled: (optional) Only applies when the Turnitin plugin is enabled for a course and the submission_types array includes "online_upload". Toggles Turnitin submissions for the assignment. Will be ignored if Turnitin is not available for the course.
:type assignment_turnitin_enabled: boolean or None
:param assignment_turnitin_settings: (optional) Settings to send along to turnitin. See Assignment object definition for format.
:type assignment_turnitin_settings: string or None
:param assignment_peer_reviews: (optional) If submission_types does not include external_tool,discussion_topic, online_quiz, or on_paper, determines whether or not peer reviews will be turned on for the assignment.
:type assignment_peer_reviews: boolean or None
:param assignment_automatic_peer_reviews: (optional) Whether peer reviews will be assigned automatically by Canvas or if teachers must manually assign peer reviews. Does not apply if peer reviews are not enabled.
:type assignment_automatic_peer_reviews: boolean or None
:param assignment_notify_of_update: (optional) If true, Canvas will send a notification to students in the class notifying them that the content has changed.
:type assignment_notify_of_update: boolean or None
:param assignment_group_category_id: (optional) If present, the assignment will become a group assignment assigned to the group.
:type assignment_group_category_id: integer or None
:param assignment_grade_group_students_individually: (optional) If this is a group assignment, teachers have the options to grade students individually. If false, Canvas will apply the assignment's score to each member of the group. If true, the teacher can manually assign scores to each member of the group.
:type assignment_grade_group_students_individually: integer or None
:param assignment_external_tool_tag_attributes: (optional) Hash of attributes if submission_types is ["external_tool"] Example: external_tool_tag_attributes: { // url to the external tool url: "http://instructure.com", // create a new tab for the module, defaults to false. new_tab: false }
:type assignment_external_tool_tag_attributes: string or None
:param assignment_points_possible: (optional) The maximum points possible on the assignment.
:type assignment_points_possible: float or None
:param assignment_grading_type: (optional) The strategy used for grading the assignment. The assignment is ungraded if this field is omitted.
:type assignment_grading_type: string or None
:param assignment_due_at: (optional) The day/time the assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_due_at: timestamp or None
:param assignment_lock_at: (optional) The day/time the assignment is locked after. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_lock_at: timestamp or None
:param assignment_unlock_at: (optional) The day/time the assignment is unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_unlock_at: timestamp or None
:param assignment_description: (optional) The assignment's description, supports HTML.
:type assignment_description: string or None
:param assignment_assignment_group_id: (optional) The assignment group id to put the assignment in. Defaults to the top assignment group in the course.
:type assignment_assignment_group_id: integer or None
:param assignment_muted: (optional) Whether this assignment is muted. A muted assignment does not send change notifications and hides grades from students. Defaults to false.
:type assignment_muted: boolean or None
:param assignment_assignment_overrides: (optional) List of overrides for the assignment. NOTE: The assignment overrides feature is in beta.
:type assignment_assignment_overrides: assignmentoverride or None
:param assignment_only_visible_to_overrides: (optional) Whether this assignment is only visible to overrides (Only useful if 'differentiated assignments' account setting is on)
:type assignment_only_visible_to_overrides: boolean or None
:param assignment_published: (optional) Whether this assignment is published. (Only useful if 'draft state' account setting is on) Unpublished assignments are not visible to students.
:type assignment_published: boolean or None
:param assignment_grading_standard_id: (optional) The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course. This will update the grading_type for the course to 'letter_grade' unless it is already 'gpa_scale'.
:type assignment_grading_standard_id: integer or None
:return: Edit an assignment
:rtype: requests.Response (with Assignment data)
"""
assignment_submission_types_types = ('online_quiz', 'none', 'on_paper', 'discussion_topic', 'external_tool', 'online_upload', 'online_text_entry', 'online_url', 'media_recording')
assignment_grading_type_types = ('pass_fail', 'percent', 'letter_grade', 'gpa_scale', 'points')
utils.validate_attr_is_acceptable(assignment_submission_types, assignment_submission_types_types)
utils.validate_attr_is_acceptable(assignment_grading_type, assignment_grading_type_types)
path = '/v1/courses/{course_id}/assignments/{id}'
payload = {
'assignment[name]' : assignment_name,
'assignment[position]' : assignment_position,
'assignment[submission_types][]' : assignment_submission_types,
'assignment[allowed_extensions]' : assignment_allowed_extensions,
'assignment[turnitin_enabled]' : assignment_turnitin_enabled,
'assignment[turnitin_settings]' : assignment_turnitin_settings,
'assignment[peer_reviews]' : assignment_peer_reviews,
'assignment[automatic_peer_reviews]' : assignment_automatic_peer_reviews,
'assignment[notify_of_update]' : assignment_notify_of_update,
'assignment[group_category_id]' : assignment_group_category_id,
'assignment[grade_group_students_individually]' : assignment_grade_group_students_individually,
'assignment[points_possible]' : assignment_points_possible,
'assignment[grading_type]' : assignment_grading_type,
'assignment[due_at]' : assignment_due_at,
'assignment[lock_at]' : assignment_lock_at,
'assignment[unlock_at]' : assignment_unlock_at,
'assignment[description]' : assignment_description,
'assignment[assignment_group_id]' : assignment_assignment_group_id,
'assignment[muted]' : assignment_muted,
'assignment[assignment_overrides]' : assignment_assignment_overrides,
'assignment[only_visible_to_overrides]' : assignment_only_visible_to_overrides,
'assignment[published]' : assignment_published,
'assignment[grading_standard_id]' : assignment_grading_standard_id,
}
for attribute, value in list((assignment_external_tool_tag_attributes or {}).items()):
payload['assignment[external_tool_tag_attributes][{}]'.format(attribute)] = value
url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response
def list_assignment_overrides(request_ctx, course_id, assignment_id, per_page=None, **request_kwargs):
"""
Returns the list of overrides for this assignment that target
sections/groups/students visible to the current user.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param assignment_id: (required) ID
:type assignment_id: string
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List assignment overrides
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/courses/{course_id}/assignments/{assignment_id}/overrides'
payload = {
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(course_id=course_id, assignment_id=assignment_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
def get_single_assignment_override(request_ctx, course_id, assignment_id, id, **request_kwargs):
"""
Returns details of the override with the given id.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param assignment_id: (required) ID
:type assignment_id: string
:param id: (required) ID
:type id: string
:return: Get a single assignment override
:rtype: requests.Response (with AssignmentOverride data)
"""
path = '/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id}'
url = request_ctx.base_api_url + path.format(course_id=course_id, assignment_id=assignment_id, id=id)
response = client.get(request_ctx, url, **request_kwargs)
return response
def redirect_to_assignment_override_for_group(request_ctx, group_id, assignment_id, **request_kwargs):
"""
Responds with a redirect to the override for the given group, if any
(404 otherwise).
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param group_id: (required) ID
:type group_id: string
:param assignment_id: (required) ID
:type assignment_id: string
:return: Redirect to the assignment override for a group
:rtype: requests.Response (with void data)
"""
path = '/v1/groups/{group_id}/assignments/{assignment_id}/override'
url = request_ctx.base_api_url + path.format(group_id=group_id, assignment_id=assignment_id)
response = client.get(request_ctx, url, **request_kwargs)
return response
def redirect_to_assignment_override_for_section(request_ctx, course_section_id, assignment_id, **request_kwargs):
"""
Responds with a redirect to the override for the given section, if any
(404 otherwise).
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_section_id: (required) ID
:type course_section_id: string
:param assignment_id: (required) ID
:type assignment_id: string
:return: Redirect to the assignment override for a section
:rtype: requests.Response (with void data)
"""
path = '/v1/sections/{course_section_id}/assignments/{assignment_id}/override'
url = request_ctx.base_api_url + path.format(course_section_id=course_section_id, assignment_id=assignment_id)
response = client.get(request_ctx, url, **request_kwargs)
return response
def create_assignment_override(request_ctx, course_id, assignment_id, assignment_override_student_ids=None, assignment_override_title=None, assignment_override_group_id=None, assignment_override_course_section_id=None, assignment_override_due_at=None, assignment_override_unlock_at=None, assignment_override_lock_at=None, **request_kwargs):
"""
One of student_ids, group_id, or course_section_id must be present. At most
one should be present; if multiple are present only the most specific
(student_ids first, then group_id, then course_section_id) is used and any
others are ignored.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param assignment_id: (required) ID
:type assignment_id: string
:param assignment_override_student_ids: (optional) The IDs of the override's target students. If present, the IDs must each identify a user with an active student enrollment in the course that is not already targeted by a different adhoc override.
:type assignment_override_student_ids: integer or None
:param assignment_override_title: (optional) The title of the adhoc assignment override. Required if student_ids is present, ignored otherwise (the title is set to the name of the targeted group or section instead).
:type assignment_override_title: string or None
:param assignment_override_group_id: (optional) The ID of the override's target group. If present, the following conditions must be met for the override to be successful: 1. the assignment MUST be a group assignment (a group_category_id is assigned to it) 2. the ID must identify an active group in the group set the assignment is in 3. the ID must not be targeted by a different override. See {Appendix: Group assignments} for more info.
:type assignment_override_group_id: integer or None
:param assignment_override_course_section_id: (optional) The ID of the override's target section. If present, must identify an active section of the assignment's course not already targeted by a different override.
:type assignment_override_course_section_id: integer or None
:param assignment_override_due_at: (optional) The day/time the overridden assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect due date. May be present but null to indicate the override removes any previous due date.
:type assignment_override_due_at: timestamp or None
:param assignment_override_unlock_at: (optional) The day/time the overridden assignment becomes unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the unlock date. May be present but null to indicate the override removes any previous unlock date.
:type assignment_override_unlock_at: timestamp or None
:param assignment_override_lock_at: (optional) The day/time the overridden assignment becomes locked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the lock date. May be present but null to indicate the override removes any previous lock date.
:type assignment_override_lock_at: timestamp or None
:return: Create an assignment override
:rtype: requests.Response (with AssignmentOverride data)
"""
path = '/v1/courses/{course_id}/assignments/{assignment_id}/overrides'
payload = {
'assignment_override[student_ids]' : assignment_override_student_ids,
'assignment_override[title]' : assignment_override_title,
'assignment_override[group_id]' : assignment_override_group_id,
'assignment_override[course_section_id]' : assignment_override_course_section_id,
'assignment_override[due_at]' : assignment_override_due_at,
'assignment_override[unlock_at]' : assignment_override_unlock_at,
'assignment_override[lock_at]' : assignment_override_lock_at,
}
url = request_ctx.base_api_url + path.format(course_id=course_id, assignment_id=assignment_id)
response = client.post(request_ctx, url, payload=payload, **request_kwargs)
return response
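# Illustrative usage sketch: a due-date override for a single course section,
# assuming a valid ``request_ctx``; the ids and the timestamp are hypothetical.
def _example_extend_section_due_date(request_ctx, course_id='12345', assignment_id='678', section_id=42):
    return create_assignment_override(
        request_ctx,
        course_id,
        assignment_id,
        assignment_override_course_section_id=section_id,
        assignment_override_due_at='2014-10-28T23:59:00Z',
    )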
def update_assignment_override(request_ctx, course_id, assignment_id, id, assignment_override_student_ids=None, assignment_override_title=None, assignment_override_due_at=None, assignment_override_unlock_at=None, assignment_override_lock_at=None, **request_kwargs):
"""
All current overridden values must be supplied if they are to be retained;
e.g. if due_at was overridden, but this PUT omits a value for due_at,
due_at will no longer be overridden. If the override is adhoc and
student_ids is not supplied, the target override set is unchanged. Target
override sets cannot be changed for group or section overrides.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param assignment_id: (required) ID
:type assignment_id: string
:param id: (required) ID
:type id: string
:param assignment_override_student_ids: (optional) The IDs of the override's target students. If present, the IDs must each identify a user with an active student enrollment in the course that is not already targeted by a different adhoc override. Ignored unless the override being updated is adhoc.
:type assignment_override_student_ids: integer or None
:param assignment_override_title: (optional) The title of an adhoc assignment override. Ignored unless the override being updated is adhoc.
:type assignment_override_title: string or None
:param assignment_override_due_at: (optional) The day/time the overridden assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect due date. May be present but null to indicate the override removes any previous due date.
:type assignment_override_due_at: timestamp or None
:param assignment_override_unlock_at: (optional) The day/time the overridden assignment becomes unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the unlock date. May be present but null to indicate the override removes any previous unlock date.
:type assignment_override_unlock_at: timestamp or None
:param assignment_override_lock_at: (optional) The day/time the overridden assignment becomes locked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z. If absent, this override will not affect the lock date. May be present but null to indicate the override removes any previous lock date.
:type assignment_override_lock_at: timestamp or None
:return: Update an assignment override
:rtype: requests.Response (with AssignmentOverride data)
"""
path = '/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id}'
payload = {
'assignment_override[student_ids]' : assignment_override_student_ids,
'assignment_override[title]' : assignment_override_title,
'assignment_override[due_at]' : assignment_override_due_at,
'assignment_override[unlock_at]' : assignment_override_unlock_at,
'assignment_override[lock_at]' : assignment_override_lock_at,
}
url = request_ctx.base_api_url + path.format(course_id=course_id, assignment_id=assignment_id, id=id)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response
def delete_assignment_override(request_ctx, course_id, assignment_id, id, **request_kwargs):
"""
Deletes an override and returns its former details.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param assignment_id: (required) ID
:type assignment_id: string
:param id: (required) ID
:type id: string
:return: Delete an assignment override
:rtype: requests.Response (with AssignmentOverride data)
"""
path = '/v1/courses/{course_id}/assignments/{assignment_id}/overrides/{id}'
url = request_ctx.base_api_url + path.format(course_id=course_id, assignment_id=assignment_id, id=id)
response = client.delete(request_ctx, url, **request_kwargs)
return response
|
|
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.db.models.query import QuerySet
class GFKOptimizedQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
# pop the gfk_field from the kwargs if its passed in explicitly
self._gfk_field = kwargs.pop('gfk_field', None)
# call the parent class' initializer
super(GFKOptimizedQuerySet, self).__init__(*args, **kwargs)
def _clone(self, *args, **kwargs):
clone = super(GFKOptimizedQuerySet, self)._clone(*args, **kwargs)
clone._gfk_field = self._gfk_field
return clone
def get_gfk(self):
if not self._gfk_field:
for field in self.model._meta.virtual_fields:
if isinstance(field, GenericForeignKey):
self._gfk_field = field
break
return self._gfk_field
def generic_objects(self):
clone = self._clone()
ctypes_and_fks = {}
gfk_field = self.get_gfk()
ctype_field = '%s_id' % gfk_field.ct_field
fk_field = gfk_field.fk_field
for obj in clone:
ctype = ContentType.objects.get_for_id(getattr(obj, ctype_field))
obj_id = getattr(obj, fk_field)
ctypes_and_fks.setdefault(ctype, [])
ctypes_and_fks[ctype].append(obj_id)
gfk_objects = {}
for ctype, obj_ids in ctypes_and_fks.items():
gfk_objects[ctype.pk] = ctype.model_class()._default_manager.in_bulk(obj_ids)
obj_list = []
for obj in clone:
obj_list.append(gfk_objects[getattr(obj, ctype_field)][getattr(obj, fk_field)])
return obj_list
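# Note on generic_objects(): it avoids one query per row by grouping foreign keys by
# content type, issuing a single in_bulk() lookup per content type, and then
# reassembling the results in queryset order. A hypothetical call, assuming
# RelatedObject rows that point at Photo and Note instances:
#
#     related = GFKOptimizedQuerySet(RelatedObject).filter(alias='attachment')
#     objects = related.generic_objects()   # e.g. [<Photo: ...>, <Note: ...>]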
class RelatedObjectsDescriptor(object):
def __init__(self, model=None, from_field='parent', to_field='object'):
self.related_model = model or RelatedObject
self.from_field = self.get_related_model_field(from_field)
self.to_field = self.get_related_model_field(to_field)
def get_related_model_field(self, field_name):
opts = self.related_model._meta
for virtual_field in opts.virtual_fields:
if virtual_field.name == field_name:
return virtual_field
return opts.get_field(field_name)
def is_gfk(self, field):
return isinstance(field, GenericForeignKey)
def get_query_for_field(self, instance, field):
if self.is_gfk(field):
ctype = ContentType.objects.get_for_model(instance)
return {
field.ct_field: ctype,
field.fk_field: instance.pk
}
elif isinstance(instance, field.rel.to):
return {field.name: instance}
raise TypeError('Unable to query %s with %s' % (field, instance))
def get_query_from(self, instance):
return self.get_query_for_field(instance, self.from_field)
def get_query_to(self, instance):
return self.get_query_for_field(instance, self.to_field)
def contribute_to_class(self, cls, name):
self.name = name
self.model_class = cls
setattr(cls, self.name, self)
def __get__(self, instance, cls=None):
if instance is None:
return self
ManagerClass = type(self.related_model._default_manager)
return self.create_manager(instance, ManagerClass)
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
manager.add(*value)
def delete_manager(self, instance):
return self.create_manager(instance,
self.related_model._base_manager.__class__)
def create_manager(self, instance, superclass, cf_from=True):
rel_obj = self
if cf_from:
core_filters = self.get_query_from(instance)
rel_field = self.to_field
else:
core_filters = self.get_query_to(instance)
rel_field = self.from_field
uses_gfk = self.is_gfk(rel_field)
class RelatedManager(superclass):
def get_query_set(self):
if uses_gfk:
qs = GFKOptimizedQuerySet(self.model, gfk_field=rel_field)
return qs.filter(**(core_filters))
else:
return superclass.get_query_set(self).filter(**(core_filters))
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
for (k, v) in core_filters.items():
setattr(obj, k, v)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update(core_filters)
return super(RelatedManager, self).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs.update(core_filters)
return super(RelatedManager, self).get_or_create(**kwargs)
get_or_create.alters_data = True
def remove(self, *objs):
for obj in objs:
# Is obj actually part of this descriptor set?
if obj in self.all():
obj.delete()
else:
raise rel_obj.related_model.DoesNotExist(
    "%r is not related to %r." % (obj, instance))
remove.alters_data = True
def clear(self):
self.all().delete()
clear.alters_data = True
def connect(self, obj, **kwargs):
kwargs.update(rel_obj.get_query_to(obj))
connection, created = self.get_or_create(**kwargs)
return connection
def related_to(self):
mgr = rel_obj.create_manager(instance, superclass, False)
return mgr.filter(
**rel_obj.get_query_to(instance)
)
def symmetrical(self):
return superclass.get_query_set(self).filter(
Q(**rel_obj.get_query_from(instance)) |
Q(**rel_obj.get_query_to(instance))
).distinct()
manager = RelatedManager()
manager.core_filters = core_filters
manager.model = self.related_model
return manager
def all(self):
if self.is_gfk(self.from_field):
ctype = ContentType.objects.get_for_model(self.model_class)
query = {self.from_field.ct_field: ctype}
else:
query = {}
return self.related_model._default_manager.filter(**query)
class BaseGFKRelatedObject(models.Model):
"""
A generic many-to-many implementation where diverse objects are related
across a single model to other diverse objects -> using a dual GFK
"""
# SOURCE OBJECT:
parent_type = models.ForeignKey(ContentType, related_name="child_%(class)s")
parent_id = models.IntegerField(db_index=True)
parent = GenericForeignKey(ct_field="parent_type", fk_field="parent_id")
# ACTUAL RELATED OBJECT:
object_type = models.ForeignKey(ContentType, related_name="related_%(class)s")
object_id = models.IntegerField(db_index=True)
object = GenericForeignKey(ct_field="object_type", fk_field="object_id")
class Meta:
abstract = True
class RelatedObject(BaseGFKRelatedObject):
"""
A subclass of BaseGFKRelatedObject which adds two fields used for tracking
some metadata about the relationship, an alias and the date the relationship
was created
"""
alias = models.CharField(max_length=255, blank=True)
creation_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-creation_date',)
def __unicode__(self):
return u'%s related to %s ("%s")' % (self.parent, self.object, self.alias)
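# Illustrative usage sketch, assuming hypothetical ``Photo`` and ``Note`` models in
# the same project; shown as comments so nothing is registered at import time:
#
#     class Photo(models.Model):
#         name = models.CharField(max_length=100)
#         related = RelatedObjectsDescriptor()
#
#     photo.related.connect(note, alias='source')   # create/look up the RelatedObject row
#     photo.related.generic_objects()               # -> [<Note: ...>, ...]
#     photo.related.related_to()                    # RelatedObject rows that point *at* photo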
|
|
"""
********************************************************************************
* Name: StreamNetworkModel
* Author: Nathan Swain
* Created On: May 12, 2013
* Copyright: (c) Brigham Young University 2013
* License: BSD 2-Clause
********************************************************************************
"""
from __future__ import unicode_literals
## TODO: Add capability to store RATING_CURVE, RULE_CURVE, and SCHEDULED_RELEASE data
__all__ = ['ChannelInputFile',
'StreamLink',
'UpstreamLink',
'StreamNode',
'Weir',
'Culvert',
'Reservoir',
'ReservoirPoint',
'BreakpointCS',
'Breakpoint',
'TrapezoidalCS']
from future.utils import iteritems
import logging
import json
from mapkit.sqlatypes import Geometry
from sqlalchemy import ForeignKey, Column
from sqlalchemy.types import Integer, String, Float, Boolean
from sqlalchemy.orm import relationship
import xml.etree.ElementTree as ET
from . import DeclarativeBase
from ..base.geom import GeometricObjectBase
from ..base.file_base import GsshaPyFileObjectBase
from ..lib import parsetools as pt, cif_chunk as cic
from ..lib.parsetools import valueReadPreprocessor as vrp, valueWritePreprocessor as vwp
log = logging.getLogger(__name__)
class ChannelInputFile(DeclarativeBase, GsshaPyFileObjectBase):
"""
Object interface for the Channel Input File.
The contents of the channel input file is abstracted into several objects including:
:class:`.StreamLink`, :class:`.UpstreamLink`, :class:`.StreamNode`, :class:`.Weir`, :class:`.Culvert`,
:class:`.Reservoir`, :class:`.ReservoirPoint`, :class:`.BreakpointCS`, :class:`.Breakpoint`, and
:class:`.TrapezoidalCS`. See the documentation provided for each object for more details.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing
"""
__tablename__ = 'cif_channel_input_files'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
# Value Columns
alpha = Column(Float) #: FLOAT
beta = Column(Float) #: FLOAT
theta = Column(Float) #: FLOAT
links = Column(Integer) #: INTEGER
maxNodes = Column(Integer) #: INTEGER
fileExtension = Column(String, default='cif') #: STRING
# Relationship Properties
projectFile = relationship('ProjectFile', uselist=False, back_populates='channelInputFile') #: RELATIONSHIP
streamLinks = relationship('StreamLink', back_populates='channelInputFile') #: RELATIONSHIP
linkNodeDatasets = relationship('LinkNodeDatasetFile', back_populates='channelInputFile') #: RELATIONSHIP
def __init__(self, alpha=None, beta=None, theta=None, links=None, maxNodes=None):
"""
Constructor
"""
GsshaPyFileObjectBase.__init__(self)
self.alpha = alpha
self.beta = beta
self.theta = theta
self.links = links
self.maxNodes = maxNodes
def __eq__(self, other):
return (self.alpha == other.alpha and
self.beta == other.beta and
self.theta == other.theta and
self.links == other.links and
self.maxNodes == other.maxNodes)
def getFluvialLinks(self):
"""
Retrieve only the links that represent fluvial portions of the stream. Returns a list of StreamLink instances.
Returns:
list: A list of fluvial :class:`.StreamLink` objects.
"""
# Define fluvial types
fluvialTypeKeywords = ('TRAPEZOID', 'TRAP', 'BREAKPOINT', 'ERODE', 'SUBSURFACE')
fluvialLinks = []
for link in self.streamLinks:
for fluvialTypeKeyword in fluvialTypeKeywords:
if fluvialTypeKeyword in link.type:
fluvialLinks.append(link)
break
return fluvialLinks
def getOrderedLinks(self, session):
"""
Retrieve the links in the order of the link number.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
Returns:
list: A list of :class:`.StreamLink` objects.
"""
streamLinks = session.query(StreamLink).\
filter(StreamLink.channelInputFile == self).\
order_by(StreamLink.linkNumber).\
all()
return streamLinks
def getStreamNetworkAsKml(self, session, path=None, documentName='Stream Network', withNodes=False, styles={}):
"""
Retrieve the stream network visualization in KML format.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
path (str, optional): Path to file where KML will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to 'Stream Network'.
withNodes (bool, optional): Include nodes. Defaults to False.
styles (dict, optional): Custom styles to apply to KML geometry. Defaults to empty dictionary.
Valid keys (styles) include:
* lineColor: tuple/list of RGBA integers (0-255) e.g.: (255, 0, 0, 128)
* lineWidth: float line width in pixels
* nodeIconHref: link to icon image (PNG format) to represent nodes (see: http://kml4earth.appspot.com/icons.html)
* nodeIconScale: scale of the icon image
Returns:
str: KML string
"""
# Retrieve Stream Links
links = self.getFluvialLinks()
# Set Default Styles
lineColorValue = (255, 255, 0, 0) # Blue
lineWidthValue = 2
nodeIconHrefValue = 'http://maps.google.com/mapfiles/kml/paddle/red-circle.png'
nodeIconScaleValue = 1.0
if 'lineColor' in styles:
if len(styles['lineColor']) < 4:
log.warning('lineColor style must be a list or a tuple of four elements containing integer RGBA values.')
else:
userLineColor = styles['lineColor']
lineColorValue = (userLineColor[3], userLineColor[2], userLineColor[1], userLineColor[0])
if 'lineWidth' in styles:
try:
float(styles['lineWidth'])
lineWidthValue = styles['lineWidth']
except ValueError:
log.warning('lineWidth must be a valid number containing the width of the line in pixels.')
if 'nodeIconHref' in styles:
nodeIconHrefValue = styles['nodeIconHref']
if 'nodeIconScale' in styles:
try:
float(styles['nodeIconScale'])
nodeIconScaleValue = styles['nodeIconScale']
except ValueError:
log.warning('nodeIconScale must be a valid number containing the scale of the node icon.')
# Initialize KML Document
kml = ET.Element('kml', xmlns='http://www.opengis.net/kml/2.2')
document = ET.SubElement(kml, 'Document')
docName = ET.SubElement(document, 'name')
docName.text = documentName
for link in links:
placemark = ET.SubElement(document, 'Placemark')
placemarkName = ET.SubElement(placemark, 'name')
placemarkName.text = str(link.linkNumber)
# Create style tag and setup styles
styleElement = ET.SubElement(placemark, 'Style')  # renamed to avoid shadowing the ``styles`` dict argument
# Set line style
lineStyle = ET.SubElement(styleElement, 'LineStyle')
lineColor = ET.SubElement(lineStyle, 'color')
lineColor.text = '%02X%02X%02X%02X' % lineColorValue
lineWidth = ET.SubElement(lineStyle, 'width')
lineWidth.text = str(lineWidthValue)
# Add the geometry to placemark
linkKML = link.getAsKml(session)
if linkKML:
lineString = ET.fromstring(linkKML)
placemark.append(lineString)
else:
log.warning("No geometry found for link with id {0}".format(link.id))
if withNodes:
# Create the node styles
nodeStyles = ET.SubElement(document, 'Style', id='node_styles')
# Hide labels
nodeLabelStyle = ET.SubElement(nodeStyles, 'LabelStyle')
nodeLabelScale = ET.SubElement(nodeLabelStyle, 'scale')
nodeLabelScale.text = str(0)
# Style icon
nodeIconStyle = ET.SubElement(nodeStyles, 'IconStyle')
# Set icon
nodeIcon = ET.SubElement(nodeIconStyle, 'Icon')
iconHref = ET.SubElement(nodeIcon, 'href')
iconHref.text = nodeIconHrefValue
# Set icon scale
iconScale = ET.SubElement(nodeIconStyle, 'scale')
iconScale.text = str(nodeIconScaleValue)
for node in link.nodes:
# New placemark for each node
nodePlacemark = ET.SubElement(document, 'Placemark')
nodePlacemarkName = ET.SubElement(nodePlacemark, 'name')
nodePlacemarkName.text = str(node.nodeNumber)
# Styles for the node
nodeStyleUrl = ET.SubElement(nodePlacemark, 'styleUrl')
nodeStyleUrl.text = '#node_styles'
nodeString = ET.fromstring(node.getAsKml(session))
nodePlacemark.append(nodeString)
# Embed node data
nodeExtendedData = ET.SubElement(nodePlacemark, 'ExtendedData')
nodeNumberData = ET.SubElement(nodeExtendedData, 'Data', name='node_number')
nodeNumberValue = ET.SubElement(nodeNumberData, 'value')
nodeNumberValue.text = str(node.nodeNumber)
nodeLinkNumberData = ET.SubElement(nodeExtendedData, 'Data', name='link_number')
nodeLinkNumberValue = ET.SubElement(nodeLinkNumberData, 'value')
nodeLinkNumberValue.text = str(link.linkNumber)
nodeElevationData = ET.SubElement(nodeExtendedData, 'Data', name='elevation')
nodeElevationValue = ET.SubElement(nodeElevationData, 'value')
nodeElevationValue.text = str(node.elevation)
# Create the data tag
extendedData = ET.SubElement(placemark, 'ExtendedData')
# Add value to data
linkNumberData = ET.SubElement(extendedData, 'Data', name='link_number')
linkNumberValue = ET.SubElement(linkNumberData, 'value')
linkNumberValue.text = str(link.linkNumber)
linkTypeData = ET.SubElement(extendedData, 'Data', name='link_type')
linkTypeValue = ET.SubElement(linkTypeData, 'value')
linkTypeValue.text = str(link.type)
numElementsData = ET.SubElement(extendedData, 'Data', name='number_elements')
numElementsValue = ET.SubElement(numElementsData, 'value')
numElementsValue.text = str(link.numElements)
dxData = ET.SubElement(extendedData, 'Data', name='dx')
dxValue = ET.SubElement(dxData, 'value')
dxValue.text = str(link.dx)
erodeData = ET.SubElement(extendedData, 'Data', name='erode')
erodeValue = ET.SubElement(erodeData, 'value')
erodeValue.text = str(link.erode)
subsurfaceData = ET.SubElement(extendedData, 'Data', name='subsurface')
subsurfaceValue = ET.SubElement(subsurfaceData, 'value')
subsurfaceValue.text = str(link.subsurface)
kmlString = ET.tostring(kml)
if path:
with open(path, 'w') as f:
f.write(kmlString)
return kmlString
def getStreamNetworkAsWkt(self, session, withNodes=True):
"""
Retrieve the stream network geometry in Well Known Text format.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
withNodes (bool, optional): Include nodes. Defaults to True.
Returns:
str: Well Known Text string.
"""
wkt_list = []
for link in self.streamLinks:
wkt_link = link.getAsWkt(session)
if wkt_link:
wkt_list.append(wkt_link)
if withNodes:
for node in link.nodes:
wkt_node = node.getAsWkt(session)
if wkt_node:
wkt_list.append(wkt_node)
return 'GEOMETRYCOLLECTION ({0})'.format(', '.join(wkt_list))
def getStreamNetworkAsGeoJson(self, session, withNodes=True):
"""
Retrieve the stream network geometry in GeoJSON format.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
withNodes (bool, optional): Include nodes. Defaults to True.
Returns:
str: GeoJSON string.
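Example (illustrative sketch; assumes ``channel_input_file`` has already been
read into a PostGIS enabled database and ``session`` is bound to it)::
    geojson_string = channel_input_file.getStreamNetworkAsGeoJson(session)
    feature_collection = json.loads(geojson_string)
    print(len(feature_collection['features']))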
"""
features_list = []
# Assemble link features
for link in self.streamLinks:
link_geoJson = link.getAsGeoJson(session)
if link_geoJson:
link_geometry = json.loads(link_geoJson)
link_properties = {"link_number": link.linkNumber,
"type": link.type,
"num_elements": link.numElements,
"dx": link.dx,
"erode": link.erode,
"subsurface": link.subsurface}
link_feature = {"type": "Feature",
"geometry": link_geometry,
"properties": link_properties,
"id": link.id}
features_list.append(link_feature)
# Assemble node features
if withNodes:
for node in link.nodes:
node_geoJson = node.getAsGeoJson(session)
if node_geoJson:
node_geometry = json.loads(node_geoJson)
node_properties = {"link_number": link.linkNumber,
"node_number": node.nodeNumber,
"elevation": node.elevation}
node_feature = {"type": "Feature",
"geometry": node_geometry,
"properties": node_properties,
"id": node.id}
features_list.append(node_feature)
feature_collection = {"type": "FeatureCollection",
"features": features_list}
return json.dumps(feature_collection)
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Channel Input File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'ALPHA': cic.cardChunk,
'BETA': cic.cardChunk,
'THETA': cic.cardChunk,
'LINKS': cic.cardChunk,
'MAXNODES': cic.cardChunk,
'CONNECT': cic.connectChunk,
'LINK': cic.linkChunk}
links = []
connectivity = []
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Call chunk specific parsers for each chunk
result = KEYWORDS[key](key, chunk)
# Cases
if key == 'LINK':
# Link handler
links.append(self._createLink(result, replaceParamFile))
elif key == 'CONNECT':
# Connectivity handler
connectivity.append(result)
else:
# Global variable handler
card = result['card']
value = result['values'][0]
# Cases
if card == 'LINKS':
self.links = int(value)
elif card == 'MAXNODES':
self.maxNodes = int(value)
elif card == 'ALPHA':
self.alpha = float(vrp(value, replaceParamFile))
elif card == 'BETA':
self.beta = float(vrp(value, replaceParamFile))
elif card == 'THETA':
self.theta = float(vrp(value, replaceParamFile))
self._createConnectivity(linkList=links, connectList=connectivity)
if spatial:
self._createGeometry(session, spatialReferenceID)
def _write(self, session, openFile, replaceParamFile):
"""
Channel Input File Write to File Method
"""
# Write lines
openFile.write('GSSHA_CHAN\n')
alpha = vwp(self.alpha, replaceParamFile)
try:
openFile.write('ALPHA%s%.6f\n' % (' ' * 7, alpha))
except:
openFile.write('ALPHA%s%s\n' % (' ' * 7, alpha))
beta = vwp(self.beta, replaceParamFile)
try:
openFile.write('BETA%s%.6f\n' % (' ' * 8, beta))
except:
openFile.write('BETA%s%s\n' % (' ' * 8, beta))
theta = vwp(self.theta, replaceParamFile)
try:
openFile.write('THETA%s%.6f\n' % (' ' * 7, theta))
except:
openFile.write('THETA%s%s\n' % (' ' * 7, theta))
openFile.write('LINKS%s%s\n' % (' ' * 7, self.links))
openFile.write('MAXNODES%s%s\n' % (' ' * 4, self.maxNodes))
# Retrieve StreamLinks
links = self.getOrderedLinks(session)
self._writeConnectivity(links=links,
fileObject=openFile)
self._writeLinks(links=links,
fileObject=openFile,
replaceParamFile=replaceParamFile)
def _createLink(self, linkResult, replaceParamFile):
"""
Create GSSHAPY Link Object Method
"""
link = None
# Cases
if linkResult['type'] == 'XSEC':
# Cross section link handler
link = self._createCrossSection(linkResult, replaceParamFile)
elif linkResult['type'] == 'STRUCTURE':
# Structure link handler
link = self._createStructure(linkResult, replaceParamFile)
elif linkResult['type'] in ('RESERVOIR', 'LAKE'):
# Reservoir/lake handler
link = self._createReservoir(linkResult, replaceParamFile)
return link
def _createConnectivity(self, linkList, connectList):
"""
Create GSSHAPY Connect Object Method
"""
# Create StreamLink-Connectivity Pairs
for idx, link in enumerate(linkList):
connectivity = connectList[idx]
# Initialize GSSHAPY UpstreamLink objects
for upLink in connectivity['upLinks']:
upstreamLink = UpstreamLink(upstreamLinkID=int(upLink))
upstreamLink.streamLink = link
link.downstreamLinkID = int(connectivity['downLink'])
link.numUpstreamLinks = int(connectivity['numUpLinks'])
def _createCrossSection(self, linkResult, replaceParamFile):
"""
Create GSSHAPY Cross Section Objects Method
"""
# Extract header variables from link result object
header = linkResult['header']
# Initialize GSSHAPY StreamLink object
link = StreamLink(linkNumber=int(header['link']),
type=header['xSecType'],
numElements=header['nodes'],
dx=vrp(header['dx'], replaceParamFile),
erode=header['erode'],
subsurface=header['subsurface'])
# Associate StreamLink with ChannelInputFile
link.channelInputFile = self
# Initialize GSSHAPY TrapezoidalCS or BreakpointCS objects
xSection = linkResult['xSection']
# Cases
if 'TRAPEZOID' in link.type or 'TRAP' in link.type:
# Trapezoid cross section handler
# Initialize GSSHPY TrapeziodalCS object
trapezoidCS = TrapezoidalCS(mannings_n=vrp(xSection['mannings_n'], replaceParamFile),
bottomWidth=vrp(xSection['bottom_width'], replaceParamFile),
bankfullDepth=vrp(xSection['bankfull_depth'], replaceParamFile),
sideSlope=vrp(xSection['side_slope'], replaceParamFile),
mRiver=vrp(xSection['m_river'], replaceParamFile),
kRiver=vrp(xSection['k_river'], replaceParamFile),
erode=xSection['erode'],
subsurface=xSection['subsurface'],
maxErosion=vrp(xSection['max_erosion'], replaceParamFile))
# Associate TrapezoidalCS with StreamLink
trapezoidCS.streamLink = link
elif 'BREAKPOINT' in link.type:
# Breakpoint cross section handler
# Initialize GSSHAPY BreakpointCS objects
breakpointCS = BreakpointCS(mannings_n=vrp(xSection['mannings_n'], replaceParamFile),
numPairs=xSection['npairs'],
numInterp=vrp(xSection['num_interp'], replaceParamFile),
mRiver=vrp(xSection['m_river'], replaceParamFile),
kRiver=vrp(xSection['k_river'], replaceParamFile),
erode=xSection['erode'],
subsurface=xSection['subsurface'],
maxErosion=vrp(xSection['max_erosion'], replaceParamFile))
# Associate BreakpointCS with StreamLink
breakpointCS.streamLink = link
# Create GSSHAPY Breakpoint objects
for b in xSection['breakpoints']:
breakpoint = Breakpoint(x=b['x'],
y=b['y'])
# Associate Breakpoint with BreakpointCS
breakpoint.crossSection = breakpointCS
# Initialize GSSHAPY StreamNode objects
for n in linkResult['nodes']:
# Initialize GSSHAPY StreamNode object
node = StreamNode(nodeNumber=int(n['node']),
x=n['x'],
y=n['y'],
elevation=n['elev'])
# Associate StreamNode with StreamLink
node.streamLink = link
return link
def _createStructure(self, linkResult, replaceParamFile):
"""
Create GSSHAPY Structure Objects Method
"""
# Constants
WEIRS = ('WEIR', 'SAG_WEIR')
CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')
CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')
header = linkResult['header']
# Initialize GSSHAPY StreamLink object
link = StreamLink(linkNumber=header['link'],
type=linkResult['type'],
numElements=header['numstructs'])
# Associate StreamLink with ChannelInputFile
link.channelInputFile = self
# Create Structure objects
for s in linkResult['structures']:
structType = s['structtype']
# Cases
if structType in WEIRS:
# Weir type handler
# Initialize GSSHAPY Weir object
weir = Weir(type=structType,
crestLength=vrp(s['crest_length'], replaceParamFile),
crestLowElevation=vrp(s['crest_low_elev'], replaceParamFile),
dischargeCoeffForward=vrp(s['discharge_coeff_forward'], replaceParamFile),
dischargeCoeffReverse=vrp(s['discharge_coeff_reverse'], replaceParamFile),
crestLowLocation=vrp(s['crest_low_loc'], replaceParamFile),
steepSlope=vrp(s['steep_slope'], replaceParamFile),
shallowSlope=vrp(s['shallow_slope'], replaceParamFile))
# Associate Weir with StreamLink
weir.streamLink = link
elif structType in CULVERTS:
# Culvert type handler
# Initialize GSSHAPY Culvert object
culvert = Culvert(type=structType,
upstreamInvert=vrp(s['upinvert'], replaceParamFile),
downstreamInvert=vrp(s['downinvert'], replaceParamFile),
inletDischargeCoeff=vrp(s['inlet_disch_coeff'], replaceParamFile),
reverseFlowDischargeCoeff=vrp(s['rev_flow_disch_coeff'], replaceParamFile),
slope=vrp(s['slope'], replaceParamFile),
length=vrp(s['length'], replaceParamFile),
roughness=vrp(s['rough_coeff'], replaceParamFile),
diameter=vrp(s['diameter'], replaceParamFile),
width=vrp(s['width'], replaceParamFile),
height=vrp(s['height'], replaceParamFile))
# Associate Culvert with StreamLink
culvert.streamLink = link
elif structType in CURVES:
# Curve type handler
pass
return link
def _createReservoir(self, linkResult, replaceParamFile):
"""
Create GSSHAPY Reservoir Objects Method
"""
# Extract header variables from link result object
header = linkResult['header']
# Cases
if linkResult['type'] == 'LAKE':
# Lake handler
initWSE = vrp(header['initwse'], replaceParamFile)
minWSE = vrp(header['minwse'], replaceParamFile)
maxWSE = vrp(header['maxwse'], replaceParamFile)
numPts = header['numpts']
elif linkResult['type'] == 'RESERVOIR':
# Reservoir handler
initWSE = vrp(header['res_initwse'], replaceParamFile)
minWSE = vrp(header['res_minwse'], replaceParamFile)
maxWSE = vrp(header['res_maxwse'], replaceParamFile)
numPts = header['res_numpts']
# Initialize GSSHAPY Reservoir object
reservoir = Reservoir(initWSE=initWSE,
minWSE=minWSE,
maxWSE=maxWSE)
# Initialize GSSHAPY StreamLink object
link = StreamLink(linkNumber=int(header['link']),
type=linkResult['type'],
numElements=numPts)
# Associate StreamLink with ChannelInputFile
link.channelInputFile = self
# Associate Reservoir with StreamLink
reservoir.streamLink = link
# Create ReservoirPoint objects
for p in linkResult['points']:
# Initialize GSSHAPY ReservoirPoint object
resPoint = ReservoirPoint(i=p['i'],
j=p['j'])
# Associate ReservoirPoint with Reservoir
resPoint.reservoir = reservoir
return link
def _createGeometry(self, session, spatialReferenceID):
"""
Create PostGIS geometric objects
"""
# Flush the current session
session.flush()
# Create geometry for each fluvial link
for link in self.getFluvialLinks():
# Retrieve the nodes for each link
nodes = link.nodes
nodeCoordinates = []
# Create geometry for each node
for node in nodes:
# Assemble coordinates in well known text format
coordinates = '{0} {1} {2}'.format(node.x, node.y, node.elevation)
nodeCoordinates.append(coordinates)
# Create well known text string for point with z coordinate
wktPoint = 'POINT Z ({0})'.format(coordinates)
# Write SQL statement
statement = self._getUpdateGeometrySqlString(geometryID=node.id,
tableName=node.tableName,
spatialReferenceID=spatialReferenceID,
wktString=wktPoint)
session.execute(statement)
# Assemble line string in well known text format
wktLineString = 'LINESTRING Z ({0})'.format(', '.join(nodeCoordinates))
# Write SQL statement
statement = self._getUpdateGeometrySqlString(geometryID=link.id,
tableName=link.tableName,
spatialReferenceID=spatialReferenceID,
wktString=wktLineString)
session.execute(statement)
def _writeConnectivity(self, links, fileObject):
"""
Write Connectivity Lines to File Method
"""
for link in links:
linkNum = link.linkNumber
downLink = link.downstreamLinkID
numUpLinks = link.numUpstreamLinks
upLinks = ''
for upLink in link.upstreamLinks:
upLinks = '{}{:>5}'.format(upLinks, str(upLink.upstreamLinkID))
line = 'CONNECT{:>5}{:>5}{:>5}{}\n'.format(linkNum, downLink, numUpLinks, upLinks)
fileObject.write(line)
fileObject.write('\n')
def _writeLinks(self, links, fileObject, replaceParamFile):
"""
Write Link Lines to File Method
"""
for link in links:
linkType = link.type
fileObject.write('LINK %s\n' % link.linkNumber)
# Cases
if 'TRAP' in linkType or 'TRAPEZOID' in linkType or 'BREAKPOINT' in linkType:
self._writeCrossSectionLink(link, fileObject, replaceParamFile)
elif linkType == 'STRUCTURE':
self._writeStructureLink(link, fileObject, replaceParamFile)
elif linkType in ('RESERVOIR', 'LAKE'):
self._writeReservoirLink(link, fileObject, replaceParamFile)
else:
log.error('OOPS: CIF LINE 417') # THIS SHOULDN'T HAPPEN
fileObject.write('\n')
def _writeReservoirLink(self, link, fileObject, replaceParamFile):
"""
Write Reservoir/Lake Link to File Method
"""
fileObject.write('%s\n' % link.type)
# Retrieve reservoir
reservoir = link.reservoir
# Reservoir parameters
initWSE = vwp(reservoir.initWSE, replaceParamFile)
minWSE = vwp(reservoir.minWSE, replaceParamFile)
maxWSE = vwp(reservoir.maxWSE, replaceParamFile)
numElements = link.numElements
# Cases
if link.type == 'LAKE':
# Lake handler
try:
fileObject.write('INITWSE %.6f\n' % initWSE)
except:
fileObject.write('INITWSE %s\n' % initWSE)
try:
fileObject.write('MINWSE %.6f\n' % minWSE)
except:
fileObject.write('MINWSE %s\n' % minWSE)
try:
fileObject.write('MAXWSE %.6f\n' % maxWSE)
except:
fileObject.write('MAXWSE %s\n' % maxWSE)
fileObject.write('NUMPTS %s\n' % numElements)
elif link.type == 'RESERVOIR':
# Reservoir handler
try:
fileObject.write('RES_INITWSE %.6f\n' % initWSE)
except:
fileObject.write('RES_INITWSE %s\n' % initWSE)
try:
fileObject.write('RES_MINWSE %.6f\n' % minWSE)
except:
fileObject.write('RES_MINWSE %s\n' % minWSE)
try:
fileObject.write('RES_MAXWSE %.6f\n' % maxWSE)
except:
fileObject.write('RES_MAXWSE %s\n' % maxWSE)
fileObject.write('RES_NUMPTS %s\n' % numElements)
# Retrieve reservoir points
points = reservoir.reservoirPoints
for idx, point in enumerate(points):
if ((idx + 1) % 10) != 0:
fileObject.write('%s %s ' % (point.i, point.j))
else:
fileObject.write('%s %s\n' % (point.i, point.j))
if (link.numElements % 10) != 0:
fileObject.write('\n')
def _writeStructureLink(self, link, fileObject, replaceParamFile):
"""
Write Structure Link to File Method
"""
fileObject.write('%s\n' % link.type)
fileObject.write('NUMSTRUCTS %s\n' % link.numElements)
# Retrieve lists of structures
weirs = link.weirs
culverts = link.culverts
# Write weirs to file
for weir in weirs:
fileObject.write('STRUCTTYPE %s\n' % weir.type)
# Check for replacement vars
crestLength = vwp(weir.crestLength, replaceParamFile)
crestLowElevation = vwp(weir.crestLowElevation, replaceParamFile)
dischargeCoeffForward = vwp(weir.dischargeCoeffForward, replaceParamFile)
dischargeCoeffReverse = vwp(weir.dischargeCoeffReverse, replaceParamFile)
crestLowLocation = vwp(weir.crestLowLocation, replaceParamFile)
steepSlope = vwp(weir.steepSlope, replaceParamFile)
shallowSlope = vwp(weir.shallowSlope, replaceParamFile)
if weir.crestLength != None:
try:
fileObject.write('CREST_LENGTH %.6f\n' % crestLength)
except:
fileObject.write('CREST_LENGTH %s\n' % crestLength)
if weir.crestLowElevation != None:
try:
fileObject.write('CREST_LOW_ELEV %.6f\n' % crestLowElevation)
except:
fileObject.write('CREST_LOW_ELEV %s\n' % crestLowElevation)
if weir.dischargeCoeffForward != None:
try:
fileObject.write('DISCHARGE_COEFF_FORWARD %.6f\n' % dischargeCoeffForward)
except:
fileObject.write('DISCHARGE_COEFF_FORWARD %s\n' % dischargeCoeffForward)
if weir.dischargeCoeffReverse != None:
try:
fileObject.write('DISCHARGE_COEFF_REVERSE %.6f\n' % dischargeCoeffReverse)
except:
fileObject.write('DISCHARGE_COEFF_REVERSE %s\n' % dischargeCoeffReverse)
if weir.crestLowLocation != None:
fileObject.write('CREST_LOW_LOC %s\n' % crestLowLocation)
if weir.steepSlope != None:
try:
fileObject.write('STEEP_SLOPE %.6f\n' % steepSlope)
except:
fileObject.write('STEEP_SLOPE %s\n' % steepSlope)
if weir.shallowSlope != None:
try:
fileObject.write('SHALLOW_SLOPE %.6f\n' % shallowSlope)
except:
fileObject.write('SHALLOW_SLOPE %s\n' % shallowSlope)
# Write culverts to file
for culvert in culverts:
fileObject.write('STRUCTTYPE %s\n' % culvert.type)
# Check for replacement vars
upstreamInvert = vwp(culvert.upstreamInvert, replaceParamFile)
downstreamInvert = vwp(culvert.downstreamInvert, replaceParamFile)
inletDischargeCoeff = vwp(culvert.inletDischargeCoeff, replaceParamFile)
reverseFlowDischargeCoeff = vwp(culvert.reverseFlowDischargeCoeff, replaceParamFile)
slope = vwp(culvert.slope, replaceParamFile)
length = vwp(culvert.length, replaceParamFile)
roughness = vwp(culvert.roughness, replaceParamFile)
diameter = vwp(culvert.diameter, replaceParamFile)
width = vwp(culvert.width, replaceParamFile)
height = vwp(culvert.height, replaceParamFile)
if culvert.upstreamInvert != None:
try:
fileObject.write('UPINVERT %.6f\n' % upstreamInvert)
except:
fileObject.write('UPINVERT %s\n' % upstreamInvert)
if culvert.downstreamInvert != None:
try:
fileObject.write('DOWNINVERT %.6f\n' % downstreamInvert)
except:
fileObject.write('DOWNINVERT %s\n' % downstreamInvert)
if culvert.inletDischargeCoeff != None:
try:
fileObject.write('INLET_DISCH_COEFF %.6f\n' % inletDischargeCoeff)
except:
fileObject.write('INLET_DISCH_COEFF %s\n' % inletDischargeCoeff)
if culvert.reverseFlowDischargeCoeff != None:
try:
fileObject.write('REV_FLOW_DISCH_COEFF %.6f\n' % reverseFlowDischargeCoeff)
except:
fileObject.write('REV_FLOW_DISCH_COEFF %s\n' % reverseFlowDischargeCoeff)
if culvert.slope != None:
try:
fileObject.write('SLOPE %.6f\n' % slope)
except:
fileObject.write('SLOPE %s\n' % slope)
if culvert.length != None:
try:
fileObject.write('LENGTH %.6f\n' % length)
except:
fileObject.write('LENGTH %s\n' % length)
if culvert.roughness != None:
try:
fileObject.write('ROUGH_COEFF %.6f\n' % roughness)
except:
fileObject.write('ROUGH_COEFF %s\n' % roughness)
if culvert.diameter != None:
try:
fileObject.write('DIAMETER %.6f\n' % diameter)
except:
fileObject.write('DIAMETER %s\n' % diameter)
if culvert.width != None:
try:
fileObject.write('WIDTH %.6f\n' % width)
except:
fileObject.write('WIDTH %s\n' % width)
if culvert.height != None:
try:
fileObject.write('HEIGHT %.6f\n' % height)
except:
fileObject.write('HEIGHT %s\n' % height)
def _writeCrossSectionLink(self, link, fileObject, replaceParamFile):
"""
Write Cross Section Link to File Method
"""
linkType = link.type
# Write cross section link header
dx = vwp(link.dx, replaceParamFile)
try:
fileObject.write('DX %.6f\n' % dx)
except:
fileObject.write('DX %s\n' % dx)
fileObject.write('%s\n' % linkType)
fileObject.write('NODES %s\n' % link.numElements)
for node in link.nodes:
# Write node information
fileObject.write('NODE %s\n' % node.nodeNumber)
fileObject.write('X_Y %.6f %.6f\n' % (node.x, node.y))
fileObject.write('ELEV %.6f\n' % node.elevation)
if node.nodeNumber == 1:
# Write cross section information after first node
fileObject.write('XSEC\n')
# Cases
if 'TRAPEZOID' in linkType or 'TRAP' in linkType:
# Retrieve cross section
xSec = link.trapezoidalCS
# Write cross section properties
mannings_n = vwp(xSec.mannings_n, replaceParamFile)
bottomWidth = vwp(xSec.bottomWidth, replaceParamFile)
bankfullDepth = vwp(xSec.bankfullDepth, replaceParamFile)
sideSlope = vwp(xSec.sideSlope, replaceParamFile)
try:
fileObject.write('MANNINGS_N %.6f\n' % mannings_n)
except:
fileObject.write('MANNINGS_N %s\n' % mannings_n)
try:
fileObject.write('BOTTOM_WIDTH %.6f\n' % bottomWidth)
except:
fileObject.write('BOTTOM_WIDTH %s\n' % bottomWidth)
try:
fileObject.write('BANKFULL_DEPTH %.6f\n' % bankfullDepth)
except:
fileObject.write('BANKFULL_DEPTH %s\n' % bankfullDepth)
try:
fileObject.write('SIDE_SLOPE %.6f\n' % sideSlope)
except:
fileObject.write('SIDE_SLOPE %s\n' % sideSlope)
# Write optional cross section properties
self._writeOptionalXsecCards(fileObject=fileObject, xSec=xSec, replaceParamFile=replaceParamFile)
elif 'BREAKPOINT' in linkType:
# Retrieve cross section
xSec = link.breakpointCS
# Write cross section properties
mannings_n = vwp(xSec.mannings_n, replaceParamFile)
try:
fileObject.write('MANNINGS_N %.6f\n' % mannings_n)
except:
fileObject.write('MANNINGS_N %s\n' % mannings_n)
fileObject.write('NPAIRS %s\n' % xSec.numPairs)
fileObject.write('NUM_INTERP %s\n' % vwp(xSec.numInterp, replaceParamFile))
# Write optional cross section properties
self._writeOptionalXsecCards(fileObject=fileObject, xSec=xSec, replaceParamFile=replaceParamFile)
# Write breakpoint lines
for bp in xSec.breakpoints:
fileObject.write('X1 %.6f %.6f\n' % (bp.x, bp.y))
else:
log.error('OOPS: MISSED A CROSS SECTION TYPE. CIF LINE 580. {0}'.format(linkType))
def _writeOptionalXsecCards(self, fileObject, xSec, replaceParamFile):
"""
Write Optional Cross Section Cards to File Method
"""
if xSec.erode:
fileObject.write('ERODE\n')
if xSec.maxErosion != None:
fileObject.write('MAX_EROSION %.6f\n' % xSec.maxErosion)
if xSec.subsurface:
fileObject.write('SUBSURFACE\n')
if xSec.mRiver != None:
mRiver = vwp(xSec.mRiver, replaceParamFile)
try:
fileObject.write('M_RIVER %.6f\n' % mRiver)
except:
fileObject.write('M_RIVER %s\n' % mRiver)
if xSec.kRiver != None:
kRiver = vwp(xSec.kRiver, replaceParamFile)
try:
fileObject.write('K_RIVER %.6f\n' % kRiver)
except:
fileObject.write('K_RIVER %s\n' % kRiver)
def _getUpdateGeometrySqlString(self, geometryID, tableName, spatialReferenceID, wktString):
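"""
Generate Geometry Update SQL String Method
Produces a raw UPDATE statement such as (illustrative values):
    UPDATE cif_nodes SET geometry=ST_GeomFromText('POINT Z (100.0 200.0 5.0)', 26912)
    WHERE id=42;
"""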
statement = """
UPDATE {0} SET geometry=ST_GeomFromText('{1}', {2})
WHERE id={3};
""".format(tableName,
wktString,
spatialReferenceID,
geometryID)
return statement
class StreamLink(DeclarativeBase, GeometricObjectBase):
"""
Object containing generic stream link or reach data.
GSSHA stream networks are composed of a series of stream links and nodes. A stream link is composed of two or more
nodes. A basic fluvial stream link contains the cross section. Stream links can also be used to describe structures
on a stream such as culverts, weirs, or reservoirs.
This object inherits several methods from the :class:`gsshapy.orm.GeometricObjectBase` base class for generating
geometric visualizations.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4_-_Link_.28Reach.29_information
"""
__tablename__ = 'cif_links'
# Public Table Metadata
tableName = __tablename__ #: Database tablename
geometryColumnName = 'geometry'
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
channelInputFileID = Column(Integer, ForeignKey('cif_channel_input_files.id')) #: FK
# Value Columns
linkNumber = Column(Integer) #: INTEGER
type = Column(String) #: STRING
numElements = Column(Integer) #: INTEGER
dx = Column(Float) #: FLOAT
erode = Column(Boolean) #: BOOLEAN
subsurface = Column(Boolean) #: BOOLEAN
downstreamLinkID = Column(Integer) #: INTEGER
numUpstreamLinks = Column(Integer) #: INTEGER
geometry = Column(Geometry) #: GEOMETRY
# Relationship Properties
channelInputFile = relationship('ChannelInputFile', back_populates='streamLinks') #: RELATIONSHIP
upstreamLinks = relationship('UpstreamLink', back_populates='streamLink') #: RELATIONSHIP
nodes = relationship('StreamNode', back_populates='streamLink') #: RELATIONSHIP
weirs = relationship('Weir', back_populates='streamLink') #: RELATIONSHIP
culverts = relationship('Culvert', back_populates='streamLink') #: RELATIONSHIP
reservoir = relationship('Reservoir', uselist=False, back_populates='streamLink') #: RELATIONSHIP
breakpointCS = relationship('BreakpointCS', uselist=False, back_populates='streamLink') #: RELATIONSHIP
trapezoidalCS = relationship('TrapezoidalCS', uselist=False, back_populates='streamLink') #: RELATIONSHIP
datasets = relationship('LinkDataset', back_populates='link') #: RELATIONSHIP
def __init__(self, linkNumber, type, numElements, dx=None, erode=False, subsurface=False):
"""
Constructor
"""
self.linkNumber = linkNumber
self.type = type
self.numElements = numElements
self.dx = dx
self.erode = erode
self.subsurface = subsurface
def __repr__(self):
return '<StreamLink: LinkNumber=%s, Type=%s, NumberElements=%s, DX=%s, Erode=%s, Subsurface=%s, DownstreamLinkID=%s, NumUpstreamLinks=%s>' % (
self.linkNumber,
self.type,
self.numElements,
self.dx,
self.erode,
self.subsurface,
self.downstreamLinkID,
self.numUpstreamLinks)
def __eq__(self, other):
return (self.linkNumber == other.linkNumber and
self.type == other.type and
self.numElements == other.numElements and
self.dx == other.dx and
self.erode == other.erode and
self.subsurface == other.subsurface and
self.downstreamLinkID == other.downstreamLinkID and
self.numUpstreamLinks == other.numUpstreamLinks)
class UpstreamLink(DeclarativeBase):
"""
Object used to map stream links with their upstream link counterparts.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.3_.E2.80.93_Channel_network_connectivity
"""
__tablename__ = 'cif_upstream_links'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
linkID = Column(Integer, ForeignKey('cif_links.id')) #: INTEGER
# Value Columns
upstreamLinkID = Column(Integer) #: INTEGER
# Relationship Properties
streamLink = relationship('StreamLink', back_populates='upstreamLinks') #: RELATIONSHIP
def __init__(self, upstreamLinkID):
self.upstreamLinkID = upstreamLinkID
def __repr__(self):
return '<UpstreamLink: LinkID=%s, UpstreamLinkID=%s>' % (self.linkID, self.upstreamLinkID)
def __eq__(self, other):
return self.upstreamLinkID == other.upstreamLinkID
class StreamNode(DeclarativeBase, GeometricObjectBase):
"""
Object containing the stream node data in the channel network.
Stream nodes represent the computational unit of GSSHA stream networks. Each stream link must consist of two or more
stream nodes.
This object inherits several methods from the :class:`gsshapy.orm.GeometricObjectBase` base class for generating
geometric visualizations.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4.2.1.4_Node_information
"""
__tablename__ = 'cif_nodes'
# Public Table Metadata
tableName = __tablename__ #: Database tablename
geometryColumnName = 'geometry'
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
linkID = Column(Integer, ForeignKey('cif_links.id')) #: FK
# Value Columns
nodeNumber = Column(Integer) #: INTEGER
x = Column(Float) #: FLOAT
y = Column(Float) #: FLOAT
elevation = Column(Float) #: FLOAT
geometry = Column(Geometry) #: GEOMETRY
# Relationship Properties
streamLink = relationship('StreamLink', back_populates='nodes') #: RELATIONSHIP
datasets = relationship('NodeDataset', back_populates='node') #: RELATIONSHIP
def __init__(self, nodeNumber, x, y, elevation):
"""
Constructor
"""
self.nodeNumber = nodeNumber
self.x = x
self.y = y
self.elevation = elevation
def __repr__(self):
return '<Node: NodeNumber=%s, X=%s, Y=%s, Elevation=%s>' % (
self.nodeNumber,
self.x,
self.y,
self.elevation)
def __eq__(self, other):
return (self.nodeNumber == other.nodeNumber and
self.x == other.x and
self.y == other.y and
self.elevation == other.elevation)
class Weir(DeclarativeBase):
"""
Object containing data that defines a weir structure for a stream link.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4.2_-_Structure_channel_links
"""
__tablename__ = 'cif_weirs'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
linkID = Column(Integer, ForeignKey('cif_links.id')) #: FK
# Value Columns
type = Column(String) #: STRING
crestLength = Column(Float) #: FLOAT
crestLowElevation = Column(Float) #: FLOAT
dischargeCoeffForward = Column(Float) #: FLOAT
dischargeCoeffReverse = Column(Float) #: FLOAT
crestLowLocation = Column(Float) #: FLOAT
steepSlope = Column(Float) #: FLOAT
shallowSlope = Column(Float) #: FLOAT
# Relationship Properties
streamLink = relationship('StreamLink', back_populates='weirs') #: RELATIONSHIP
def __init__(self, type, crestLength, crestLowElevation, dischargeCoeffForward, dischargeCoeffReverse,
crestLowLocation, steepSlope, shallowSlope):
"""
Constructor
"""
self.type = type
self.crestLength = crestLength
self.crestLowElevation = crestLowElevation
self.dischargeCoeffForward = dischargeCoeffForward
self.dischargeCoeffReverse = dischargeCoeffReverse
self.crestLowLocation = crestLowLocation
self.steepSlope = steepSlope
self.shallowSlope = shallowSlope
def __repr__(self):
return '<Weir: Type=%s, CrestLength=%s, CrestLowElevation=%s, DischargeCoeffForward=%s, DischargeCoeffReverse=%s, CrestLowLocation=%s, SteepSlope=%s, ShallowSlope=%s>' % (
self.type,
self.crestLength,
self.crestLowElevation,
self.dischargeCoeffForward,
self.dischargeCoeffReverse,
self.crestLowLocation,
self.steepSlope,
self.shallowSlope)
def __eq__(self, other):
return (self.type == other.type and
self.crestLength == other.crestLength and
self.crestLowElevation == other.crestLowElevation and
self.dischargeCoeffForward == other.dischargeCoeffForward and
self.dischargeCoeffReverse == other.dischargeCoeffReverse and
self.crestLowLocation == other.crestLowLocation and
self.steepSlope == other.steepSlope and
self.shallowSlope == other.shallowSlope)
class Culvert(DeclarativeBase):
"""
Object containing culvert structure data for a stream link.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4.2_-_Structure_channel_links
"""
__tablename__ = 'cif_culverts'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
linkID = Column(Integer, ForeignKey('cif_links.id')) #: FK
# Value Columns
type = Column(String) #: STRING
upstreamInvert = Column(Float) #: FLOAT
downstreamInvert = Column(Float) #: FLOAT
inletDischargeCoeff = Column(Float) #: FLOAT
reverseFlowDischargeCoeff = Column(Float) #: FLOAT
slope = Column(Float) #: FLOAT
length = Column(Float) #: FLOAT
roughness = Column(Float) #: FLOAT
diameter = Column(Float) #: FLOAT
width = Column(Float) #: FLOAT
height = Column(Float) #: FLOAT
# Relationship Properties
streamLink = relationship('StreamLink', back_populates='culverts') #: RELATIONSHIP
def __init__(self, type, upstreamInvert, downstreamInvert, inletDischargeCoeff, reverseFlowDischargeCoeff, slope,
length, roughness, diameter, width, height):
"""
Constructor
"""
self.type = type
self.upstreamInvert = upstreamInvert
self.downstreamInvert = downstreamInvert
self.inletDischargeCoeff = inletDischargeCoeff
self.reverseFlowDischargeCoeff = reverseFlowDischargeCoeff
self.slope = slope
self.length = length
self.roughness = roughness
self.diameter = diameter
self.width = width
self.height = height
def __repr__(self):
return '<Culvert: Type=%s, UpstreamInvert=%s, DownstreamInvert=%s, InletDischargeCoeff=%s, ReverseFlowDischargeCoeff=%s, Slope=%s, Length=%s, Roughness=%s, Diameter=%s, Width=%s, Height=%s>' % (
self.type,
self.upstreamInvert,
self.downstreamInvert,
self.inletDischargeCoeff,
self.reverseFlowDischargeCoeff,
self.slope,
self.length,
self.roughness,
self.diameter,
self.width,
self.height)
def __eq__(self, other):
return (self.type == other.type and
self.upstreamInvert == other.upstreamInvert and
self.downstreamInvert == other.downstreamInvert and
self.inletDischargeCoeff == other.inletDischargeCoeff and
self.reverseFlowDischargeCoeff == other.reverseFlowDischargeCoeff and
self.slope == other.slope and
self.length == other.length and
self.roughness == other.roughness and
self.diameter == other.diameter and
self.width == other.width and
self.height == other.height)
class Reservoir(DeclarativeBase):
"""
Object containing data that defines a reservoir for a stream link.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4.3_-_Reservoir_channel_links
"""
__tablename__ = 'cif_reservoirs'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
linkID = Column(Integer, ForeignKey('cif_links.id')) #: FK
# Value Columns
initWSE = Column(Float) #: FLOAT
minWSE = Column(Float) #: FLOAT
maxWSE = Column(Float) #: FLOAT
# Relationship Properties
streamLink = relationship('StreamLink', back_populates='reservoir') #: RELATIONSHIP
reservoirPoints = relationship('ReservoirPoint', back_populates='reservoir') #: RELATIONSHIP
def __init__(self, initWSE, minWSE, maxWSE):
"""
Constructor
"""
self.initWSE = initWSE
self.minWSE = minWSE
self.maxWSE = maxWSE
def __repr__(self):
return '<Reservoir: InitialWSE=%s, MinWSE=%s, MaxWSE=%s>' % (self.initWSE, self.minWSE, self.maxWSE)
def __eq__(self, other):
return (self.initWSE == other.initWSE and
self.minWSE == other.minWSE and
self.maxWSE == other.maxWSE)
class ReservoirPoint(DeclarativeBase):
"""
Object containing the cells/points that define the maximum inundation area of a reservoir.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#
"""
__tablename__ = 'cif_reservoir_points'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
reservoirID = Column(Integer, ForeignKey('cif_reservoirs.id')) #: FK
# Value Columns
i = Column(Integer) #: INTEGER
j = Column(Integer) #: INTEGER
# Relationship Properties
reservoir = relationship('Reservoir', back_populates='reservoirPoints') #: RELATIONSHIP
def __init__(self, i, j):
"""
Constructor
"""
self.i = i
self.j = j
def __repr__(self):
return '<ReservoirPoint: CellI=%s, CellJ=%s>' % (self.i, self.j)
def __eq__(self, other):
return (self.i == other.i and
self.j == other.j)
class BreakpointCS(DeclarativeBase):
"""
Object containing breakpoint type cross section data for fluvial stream links.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4.2.1.2_Natural_cross-section
"""
__tablename__ = 'cif_breakpoint'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
linkID = Column(Integer, ForeignKey('cif_links.id')) #: FK
# Value Columns
mannings_n = Column(Float) #: FLOAT
numPairs = Column(Integer) #: INTEGER
numInterp = Column(Integer) #:INTEGER
mRiver = Column(Float) #: FLOAT
kRiver = Column(Float) #: FLOAT
erode = Column(Boolean) #: BOOLEAN
subsurface = Column(Boolean) #: BOOLEAN
maxErosion = Column(Float) #: FLOAT
# Relationship Properties
streamLink = relationship('StreamLink', back_populates='breakpointCS') #: RELATIONSHIP
breakpoints = relationship('Breakpoint', back_populates='crossSection') #: RELATIONSHIP
def __init__(self, mannings_n, numPairs, numInterp, mRiver, kRiver, erode, subsurface, maxErosion):
"""
Constructor
"""
self.mannings_n = mannings_n
self.numPairs = numPairs
self.numInterp = numInterp
self.mRiver = mRiver
self.kRiver = kRiver
self.erode = erode
self.subsurface = subsurface
self.maxErosion = maxErosion
def __repr__(self):
return '<BreakpointCrossSection: Mannings-n=%s, NumPairs=%s, NumInterp=%s, M-River=%s, K-River=%s, Erode=%s, Subsurface=%s, MaxErosion=%s>' % (
self.mannings_n,
self.numPairs,
self.numInterp,
self.mRiver,
self.kRiver,
self.erode,
self.subsurface,
self.maxErosion)
def __eq__(self, other):
return (self.mannings_n == other.mannings_n and
self.numPairs == other.numPairs and
self.numInterp == other.numInterp and
self.mRiver == other.mRiver and
self.kRiver == other.kRiver and
self.erode == other.erode and
self.subsurface == other.subsurface and
self.maxErosion == other.maxErosion)
class Breakpoint(DeclarativeBase):
"""
Object used to define points in a :class:`.BreakpointCS` object.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4.2.1.2_Natural_cross-section
"""
__tablename__ = 'cif_bcs_points'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
crossSectionID = Column(Integer, ForeignKey('cif_breakpoint.id')) #: FK
# Value Columns
x = Column(Float) #: FLOAT
y = Column(Float) #: FLOAT
# Relationship Properties
crossSection = relationship('BreakpointCS', back_populates='breakpoints') #: RELATIONSHIP
def __init__(self, x, y):
"""
Constructor
"""
self.x = x
self.y = y
def __repr__(self):
return '<Breakpoint: X=%s, Y=%s>' % (self.x, self.y)
def __eq__(self, other):
return (self.x == other.x and
self.y == other.y)
class TrapezoidalCS(DeclarativeBase):
"""
Object containing trapezoidal type cross section data for fluvial stream links.
See: http://www.gsshawiki.com/Surface_Water_Routing:Channel_Routing#5.1.4.1.4.2.1.1_Trapezoidal_cross-section
"""
__tablename__ = 'cif_trapezoid'
tableName = __tablename__ #: Database tablename
# Primary and Foreign Keys
id = Column(Integer, autoincrement=True, primary_key=True) #: PK
linkID = Column(Integer, ForeignKey('cif_links.id')) #: FK
# Value Columns
mannings_n = Column(Float) #: FLOAT
bottomWidth = Column(Float) #: FLOAT
bankfullDepth = Column(Float) #: FLOAT
sideSlope = Column(Float) #: FLOAT
mRiver = Column(Float) #: FLOAT
kRiver = Column(Float) #: FLOAT
erode = Column(Boolean) #: BOOLEAN
subsurface = Column(Boolean) #: BOOLEAN
maxErosion = Column(Float) #: FLOAT
# Relationship Properties
streamLink = relationship('StreamLink', back_populates='trapezoidalCS') #: RELATIONSHIP
def __init__(self, mannings_n, bottomWidth, bankfullDepth, sideSlope, mRiver, kRiver, erode, subsurface,
maxErosion):
"""
Constructor
"""
self.mannings_n = mannings_n
self.bottomWidth = bottomWidth
self.bankfullDepth = bankfullDepth
self.sideSlope = sideSlope
self.mRiver = mRiver
self.kRiver = kRiver
self.erode = erode
self.subsurface = subsurface
self.maxErosion = maxErosion
def __repr__(self):
return '<TrapezoidalCS: Mannings-n=%s, BottomWidth=%s, BankfullDepth=%s, SideSlope=%s, M-River=%s, K-River=%s, Erode=%s, Subsurface=%s, MaxErosion=%s>' % (
self.mannings_n,
self.bottomWidth,
self.bankfullDepth,
self.sideSlope,
self.mRiver,
self.kRiver,
self.erode,
self.subsurface,
self.maxErosion)
def __eq__(self, other):
return (self.mannings_n == other.mannings_n and
self.bottomWidth == other.bottomWidth and
self.bankfullDepth == other.bankfullDepth and
self.sideSlope == other.sideSlope and
self.mRiver == other.mRiver and
self.kRiver == other.kRiver and
self.erode == other.erode and
self.subsurface == other.subsurface and
self.maxErosion == other.maxErosion)
|
|
#!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Standalone WebSocket server.
Use this file to launch pywebsocket without Apache HTTP Server.
BASIC USAGE
Go to the src directory and run
$ python mod_pywebsocket/standalone.py [-p <ws_port>]
[-w <websock_handlers>]
[-d <document_root>]
<ws_port> is the port number to use for ws:// connection.
<document_root> is the path to the root directory of HTML files.
<websock_handlers> is the path to the root directory of WebSocket handlers.
If not specified, <document_root> will be used. See __init__.py (or
run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
For more detail and other options, run
$ python mod_pywebsocket/standalone.py --help
or see _build_option_parser method below.
For troubleshooting, adding "--log_level debug" might help you.
TRY DEMO
Go to the src directory and run
$ python standalone.py -d example
to launch pywebsocket with the sample handler and html on port 80. Open
http://localhost/console.html, click the connect button, type something into
the text box next to the send button and click the send button. If everything
is working, you'll see the message you typed echoed by the server.
SUPPORTING TLS
To support TLS, run standalone.py with -t, -k, and -c options.
Note that when the ssl module is used and the key/cert location is incorrect,
the TLS connection silently fails, while pyOpenSSL fails on startup.
SUPPORTING CLIENT AUTHENTICATION
To support client authentication with TLS, run standalone.py with -t, -k, -c,
and --tls-client-auth, and --tls-client-ca options.
E.g., $./standalone.py -d ../example -p 10443 -t -c ../test/cert/cert.pem -k
../test/cert/key.pem --tls-client-auth --tls-client-ca=../test/cert/cacert.pem
CONFIGURATION FILE
You can also write a configuration file and use it by specifying the path to
the configuration file by --config option. Please write a configuration file
following the documentation of the Python ConfigParser library. Name of each
entry must be the long version argument name. E.g. to set log level to debug,
add the following line:
log_level=debug
For options which don't take a value, please add some fake value. E.g. for
the --tls option, add the following line:
tls=True
Note that tls will be enabled even if you write tls=False, because the value
part is ignored.
When both a command line argument and a configuration file entry are set for
the same configuration item, the command line value will override the one in
the configuration file.
THREADING
This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
used for each request.
SECURITY WARNING
This uses CGIHTTPServer and CGIHTTPServer is not secure.
It may execute arbitrary Python code or external programs. It should not be
used outside a firewall.
"""
import BaseHTTPServer
import CGIHTTPServer
import SimpleHTTPServer
import SocketServer
import ConfigParser
import base64
import httplib
import logging
import logging.handlers
import optparse
import os
import re
import select
import socket
import sys
import threading
import time
import types
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import http_header_util
from mod_pywebsocket import memorizingfile
from mod_pywebsocket import util
_DEFAULT_LOG_MAX_BYTES = 1024 * 256
_DEFAULT_LOG_BACKUP_COUNT = 5
_DEFAULT_REQUEST_QUEUE_SIZE = 128
# 1024 is practically large enough to contain WebSocket handshake lines.
_MAX_MEMORIZED_LINES = 1024
# Constants for the --tls_module flag.
_TLS_BY_STANDARD_MODULE = 'ssl'
_TLS_BY_PYOPENSSL = 'pyopenssl'
class _StandaloneConnection(object):
"""Mimic mod_python mp_conn."""
def __init__(self, request_handler):
"""Construct an instance.
Args:
request_handler: A WebSocketRequestHandler instance.
"""
self._request_handler = request_handler
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return (self._request_handler.server.server_name,
self._request_handler.server.server_port)
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr.
Setting the property in __init__ won't work because the request
handler is not initialized yet there."""
return self._request_handler.client_address
remote_addr = property(get_remote_addr)
def write(self, data):
"""Mimic mp_conn.write()."""
return self._request_handler.wfile.write(data)
def read(self, length):
"""Mimic mp_conn.read()."""
return self._request_handler.rfile.read(length)
def get_memorized_lines(self):
"""Get memorized lines."""
return self._request_handler.rfile.get_memorized_lines()
class _StandaloneRequest(object):
"""Mimic mod_python request."""
def __init__(self, request_handler, use_tls):
"""Construct an instance.
Args:
request_handler: A WebSocketRequestHandler instance.
"""
self._logger = util.get_class_logger(self)
self._request_handler = request_handler
self.connection = _StandaloneConnection(request_handler)
self._use_tls = use_tls
self.headers_in = request_handler.headers
def get_uri(self):
"""Getter to mimic request.uri.
This method returns the raw data at the Request-URI part of the
Request-Line, while the uri method on the request object of mod_python
returns the path portion after parsing the raw data. This behavior is
kept for compatibility.
"""
return self._request_handler.path
uri = property(get_uri)
def get_unparsed_uri(self):
"""Getter to mimic request.unparsed_uri."""
return self._request_handler.path
unparsed_uri = property(get_unparsed_uri)
def get_method(self):
"""Getter to mimic request.method."""
return self._request_handler.command
method = property(get_method)
def get_protocol(self):
"""Getter to mimic request.protocol."""
return self._request_handler.request_version
protocol = property(get_protocol)
def is_https(self):
"""Mimic request.is_https()."""
return self._use_tls
def _import_ssl():
global ssl
try:
import ssl
return True
except ImportError:
return False
def _import_pyopenssl():
global OpenSSL
try:
import OpenSSL.SSL
return True
except ImportError:
return False
class _StandaloneSSLConnection(object):
"""A wrapper class for OpenSSL.SSL.Connection to
- provide makefile method which is not supported by the class
- tweak shutdown method since OpenSSL.SSL.Connection.shutdown doesn't
accept the "how" argument.
- convert SysCallError exceptions that its recv method may raise into a
return value of '', meaning EOF. We cannot overwrite the recv method on
self._connection since it's immutable.
"""
_OVERRIDDEN_ATTRIBUTES = ['_connection', 'makefile', 'shutdown', 'recv']
def __init__(self, connection):
self._connection = connection
def __getattribute__(self, name):
if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
return object.__getattribute__(self, name)
return self._connection.__getattribute__(name)
def __setattr__(self, name, value):
if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
return object.__setattr__(self, name, value)
return self._connection.__setattr__(name, value)
def makefile(self, mode='r', bufsize=-1):
return socket._fileobject(self, mode, bufsize)
def shutdown(self, unused_how):
self._connection.shutdown()
def recv(self, bufsize, flags=0):
if flags != 0:
raise ValueError('Non-zero flags not allowed')
try:
return self._connection.recv(bufsize)
except OpenSSL.SSL.SysCallError, (err, message):
if err == -1:
# Suppress "unexpected EOF" exception. See the OpenSSL document
# for SSL_get_error.
return ''
raise
def _alias_handlers(dispatcher, websock_handlers_map_file):
"""Set aliases specified in websock_handler_map_file in dispatcher.
Args:
dispatcher: dispatch.Dispatcher instance
websock_handlers_map_file: alias map file
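The map file lists one alias per line as two whitespace-separated resource
paths; lines starting with '#' and blank lines are ignored. An illustrative
entry (the first path is registered as an alias of the second):
    /echo_alias    /echo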
"""
fp = open(websock_handlers_map_file)
try:
for line in fp:
if line[0] == '#' or line.isspace():
continue
m = re.match('(\S+)\s+(\S+)', line)
if not m:
logging.warning('Wrong format in map file:' + line)
continue
try:
dispatcher.add_resource_path_alias(
m.group(1), m.group(2))
except dispatch.DispatchException, e:
logging.error(str(e))
finally:
fp.close()
class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""HTTPServer specialized for WebSocket."""
# Overrides SocketServer.ThreadingMixIn.daemon_threads
daemon_threads = True
# Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
allow_reuse_address = True
def __init__(self, options):
"""Override SocketServer.TCPServer.__init__ to set SSL enabled
socket object to self.socket before server_bind and server_activate,
if necessary.
"""
# Share a Dispatcher among request handlers to save time for
# instantiation. Dispatcher can be shared because it is thread-safe.
options.dispatcher = dispatch.Dispatcher(
options.websock_handlers,
options.scan_dir,
options.allow_handlers_outside_root_dir)
if options.websock_handlers_map_file:
_alias_handlers(options.dispatcher,
options.websock_handlers_map_file)
warnings = options.dispatcher.source_warnings()
if warnings:
for warning in warnings:
logging.warning('Warning in source loading: %s' % warning)
self._logger = util.get_class_logger(self)
self.request_queue_size = options.request_queue_size
self.__ws_is_shut_down = threading.Event()
self.__ws_serving = False
SocketServer.BaseServer.__init__(
self, (options.server_host, options.port), WebSocketRequestHandler)
# Expose the options object to allow handler objects to access it. We name
# it with websocket_ prefix to avoid conflict.
self.websocket_server_options = options
self._create_sockets()
self.server_bind()
self.server_activate()
def _create_sockets(self):
self.server_name, self.server_port = self.server_address
self._sockets = []
if not self.server_name:
# On platforms that don't support IPv6, the first bind fails.
# On platforms that support IPv6
# - If it binds both IPv4 and IPv6 on call with AF_INET6, the
# first bind succeeds and the second fails (we'll see 'Address
# already in use' error).
# - If it binds only IPv6 on call with AF_INET6, both calls are
# expected to succeed, listening on both protocols.
addrinfo_array = [
(socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
(socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
else:
addrinfo_array = socket.getaddrinfo(self.server_name,
self.server_port,
socket.AF_UNSPEC,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
for addrinfo in addrinfo_array:
self._logger.info('Create socket on: %r', addrinfo)
family, socktype, proto, canonname, sockaddr = addrinfo
try:
socket_ = socket.socket(family, socktype)
except Exception, e:
self._logger.info('Skip by failure: %r', e)
continue
server_options = self.websocket_server_options
if server_options.use_tls:
# For the case of using pyOpenSSL, the wrapper setup is done after
# accept.
if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
if server_options.tls_client_auth:
if server_options.tls_client_cert_optional:
client_cert_ = ssl.CERT_OPTIONAL
else:
client_cert_ = ssl.CERT_REQUIRED
else:
client_cert_ = ssl.CERT_NONE
socket_ = ssl.wrap_socket(socket_,
keyfile=server_options.private_key,
certfile=server_options.certificate,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=server_options.tls_client_ca,
cert_reqs=client_cert_,
do_handshake_on_connect=False)
self._sockets.append((socket_, addrinfo))
def server_bind(self):
"""Override SocketServer.TCPServer.server_bind to enable multiple
sockets bind.
"""
failed_sockets = []
for socketinfo in self._sockets:
socket_, addrinfo = socketinfo
self._logger.info('Bind on: %r', addrinfo)
if self.allow_reuse_address:
socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
socket_.bind(self.server_address)
except Exception, e:
self._logger.info('Skip by failure: %r', e)
socket_.close()
failed_sockets.append(socketinfo)
if self.server_address[1] == 0:
# The operating system assigns the actual port number for port
# number 0. In this case, the second and later sockets should use
# the same port number. Also self.server_port is rewritten
# because it is exported, and will be used by external code.
self.server_address = (
self.server_name, socket_.getsockname()[1])
self.server_port = self.server_address[1]
self._logger.info('Port %r is assigned', self.server_port)
for socketinfo in failed_sockets:
self._sockets.remove(socketinfo)
def server_activate(self):
"""Override SocketServer.TCPServer.server_activate to enable multiple
sockets listen.
"""
failed_sockets = []
for socketinfo in self._sockets:
socket_, addrinfo = socketinfo
self._logger.info('Listen on: %r', addrinfo)
try:
socket_.listen(self.request_queue_size)
except Exception, e:
self._logger.info('Skip by failure: %r', e)
socket_.close()
failed_sockets.append(socketinfo)
for socketinfo in failed_sockets:
self._sockets.remove(socketinfo)
if len(self._sockets) == 0:
self._logger.critical(
'No sockets activated. Use info log level to see the reason.')
def server_close(self):
"""Override SocketServer.TCPServer.server_close to enable multiple
sockets close.
"""
for socketinfo in self._sockets:
socket_, addrinfo = socketinfo
self._logger.info('Close on: %r', addrinfo)
socket_.close()
def fileno(self):
"""Override SocketServer.TCPServer.fileno."""
self._logger.critical('Not supported: fileno')
return self._sockets[0][0].fileno()
def handle_error(self, request, client_address):
"""Override SocketServer.handle_error."""
self._logger.error(
'Exception in processing request from: %r\n%s',
client_address,
util.get_stack_trace())
# Note: client_address is a tuple.
def get_request(self):
"""Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
object with _StandaloneSSLConnection to provide makefile method. We
cannot substitute OpenSSL.SSL.Connection.makefile since it's a read-only
attribute.
"""
accepted_socket, client_address = self.socket.accept()
server_options = self.websocket_server_options
if server_options.use_tls:
if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
try:
accepted_socket.do_handshake()
except ssl.SSLError, e:
self._logger.debug('%r', e)
raise
# Print cipher in use. Handshake is done on accept.
self._logger.debug('Cipher: %s', accepted_socket.cipher())
self._logger.debug('Client cert: %r',
accepted_socket.getpeercert())
elif server_options.tls_module == _TLS_BY_PYOPENSSL:
# We cannot print the cipher in use. pyOpenSSL doesn't provide
# any method to fetch that.
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.use_privatekey_file(server_options.private_key)
ctx.use_certificate_file(server_options.certificate)
def default_callback(conn, cert, errnum, errdepth, ok):
return ok == 1
# See the OpenSSL document for SSL_CTX_set_verify.
if server_options.tls_client_auth:
verify_mode = OpenSSL.SSL.VERIFY_PEER
if not server_options.tls_client_cert_optional:
verify_mode |= OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT
ctx.set_verify(verify_mode, default_callback)
ctx.load_verify_locations(server_options.tls_client_ca,
None)
else:
ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, default_callback)
accepted_socket = OpenSSL.SSL.Connection(ctx, accepted_socket)
accepted_socket.set_accept_state()
# Convert SSL related error into socket.error so that
# SocketServer ignores them and keeps running.
#
# TODO(tyoshino): Convert all kinds of errors.
try:
accepted_socket.do_handshake()
except OpenSSL.SSL.Error, e:
# Set errno part to 1 (SSL_ERROR_SSL) like the ssl module
# does.
self._logger.debug('%r', e)
raise socket.error(1, '%r' % e)
cert = accepted_socket.get_peer_certificate()
self._logger.debug('Client cert subject: %r',
cert.get_subject().get_components())
accepted_socket = _StandaloneSSLConnection(accepted_socket)
else:
raise ValueError('No TLS support module is available')
return accepted_socket, client_address
def serve_forever(self, poll_interval=0.5):
"""Override SocketServer.BaseServer.serve_forever."""
self.__ws_serving = True
self.__ws_is_shut_down.clear()
handle_request = self.handle_request
if hasattr(self, '_handle_request_noblock'):
handle_request = self._handle_request_noblock
else:
self._logger.warning('Fallback to blocking request handler')
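        # Multiplex over every listening socket: wait up to poll_interval
        # seconds for any of them to become readable, then dispatch each ready
        # socket through the selected request handler.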
try:
while self.__ws_serving:
r, w, e = select.select(
[socket_[0] for socket_ in self._sockets],
[], [], poll_interval)
for socket_ in r:
self.socket = socket_
handle_request()
self.socket = None
finally:
self.__ws_is_shut_down.set()
def shutdown(self):
"""Override SocketServer.BaseServer.shutdown."""
self.__ws_serving = False
self.__ws_is_shut_down.wait()
class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
"""CGIHTTPRequestHandler specialized for WebSocket."""
# Use httplib.HTTPMessage instead of mimetools.Message.
MessageClass = httplib.HTTPMessage
def setup(self):
"""Override SocketServer.StreamRequestHandler.setup to wrap rfile
with MemorizingFile.
This method will be called by BaseRequestHandler's constructor
before calling BaseHTTPRequestHandler.handle.
BaseHTTPRequestHandler.handle will call
BaseHTTPRequestHandler.handle_one_request and it will call
WebSocketRequestHandler.parse_request.
"""
# Call superclass's setup to prepare rfile, wfile, etc. See setup
# definition on the root class SocketServer.StreamRequestHandler to
# understand what this does.
CGIHTTPServer.CGIHTTPRequestHandler.setup(self)
self.rfile = memorizingfile.MemorizingFile(
self.rfile,
max_memorized_lines=_MAX_MEMORIZED_LINES)
def __init__(self, request, client_address, server):
self._logger = util.get_class_logger(self)
self._options = server.websocket_server_options
        # Overrides CGIHTTPRequestHandler.cgi_directories.
self.cgi_directories = self._options.cgi_directories
# Replace CGIHTTPRequestHandler.is_executable method.
if self._options.is_executable_method is not None:
self.is_executable = self._options.is_executable_method
# This actually calls BaseRequestHandler.__init__.
CGIHTTPServer.CGIHTTPRequestHandler.__init__(
self, request, client_address, server)
def _xhr_send_benchmark_helper(self):
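        # Benchmark helper: reads a request body that is expected to consist
        # solely of 'a' characters (fewer than 10000 bytes) in 1MB blocks,
        # verifies it, and echoes the received byte count back as the response
        # body.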
content_length = int(self.headers.getheader('Content-Length'))
if content_length not in range(10000):
return
self._logger.debug('Requested to receive %s bytes', content_length)
RECEIVE_BLOCK_SIZE = 1024 * 1024
bytes_to_receive = content_length
while bytes_to_receive > 0:
bytes_to_receive_in_this_loop = bytes_to_receive
if bytes_to_receive_in_this_loop > RECEIVE_BLOCK_SIZE:
bytes_to_receive_in_this_loop = RECEIVE_BLOCK_SIZE
received_data = self.rfile.read(bytes_to_receive_in_this_loop)
for c in received_data:
if c != 'a':
self._logger.debug('Request body verification failed')
return
bytes_to_receive -= len(received_data)
if bytes_to_receive < 0:
self._logger.debug('Received %d more bytes than expected' %
(-bytes_to_receive))
return
# Return the number of received bytes back to the client.
response_body = '%d' % content_length
self.wfile.write(
'HTTP/1.1 200 OK\r\n'
'Content-Type: text/html\r\n'
'Content-Length: %d\r\n'
'\r\n%s' % (len(response_body), response_body))
self.wfile.flush()
def _xhr_receive_benchmark_helper(self):
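        # Benchmark helper: the request body has the form
        # "<bytes_to_send> <mode>" where mode is 'chunked' or 'none'. The
        # response streams that many 'a' characters back in 1MB blocks,
        # optionally using chunked transfer encoding.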
        content_length = self.headers.getheader('Content-Length')
        if content_length is None:
            content_length = 0
        request_body = self.rfile.read(int(content_length))
        if not isinstance(request_body, str):
            request_body = ''
request_array = request_body.split(' ')
if len(request_array) < 2:
self._logger.debug('Malformed request body: %r', request_body)
return
# Parse the size parameter.
bytes_to_send = request_array[0]
try:
bytes_to_send = int(bytes_to_send)
except ValueError, e:
self._logger.debug('Malformed size parameter: %r', bytes_to_send)
return
self._logger.debug('Requested to send %s bytes', bytes_to_send)
# Parse the transfer encoding parameter.
chunked_mode = False
mode_parameter = request_array[1]
if mode_parameter == 'chunked':
self._logger.debug('Requested chunked transfer encoding')
chunked_mode = True
elif mode_parameter != 'none':
self._logger.debug('Invalid mode parameter: %r', mode_parameter)
return
# Write a header
response_header = (
'HTTP/1.1 200 OK\r\n'
'Content-Type: application/octet-stream\r\n')
if chunked_mode:
response_header += 'Transfer-Encoding: chunked\r\n\r\n'
else:
response_header += (
'Content-Length: %d\r\n\r\n' % bytes_to_send)
self.wfile.write(response_header)
self.wfile.flush()
# Write a body
SEND_BLOCK_SIZE = 1024 * 1024
while bytes_to_send > 0:
bytes_to_send_in_this_loop = bytes_to_send
if bytes_to_send_in_this_loop > SEND_BLOCK_SIZE:
bytes_to_send_in_this_loop = SEND_BLOCK_SIZE
if chunked_mode:
self.wfile.write('%x\r\n' % bytes_to_send_in_this_loop)
self.wfile.write('a' * bytes_to_send_in_this_loop)
if chunked_mode:
self.wfile.write('\r\n')
self.wfile.flush()
bytes_to_send -= bytes_to_send_in_this_loop
if chunked_mode:
self.wfile.write('0\r\n\r\n')
self.wfile.flush()
def parse_request(self):
"""Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.
Return True to continue processing for HTTP(S), False otherwise.
See BaseHTTPRequestHandler.handle_one_request method which calls
this method to understand how the return value will be handled.
"""
# We hook parse_request method, but also call the original
# CGIHTTPRequestHandler.parse_request since when we return False,
# CGIHTTPRequestHandler.handle_one_request continues processing and
# it needs variables set by CGIHTTPRequestHandler.parse_request.
#
# Variables set by this method will be also used by WebSocket request
# handling (self.path, self.command, self.requestline, etc. See also
# how _StandaloneRequest's members are implemented using these
# attributes).
if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
return False
if self._options.use_basic_auth:
auth = self.headers.getheader('Authorization')
if auth != self._options.basic_auth_credential:
self.send_response(401)
self.send_header('WWW-Authenticate',
'Basic realm="Pywebsocket"')
self.end_headers()
self._logger.info('Request basic authentication')
return True
host, port, resource = http_header_util.parse_uri(self.path)
# Special paths for XMLHttpRequest benchmark
xhr_benchmark_helper_prefix = '/073be001e10950692ccbf3a2ad21c245'
if resource == (xhr_benchmark_helper_prefix + '_send'):
self._xhr_send_benchmark_helper()
return False
if resource == (xhr_benchmark_helper_prefix + '_receive'):
self._xhr_receive_benchmark_helper()
return False
if resource is None:
self._logger.info('Invalid URI: %r', self.path)
self._logger.info('Fallback to CGIHTTPRequestHandler')
return True
server_options = self.server.websocket_server_options
if host is not None:
validation_host = server_options.validation_host
if validation_host is not None and host != validation_host:
self._logger.info('Invalid host: %r (expected: %r)',
host,
validation_host)
self._logger.info('Fallback to CGIHTTPRequestHandler')
return True
if port is not None:
validation_port = server_options.validation_port
if validation_port is not None and port != validation_port:
self._logger.info('Invalid port: %r (expected: %r)',
port,
validation_port)
self._logger.info('Fallback to CGIHTTPRequestHandler')
return True
self.path = resource
request = _StandaloneRequest(self, self._options.use_tls)
try:
# Fallback to default http handler for request paths for which
# we don't have request handlers.
if not self._options.dispatcher.get_handler_suite(self.path):
self._logger.info('No handler for resource: %r',
self.path)
self._logger.info('Fallback to CGIHTTPRequestHandler')
return True
except dispatch.DispatchException, e:
self._logger.info('Dispatch failed for error: %s', e)
self.send_error(e.status)
return False
        # If any exception without an except clause set up (including
        # DispatchException) is raised below this point, it will be caught
        # and logged by WebSocketServer.
try:
try:
handshake.do_handshake(
request,
self._options.dispatcher,
allowDraft75=self._options.allow_draft75,
strict=self._options.strict)
except handshake.VersionException, e:
self._logger.info('Handshake failed for version error: %s', e)
self.send_response(common.HTTP_STATUS_BAD_REQUEST)
if len(common.SEC_WEBSOCKET_VERSION_HEADER) > 50 or len(e.supported_versions) > 50:
return False
self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
e.supported_versions)
self.end_headers()
return False
except handshake.HandshakeException, e:
# Handshake for ws(s) failed.
self._logger.info('Handshake failed for error: %s', e)
self.send_error(e.status)
return False
request._dispatcher = self._options.dispatcher
self._options.dispatcher.transfer_data(request)
except handshake.AbortedByUserException, e:
self._logger.info('Aborted: %s', e)
return False
def log_request(self, code='-', size='-'):
"""Override BaseHTTPServer.log_request."""
self._logger.info('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, *args):
"""Override BaseHTTPServer.log_error."""
        # Despite the name, this method is for warnings rather than for errors.
# For example, HTTP status code is logged by this method.
self._logger.warning('%s - %s',
self.address_string(),
args[0] % args[1:])
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
        Add an extra check that self.path doesn't contain '..'.
        Also check whether the file is executable or not.
        If the file is not executable, it is handled as a static file or
        directory rather than a CGI script.
"""
if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
if '..' in self.path:
return False
# strip query parameter from request path
resource_name = self.path.split('?', 2)[0]
# convert resource_name into real path name in filesystem.
scriptfile = self.translate_path(resource_name)
if not os.path.isfile(scriptfile):
return False
if not self.is_executable(scriptfile):
return False
return True
return False
def _get_logger_from_class(c):
return logging.getLogger('%s.%s' % (c.__module__, c.__name__))
def _configure_logging(options):
logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')
logger = logging.getLogger()
logger.setLevel(logging.getLevelName(options.log_level.upper()))
if options.log_file:
handler = logging.handlers.RotatingFileHandler(
options.log_file, 'a', options.log_max, options.log_count)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
deflate_log_level_name = logging.getLevelName(
options.deflate_log_level.upper())
_get_logger_from_class(util._Deflater).setLevel(
deflate_log_level_name)
_get_logger_from_class(util._Inflater).setLevel(
deflate_log_level_name)
def _build_option_parser():
parser = optparse.OptionParser()
parser.add_option('--config', dest='config_file', type='string',
default=None,
help=('Path to configuration file. See the file comment '
'at the top of this file for the configuration '
'file format'))
parser.add_option('-H', '--server-host', '--server_host',
dest='server_host',
default='',
help='server hostname to listen to')
parser.add_option('-V', '--validation-host', '--validation_host',
dest='validation_host',
default=None,
help='server hostname to validate in absolute path.')
parser.add_option('-p', '--port', dest='port', type='int',
default=common.DEFAULT_WEB_SOCKET_PORT,
help='port to listen to')
parser.add_option('-P', '--validation-port', '--validation_port',
dest='validation_port', type='int',
default=None,
help='server port to validate in absolute path.')
parser.add_option('-w', '--websock-handlers', '--websock_handlers',
dest='websock_handlers',
default='.',
help=('The root directory of WebSocket handler files. '
'If the path is relative, --document-root is used '
'as the base.'))
parser.add_option('-m', '--websock-handlers-map-file',
'--websock_handlers_map_file',
dest='websock_handlers_map_file',
default=None,
help=('WebSocket handlers map file. '
'Each line consists of alias_resource_path and '
'existing_resource_path, separated by spaces.'))
parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
default=None,
help=('Must be a directory under --websock-handlers. '
'Only handlers under this directory are scanned '
'and registered to the server. '
'Useful for saving scan time when the handler '
'root directory contains lots of files that are '
                            'not handler files or are handler files but you '
'don\'t want them to be registered. '))
parser.add_option('--allow-handlers-outside-root-dir',
'--allow_handlers_outside_root_dir',
dest='allow_handlers_outside_root_dir',
action='store_true',
default=False,
help=('Scans WebSocket handlers even if their canonical '
'path is not under --websock-handlers.'))
parser.add_option('-d', '--document-root', '--document_root',
dest='document_root', default='.',
help='Document root directory.')
parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
default=None,
                      help=('CGI paths relative to document_root. '
                            'Comma-separated. (e.g. -x /cgi,/htbin) '
'Files under document_root/cgi_path are handled '
'as CGI programs. Must be executable.'))
parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
default=False, help='use TLS (wss://)')
parser.add_option('--tls-module', '--tls_module', dest='tls_module',
type='choice',
                      choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
help='Use ssl module if "%s" is specified. '
'Use pyOpenSSL module if "%s" is specified' %
(_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
parser.add_option('-k', '--private-key', '--private_key',
dest='private_key',
default='', help='TLS private key file.')
parser.add_option('-c', '--certificate', dest='certificate',
default='', help='TLS certificate file.')
parser.add_option('--tls-client-auth', dest='tls_client_auth',
action='store_true', default=False,
help='Requests TLS client auth on every connection.')
parser.add_option('--tls-client-cert-optional',
dest='tls_client_cert_optional',
action='store_true', default=False,
help=('Makes client certificate optional even though '
'TLS client auth is enabled.'))
parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
help=('Specifies a pem file which contains a set of '
'concatenated CA certificates which are used to '
'validate certificates passed from clients'))
parser.add_option('--basic-auth', dest='use_basic_auth',
action='store_true', default=False,
help='Requires Basic authentication.')
parser.add_option('--basic-auth-credential',
dest='basic_auth_credential', default='test:test',
help='Specifies the credential of basic authentication '
'by username:password pair (e.g. test:test).')
parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
default='', help='Log file.')
# Custom log level:
# - FINE: Prints status of each frame processing step
parser.add_option('--log-level', '--log_level', type='choice',
dest='log_level', default='warn',
choices=['fine',
'debug', 'info', 'warning', 'warn', 'error',
'critical'],
help='Log level.')
parser.add_option('--deflate-log-level', '--deflate_log_level',
type='choice',
dest='deflate_log_level', default='warn',
choices=['debug', 'info', 'warning', 'warn', 'error',
'critical'],
help='Log level for _Deflater and _Inflater.')
parser.add_option('--thread-monitor-interval-in-sec',
'--thread_monitor_interval_in_sec',
dest='thread_monitor_interval_in_sec',
type='int', default=-1,
                      help=('If a positive integer is specified, run a thread '
                            'monitor to show the status of server threads '
                            'periodically at the specified interval in '
                            'seconds. If a non-positive integer is specified, '
                            'disable the thread monitor.'))
parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
default=_DEFAULT_LOG_MAX_BYTES,
help='Log maximum bytes')
parser.add_option('--log-count', '--log_count', dest='log_count',
type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
help='Log backup count')
parser.add_option('--allow-draft75', dest='allow_draft75',
action='store_true', default=False,
help='Obsolete option. Ignored.')
parser.add_option('--strict', dest='strict', action='store_true',
default=False, help='Obsolete option. Ignored.')
parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
default=_DEFAULT_REQUEST_QUEUE_SIZE,
help='request queue size')
return parser
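# A minimal, illustrative invocation of this standalone server using the
# options defined above (the script name, paths, and port are placeholder
# assumptions, not values taken from this file):
#
#   python standalone.py -p 9998 -d /path/to/docroot \
#       -w /path/to/handlers --log-level info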
class ThreadMonitor(threading.Thread):
daemon = True
def __init__(self, interval_in_sec):
threading.Thread.__init__(self, name='ThreadMonitor')
self._logger = util.get_class_logger(self)
self._interval_in_sec = interval_in_sec
def run(self):
while True:
thread_name_list = []
for thread in threading.enumerate():
thread_name_list.append(thread.name)
self._logger.info(
"%d active threads: %s",
threading.active_count(),
', '.join(thread_name_list))
time.sleep(self._interval_in_sec)
def _parse_args_and_config(args):
parser = _build_option_parser()
# First, parse options without configuration file.
temporary_options, temporary_args = parser.parse_args(args=args)
if temporary_args:
logging.critical(
'Unrecognized positional arguments: %r', temporary_args)
sys.exit(1)
if temporary_options.config_file:
try:
config_fp = open(temporary_options.config_file, 'r')
except IOError, e:
logging.critical(
'Failed to open configuration file %r: %r',
temporary_options.config_file,
e)
sys.exit(1)
config_parser = ConfigParser.SafeConfigParser()
config_parser.readfp(config_fp)
config_fp.close()
args_from_config = []
for name, value in config_parser.items('pywebsocket'):
args_from_config.append('--' + name)
args_from_config.append(value)
if args is None:
args = args_from_config
else:
args = args_from_config + args
return parser.parse_args(args=args)
else:
return temporary_options, temporary_args
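# For reference, _parse_args_and_config above accepts a configuration file in
# ConfigParser format with a single 'pywebsocket' section; each option name is
# turned into a '--<name>' command line flag. An illustrative (assumed)
# example:
#
#   [pywebsocket]
#   port=9998
#   log_level=debug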
def _main(args=None):
"""You can call this function from your own program, but please note that
this function has some side-effects that might affect your program. For
example, util.wrap_popen3_for_win use in this method replaces implementation
of os.popen3.
"""
options, args = _parse_args_and_config(args=args)
os.chdir(options.document_root)
_configure_logging(options)
if options.allow_draft75:
logging.warning('--allow_draft75 option is obsolete.')
if options.strict:
logging.warning('--strict option is obsolete.')
# TODO(tyoshino): Clean up initialization of CGI related values. Move some
# of code here to WebSocketRequestHandler class if it's better.
options.cgi_directories = []
options.is_executable_method = None
if options.cgi_paths:
options.cgi_directories = options.cgi_paths.split(',')
if sys.platform in ('cygwin', 'win32'):
cygwin_path = None
# For Win32 Python, it is expected that CYGWIN_PATH
# is set to a directory of cygwin binaries.
# For example, websocket_server.py in Chromium sets CYGWIN_PATH to
# full path of third_party/cygwin/bin.
if 'CYGWIN_PATH' in os.environ:
cygwin_path = os.environ['CYGWIN_PATH']
util.wrap_popen3_for_win(cygwin_path)
def __check_script(scriptpath):
return util.get_script_interp(scriptpath, cygwin_path)
options.is_executable_method = __check_script
if options.use_tls:
if options.tls_module is None:
if _import_ssl():
options.tls_module = _TLS_BY_STANDARD_MODULE
logging.debug('Using ssl module')
elif _import_pyopenssl():
options.tls_module = _TLS_BY_PYOPENSSL
logging.debug('Using pyOpenSSL module')
else:
logging.critical(
'TLS support requires ssl or pyOpenSSL module.')
sys.exit(1)
elif options.tls_module == _TLS_BY_STANDARD_MODULE:
if not _import_ssl():
logging.critical('ssl module is not available')
sys.exit(1)
elif options.tls_module == _TLS_BY_PYOPENSSL:
if not _import_pyopenssl():
logging.critical('pyOpenSSL module is not available')
sys.exit(1)
else:
logging.critical('Invalid --tls-module option: %r',
options.tls_module)
sys.exit(1)
if not options.private_key or not options.certificate:
logging.critical(
'To use TLS, specify private_key and certificate.')
sys.exit(1)
if (options.tls_client_cert_optional and
not options.tls_client_auth):
logging.critical('Client authentication must be enabled to '
'specify tls_client_cert_optional')
sys.exit(1)
else:
if options.tls_module is not None:
            logging.critical('Use --tls-module option only together with '
                             'the --tls option.')
sys.exit(1)
if options.tls_client_auth:
logging.critical('TLS must be enabled for client authentication.')
sys.exit(1)
if options.tls_client_cert_optional:
logging.critical('TLS must be enabled for client authentication.')
sys.exit(1)
if not options.scan_dir:
options.scan_dir = options.websock_handlers
if options.use_basic_auth:
options.basic_auth_credential = 'Basic ' + base64.b64encode(
options.basic_auth_credential)
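        # The value computed above ('Basic ' + base64("user:password")) is
        # compared verbatim against the Authorization request header in
        # WebSocketRequestHandler.parse_request.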
try:
if options.thread_monitor_interval_in_sec > 0:
# Run a thread monitor to show the status of server threads for
# debugging.
ThreadMonitor(options.thread_monitor_interval_in_sec).start()
server = WebSocketServer(options)
server.serve_forever()
except Exception, e:
logging.critical('mod_pywebsocket: %s' % e)
logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
sys.exit(1)
if __name__ == '__main__':
_main(sys.argv[1:])
# vi:sts=4 sw=4 et
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import swiftclient.client as sc
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.aws.s3 import s3
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
swift_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test S3 Bucket resources",
"Resources" : {
"S3BucketWebsite" : {
"Type" : "AWS::S3::Bucket",
"DeletionPolicy" : "Delete",
"Properties" : {
"AccessControl" : "PublicRead",
"WebsiteConfiguration" : {
"IndexDocument" : "index.html",
"ErrorDocument" : "error.html"
}
}
},
"SwiftContainer": {
"Type": "OS::Swift::Container",
"Properties": {
"S3Bucket": {"Ref" : "S3Bucket"},
}
},
"S3Bucket" : {
"Type" : "AWS::S3::Bucket",
"Properties" : {
"AccessControl" : "Private"
}
},
"S3Bucket_with_tags" : {
"Type" : "AWS::S3::Bucket",
"Properties" : {
"Tags" : [{"Key": "greeting", "Value": "hello"},
{"Key": "location", "Value": "here"}]
}
}
}
}
'''
class s3Test(common.HeatTestCase):
def setUp(self):
super(s3Test, self).setUp()
self.m.CreateMock(sc.Connection)
self.m.StubOutWithMock(sc.Connection, 'put_container')
self.m.StubOutWithMock(sc.Connection, 'get_container')
self.m.StubOutWithMock(sc.Connection, 'delete_container')
self.m.StubOutWithMock(sc.Connection, 'get_auth')
def create_resource(self, t, stack, resource_name):
resource_defns = stack.t.resource_definitions(stack)
rsrc = s3.S3Bucket('test_resource',
resource_defns[resource_name],
stack)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_attributes(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant:test_username'}
).AndReturn(None)
sc.Connection.get_auth().MultipleTimes().AndReturn(
('http://server.test:8080/v_2', None))
sc.Connection.delete_container(container_name).AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(swift_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
ref_id = rsrc.FnGetRefId()
self.assertEqual(container_name, ref_id)
self.assertEqual('server.test', rsrc.FnGetAtt('DomainName'))
url = 'http://server.test:8080/v_2/%s' % ref_id
self.assertEqual(url, rsrc.FnGetAtt('WebsiteURL'))
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'Foo')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_public_read(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
utils.PhysName('test_stack', 'test_resource'),
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': '.r:*'}).AndReturn(None)
sc.Connection.delete_container(
container_name).AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(swift_template)
properties = t['Resources']['S3Bucket']['Properties']
properties['AccessControl'] = 'PublicRead'
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_tags(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
utils.PhysName('test_stack', 'test_resource'),
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant:test_username',
'X-Container-Meta-S3-Tag-greeting': 'hello',
'X-Container-Meta-S3-Tag-location': 'here'}).AndReturn(None)
sc.Connection.delete_container(
container_name).AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(swift_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket_with_tags')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_public_read_write(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Write': '.r:*',
'X-Container-Read': '.r:*'}).AndReturn(None)
sc.Connection.delete_container(
container_name).AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(swift_template)
properties = t['Resources']['S3Bucket']['Properties']
properties['AccessControl'] = 'PublicReadWrite'
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_authenticated_read(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant'}).AndReturn(None)
sc.Connection.delete_container(container_name).AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(swift_template)
properties = t['Resources']['S3Bucket']['Properties']
properties['AccessControl'] = 'AuthenticatedRead'
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_website(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Meta-Web-Error': 'error.html',
'X-Container-Meta-Web-Index': 'index.html',
'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': '.r:*'}).AndReturn(None)
sc.Connection.delete_container(container_name).AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(swift_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3BucketWebsite')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_delete_exception(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant:test_username'}).AndReturn(None)
sc.Connection.delete_container(container_name).AndRaise(
sc.ClientException('Test delete failure'))
self.m.ReplayAll()
t = template_format.parse(swift_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.m.VerifyAll()
def test_delete_not_found(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant:test_username'}).AndReturn(None)
sc.Connection.delete_container(container_name).AndRaise(
sc.ClientException('Its gone', http_status=404))
self.m.ReplayAll()
t = template_format.parse(swift_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_delete_conflict_not_empty(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant:test_username'}).AndReturn(None)
sc.Connection.delete_container(container_name).AndRaise(
sc.ClientException('Not empty', http_status=409))
sc.Connection.get_container(container_name).AndReturn(
({'name': container_name}, [{'name': 'test_object'}]))
self.m.ReplayAll()
t = template_format.parse(swift_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
deleter = scheduler.TaskRunner(rsrc.delete)
ex = self.assertRaises(exception.ResourceFailure, deleter)
self.assertIn("ResourceActionNotSupported: resources.test_resource: "
"The bucket you tried to delete is not empty",
six.text_type(ex))
self.m.VerifyAll()
def test_delete_conflict_empty(self):
container_name = utils.PhysName('test_stack', 'test_resource')
sc.Connection.put_container(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant:test_username'}).AndReturn(None)
sc.Connection.delete_container(container_name).AndRaise(
sc.ClientException('Conflict', http_status=409))
sc.Connection.get_container(container_name).AndReturn(
({'name': container_name}, []))
self.m.ReplayAll()
t = template_format.parse(swift_template)
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
deleter = scheduler.TaskRunner(rsrc.delete)
ex = self.assertRaises(exception.ResourceFailure, deleter)
self.assertIn("Conflict", six.text_type(ex))
self.m.VerifyAll()
def test_delete_retain(self):
# first run, with retain policy
sc.Connection.put_container(
utils.PhysName('test_stack', 'test_resource'),
{'X-Container-Write': 'test_tenant:test_username',
'X-Container-Read': 'test_tenant:test_username'}).AndReturn(None)
self.m.ReplayAll()
t = template_format.parse(swift_template)
bucket = t['Resources']['S3Bucket']
bucket['DeletionPolicy'] = 'Retain'
stack = utils.parse_stack(t)
rsrc = self.create_resource(t, stack, 'S3Bucket')
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
|
|
import datetime
from django.conf import settings
from django.core.files.base import File
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from djangohelpers import allow_http
from djangohelpers import rendered_with
import ho.pisa as pisa
import markdown
import os
import pprint
import pyPdf
from reportlab.pdfgen import canvas
import sys
import tailer
import tempfile
from pdfbuilder.basetemplates import (NumberedCanvas,
ReportlabProgressLogger)
from pdfbuilder.models import (Configuration,
SavedPdf)
from pdfbuilder import registry
from pdfbuilder import app_settings
@rendered_with("pdfbuilder/new_configuration.html")
@allow_http("GET", "POST")
def create_configuration(request):
if request.method == "GET":
return {}
data = "[options]"
name = "New Configuration by %s on %s" % (
request.user.username, datetime.datetime.now())
if 'clone_from' in request.POST:
try:
base_config = Configuration.objects.get(
pk=request.POST['clone_from'])
except Configuration.DoesNotExist:
pass
else:
data = base_config.data
name = "Copy of <%s>" % base_config.name
name = "%s by %s on %s" % (
name,
request.user.username,
datetime.datetime.now())
config = Configuration()
config.data = data
config.name = name
config.save()
url = reverse("view-configuration", args=[config.id])
qs = []
if 'data_source' in request.POST:
qs.append("data_source=%s" % request.POST['data_source'])
if len(qs):
qs = '&'.join(qs)
url += "?" + qs
return HttpResponseRedirect(url)
@rendered_with("pdfbuilder/configuration.html")
@allow_http("GET", "POST")
def configuration(request, config_id):
config = get_object_or_404(Configuration, pk=config_id)
if request.method == "GET":
saved_pdfs = SavedPdf.objects.filter(configuration=config)
return {
'config': config,
'templates': registry.available_templates(),
'orderings': registry.available_orderings(),
'groupings': registry.available_groupings(),
'saved_pdfs': saved_pdfs,
}
options = dict(request.POST.items())
if 'csrfmiddlewaretoken' in request.POST:
del options['csrfmiddlewaretoken']
if 'description' in request.POST:
config.name = options['description']
del options['description']
if 'filter_by' in request.POST and request.POST['filter_by'].strip():
options['filter_by'] = request.POST.getlist("filter_by")
else:
options['filter_by'] = None
url = reverse("view-configuration", args=[config.id])
qs = []
if 'data_source' in request.GET:
qs.append("data_source=%s" % request.GET['data_source'])
if len(qs):
qs = '&'.join(qs)
url += "?" + qs
config.set_options(options)
return HttpResponseRedirect(url)
@allow_http("GET", "POST")
def prepare_pdf_export(request, config_id):
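    # POST: allocate a fresh temporary log file and return its basename (minus
    # the .log suffix) as a progress "key". GET (?key=...): tail the last line
    # of that log so the client can poll PDF generation progress.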
if request.method == "POST":
fd, filename = tempfile.mkstemp(suffix=".log")
fp = open(filename, 'w')
print >> fp, "Ready to start PDF generation"
fp.close()
# Strip off the path and the suffix
filename = filename[len(tempfile.gettempdir()):]
filename = filename[:-4]
filename = filename.lstrip(os.path.sep)
assert os.sep not in filename and filename.isalnum() # lazy security checking
return HttpResponse(filename, content_type="text/plain")
filename = request.GET.get("key")
assert filename and os.sep not in filename and filename.isalnum() # lazy security checking
filename = os.path.join(tempfile.gettempdir(), filename + ".log")
fp = open(filename)
try:
body = tailer.tail(fp, 1)[0]
except IndexError:
body = ''
fp.close()
return HttpResponse(body, content_type="text/plain")
@allow_http("GET", "POST")
@rendered_with("pdfbuilder/pdf_export.html")
def page_export_pdf(request, config_id):
config = get_object_or_404(Configuration, pk=config_id)
if request.method == "GET":
return {'config': config}
log_filename = request.POST.get("key")
assert (log_filename and os.sep not in log_filename
and log_filename.isalnum()) # lazy security checking
log_filename = os.path.join(tempfile.gettempdir(),
log_filename + ".log")
data_source = request.POST.get("data_source")
logger = open(log_filename, 'w')
print >> logger, "Fetching data from source.."
logger.flush()
from zope.dottedname.resolve import resolve
queryset = resolve(settings.PDFBUILDER_DATA_SOURCE)(request, data_source)
entry_count_upper_bound = queryset.count()
order_by = registry.get_ordering(config.order_by())
if order_by is not None:
queryset = queryset.order_by(*order_by)
group_by = registry.get_grouping(config.group_by())
#filter_by = config.filter_by()
## FIXME: this is ugly
#def fixer(queryset, filter_by):
# for item in queryset:
# omit = False
# for field in filter_by:
# if getattr(item, field)() is None:
# omit = True
# break
# if omit is True:
# continue
# yield item
#queryset = fixer(queryset, filter_by)
template = registry.get_template(config.template())
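    # Progress callback passed to the template: counts the flowables prepared
    # so far across all buckets and writes a "done / upper bound" line to the
    # log whenever the count is a multiple of 500.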
def log_callback(prepared_flowables):
count = 0
for name, bucket in prepared_flowables.items():
count += len(bucket)
if count % 500:
return
print >> logger, "%s / %s" % (count, entry_count_upper_bound)
logger.flush()
try:
grouped_elements = template.generate_flowables(
queryset,
number_entries=config.number_entries(),
bucket_selector=group_by,
log_callback=log_callback)
except Exception, e:
print >> logger, pprint.pformat(sys.exc_info())
logger.flush()
raise
cover_letter = request.POST.get('coverletter')
if cover_letter:
print >> logger, "Generating cover letter..."
logger.flush()
_default_css = open(app_settings.PDFBUILDER_COVERLETTER_CSS)
DEFAULT_CSS = _default_css.read()
_default_css.close()
del(_default_css)
cover_letter = markdown.markdown(cover_letter)
cover_letter = app_settings.PDFBUILDER_COVERLETTER_FUNCTION(
cover_letter.encode("utf8"), request, config)
fd, cover_letter_filename = tempfile.mkstemp(suffix=".pdf")
cover_letter_file = open(cover_letter_filename, 'wb')
pisa.CreatePDF(cover_letter, cover_letter_file, default_css=DEFAULT_CSS)
cover_letter_file.close()
print >> logger, "Finalizing %s PDF(s)..." % len(grouped_elements)
logger.flush()
comment = request.POST.get("comment", '')
progress_logger = ReportlabProgressLogger(logger)
saved_pdfs = []
for key, elements in grouped_elements.items():
template.reset_pdf_file()
doc = template.doctemplate(config)
doc.setProgressCallBack(progress_logger)
doc.build(elements, canvasmaker=template.canvasmaker(config))
fd, filename = template.pdf_file()
if cover_letter:
print >> logger, "Merging cover letter with PDF..."
logger.flush()
cover_letter_file = open(cover_letter_filename)
cover_letter = pyPdf.PdfFileReader(cover_letter_file)
content_file = open(filename)
content = pyPdf.PdfFileReader(content_file)
result = pyPdf.PdfFileWriter()
for page in cover_letter.pages:
result.addPage(page)
for page in content.pages:
result.addPage(page)
fd, result_filename = tempfile.mkstemp(suffix=".pdf")
fp = open(result_filename, 'wb')
result.write(fp)
cover_letter_file.close()
content_file.close()
fp.close()
os.unlink(cover_letter_filename)
os.unlink(filename)
filename = result_filename
fp = open(filename)
print >> logger, "Storing PDF file and updating database..."
logger.flush()
pdf = SavedPdf(author=request.user,
configuration=config,
comment=comment,
data_source=data_source,
group=key,
cover_letter=request.POST.get('coverletter', ''))
file = File(fp)
pdf.file.save("%s-%s.pdf" % (config.pk, request.user), file)
file.close()
os.unlink(filename)
del(fd)
saved_pdfs.append(pdf)
print >> logger, "PDF generation complete."
logger.flush()
logger.close()
os.unlink(log_filename)
if request.is_ajax():
return pdf_table_snippet(request, saved_pdfs)
return pdf_table_results(request, saved_pdfs)
@rendered_with("pdfbuilder/pdf_table.html")
def pdf_table_results(request, saved_pdfs):
return {'saved_pdfs': saved_pdfs}
@rendered_with("pdfbuilder/snippets/pdf_table.html")
def pdf_table_snippet(request, saved_pdfs):
return {'saved_pdfs': saved_pdfs}
def download_pdf(request, pdf_id):
pdf = SavedPdf.objects.get(pk=pdf_id)
contents = pdf.file.read()
resp = HttpResponse(contents,
content_type="application/pdf")
resp['Content-Disposition'] = "attachment; filename=export.pdf"
return resp
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.stdio}.
"""
from __future__ import absolute_import, division
import os
import sys
import itertools
from twisted.trial import unittest
from twisted.python import filepath, log
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
from twisted.python.compat import xrange, intToBytes
from twisted.internet import error, defer, protocol, stdio, reactor
from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
# A short string which is intended to appear here and nowhere else,
# particularly not in any random garbage output CPython unavoidably
# generates (such as in warning text and so forth). This is searched
# for in the output from stdio_test_lastwrite.py and if it is found at
# the end, the functionality works.
UNIQUE_LAST_WRITE_STRING = b'xyz123abc Twisted is great!'
skipWindowsNopywin32 = None
if platform.isWindows():
if requireModule('win32process') is None:
skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
"in the absence of win32process.")
class StandardIOTestProcessProtocol(protocol.ProcessProtocol):
"""
Test helper for collecting output from a child process and notifying
something when it exits.
@ivar onConnection: A L{defer.Deferred} which will be called back with
C{None} when the connection to the child process is established.
@ivar onCompletion: A L{defer.Deferred} which will be errbacked with the
failure associated with the child process exiting when it exits.
@ivar onDataReceived: A L{defer.Deferred} which will be called back with
this instance whenever C{childDataReceived} is called, or C{None} to
suppress these callbacks.
@ivar data: A C{dict} mapping file descriptors to strings containing all
bytes received from the child process on each file descriptor.
"""
onDataReceived = None
def __init__(self):
self.onConnection = defer.Deferred()
self.onCompletion = defer.Deferred()
self.data = {}
def connectionMade(self):
self.onConnection.callback(None)
def childDataReceived(self, name, bytes):
"""
Record all bytes received from the child process in the C{data}
dictionary. Fire C{onDataReceived} if it is not C{None}.
"""
self.data[name] = self.data.get(name, b'') + bytes
if self.onDataReceived is not None:
d, self.onDataReceived = self.onDataReceived, None
d.callback(self)
def processEnded(self, reason):
self.onCompletion.callback(reason)
class StandardInputOutputTests(unittest.TestCase):
skip = skipWindowsNopywin32
def _spawnProcess(self, proto, sibling, *args, **kw):
"""
Launch a child Python process and communicate with it using the
given ProcessProtocol.
@param proto: A L{ProcessProtocol} instance which will be connected
to the child process.
@param sibling: The basename of a file containing the Python program
to run in the child process.
@param *args: strings which will be passed to the child process on
the command line as C{argv[2:]}.
@param **kw: additional arguments to pass to L{reactor.spawnProcess}.
@return: The L{IProcessTransport} provider for the spawned process.
"""
import twisted
subenv = dict(os.environ)
subenv['PYTHONPATH'] = os.pathsep.join(
[os.path.abspath(
os.path.dirname(os.path.dirname(twisted.__file__))),
subenv.get('PYTHONPATH', '')
])
args = [sys.executable,
filepath.FilePath(__file__).sibling(sibling).asBytesMode().path,
reactor.__class__.__module__] + list(args)
return reactor.spawnProcess(
proto,
sys.executable,
args,
env=subenv,
**kw)
def _requireFailure(self, d, callback):
def cb(result):
self.fail("Process terminated with non-Failure: %r" % (result,))
def eb(err):
return callback(err)
return d.addCallbacks(cb, eb)
def test_loseConnection(self):
"""
Verify that a protocol connected to L{StandardIO} can disconnect
itself using C{transport.loseConnection}.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_loseconn.py', errorLogFile)
def processEnded(reason):
# Copy the child's log to ours so it's more visible.
with open(errorLogFile, 'r') as f:
for line in f:
log.msg("Child logged: " + line.rstrip())
self.failIfIn(1, p.data)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_readConnectionLost(self):
"""
When stdin is closed and the protocol connected to it implements
L{IHalfCloseableProtocol}, the protocol's C{readConnectionLost} method
is called.
"""
errorLogFile = self.mktemp()
log.msg("Child process logging to " + errorLogFile)
p = StandardIOTestProcessProtocol()
p.onDataReceived = defer.Deferred()
def cbBytes(ignored):
d = p.onCompletion
p.transport.closeStdin()
return d
p.onDataReceived.addCallback(cbBytes)
def processEnded(reason):
reason.trap(error.ProcessDone)
d = self._requireFailure(p.onDataReceived, processEnded)
self._spawnProcess(
p, 'stdio_test_halfclose.py', errorLogFile)
return d
def test_lastWriteReceived(self):
"""
Verify that a write made directly to stdout using L{os.write}
after StandardIO has finished is reliably received by the
process reading that stdout.
"""
p = StandardIOTestProcessProtocol()
# Note: the OS X bug which prompted the addition of this test
# is an apparent race condition involving non-blocking PTYs.
# Delaying the parent process significantly increases the
# likelihood of the race going the wrong way. If you need to
# fiddle with this code at all, uncommenting the next line
# will likely make your life much easier. It is commented out
# because it makes the test quite slow.
# p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))
try:
self._spawnProcess(
p, 'stdio_test_lastwrite.py', UNIQUE_LAST_WRITE_STRING,
usePTY=True)
except ValueError as e:
# Some platforms don't work with usePTY=True
raise unittest.SkipTest(str(e))
def processEnded(reason):
"""
Asserts that the parent received the bytes written by the child
immediately after the child starts.
"""
self.assertTrue(
p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
"Received %r from child, did not find expected bytes." % (
p.data,))
reason.trap(error.ProcessDone)
return self._requireFailure(p.onCompletion, processEnded)
def test_hostAndPeer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
has C{getHost} and C{getPeer} methods.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_hostpeer.py')
def processEnded(reason):
host, peer = p.data[1].splitlines()
self.failUnless(host)
self.failUnless(peer)
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_write(self):
"""
Verify that the C{write} method of the transport of a protocol
connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_write.py')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_writeSequence(self):
"""
Verify that the C{writeSequence} method of the transport of a
protocol connected to L{StandardIO} sends bytes to standard out.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
self._spawnProcess(p, 'stdio_test_writeseq.py')
def processEnded(reason):
self.assertEqual(p.data[1], b'ok!')
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def _junkPath(self):
junkPath = self.mktemp()
with open(junkPath, 'wb') as junkFile:
for i in xrange(1024):
junkFile.write(intToBytes(i) + b'\n')
return junkPath
def test_producer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IProducer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
written = []
toWrite = list(range(100))
def connectionMade(ign):
if toWrite:
written.append(intToBytes(toWrite.pop()) + b"\n")
proc.write(written[-1])
reactor.callLater(0.01, connectionMade, None)
proc = self._spawnProcess(p, 'stdio_test_producer.py')
p.onConnection.addCallback(connectionMade)
def processEnded(reason):
self.assertEqual(p.data[1], b''.join(written))
self.failIf(toWrite, "Connection lost with %d writes left to go." % (len(toWrite),))
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_consumer(self):
"""
Verify that the transport of a protocol connected to L{StandardIO}
is a working L{IConsumer} provider.
"""
p = StandardIOTestProcessProtocol()
d = p.onCompletion
junkPath = self._junkPath()
self._spawnProcess(p, 'stdio_test_consumer.py', junkPath)
def processEnded(reason):
with open(junkPath, 'rb') as f:
self.assertEqual(p.data[1], f.read())
reason.trap(error.ProcessDone)
return self._requireFailure(d, processEnded)
def test_normalFileStandardOut(self):
"""
If L{StandardIO} is created with a file descriptor which refers to a
normal file (ie, a file from the filesystem), L{StandardIO.write}
writes bytes to that file. In particular, it does not immediately
consider the file closed or call its protocol's C{connectionLost}
method.
"""
onConnLost = defer.Deferred()
proto = ConnectionLostNotifyingProtocol(onConnLost)
path = filepath.FilePath(self.mktemp())
self.normal = normal = path.open('wb')
self.addCleanup(normal.close)
kwargs = dict(stdout=normal.fileno())
if not platform.isWindows():
# Make a fake stdin so that StandardIO doesn't mess with the *real*
# stdin.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
kwargs['stdin'] = r
connection = stdio.StandardIO(proto, **kwargs)
# The reactor needs to spin a bit before it might have incorrectly
# decided stdout is closed. Use this counter to keep track of how
# much we've let it spin. If it closes before we expected, this
# counter will have a value that's too small and we'll know.
howMany = 5
count = itertools.count()
def spin():
for value in count:
if value == howMany:
connection.loseConnection()
return
connection.write(intToBytes(value))
break
reactor.callLater(0, spin)
reactor.callLater(0, spin)
# Once the connection is lost, make sure the counter is at the
# appropriate value.
def cbLost(reason):
self.assertEqual(next(count), howMany + 1)
self.assertEqual(
path.getContent(),
b''.join(map(intToBytes, range(howMany))))
onConnLost.addCallback(cbLost)
return onConnLost
if platform.isWindows():
test_normalFileStandardOut.skip = (
"StandardIO does not accept stdout as an argument to Windows. "
"Testing redirection to a file is therefore harder.")
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
from pymatgen.core.operations import MagSymmOp, SymmOp
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.testing import PymatgenTest
class SymmOpTestCase(PymatgenTest):
def setUp(self):
self.op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, [0, 0, 1])
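        # The operation above is a 30 degree rotation about the z axis (the
        # angle is given in degrees since angle_in_radians=False) combined
        # with a translation of [0, 0, 1].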
def test_properties(self):
rot = self.op.rotation_matrix
vec = self.op.translation_vector
self.assertArrayAlmostEqual(rot, [[0.8660254, -0.5, 0.0], [0.5, 0.8660254, 0.0], [0.0, 0.0, 1.0]], 2)
self.assertArrayAlmostEqual(vec, [0, 0, 1], 2)
def test_operate(self):
point = np.array([1, 2, 3])
newcoord = self.op.operate(point)
self.assertArrayAlmostEqual(newcoord, [-0.1339746, 2.23205081, 4.0], 2)
def test_operate_multi(self):
point = np.array([1, 2, 3])
newcoords = self.op.operate_multi([point, point])
self.assertArrayAlmostEqual(newcoords, [[-0.1339746, 2.23205081, 4.0]] * 2, 2)
newcoords = self.op.operate_multi([[point, point]] * 2)
self.assertArrayAlmostEqual(newcoords, [[[-0.1339746, 2.23205081, 4.0]] * 2] * 2, 2)
def test_inverse(self):
point = np.random.rand(3)
newcoord = self.op.operate(point)
self.assertArrayAlmostEqual(self.op.inverse.operate(newcoord), point, 2)
def test_reflection(self):
normal = np.random.rand(3)
origin = np.random.rand(3)
refl = SymmOp.reflection(normal, origin)
point = np.random.rand(3)
newcoord = refl.operate(point)
        # The distances of the original and reflected points to the plane
        # should be negatives of each other.
self.assertAlmostEqual(np.dot(newcoord - origin, normal), -np.dot(point - origin, normal))
def test_apply_rotation_only(self):
point = np.random.rand(3)
newcoord = self.op.operate(point)
rotate_only = self.op.apply_rotation_only(point)
self.assertArrayAlmostEqual(rotate_only + self.op.translation_vector, newcoord, 2)
def test_transform_tensor(self):
# Rank 2
tensor = np.arange(0, 9).reshape(3, 3)
new_tensor = self.op.transform_tensor(tensor)
self.assertArrayAlmostEqual(
new_tensor,
[
[-0.73205, -1.73205, -0.76794],
[0.26795, 4.73205, 5.33013],
[1.69615, 9.06218, 8.0],
],
5,
)
# Rank 3
tensor = np.arange(0, 27).reshape(3, 3, 3)
new_tensor = self.op.transform_tensor(tensor)
self.assertArrayAlmostEqual(
new_tensor,
[
[
[-0.871, -2.884, -1.928],
[-2.152, -6.665, -4.196],
[-1.026, -2.830, -1.572],
],
[
[0.044, 1.531, 1.804],
[4.263, 21.008, 17.928],
[5.170, 23.026, 18.722],
],
[
[1.679, 7.268, 5.821],
[9.268, 38.321, 29.919],
[8.285, 33.651, 26.000],
],
],
3,
)
# Rank 4
tensor = np.arange(0, 81).reshape(3, 3, 3, 3)
new_tensor = self.op.transform_tensor(tensor)
self.assertArrayAlmostEqual(
new_tensor,
[
[
[
[-0.981, -3.526, -2.514],
[-3.258, -11.660, -8.286],
[-2.184, -7.786, -5.517],
],
[
[-2.454, -8.660, -6.090],
[-7.660, -26.722, -18.629],
[-4.858, -16.763, -11.588],
],
[
[-1.194, -4.090, -2.811],
[-3.358, -11.165, -7.490],
[-1.909, -6.124, -3.983],
],
],
[
[
[-0.043, 0.340, 0.499],
[1.340, 6.866, 5.959],
[1.731, 7.825, 6.412],
],
[
[4.340, 18.062, 14.155],
[21.794, 88.301, 68.123],
[18.754, 75.087, 57.517],
],
[
[5.427, 21.620, 16.510],
[24.352, 95.979, 72.811],
[19.876, 77.909, 58.899],
],
],
[
[
[1.777, 6.999, 5.306],
[7.731, 30.218, 22.804],
[6.208, 24.170, 18.194],
],
[
[9.927, 38.414, 28.804],
[41.146, 158.656, 118.694],
[32.170, 123.792, 92.488],
],
[
[8.914, 34.268, 25.586],
[36.268, 139.086, 103.684],
[28.050, 107.416, 80.000],
],
],
],
3,
)
def test_are_symmetrically_related(self):
point = np.random.rand(3)
newcoord = self.op.operate(point)
self.assertTrue(self.op.are_symmetrically_related(point, newcoord))
self.assertTrue(self.op.are_symmetrically_related(newcoord, point))
def test_to_from_dict(self):
d = self.op.as_dict()
op = SymmOp.from_dict(d)
point = np.random.rand(3)
newcoord = self.op.operate(point)
self.assertTrue(op.are_symmetrically_related(point, newcoord))
def test_inversion(self):
origin = np.random.rand(3)
op = SymmOp.inversion(origin)
pt = np.random.rand(3)
inv_pt = op.operate(pt)
self.assertArrayAlmostEqual(pt - origin, origin - inv_pt)
def test_xyz(self):
op = SymmOp([[1, -1, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
s = op.as_xyz_string()
self.assertEqual(s, "x-y, -y, -z")
self.assertEqual(op, SymmOp.from_xyz_string(s))
op2 = SymmOp([[0, -1, 0, 0.5], [1, 0, 0, 0.5], [0, 0, 1, 0.5 + 1e-7], [0, 0, 0, 1]])
s2 = op2.as_xyz_string()
self.assertEqual(s2, "-y+1/2, x+1/2, z+1/2")
self.assertEqual(op2, SymmOp.from_xyz_string(s2))
op2 = SymmOp(
[
[3, -2, -1, 0.5],
[-1, 0, 0, 12.0 / 13],
[0, 0, 1, 0.5 + 1e-7],
[0, 0, 0, 1],
]
)
s2 = op2.as_xyz_string()
self.assertEqual(s2, "3x-2y-z+1/2, -x+12/13, z+1/2")
self.assertEqual(op2, SymmOp.from_xyz_string(s2))
op3 = SymmOp.from_xyz_string("3x - 2y - z+1 /2 , -x+12/ 13, z+1/2")
self.assertEqual(op2, op3)
# Ensure strings can be read in any order
op4 = SymmOp.from_xyz_string("1 /2 + 3X - 2y - z , 12/ 13-x, z+1/2")
op5 = SymmOp.from_xyz_string("+1 /2 + 3x - 2y - z , 12/ 13-x, +1/2+z")
self.assertEqual(op4, op3)
self.assertEqual(op4, op5)
self.assertEqual(op3, op5)
# TODO: assertWarns not in Python 2.x unittest
# update PymatgenTest for unittest2?
# self.assertWarns(UserWarning, self.op.as_xyz_string)
o = SymmOp.from_xyz_string("0.5+x, 0.25+y, 0.75+z")
self.assertArrayAlmostEqual(o.translation_vector, [0.5, 0.25, 0.75])
o = SymmOp.from_xyz_string("x + 0.5, y + 0.25, z + 0.75")
self.assertArrayAlmostEqual(o.translation_vector, [0.5, 0.25, 0.75])
class MagSymmOpTestCase(PymatgenTest):
def test_xyzt_string(self):
xyzt_strings = ["x, y, z, +1", "x, y, z, -1", "-y+1/2, x+1/2, x+1/2, +1"]
for xyzt_string in xyzt_strings:
op = MagSymmOp.from_xyzt_string(xyzt_string)
xyzt_string_out = op.as_xyzt_string()
self.assertEqual(xyzt_string, xyzt_string_out)
op = SymmOp(
[
[3, -2, -1, 0.5],
[-1, 0, 0, 12.0 / 13],
[0, 0, 1, 0.5 + 1e-7],
[0, 0, 0, 1],
]
)
magop = MagSymmOp.from_symmop(op, -1)
magop_str = magop.as_xyzt_string()
self.assertEqual(magop.time_reversal, -1)
self.assertEqual(magop_str, "3x-2y-z+1/2, -x+12/13, z+1/2, -1")
def test_to_from_dict(self):
op = SymmOp(
[
[3, -2, -1, 0.5],
[-1, 0, 0, 12.0 / 13],
[0, 0, 1, 0.5 + 1e-7],
[0, 0, 0, 1],
]
)
magop = MagSymmOp.from_symmop(op, -1)
magop2 = MagSymmOp.from_dict(magop.as_dict())
self.assertEqual(magop2.time_reversal, -1)
self.assertEqual(magop2.as_xyzt_string(), "3x-2y-z+1/2, -x+12/13, z+1/2, -1")
def test_operate_magmom(self):
# all test magmoms are the same
magmoms = [
Magmom([1, 2, 3]), # as Magmom
[1, 2, 3], # as list
Magmom([-3, 2, 1], saxis=[1, 0, 0]),
] # as Magmom with non-default saxis
xyzt_strings = ["x, y, z, +1", "x, y, z, -1", "x, -y, z, -1", "-x, -y, z, -1"]
transformed_magmoms = [[1, 2, 3], [-1, -2, -3], [1, -2, 3], [1, 2, -3]]
for xyzt_string, transformed_magmom in zip(xyzt_strings, transformed_magmoms):
for magmom in magmoms:
op = MagSymmOp.from_xyzt_string(xyzt_string)
self.assertTrue(np.allclose(transformed_magmom, op.operate_magmom(magmom).global_moment))
if __name__ == "__main__":
import unittest
unittest.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is to find lowest eigenvalues with Davidson algorithm."""
import logging
import warnings
import numpy
import numpy.linalg
import scipy
import scipy.linalg
import scipy.sparse
import scipy.sparse.linalg
from openfermion.linalg.sparse_tools import get_linear_qubit_operator_diagonal
from openfermion.linalg.linear_qubit_operator import \
generate_linear_qubit_operator
class DavidsonError(Exception):
"""Exceptions."""
pass
class DavidsonOptions(object):
"""Davidson algorithm iteration options."""
def __init__(self,
max_subspace=100,
max_iterations=300,
eps=1e-6,
real_only=False):
"""
Args:
max_subspace(int): Max number of vectors in the auxiliary subspace.
max_iterations(int): Max number of iterations.
            eps(float): The maximum error allowed in the eigenvector residual
                linear_operator * v - v * lambda during iterations.
            real_only(bool): Whether the desired eigenvectors are real only.
                If real_only is True but the operator has only complex
                eigenvectors, the returned vectors will be complex whether or
                not the iteration converges.
"""
if max_subspace <= 2 or max_iterations <= 0 or eps <= 0:
raise ValueError('Invalid values for max_subspace, max_iterations '
                             'and/or eps: ({}, {}, {}).'.format(
max_subspace, max_iterations, eps))
self.max_subspace = max_subspace
self.max_iterations = max_iterations
self.eps = eps
self.real_only = real_only
def set_dimension(self, dimension):
"""
Args:
            dimension(int): Dimension of the matrix, which sets an upper limit on
the work space.
"""
if dimension <= 0:
            raise ValueError('Invalid dimension: {}.'.format(dimension))
self.max_subspace = min(self.max_subspace, dimension + 1)
class Davidson(object):
"""Davidson algorithm to get the n states with smallest eigenvalues."""
def __init__(self, linear_operator, linear_operator_diagonal, options=None):
"""
Args:
linear_operator(scipy.sparse.linalg.LinearOperator): The linear
operator which defines a dot function when applying on a vector.
linear_operator_diagonal(numpy.ndarray): The linear operator's
diagonal elements.
options(DavidsonOptions): Iteration options.
"""
if options is None:
options = DavidsonOptions()
if not isinstance(
linear_operator,
(scipy.sparse.linalg.LinearOperator, scipy.sparse.spmatrix)):
raise ValueError(
'linear_operator is not a LinearOperator: {}.'.format(
type(linear_operator)))
self.linear_operator = linear_operator
self.linear_operator_diagonal = linear_operator_diagonal
self.options = options
self.options.set_dimension(len(linear_operator_diagonal))
def get_lowest_n(self, n_lowest=1, initial_guess=None, max_iterations=None):
"""
Returns `n` smallest eigenvalues and corresponding eigenvectors for
linear_operator.
Args:
            n_lowest(int):
The number of states corresponding to the smallest eigenvalues
and associated eigenvectors for the linear_operator.
initial_guess(numpy.ndarray[complex]): Initial guess of eigenvectors
associated with the `n` smallest eigenvalues.
max_iterations(int): Max number of iterations when not converging.
Returns:
success(bool): Indicates whether it converged, i.e. max elementwise
error is smaller than eps.
eigen_values(numpy.ndarray[complex]): The smallest n eigenvalues.
eigen_vectors(numpy.ndarray[complex]): The smallest n eigenvectors
                corresponding to those eigenvalues.
"""
# Goes through a few checks and preprocessing before iterative
# diagonalization.
# 1. Checks for number of states desired, should be in the range of
        # [1, max_subspace).
if n_lowest <= 0 or n_lowest >= self.options.max_subspace:
raise ValueError('n_lowest {} is supposed to be in [1, {}).'.format(
n_lowest, self.options.max_subspace))
# 2. Checks for initial guess vectors' dimension is the same to that of
# the operator.
if initial_guess is None:
initial_guess = generate_random_vectors(
len(self.linear_operator_diagonal),
n_lowest,
real_only=self.options.real_only)
if initial_guess.shape[0] != len(self.linear_operator_diagonal):
            raise ValueError(
                'Guess vectors have a different dimension from the '
                'linear operator diagonal elements: {} != {}.'.format(
                    initial_guess.shape[0], len(self.linear_operator_diagonal)))
# 3. Makes sure real guess vector if real_only is specified.
if self.options.real_only:
if not numpy.allclose(numpy.real(initial_guess), initial_guess):
warnings.warn('Initial guess is not real only!', RuntimeWarning)
initial_guess = numpy.real(initial_guess)
# 4. Checks for non-trivial (non-zero) initial guesses.
if numpy.max(numpy.abs(initial_guess)) < self.options.eps:
raise ValueError('Guess vectors are all zero! {}'.format(
initial_guess.shape))
initial_guess = scipy.linalg.orth(initial_guess)
# 5. Makes sure number of initial guess vector is at least n_lowest.
if initial_guess.shape[1] < n_lowest:
initial_guess = append_random_vectors(
initial_guess,
n_lowest - initial_guess.shape[1],
real_only=self.options.real_only)
success = False
num_iterations = 0
guess_v = initial_guess
guess_mv = None
max_iterations = max_iterations or self.options.max_iterations
while (num_iterations < max_iterations and not success):
(eigen_values, eigen_vectors, mat_eigen_vectors, max_trial_error,
guess_v, guess_mv) = self._iterate(n_lowest, guess_v, guess_mv)
logging.info("Eigenvalues for iteration %d: %s, error is %f.",
num_iterations, eigen_values, max_trial_error)
if max_trial_error < self.options.eps:
success = True
break
# Make sure it keeps real components only.
if self.options.real_only:
guess_v = numpy.real(guess_v)
# Deals with new directions to make sure they're orthonormal.
# Also makes sure there're new directions added for the next
# iteration, if not, add n_lowest random vectors.
count_mvs = guess_mv.shape[1]
guess_v = orthonormalize(guess_v, count_mvs, self.options.eps)
if guess_v.shape[1] <= count_mvs:
guess_v = append_random_vectors(
guess_v, n_lowest, real_only=self.options.real_only)
# Limits number of vectors to self.options.max_subspace, in this
# case, keep the following:
# 1) first n_lowest eigen_vectors;
# 2) first n_lowest matrix multiplication result for eigen_vectors;
#
# 3) new search directions which will be used for improvement for
# the next iteration.
if guess_v.shape[1] >= self.options.max_subspace:
guess_v = numpy.hstack([
eigen_vectors,
guess_v[:, count_mvs:],
])
guess_mv = mat_eigen_vectors
if self.options.real_only:
if (not numpy.allclose(numpy.real(guess_v), guess_v) or
not numpy.allclose(numpy.real(guess_mv), guess_mv)):
# Forces recalculation for matrix multiplication with
# vectors.
guess_mv = None
num_iterations += 1
if (self.options.real_only and
not numpy.allclose(numpy.real(eigen_vectors), eigen_vectors)):
warnings.warn(
'Unable to get real only eigenvectors, return '
'complex vectors instead with success state {}.'.format(
success), RuntimeWarning)
return success, eigen_values, eigen_vectors
def _iterate(self, n_lowest, guess_v, guess_mv=None):
"""One iteration with guess vectors.
Args:
n_lowest(int): The first n_lowest number of eigenvalues and
eigenvectors one is interested in.
guess_v(numpy.ndarray(complex)): Guess eigenvectors associated with
the smallest eigenvalues.
guess_mv(numpy.ndarray(complex)): Matrix applied on guess_v,
therefore they should have the same dimension.
Returns:
trial_lambda(numpy.ndarray(float)): The minimal eigenvalues based on
guess eigenvectors.
trial_v(numpy.ndarray(complex)): New guess eigenvectors.
trial_mv(numpy.ndarray(complex)): New guess eigenvectors' matrix
multiplication result.
max_trial_error(float): The max elementwise error for all guess
vectors.
guess_v(numpy.ndarray(complex)): Cached guess eigenvectors to avoid
recalculation for the next iterations.
            guess_mv(numpy.ndarray(complex)): Cached guess vectors which are the
                matrix product of linear_operator with guess_v.
"""
if guess_mv is None:
guess_mv = self.linear_operator.dot(guess_v)
dimension = guess_v.shape[1]
# Note that getting guess_mv is the most expensive step.
if guess_mv.shape[1] < dimension:
guess_mv = numpy.hstack([
guess_mv,
self.linear_operator.dot(
guess_v[:, guess_mv.shape[1]:dimension])
])
guess_vmv = numpy.dot(guess_v.conj().T, guess_mv)
# Gets new set of eigenvalues and eigenvectors in the vmv space, with a
# smaller dimension which is the number of vectors in guess_v.
#
# Note that we don't get the eigenvectors directly, instead we only get
        # a transformation based on the raw vectors, so that mv doesn't need
        # to be recalculated.
trial_lambda, trial_transformation = numpy.linalg.eigh(guess_vmv)
# Sorts eigenvalues in ascending order.
        sorted_index = trial_lambda.argsort()
trial_lambda = trial_lambda[sorted_index]
trial_transformation = trial_transformation[:, sorted_index]
if len(trial_lambda) > n_lowest:
trial_lambda = trial_lambda[:n_lowest]
trial_transformation = trial_transformation[:, :n_lowest]
# Estimates errors based on diagonalization in the smaller space.
trial_v = numpy.dot(guess_v, trial_transformation)
trial_mv = numpy.dot(guess_mv, trial_transformation)
trial_error = trial_mv - trial_v * trial_lambda
new_directions, max_trial_error = self._get_new_directions(
trial_error, trial_lambda, trial_v)
if new_directions:
guess_v = numpy.hstack([guess_v, numpy.stack(new_directions).T])
return (trial_lambda, trial_v, trial_mv, max_trial_error, guess_v,
guess_mv)
def _get_new_directions(self, error_v, trial_lambda, trial_v):
"""Gets new directions from error vectors.
Args:
error_v(numpy.ndarray(complex)): Error vectors from the guess
eigenvalues and associated eigenvectors.
trial_lambda(numpy.ndarray(float)): The n_lowest minimal guess
eigenvalues.
trial_v(numpy.ndarray(complex)): Guess eigenvectors associated with
trial_lambda.
Returns:
new_directions(numpy.ndarray(complex)): New directions for searching
for real eigenvalues and eigenvectors.
max_trial_error(float): The max elementwise error for all guess
vectors.
"""
n_lowest = error_v.shape[1]
max_trial_error = 0
# Adds new guess vectors for the next iteration for the first n_lowest
# directions.
        original_dimension = error_v.shape[0]
new_directions = []
for i in range(n_lowest):
current_error_v = error_v[:, i]
if numpy.max(numpy.abs(current_error_v)) < self.options.eps:
# Already converged for this eigenvector, no contribution to
# search for new directions.
continue
max_trial_error = max(max_trial_error,
numpy.linalg.norm(current_error_v))
            diagonal_inverse = numpy.ones(original_dimension)
            for j in range(original_dimension):
# Makes sure error vectors are bounded.
diff_lambda = self.linear_operator_diagonal[j] - trial_lambda[i]
if numpy.abs(diff_lambda) > self.options.eps:
diagonal_inverse[j] /= diff_lambda
else:
diagonal_inverse[j] /= self.options.eps
diagonal_inverse_error = diagonal_inverse * current_error_v
diagonal_inverse_trial = diagonal_inverse * trial_v[:, i]
new_direction = -current_error_v + (trial_v[:, i] * numpy.dot(
trial_v[:, i].conj(), diagonal_inverse_error) / numpy.dot(
trial_v[:, i].conj(), diagonal_inverse_trial))
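            # In matrix terms, the new search direction computed above is
            #   delta_i = -r_i + v_i * (v_i^H D^-1 r_i) / (v_i^H D^-1 v_i),
            # where r_i is the residual for this eigenpair and D^-1 is the
            # clipped diagonal preconditioner 1 / (diag(A) - lambda_i) built above.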
new_directions.append(new_direction)
return new_directions, max_trial_error
class QubitDavidson(Davidson):
"""Davidson algorithm applied to a QubitOperator."""
def __init__(self, qubit_operator, n_qubits=None, options=None):
"""
Args:
qubit_operator(QubitOperator): A qubit operator which is a linear
operator as well.
n_qubits(int): Number of qubits.
options(DavidsonOptions): Iteration options.
"""
super(QubitDavidson, self).__init__(
generate_linear_qubit_operator(qubit_operator, n_qubits, options),
get_linear_qubit_operator_diagonal(qubit_operator, n_qubits),
options=options)
class SparseDavidson(Davidson):
"""Davidson algorithm for a sparse matrix."""
def __init__(self, sparse_matrix, options=None):
"""
Args:
sparse_matrix(scipy.sparse.spmatrix): A sparse matrix in scipy.
options(DavidsonOptions): Iteration options.
"""
super(SparseDavidson, self).__init__(sparse_matrix,
sparse_matrix.diagonal(),
options=options)
def generate_random_vectors(row, col, real_only=False):
"""Generates orthonormal random vectors with col columns.
Args:
row(int): Number of rows for the vectors.
col(int): Number of columns for the vectors.
real_only(bool): Real vectors or complex ones.
Returns:
random_vectors(numpy.ndarray(complex)): Orthonormal random vectors.
"""
random_vectors = numpy.random.rand(row, col)
if not real_only:
random_vectors = random_vectors + numpy.random.rand(row, col) * 1.0j
random_vectors = scipy.linalg.orth(random_vectors)
return random_vectors
def append_random_vectors(vectors, col, max_trial=3, real_only=False):
"""Appends exactly col orthonormal random vectors for vectors.
Assumes vectors is already orthonormal.
Args:
vectors(numpy.ndarray(complex)): Orthonormal original vectors to be
appended.
        col(int): Number of columns to be appended.
        max_trial(int): Maximum number of attempts before giving up when the
            newly generated vectors fail to add any new orthonormal direction.
        real_only(bool): Real vectors or complex ones.
    Returns:
        vectors(numpy.ndarray(complex)): Orthonormal vectors with the requested
            columns appended when possible.
"""
if col <= 0:
return vectors
vector_columns = vectors.shape[1]
total_columns = min(vector_columns + col, vectors.shape[0] + 1)
num_trial = 0
while vector_columns < total_columns:
num_trial += 1
vectors = numpy.hstack([
vectors,
generate_random_vectors(vectors.shape[0],
total_columns - vector_columns, real_only)
])
vectors = orthonormalize(vectors, vector_columns)
# Checks whether there are any new vectors added successfully.
if vectors.shape[1] == vector_columns:
if num_trial > max_trial:
warnings.warn(
'Unable to generate specified number of random '
'vectors {}: returning {} in total.'.format(
col, vector_columns), RuntimeWarning)
break
else:
num_trial = 1
vector_columns = vectors.shape[1]
return vectors
def orthonormalize(vectors, num_orthonormals=1, eps=1e-6):
"""Orthonormalize vectors, so that they're all normalized and orthogoal.
The first vector is the same to that of vectors, while vector_i is
orthogonal to vector_j, where j < i.
Args:
vectors(numpy.ndarray(complex)): Input vectors to be
orthonormalized.
num_orthonormals(int): First `num_orthonormals` columns are already
orthonormal, so that one doesn't need to make any changes.
        eps(float): Threshold on the max absolute value of a vector's elements;
            vectors below it are treated as zero and skipped.
Returns:
ortho_normals(numpy.ndarray(complex)): Output orthonormal vectors.
"""
ortho_normals = vectors
count_orthonormals = num_orthonormals
# Skip unchanged ones.
for i in range(num_orthonormals, vectors.shape[1]):
vector_i = vectors[:, i]
# Makes sure vector_i is orthogonal to all processed vectors.
for j in range(i):
vector_i -= ortho_normals[:, j] * numpy.dot(
ortho_normals[:, j].conj(), vector_i)
# Makes sure vector_i is normalized.
if numpy.max(numpy.abs(vector_i)) < eps:
continue
ortho_normals[:, count_orthonormals] = (vector_i /
numpy.linalg.norm(vector_i))
count_orthonormals += 1
return ortho_normals[:, :count_orthonormals]
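# Illustrative usage sketch of how the pieces above fit together, for a simple
# diagonal test matrix. The matrix, tolerance and n_lowest are arbitrary example
# values, not a prescribed workflow; only the public API defined in this module
# (DavidsonOptions, SparseDavidson, Davidson.get_lowest_n) is used.
def _example_lowest_eigenvalues():
    """Returns the two lowest eigenpairs of a 100 x 100 diagonal matrix."""
    diagonal = numpy.arange(1, 101, dtype=float)
    matrix = scipy.sparse.diags([diagonal], [0], format='csc')
    davidson = SparseDavidson(matrix, DavidsonOptions(eps=1e-8))
    # get_lowest_n returns (converged, eigenvalues, eigenvectors); for this
    # matrix the two lowest eigenvalues should approach 1.0 and 2.0.
    return davidson.get_lowest_n(n_lowest=2)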
|
|
# MAKE NUMPY RUNNER make x 3D
def tfg_days_3d( x, err='off' ):
    ''' calculate DOF/DOT/LOGS for a 3-D array of 12 chronological monthly layers (time, rows, cols) '''
import itertools
import numpy as np
# filter the div by zero and comparison with np.nan warnings from numpy
if err == 'off':
np.warnings.filterwarnings( "ignore", category=RuntimeWarning )
# need to treat zero as freezing (working with signs)
    if np.any( x == 0 ):
x[ x == 0 ] = -0.0001
nlayers, rows, cols = x.shape
# make some 2-D dof / dot arrays
dof = np.copy( x[0,...] )
dot = np.copy( x[0,...] )
# positive or negative monthly temps
s1 = np.sign( x )
# products of consecutive months' signs: positive indicates no change; negative indicates a potential freeze or thaw transition
s = s1[:11, ...] * s1[1:, ...]
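    # worked example of the sign products (this is the first test vector used in
    # the __main__ block below): monthly values
    #   [-16, -5, -1, 3, 5, 10, 12, 16, 11, -3, -15, -16]
    # have signs [-1,-1,-1, 1, 1, 1, 1, 1, 1,-1,-1,-1], so the 11 consecutive
    # products are [1, 1,-1, 1, 1, 1, 1, 1,-1, 1, 1]; the -1 at index 2 flags the
    # thaw between months 3 and 4, the -1 at index 8 flags the freeze between
    # months 9 and 10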
# [ HARDWIRED ] pull out a mask using the known SNAP oob value
oob_value = -3.39999995e+38
oob_mask = (x[0,...] != oob_value).astype( int ) # pull out a mask for later
# set anything that we dont want to look at to np.nan
time_ind, lat_ind, lon_ind = np.where( x == oob_value )
x[ ..., lat_ind, lon_ind ] = np.nan
# set out-of-bounds values -- grabbing any other lurking ones
time_ind, lat_ind, lon_ind = np.where( np.isnan( x ) )
dof[ (lat_ind, lon_ind) ] = np.nan
dot[ (lat_ind, lon_ind) ] = np.nan
# FIND THE NEGATIVE VALUES IN THE FULL NDARRAY
def where_less_zero( arr ):
''' where in 1d slices of 3d array are less than zero '''
new_arr = np.copy( arr )
new_arr[:] = np.nan
ind, = np.where( arr < 0 )
new_arr[ range( len(ind) ) ] = ind
return new_arr
new_ind = np.apply_along_axis( where_less_zero, axis=0, arr=s )
new_ind_plus1 = new_ind + 1
# get index lengths of values along axis=0
def count_ind( arr ):
''' number of ~np.nan values '''
return len( arr[ ~np.isnan( arr ) ] )
# get the counts of values along the time dimension
new_ind_counts = np.apply_along_axis( count_ind, axis=0, arr=new_ind )
new_ind_plus1_counts = np.apply_along_axis( count_ind, axis=0, arr=new_ind_plus1 )
ind_counts = new_ind_counts + new_ind_plus1_counts
    # above takes about 1.5 mins
# # not sure if these are useful
# s1_above = np.sum( s1a, axis=0 ) > 0
# s1_below = np.sum( s1a, axis=0 ) < 0
# # # #
# # IS THE FIRST ELEM OF THE NEW ARRAY > 0
def s1_n0_greater0( arr ):
return arr[ 0 ] > 0
s1_n0_greater = np.apply_along_axis( s1_n0_greater0, axis=0, arr=s1 )
# no transitions: all positive temps means no freeze day
cur_ind = np.where( (ind_counts == 0) & (s1_n0_greater == True) )
dot[ cur_ind ] = 0 # set dot to zero
dof[ cur_ind ] = 365 # set dof to 365
# case = 2
# no transitions: all negative temps means no thaw day
cur_ind = np.where( (ind_counts == 0) & (s1_n0_greater == False) )
dot[ cur_ind ] = 365 # set dot to 365
dof[ cur_ind ] = 0 # set dof to zero
# case = 3
# # # #END CURRENT
# [ML FIXED]
# only one transition during the year, thawing or freezing
cur_ind = np.where( (ind_counts == 2) & (x[0,...] < 0) )
# # # # # # # # # # #
# ONCE WE FIGURE OUT THIS PART THE REST IS REALLY THE SAME THING WITH SLIGHT MODS.
# THIS PART IS CONFUSING ME in ROUND 3 here...
dot = 15 + 30 * ((ind[0]+1)-1) - np.round( x[ ind[0] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dot = 15 + 30 * ((ind[0, cur_ind[0], cur_ind[1]]+1)-1) - np.round( x[ ind[0] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
    dof[ cur_ind ] = 350 # 350: we know the ground freezes so we use 350 rather than the special 365
# case = 4
# # # # # # # # # # #
# places where we know the ground freezes and thaws,
# but during a specific 12 months we just don't happen to witness both
# only thaw occurs
# if x[ ind[0] ] < 0:
# [ml] note:((ind[0]+1)-1) is ind[0]+1 is the month number and minus 1 is to get to previous month
# we could make that a call to a months array -- months = range(1, 12+1)
# [ round3 ] THIS GUY IS THE SAME AS THE ONE WE NEED TO FIGURE OUT ABOVE JUST FLIPPED!
# only freeze occurs
if x[ ind[0] ] > 0:
dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dot = np.array([15]) # 15: we know the ground thaws so we use 15 rather than the special 0
grow = dof - dot
case = 5
# [ ! ON THIS HERE ! ]
# two transitions occur: thaw, then freeze (this is the ideal case; everything else is an idiosyncratic edge case)
cur_lat, cur_lon = np.where( (ind_counts == 4) & (x[0,...] < 0) )
# THIS IS NON-WORKING!
# [ml] note:((ind[0]+1)-1) is ind[0]+1 is the month number and minus 1 is to get to previous month
# we could make that a call to a months array -- months = range(1, 12+1)
new_ind_cur = new_ind[ ..., cur_lat, cur_lon ]
dot[ cur_ind ] = 15 + 30 * ((new_ind[0, cur_ind[0], cur_ind[1]]+1)-1) - np.round( x[ new_ind[0]:, cur_ind[0], cur_ind[1] ] / (np.diff( x[ ind[:2, cur_ind[0], cur_ind[1]] ] ) / 30.0), decimals=0 )
dof[ cur_ind ] = 350 - 30 * (12-ind[3]-1) - np.round( x[ ind[3] ] / (np.diff( x[ ind[2:4] ] ) / 30.0), decimals=0 )
grow = dof - dot
case = 0
# [ML FIXED]
cur_ind = np.where( (ind_counts == 4) & (x[0,...] > 0) )
elif (len(ind) == 4) & (s1[0] > 0): # two transitions occur but backward to what is expected; freeze, then thaw
if( ind[0] >= 7 ): # freeze occurs in second half of year as expected; late thaw is spurious
# dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dot = np.array([15]) # ignore spurious post-freeze thaw; treat as early, unobserved thaw
grow = dof - dot
case = 6
if ind[0] <= 6: # spurious freeze occurs in first half of year; thaw probably fine
dot = 15 + 30 * ((ind[2]+1)-1) - np.round( x[ ind[2] ] / (np.diff( x[ ind[2:4] ]) / 30.0), decimals=0 )
dof = np.array([350]) # ignore spurious early freeze; treat as late, unobserved freeze
grow = dof - dot
case = 7
# [ML FIXED]
elif len(ind) > 4: # more than two transitions; at least one definitely spurious
# [MATT Q]:
# what is the prepending 0 below? and what is its intention?
# what do u do if there is a use-case where idx-0 is already chosen? Py is ZERO-anchored...
ind2, = np.where( s < 0 )
ind2 = ind2 + 1
ind2 = np.sort( np.concatenate( (ind2, np.array([0])) ) )
diffed = np.diff( ind2 )
# [ml] m1, m2 are month indexes
m1, = np.where( diffed == np.max( diffed ) )#[0][-1]
m1 = m1 + 1
m2 = np.where( np.delete(np.diff( ind2 ), (m1-1)-1) == max( np.delete(np.diff( ind2 ), (m1-1)-1)) )[-1] + 1
if m1 == m2:
m2 = m2 - 1
ind2 = ind2[ np.sort( np.append( m1, m2 ) ) ]
ind = np.sort( np.append(ind2, ind2+1) ) - 1
dot = 15 + 30 * (ind[1]-1) - np.round( x[ind[1]-1] / (np.diff( x[ ind[:2] ] ) / 30.0), 0) # [ml] SOME WEIRD -1's here...
dof = 350 - 30 * (12-ind[3]-1) - np.round( x[ind[3]] / (np.diff( x[ ind[2:4] ] ) / 30.0), 0)
grow = dof - dot
case = 8
else:
dot, dof, grow = itertools.repeat( np.array([np.nan]), 3 )
# print( "Condition unconsidered: {}".format( x.strip() ) )
return 'dof:{} dot:{} logs:{}'.format( dof,dot,grow )
if __name__ == '__main__':
# get the GD filenames here
# filenames = []
# set up the array up here...
arr = np.array([ read_arr( fn )[0] for fn in filenames ])
x = arr.copy()
# INSERT THIS TEST DATA TO PLACES IN THE ARRAY
test_vals = [np.array([-16, -5, -1, 3, 5, 10, 12, 16, 11, -3, -15, -16]),
np.array([-16, -5, -1, 3, 5, 10, 12, 16, 11, np.nan, -15, -16]),
np.array([1, 3, 4, 6, 7, 12, 15, 12, 8, 9, 4, 2]),
np.array([-16, -15, -13, -11, -10, -5, 0, -2, -4, -12, -13, -16]),
np.array([-16, -13, -8, -6, 1, 4, 7, 11, 8, 4, 2, 1]),
np.array([1, 3, 1, 5, 8, 10, 14, 11, 7, -2, -5, -2]),
np.array([1, 3, 1, 5, 8, 10, 14, 10, 4, -1, -4, 1]),
np.array([1, -5, -4, -2, 3, 5, 10, 8, 6, 4, 4, 1]),
np.array([-11, 1, -7, -3, 2, 6, 11, 10, 8, -1, -5, -10]) ]
ind = [ (800, 1000+idx) for idx in range( len( test_vals ) ) ]
for idx, a in zip( ind, test_vals ):
x[ ..., idx[0], idx[1] ] = a
# how to get to the data we added for testing later on
x[ ..., 800, 1000:np.array(ind)[:,1].max() ]
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This provides a sphinx extension able to render the
source/general_feature_support_matrix.ini
file into the developer documentation.
It is used via a single directive in the .rst file
.. support_matrix::
"""
import re
from docutils import nodes
from docutils.parsers import rst
from six.moves import configparser
RE_PATTERN = re.compile("[^a-zA-Z0-9_]")
class SupportMatrix(object):
"""Represents the entire support matrix for Neutron drivers"""
def __init__(self):
self.features = []
self.targets = {}
class SupportMatrixFeature(object):
STATUS_IMMATURE = "immature"
STATUS_MATURE = "mature"
STATUS_REQUIRED = "required"
STATUS_DEPRECATED = "deprecated"
STATUS_ALL = [STATUS_IMMATURE, STATUS_MATURE,
STATUS_REQUIRED, STATUS_DEPRECATED]
def __init__(self, key, title, status=STATUS_IMMATURE,
group=None, notes=None, cli=(), api=None):
self.key = key
self.title = title
self.status = status
self.group = group
self.notes = notes
self.cli = cli
self.api = api
self.implementations = {}
class SupportMatrixImplementation(object):
STATUS_COMPLETE = "complete"
STATUS_PARTIAL = "partial"
STATUS_INCOMPLETE = "incomplete"
STATUS_UNKNOWN = "unknown"
STATUS_ALL = [STATUS_COMPLETE, STATUS_INCOMPLETE,
STATUS_PARTIAL, STATUS_UNKNOWN]
def __init__(self, status=STATUS_INCOMPLETE, notes=None):
self.status = status
self.notes = notes
STATUS_DICT = {
SupportMatrixImplementation.STATUS_COMPLETE: u"\u2714",
SupportMatrixImplementation.STATUS_INCOMPLETE: u"\u2716",
SupportMatrixImplementation.STATUS_PARTIAL: u"\u2714",
SupportMatrixImplementation.STATUS_UNKNOWN: u"?"
}
class SupportMatrixTarget(object):
def __init__(self, key, title, driver, plugin=None,
architecture=None, api=None, link=None):
""":param key: Unique identifier for plugin
:param title: Human readable name for plugin
:param driver: name of the driver
:param plugin: optional name of plugin
:param architecture: optional name of architecture
"""
self.api = api
self.key = key
self.title = title
self.driver = driver
self.plugin = plugin
self.architecture = architecture
self.link = link
class SupportMatrixDirective(rst.Directive):
# general_feature_support_matrix.ini is the arg
required_arguments = 1
def run(self):
matrix = self._load_support_matrix()
return self._build_markup(matrix)
def _load_support_matrix(self):
"""Reads the support-matrix.ini file and populates an instance
of the SupportMatrix class with all the data.
:returns: SupportMatrix instance
"""
cfg = configparser.SafeConfigParser()
env = self.state.document.settings.env
fname = self.arguments[0]
rel_fpath, fpath = env.relfn2path(fname)
with open(fpath) as fp:
cfg.readfp(fp)
# This ensures that the docs are rebuilt whenever the
# .ini file changes
env.note_dependency(rel_fpath)
matrix = SupportMatrix()
matrix.targets = self._get_targets(cfg)
matrix.features = self._get_features(cfg, matrix.targets)
return matrix
def _get_targets(self, cfg):
# The 'target.<foo>' sections are special - they list all the
# backend drivers that this file records data for
targets = {}
for section in cfg.sections():
if not section.startswith("target."):
continue
key = cfg.get(section, "label")
name = key.split("-")
title = cfg.get(section, "title")
link = cfg.get(section, "link")
target = SupportMatrixTarget(key, title, *name, link=link)
targets[key] = target
return targets
def _get_features(self, cfg, targets):
        # All sections except the 'target.' ones describe some feature of
        # the Neutron backend driver.
features = []
for section in cfg.sections():
if section.startswith("target."):
continue
if not cfg.has_option(section, "title"):
raise Exception(
"'title' field missing in '[%s]' section" % section)
title = cfg.get(section, "title")
status = SupportMatrixFeature.STATUS_IMMATURE
if cfg.has_option(section, "status"):
# The value is a string "status(group)" where
# the 'group' part is optional
status = cfg.get(section, "status")
offset = status.find("(")
group = None
if offset != -1:
group = status[offset + 1:-1]
status = status[0:offset]
if status not in SupportMatrixFeature.STATUS_ALL:
raise Exception(
"'status' field value '%s' in ['%s']"
"section must be %s" %
(status, section,
",".join(SupportMatrixFeature.STATUS_ALL)))
cli = []
if cfg.has_option(section, "cli"):
cli = cfg.get(section, "cli")
api = None
if cfg.has_option(section, "api"):
api = cfg.get(section, "api")
notes = None
if cfg.has_option(section, "notes"):
notes = cfg.get(section, "notes")
feature = SupportMatrixFeature(section, title, status, group,
notes, cli, api)
# Now we've got the basic feature details, we must process
# the backend driver implementation for each feature
for item in cfg.options(section):
network_notes = "networking-notes-"
if not item.startswith("networking-"):
                    continue
                # Per-target notes keys ("networking-notes-<target>") are read
                # below via notes_key; skip them here so they are not mistaken
                # for backend target entries.
                if item.startswith(network_notes):
                    continue
if item not in targets:
raise Exception(
"networking-'%s' in '[%s]' not declared" %
(item, section))
status = cfg.get(section, item)
if status not in SupportMatrixImplementation.STATUS_ALL:
raise Exception(
"'%s' value '%s' in '[%s]' section must be %s" %
(item, status, section,
",".join(SupportMatrixImplementation.STATUS_ALL)))
                # e.g. 'networking-foo' -> 'networking-notes-foo'
                notes_key = network_notes + item[len("networking-"):]
notes = None
if cfg.has_option(section, notes_key):
notes = cfg.get(section, notes_key)
target = targets[item]
impl = SupportMatrixImplementation(status, notes)
feature.implementations[target.key] = impl
for key in targets:
if key not in feature.implementations:
                    raise Exception("'%s' missing in '[%s]' section" %
                                    (key, section))
features.append(feature)
return features
def _build_markup(self, matrix):
"""Constructs the docutils content for the support matrix
"""
content = []
self._build_summary(matrix, content)
self._build_details(matrix, content)
self._build_notes(content)
return content
def _build_summary(self, matrix, content):
"""Constructs the docutils content for the summary of
the support matrix.
The summary consists of a giant table, with one row
for each feature, and a column for each backend
driver. It provides an 'at a glance' summary of the
status of each driver
"""
summary_title = nodes.subtitle(text="Summary")
summary = nodes.table()
cols = len(matrix.targets.keys())
cols += 2
summary_group = nodes.tgroup(cols=cols)
summary_body = nodes.tbody()
summary_head = nodes.thead()
for i in range(cols):
summary_group.append(nodes.colspec(colwidth=1))
summary_group.append(summary_head)
summary_group.append(summary_body)
summary.append(summary_group)
content.append(summary_title)
content.append(summary)
# This sets up all the column headers - two fixed
# columns for feature name & status
header = nodes.row()
blank = nodes.entry()
blank.append(nodes.emphasis(text="Feature"))
header.append(blank)
blank = nodes.entry()
blank.append(nodes.emphasis(text="Status"))
header.append(blank)
summary_head.append(header)
# then one column for each backend driver
impls = matrix.targets.keys()
impls = sorted(impls)
for key in impls:
target = matrix.targets[key]
implcol = nodes.entry()
header.append(implcol)
if target.link:
uri = target.link
target_ref = nodes.reference("", refuri=uri)
target_txt = nodes.inline()
implcol.append(target_txt)
target_txt.append(target_ref)
target_ref.append(nodes.strong(text=target.title))
else:
implcol.append(nodes.strong(text=target.title))
# We now produce the body of the table, one row for
# each feature to report on
for feature in matrix.features:
item = nodes.row()
# the hyperlink target name linking to details
feature_id = re.sub(RE_PATTERN, "_", feature.key)
# first the fixed columns for title/status
key_col = nodes.entry()
item.append(key_col)
key_ref = nodes.reference(refid=feature_id)
key_txt = nodes.inline()
key_col.append(key_txt)
key_txt.append(key_ref)
key_ref.append(nodes.strong(text=feature.title))
status_col = nodes.entry()
item.append(status_col)
status_col.append(nodes.inline(
text=feature.status,
classes=["sp_feature_" + feature.status]))
# and then one column for each backend driver
impls = matrix.targets.keys()
impls = sorted(impls)
for key in impls:
target = matrix.targets[key]
impl = feature.implementations[key]
impl_col = nodes.entry()
item.append(impl_col)
key_id = re.sub(RE_PATTERN, "_",
"{}_{}".format(feature.key, key))
impl_ref = nodes.reference(refid=key_id)
impl_txt = nodes.inline()
impl_col.append(impl_txt)
impl_txt.append(impl_ref)
status = STATUS_DICT.get(impl.status, "")
impl_ref.append(nodes.literal(
text=status,
classes=["sp_impl_summary", "sp_impl_" + impl.status]))
summary_body.append(item)
def _build_details(self, matrix, content):
"""Constructs the docutils content for the details of
the support matrix.
"""
details_title = nodes.subtitle(text="Details")
details = nodes.bullet_list()
content.append(details_title)
content.append(details)
# One list entry for each feature we're reporting on
for feature in matrix.features:
item = nodes.list_item()
status = feature.status
if feature.group is not None:
status += "({})".format(feature.group)
feature_id = re.sub(RE_PATTERN, "_", feature.key)
# Highlight the feature title name
item.append(nodes.strong(text=feature.title, ids=[feature_id]))
# Add maturity status
para = nodes.paragraph()
para.append(nodes.strong(text="Status: {} ".format(status)))
item.append(para)
# If API Alias exists add it
if feature.api is not None:
para = nodes.paragraph()
para.append(
nodes.strong(text="API Alias: {} ".format(feature.api)))
item.append(para)
if feature.cli:
item.append(self._create_cli_paragraph(feature))
if feature.notes is not None:
item.append(self._create_notes_paragraph(feature.notes))
para_divers = nodes.paragraph()
para_divers.append(nodes.strong(text="Driver Support:"))
# A sub-list giving details of each backend driver target
impls = nodes.bullet_list()
for key in feature.implementations:
target = matrix.targets[key]
impl = feature.implementations[key]
subitem = nodes.list_item()
key_id = re.sub(RE_PATTERN, "_",
"{}_{}".format(feature.key, key))
subitem += [
nodes.strong(text="{}: ".format(target.title)),
nodes.literal(text=impl.status,
classes=["sp_impl_{}".format(impl.status)],
ids=[key_id]),
]
if impl.notes is not None:
subitem.append(self._create_notes_paragraph(impl.notes))
impls.append(subitem)
para_divers.append(impls)
item.append(para_divers)
details.append(item)
def _build_notes(self, content):
"""Constructs a list of notes content for the support matrix.
This is generated as a bullet list.
"""
notes_title = nodes.subtitle(text="Notes:")
notes = nodes.bullet_list()
content.append(notes_title)
content.append(notes)
for note in ["This document is a continuous work in progress"]:
item = nodes.list_item()
item.append(nodes.strong(text=note))
notes.append(item)
def _create_cli_paragraph(self, feature):
"""Create a paragraph which represents the CLI commands of the feature
The paragraph will have a bullet list of CLI commands.
"""
para = nodes.paragraph()
para.append(nodes.strong(text="CLI commands:"))
commands = nodes.bullet_list()
for c in feature.cli.split(";"):
cli_command = nodes.list_item()
cli_command += nodes.literal(text=c, classes=["sp_cli"])
commands.append(cli_command)
para.append(commands)
return para
def _create_notes_paragraph(self, notes):
"""Constructs a paragraph which represents the implementation notes
The paragraph consists of text and clickable URL nodes if links were
given in the notes.
"""
para = nodes.paragraph()
para.append(nodes.strong(text="Notes: "))
# links could start with http:// or https://
link_idxs = [m.start() for m in re.finditer('https?://', notes)]
start_idx = 0
for link_idx in link_idxs:
# assume the notes start with text (could be empty)
para.append(nodes.inline(text=notes[start_idx:link_idx]))
# create a URL node until the next text or the end of the notes
link_end_idx = notes.find(" ", link_idx)
if link_end_idx == -1:
# In case the notes end with a link without a blank
link_end_idx = len(notes)
uri = notes[link_idx:link_end_idx + 1]
para.append(nodes.reference("", uri, refuri=uri))
start_idx = link_end_idx + 1
# get all text after the last link (could be empty) or all of the
# text if no link was given
para.append(nodes.inline(text=notes[start_idx:]))
return para
def setup(app):
app.add_directive('support_matrix', SupportMatrixDirective)
app.add_stylesheet('support_matrix.css')
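# Illustrative usage notes. The extension is enabled by listing this module in
# the ``extensions`` list of the documentation's conf.py; an .rst page then
# renders the matrix with::
#
#     .. support_matrix:: general_feature_support_matrix.ini
#
# A sketch of the .ini layout the parser above expects (the target and feature
# names here are hypothetical; the real entries live in the shipped .ini file):
#
#     [target.networking-example]
#     label=networking-example
#     title=Example backend driver
#     link=https://example.org/driver-docs
#
#     [operation.router]
#     title=Router support
#     status=mature
#     notes=Free-form notes, optionally containing http(s) links.
#     networking-example=complete
#
# Optional per-target notes may be supplied in the same feature section with a
# 'networking-notes-<target suffix>' key.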
|
|
import numpy as np
import matplotlib
import os
import pandas as pd
from astropy.io import fits
import subprocess
import cosmics
import shlex
import sys
#from pisco_lib import *
# edited 5/9/17
"""
pisco_combine: run pisco pipeline to reduce the raw data to clean data with correct WCS
The pipeline is a combination of LA Cosmics, Astrometry, Sextractor, SCAMP and SWARP.
ARGUMENTS:
1. raw directory (e.g., 'ut170103/')
2. fieldname for object (e.g., 'Field027')
Examples: python pisco_pipeline/pisco_combine.py data/ Field026
"""
def filter_name(index):
"""
filter_name: turn index [1,8] into letter band (g,r,i,z) for PISCO quadrant data
INPUT:
- index: number
OUTPUT:
    - a pair of letters for the corresponding band and dome band
"""
if index == 1 or index == 2:
filter_name = 'g'
dome_name = 'g'
elif index == 3 or index == 4:
filter_name = 'r'
dome_name = 'r'
else:
dome_name = 'iz'
if index == 5 or index == 6:
filter_name = 'i'
elif index == 7 or index == 8:
filter_name = 'z'
return [filter_name, dome_name]
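# for reference, the quadrant-to-band mapping implemented by filter_name above:
#   filter_name(1) or filter_name(2) -> ['g', 'g']
#   filter_name(3) or filter_name(4) -> ['r', 'r']
#   filter_name(5) or filter_name(6) -> ['i', 'iz']
#   filter_name(7) or filter_name(8) -> ['z', 'iz']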
def list_file_name(dir, name, end=0):
"""
    list_file_name: list all filenames which start with 'name' and end with
    'end' in the 'dir' directory
INPUT:
- dir: directory to search in
    - name: beginning of the file name
- end: ending of the file name
OUTPUT:
    - list of all filenames in that directory
"""
names = []
for file in os.listdir(dir):
if file.startswith(name):
if end == 0:
names.append(os.path.join(dir, file))
else:
if file.endswith(end):
names.append(os.path.join(dir, file))
if len(names) == 0:
print 'Cannot find the files'
return names
def open_files(names, index, bias=np.array([]), twilight=False):
"""
    open_files: open multiple bias or domeflat files at once and take the mean
    to get the average bias/domeflat file for image reduction
    bias: take the mean of all bias files
    domeflat: subtract the average 'bias' (also calculated from open_files) before taking the mean
INPUT:
    - names: list of bias/domeflat file names (output from 'list_file_name')
- index: extension of the fits file to read in (8 extension of PISCO - two each for different bands)
- (optional) bias: average 2D bias image (required to calculate domeflat correctly)
OUTPUT:
- 2D array of average bias/domeflat images
"""
ch_bs = []
for name in names:
hdulist = fits.open(name)
ch_b = hdulist[index].data
if len(bias) == 0:
ch_bs.append(ch_b) # for bias to combine as a mean
else:
# for domeflat-bias before combine into a mean
ch_bs.append(ch_b - bias)
if twilight == True:
        print 'working on twilight flat'
return np.median(np.array(ch_bs), axis=0)
else:
return np.mean(np.array(ch_bs), axis=0)
def plot_one_chip(ax, data, vmin, vmax):
"""
plot_one_chip: plot 2D array 'data' on 'ax' with Normalize scale 'vmin' and 'vmax'
INPUT:
- ax: ax from fig,ax=plt.subplots(...)
- data: 2D array data to be plot
- vmin: normalize scale for the minimum
- vmax: normalize scale for the maximum
OUTPUT:
- plot of the data at a specified normalize scale
"""
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
c_m = matplotlib.cm.Greys
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
ax.imshow(data, cmap=c_m, norm=norm)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
def save_fits(index, dir, outdir, fieldname, final_image, name):
"""
    save_fits: save a fits file of the 2D array 'final_image' with a known header from the raw PISCO data
    'fieldname' (with the size changed to accommodate the two attached amplifiers) under the output 'name'
    in the 'reduced/' directory
INPUT:
- index: specific the band (g, r, i, z) that we want to save on.
- dir: input directory for raw PISCO data
    - fieldname: beginning of the name for the raw PISCO data (e.g., 'Field027_B_73')
- final_image: 2D array of image that we want to save to the fits file
- name: output name of the fits file in 'reduced/' directory
OUTPUT:
- fits file in 'reduced/' directory
"""
ch1_name = list_file_name(dir, fieldname)
hdulist = fits.open(ch1_name[0])
hdu0 = hdulist[0]
hdu0.header['NAXIS'] = 2
hdulist[index].header['NAXIS1'] = '1546'
hdulist[index].header['DATASEC'] = '[1:1546,1:3092]'
hdulist[index].header['TRIMSEC'] = '[1:1546,1:3092]'
hdulist[index].header['ORIGSEC'] = '[1:1546,1:3092]'
hdulist[index].header['CCDSEC'] = '[1:1546,3093:6184]'
hdulist[index].header['DETSEC'] = '[1:1546,3093:6184]'
hdu1 = fits.ImageHDU(final_image * 1000, name='filter ' +
filter_name(index)[0], header=hdulist[index].header)
hdu_l = fits.HDUList(hdus=[hdu0, hdu1])
# if not os.path.exists(outdir):
# os.makedirs(outdir)
outname = os.path.join(outdir, name)
print 'saving the fits file ' + outname
hdu_l.writeto(outname, overwrite=True)
data, header = fits.getdata(outname, header=True)
fits.writeto(outname, data, header, overwrite=True)
def reduce_data(dir, index, fieldname, flat='domeflat'):
"""
    reduce_data: combine raw PISCO data with bias and domeflat to create a 2D array of the output image
    using the functions list_file_name and open_files
INPUT:
- dir: directory for the raw PISCO data
- index: index for the band of the image that we want to reduce
    - fieldname: the beginning of the file name (e.g., 'Field027_B_73')
    - (extra) cut: -27 is the number of pixels that need to be cut out for the gap in the image
OUTPUT:
- ch1: 2D array of raw input image
- bias: 2D array for the bias image
- domeflat: 2D array for the domeflat image
- img: 2D array of the output image after subtraction of bias and normalization with domeflat
"""
cut = -27
ch1_name = list_file_name(dir, fieldname)
print 'working on %s with the index=%i' % (ch1_name[0], index)
hdulist = fits.open(ch1_name[0])
ch1 = hdulist[index].data
bias_names = list_file_name(dir, 'Bias_')
if flat == 'domeflat':
domeflat_names = list_file_name(dir, "domeflat" + filter_name(index)[1])
if flat == 'twilight':
domeflat_names = list_file_name(dir, "twiflat_")
bias = open_files(bias_names, index)
if flat == 'domeflat':
domeflat = open_files(domeflat_names, index, bias=bias, twilight=False)
if flat == 'twilight':
domeflat = open_files(domeflat_names, index, bias=bias, twilight=True)
domeflat[domeflat == 0] = 1e-4
# if index in [1,2,3,4]:
# mean=np.median(domeflat[350:2550, 10:-10])
# elif index in [5,6,7,8]:
# mean=np.median(domeflat[650:2800, 10:-10])
# domeflat=domeflat/mean
img = (ch1 - bias) / domeflat
ch1, bias, domeflat, img = ch1[:, :cut], bias[:,
:cut], domeflat[:, :cut], img[:, :cut]
if index % 2 == 0:
return np.fliplr(ch1), np.fliplr(bias), np.fliplr(domeflat), np.fliplr(img)
else:
return ch1, bias, domeflat, img
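# usage sketch for reduce_data (kept as a comment; the directory and field names
# are the example values from the docstrings above and may not exist on disk):
#   ch1, bias, domeflat, img = reduce_data('ut170103/', 1, 'Field027_B_73', flat='domeflat')
#   'img' is the bias-subtracted, flat-fielded quadrant that save_fits expects.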
def cosmic_reduce(dir, field, band):
"""
cosmic_reduce: read the FITS file and use L.A. Cosmic (http://www.astro.yale.edu/dokkum/lacosmic/)
to remove cosmic rays in the images
INPUT:
- dir: directory input of the combine images ('reduced/')
- field: beginning of the file name (e.g., 'Field027_A_72')
- band: {'g','r','i','z'} band
PARAMETERS for LA Cosmic:
- gain and readnoise are the property from the telescope (PISCO: gain 4 ADU/e, readnoise 3 e -Brian[3/27/17])
- satlevel: identify saturated level for bright stars
- sigclip, sigfrac, objlim
OUTPUT:
- nField..._g.fits: not clean data (original with a mask cut)
- cField..._g.fits: clean version, removed cosmic ray
- mField..._g.fits: masked file to remove cosmic ray
"""
array, header = cosmics.fromfits(
os.path.join(dir, field + '_' + band + '.fits'))
    # cutting the circular aperture of the image out to only have good pixels
# in the center
if band == 'g':
array_c = array[20:-20, 350:2550]
elif band == 'r':
array_c = array[20:-20, 350:2550]
elif band == 'z':
array_c = array[20:-20, 650:2800]
elif band == 'i':
array_c = array[20:-40, 650:2800]
c = cosmics.cosmicsimage(array_c, gain=4.0, readnoise=3.0, sigclip=2.5, sigfrac=0.5,
objlim=5.0, satlevel=3000.0, verbose=False)
c.run(maxiter=5)
# cosmics.tofits(os.path.join(dir, 'cosmics', 'n' + field +
# '_' + band + '.fits'), array_c, header)
cosmics.tofits(os.path.join(dir, 'cosmics', 'c' + field +
'_' + band + '.fits'), c.cleanarray, header)
# cosmics.tofits(os.path.join(dir, 'cosmics', 'm' + field +
# '_' + band + '.fits'), c.mask, header)
def astrometry_solve(cosmicdir, field, outdir):
"""
    astrometry_solve: apply the astrometry algorithm to find celestial coordinates (WCS) for the image
REQUIRE:
    - appropriate index files in '/usr/local/astrometry/data/' so that Astrometry has enough patches of
    the sky to search for the position
INPUT:
- cosmicdir: input directory for cosmic-ray subtracted fits file ('reduced/cosmics/')
    - field: beginning of the file name after cosmic-ray subtraction for each band and each exposure
(e.g. 'cField027_B_73_z')
- outdir: output directory for these outputs ('wcs/')
OUTPUT:
- wcs/.wcs file: for the header with appropriate coordinate
- new_fits/..._new.fits: updated fits file with new wcs information in 'new_fits' directory
"""
# if not os.path.exists(outdir):
# os.makedirs(os.path.join(outdir))
if not os.path.isfile(os.path.join(outdir, field + '.wcs')):
cmd = 'solve-field %s --downsample 2 --overwrite --scale-unit arcsecperpix --scale-low 0.08 --scale-high 0.3 --dir %s' \
% (os.path.join(cosmicdir, field + '.fits'), outdir)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
if sub == 0:
print 'finish solve-field and updating fits headers'
else:
print 'solve-field does not work.'
orig = fits.open(os.path.join(cosmicdir, field + '.fits'))
wcs_file = fits.open(os.path.join(outdir, field + '.wcs'))
header = wcs_file[0].header
wcsaxes_index = np.where(np.array(header.keys()) == 'WCSAXES')[0][0]
for i in range(wcsaxes_index, len(header)):
orig[0].header[header.keys()[i]] = header.values()[i]
orig.writeto(os.path.join('new_fits', field + '_new.fits'), overwrite=True)
def sextracting(field, band):
"""
sextracting: run Sextractor to find all the point sources in .ldac.fits format (suitable for SCAMP input)
INPUT:
- config.sex: sextractor config file
    - field: beginning of the file name for each band and each exposure (e.g. 'cField027_B_73_z')
OUTPUT:
- new_fits/..._new.ldac.fits: source catalogs of all the point source from Sextractor
"""
cmd = 'sex %s -c pisco_pipeline/config-%s.sex -CATALOG_NAME %s' % \
(os.path.join('new_fits', field + '_new.fits'), band,
os.path.join('new_fits', field + '_new.ldac.fits'))
print cmd
sub = subprocess.check_call(shlex.split(cmd))
# cmd = 'sex %s -c pisco_pipeline/config-%s.sex -CATALOG_NAME %s -CATALOG_TYPE ASCII' % \
# (os.path.join('new_fits', field + '_new.fits'), band,
# os.path.join('new_fits', 'tmp.cat'))
# print cmd
# sub = subprocess.check_call(shlex.split(cmd))
#
# name=['NUMBER','EXT_NUMBER','XWIN_WORLD','YWIN_WORLD','MAG_AUTO','MAGERR_AUTO','MAG_APER','MAGERR_APER','XWIN_IMAGE',\
# 'YWIN_IMAGE','ERRAWIN_IMAGE','ERRBWIN_IMAGE','ERRTHETAWIN_IMAGE','FLUX_AUTO','FLUXERR_AUTO','FLAGS',\
# 'FLUX RADIUS','CLASS_STAR','ALPHA_J2000','DELTA_J2000']
# df0=pd.read_csv(os.path.join('new_fits', 'tmp.cat'),delim_whitespace=True,names=name)
# hdu=fits.open(os.path.join('new_fits', field + '_new.ldac.fits'))
# print 'number of total stars found', df0.shape
# print 'number of stars using in Sextractor', len(np.array(df0[df0['FLAGS']==0].index))
# hdu[2].data=hdu[2].data[np.array(df0[df0['FLAGS']==0].index)]
# hdu.writeto(os.path.join('new_fits', field + '_new.ldac.fits'), overwrite=True)
def scamp(fieldname):
"""
    scamp: run SCAMP to refine the coordinates after Astrometry, accounting for distortions.
(need to run all exposure and all filters at once to get the best performance)
INPUT:
- config.scamp: SCAMP config file
    - fieldname: beginning of the file name (e.g., 'cField027')
OUTPUT:
- new_fits/...ldac.head: SCAMP output which includes new celestial coordinates for fixing WCS
"""
cmd = 'scamp %s -c pisco_pipeline/config.scamp' % ' '.join(
list_file_name('new_fits', fieldname, end='_new.ldac.fits'))
print cmd
sub = subprocess.check_call(shlex.split(cmd))
def swarp(fieldname):
"""
    swarp: run SWARP to combine multiple exposures into a better image, using the SCAMP output to help correct the positions
INPUT:
- config.swarp: SWARP config file
    - fieldname: beginning of the file name (e.g., 'cField027')
OUTPUT:
- final/coadd_'fieldname'_'g'.fits: final image for each 'band' with corrected WCS
"""
bands = ['g', 'r', 'i', 'z']
print 'Swarping...'
for band in bands:
coadd_files = list_file_name(
'new_fits', fieldname, end=band + '_new.fits')
cmd = 'swarp %s -c pisco_pipeline/config.swarp -IMAGEOUT_NAME %s' %\
(' '.join(coadd_files), os.path.join(
'final', 'coadd_' + fieldname + '_' + band + '.fits'))
print cmd
sub = subprocess.check_call(shlex.split(cmd))
def save_rgb_image(field):
cmd = "ds9 -zscale -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom to fit -saveimage final/img%s.eps -exit" % \
(field, field, field, field)
print cmd
sub = subprocess.check_call(shlex.split(cmd))
print 'finished saving final/img%s.eps' % field
# --------
if __name__ == "__main__":
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
# Pipeline to run PISCO reduction data
dir = str(sys.argv[1])
fieldname = str(sys.argv[2])
outdir = 'wcs'
reducedir = 'reduced'
cosmicdir = os.path.join(reducedir, 'cosmics')
if len(sys.argv)>3:
flattype = str(sys.argv[3])
else:
flattype='domeflat'
if not os.path.exists(outdir):
os.makedirs(os.path.join(outdir))
if not os.path.exists(reducedir):
os.makedirs(reducedir)
if not os.path.exists(cosmicdir):
os.makedirs(cosmicdir)
if not os.path.exists('new_fits'):
os.makedirs(os.path.join('new_fits'))
if not os.path.exists('final'):
os.makedirs('final')
fields = [name.split('/')[-1].split('.')[0]
for name in list_file_name(dir, fieldname)]
for field in fields:
for index in [1, 3, 5, 7]:
ch1, bias1, domeflat1, img1 = reduce_data(dir, index, field, flat=flattype)
ch2, bias2, domeflat2, img2 = reduce_data(dir, index + 1, field, flat=flattype)
final_image = np.concatenate((img1, img2), axis=1)
save_fits(index, dir, reducedir, field, final_image,
"%s_%s.fits" % (field, filter_name(index)[0]))
# Cosmic ray reduction using L.A. Cosmic
bands = ['g', 'r', 'i', 'z']
for field in fields:
for band in bands:
if not os.path.isfile(os.path.join(reducedir, 'cosmics', 'c' + field + '_' + band + '.fits')):
print 'working on the cosmic ' + 'c' + field + '_' + band
cosmic_reduce(reducedir, field, band)
else:
print 'already did this band ' + band
# cosmic_reduce(reducedir,field,band)
cfieldname = 'c' + fieldname
print 'number of files in %s is %i' % (cosmicdir, len(list_file_name(cosmicdir, cfieldname)))
for field_long in list_file_name(cosmicdir, cfieldname):
field = field_long.split('/')[2].split('.')[0]
# Astrometry to get a rough estimate on the World Coordinate System
# (WCS) for each images
astrometry_solve(cosmicdir, field, outdir)
# Sextracting
band=field.split('_')[3]; print band
sextracting(field, band[0])
if fieldname == 'Field173':
cmd = 'rm new_fits/cField173_A_100_i_new.fits'
try:
print cmd
sub = subprocess.check_call(shlex.split(cmd))
except (ValueError, RuntimeError, TypeError, NameError):
pass
# SCAMP
scamp(cfieldname)
# SWARP
swarp(cfieldname)
# save eps file for RGB image
save_rgb_image(fieldname)
|
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake capability definitions and fake device classes with fake capabilities for unit testing."""
from gazoo_device import decorators
from gazoo_device.capabilities.interfaces import capability_base
from gazoo_device.tests.unit_tests.utils import fake_devices
from gazoo_device.utility import common_utils
VALID_CAPABILITY_NAME = "some_valid_capability"
OTHER_VALID_CAPABILITY_NAME = "some_other_valid_capability"
NONCONFORMING_CAPABILITY_NAME = "non_conforming_capability"
PARENT_CAPABILITY_NAME = "valid_parent_capability"
CHILD_CAPABILITY_NAME = "valid_child_capability"
def get_interface_name(interface):
return common_utils.generate_name(interface)
def get_flavor_name(flavor):
return common_utils.generate_name(flavor)
# Valid capability interface definitions
class ValidCapabilityBase(capability_base.CapabilityBase):
@classmethod
def get_capability_name(cls):
return VALID_CAPABILITY_NAME
class ValidOtherCapabilityBase(capability_base.CapabilityBase):
@classmethod
def get_capability_name(cls):
return OTHER_VALID_CAPABILITY_NAME
class ValidParentCapabilityBase(capability_base.CapabilityBase):
pass
class ValidChildCapabilityBase(ValidParentCapabilityBase):
pass
class NonConformingCapabilityInterfaceNameWithOverride(
capability_base.CapabilityBase):
@classmethod
def get_capability_name(cls):
return NONCONFORMING_CAPABILITY_NAME
# Invalid capability interface definitions
class InvalidCapabilityBase:
"""Doesn't inherit from CapabilityBase."""
pass
class UnsupportedCapabilityBase(capability_base.CapabilityBase):
pass
class NonConformingCapabilityInterfaceNameNoOverride(
capability_base.CapabilityBase):
pass
_SUPPORTED_INTERFACES = [
ValidCapabilityBase, ValidOtherCapabilityBase, ValidParentCapabilityBase,
ValidChildCapabilityBase, NonConformingCapabilityInterfaceNameWithOverride,
NonConformingCapabilityInterfaceNameNoOverride
]
SUPPORTED_INTERFACES = {
get_interface_name(interface): interface
for interface in _SUPPORTED_INTERFACES
}
# Note: NonConformingCapabilityInterfaceNameNoOverride is intentionally
# excluded. Generating a name for it should raise an error.
CAPABILITIES = {
VALID_CAPABILITY_NAME:
get_interface_name(ValidCapabilityBase),
OTHER_VALID_CAPABILITY_NAME:
get_interface_name(ValidOtherCapabilityBase),
NONCONFORMING_CAPABILITY_NAME:
get_interface_name(NonConformingCapabilityInterfaceNameWithOverride),
PARENT_CAPABILITY_NAME:
get_interface_name(ValidParentCapabilityBase),
CHILD_CAPABILITY_NAME:
get_interface_name(ValidChildCapabilityBase)
}
# Valid capability flavor definitions
class ValidCapabilityFlavor(ValidCapabilityBase):
pass
class ValidCapabilityFlavor1(ValidCapabilityBase):
pass
class ValidOtherCapabilityFlavor(ValidOtherCapabilityBase):
pass
class ValidParentCapabilityFlavor(ValidParentCapabilityBase):
pass
class ValidChildCapabilityFlavor(ValidChildCapabilityBase):
pass
class NonConformingInterfaceNameFlavor(
NonConformingCapabilityInterfaceNameWithOverride):
pass
DICT_VALIDATION_KEYS = ["foo", "bar"]
DICT_VALIDATION_DICT_NAME = "some_dict"
class ValidDictValidationFlavor(ValidParentCapabilityBase):
def __init__(self, some_dict, device_name):
super(ValidDictValidationFlavor, self).__init__(device_name=device_name)
self.validate_required_keys(DICT_VALIDATION_KEYS, some_dict,
DICT_VALIDATION_DICT_NAME)
_SUPPORTED_FLAVORS = [
ValidCapabilityFlavor, ValidCapabilityFlavor1, ValidDictValidationFlavor,
ValidOtherCapabilityFlavor, ValidParentCapabilityFlavor,
ValidChildCapabilityFlavor, NonConformingInterfaceNameFlavor
]
SUPPORTED_FLAVORS = {
get_flavor_name(flavor): flavor for flavor in _SUPPORTED_FLAVORS
}
# Invalid capability flavor definitions
class NoInterfaceFlavor(capability_base.CapabilityBase):
pass
class UnsupportedFlavor(UnsupportedCapabilityBase):
pass
# Device classes with invalid capability definitions
class DeviceWithInvalidCapability1(fake_devices.FakeGazooDeviceBase):
"""Capability doesn't inherit from CapabilityBase."""
@decorators.CapabilityDecorator(InvalidCapabilityBase)
def foo(self):
return 1
class DeviceWithInvalidCapability2(fake_devices.FakeGazooDeviceBase):
"""Invalid capability class."""
@decorators.CapabilityDecorator(str)
def foo(self):
return 1
class DeviceWithInvalidCapability3(fake_devices.FakeGazooDeviceBase):
"""Capability is a class instance (not a class object)."""
@decorators.CapabilityDecorator("some_string")
def foo(self):
return 1
class DeviceWithUnsupportedCapability(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(UnsupportedFlavor)
def foo(self):
return 1
class DeviceInvalidCapabilityName(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(ValidCapabilityFlavor)
def unexpected_capability_name(self):
return 1
class DeviceDifferentCapabilityInterfaces(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(
[ValidCapabilityFlavor, ValidOtherCapabilityFlavor])
def some_valid_capability(self):
return 1
class DeviceCapabilityInterfaceInsteadOfFlavor(
fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(ValidCapabilityBase)
def some_valid_capability(self):
return 1
# Device classes with valid capability definitions
class DeviceNoCapabilities(fake_devices.FakeGazooDeviceBase):
pass
class DeviceOneFlavorCapability(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(ValidCapabilityFlavor)
def some_valid_capability(self):
return 1
class DeviceOneFlavorCapabilityList(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator([ValidCapabilityFlavor])
def some_valid_capability(self):
return 1
class DeviceParentFlavorCapability(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator([ValidParentCapabilityFlavor])
def valid_parent_capability(self):
return 1
class DeviceChildFlavorCapability(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator([ValidChildCapabilityFlavor])
def valid_child_capability(self):
return 1
class DeviceNonConformingNameFlavor(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(NonConformingInterfaceNameFlavor)
def non_conforming_capability(self):
return 1
class DeviceMultipleFlavorsCapabilityList(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(
[ValidCapabilityFlavor, ValidCapabilityFlavor1])
def some_valid_capability(self):
return 1
class DeviceMultipleFlavorsCapabilityTuple(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(
(ValidCapabilityFlavor, ValidCapabilityFlavor1))
def some_valid_capability(self):
return 1
class DeviceMultipleCapabilities(fake_devices.FakeGazooDeviceBase):
@decorators.CapabilityDecorator(ValidCapabilityFlavor)
def some_valid_capability(self):
return 1
@decorators.CapabilityDecorator(ValidOtherCapabilityFlavor)
def some_other_valid_capability(self):
return 1
class DeviceMultipleCapabilitiesMultipleFlavors(
fake_devices.FakeGazooDeviceBase):
"""Mock device with multiple capabilities and multiple flavors of them."""
@decorators.CapabilityDecorator(
(ValidCapabilityFlavor, ValidCapabilityFlavor1))
def some_valid_capability(self):
return 1
@decorators.CapabilityDecorator(ValidOtherCapabilityFlavor)
def some_other_valid_capability(self):
return 1
|
|
import numpy as np
import numpy.linalg as la
from pysal.spreg.utils import RegressionPropsY, spdot
import pysal.spreg.user_output as USER
from utils import cache_readonly
from base import LikelihoodModelResults
import family
from iwls import iwls
__all__ = ['GLM']
class GLM(RegressionPropsY):
"""
Generalised linear models. Can currently estimate Gaussian, Poisson and
logistic regression coefficients. The GLM object prepares the model input,
and the fit method performs estimation and returns a GLMResults object.
Parameters
----------
y : array
n*1, dependent variable.
X : array
n*k, independent variable, excluding the constant.
family : family object
Underlying probability model; an instance of family.Gaussian (default), family.Poisson or family.Binomial.
offset : array
n*1, the offset variable at the ith location. For Poisson model
this term is often the size of the population at risk or
the expected size of the outcome in spatial epidemiology.
Default is None where Ni becomes 1.0 for all locations.
y_fix : array
n*1, the fixed intercept value of y.
Attributes
----------
y : array
n*1, dependent variable.
X : array
n*k, independent variable, including constant.
family : family object
Underlying probability model: family.Gaussian, family.Poisson or family.Binomial.
n : integer
Number of observations
k : integer
Number of independent variables
df_model : float
k-1, where k is the number of variables (including
intercept)
df_residual : float
observations minus variables (n-k)
mean_y : float
Mean of y
std_y : float
Standard deviation of y
fit_params : dict
Parameters passed into fit method to define estimation
routine.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> from pysal.contrib.glm.glm import GLM
>>> from pysal.contrib.glm.family import Gaussian
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
>>> model = GLM(y, X, family=Gaussian())
>>> results = model.fit()
>>> results.params
[ 46.42818268, 0.62898397, -0.48488854]
"""
def __init__(self, y, X, family=family.Gaussian(), offset=None, y_fix = None,
constant=True):
"""
Initialize class
"""
self.n = USER.check_arrays(y, X)
USER.check_y(y, self.n)
self.y = y
if constant:
self.X = USER.check_constant(X)
else:
self.X = X
self.family = family
self.k = self.X.shape[1]
if offset is None:
self.offset = np.ones(shape=(self.n,1))
else:
self.offset = offset * 1.0
if y_fix is None:
self.y_fix = np.zeros(shape=(self.n,1))
else:
self.y_fix = y_fix
self.fit_params = {}
def fit(self, ini_betas=None, tol=1.0e-6, max_iter=200, solve='iwls'):
"""
Method that fits a model with a particular estimation routine.
Parameters
----------
ini_betas : array
k*1, initial coefficient values, including constant.
Default is None, which calculates initial values during
estimation.
tol : float
Tolerance for estimation convergence.
max_iter : integer
Maximum number of iterations if convergence not
achieved.
solve : string
Technique to solve MLE equations.
'iwls' = iteratively (re)weighted least squares (default)
"""
self.fit_params['ini_betas'] = ini_betas
self.fit_params['tol'] = tol
self.fit_params['max_iter'] = max_iter
self.fit_params['solve'] = solve
if solve.lower() == 'iwls':
params, predy, w, n_iter = iwls(self.y, self.X, self.family, self.offset,
self.y_fix, ini_betas, tol, max_iter)
self.fit_params['n_iter'] = n_iter
return GLMResults(self, params.flatten(), predy, w)
@cache_readonly
def df_model(self):
return self.X.shape[1] - 1
@cache_readonly
def df_resid(self):
return self.n - self.df_model - 1
class GLMResults(LikelihoodModelResults):
"""
Results of estimated GLM and diagnostics.
Parameters
----------
model : GLM object
Pointer to GLM object with estimation parameters.
params : array
k*1, estimated coefficients
mu : array
n*1, predicted y values.
w : array
n*1, final weight used for iwls
Attributes
----------
model : GLM Object
Points to GLM object for which parameters have been
estimated.
y : array
n*1, dependent variable.
x : array
n*k, independent variable, including constant.
family : family object
Underlying probability model: family.Gaussian, family.Poisson or family.Binomial.
n : integer
Number of observations
k : integer
Number of independent variables
df_model : float
k-1, where k is the number of variables (including
intercept)
df_residual : float
observations minus variables (n-k)
fit_params : dict
parameters passed into fit method to define estimation
routine.
scale : float
sigma squared used for subsequent computations.
params : array
k*1, estimated beta coefficients
w : array
n*1, final weight values of x
mu : array
n*1, predicted value of y (i.e., fittedvalues)
cov_params : array
Variance covariance matrix (kxk) of betas
bse : array
k*1, standard errors of betas
pvalues : array
k*1, two-tailed pvalues of parameters
tvalues : array
k*1, t statistics of the parameter estimates (params / bse)
null : array
n*1, predicted values of y for null model
deviance : float
value of the deviance function evaluated at params;
see family.py for distribution-specific deviance
null_deviance : float
value of the deviance function for the model fit with
a constant as the only regressor
llf : float
value of the loglikelihood function evaluated at params;
see family.py for distribution-specific loglikelihoods
llnull : float
value of log-likelihood function evaluated at null
aic : float
AIC
bic : float
BIC
D2 : float
percent deviance explained
adj_D2 : float
adjusted percent deviance explained
pseudo_R2 : float
McFadden's pseudo R2 (coefficient of determination)
adj_pseudoR2 : float
adjusted McFadden's pseudo R2
resid_response : array
response residuals; defined as y-mu
resid_pearson : array
Pearson residuals; defined as (y-mu)/sqrt(VAR(mu))
where VAR is the distribution specific variance
function; see family.py and varfuncs.py for more information.
resid_working : array
Working residuals; the working residuals are defined as
resid_response/link'(mu); see links.py for the
derivatives of the link functions.
resid_anscombe : array
Anscombe residuals; see family.py for
distribution-specific Anscombe residuals.
resid_deviance : array
deviance residuals; see family.py for
distribution-specific deviance residuals.
pearson_chi2 : float
chi-Squared statistic is defined as the sum
of the squares of the Pearson residuals
normalized_cov_params : array
k*k, approximates [X.T*X]-1
Examples
--------
>>> import numpy as np
>>> import pysal
>>> from pysal.contrib.glm.glm import GLM, GLMResults
>>> from pysal.contrib.glm.family import Gaussian
>>> db = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
>>> model = GLM(y, X, family=Gaussian())
>>> results1 = model.fit()
>>> results1.aic
408.73548964604873
>>> model = results1.model
>>> params = results1.params.flatten()
>>> predy = results1.predy
>>> w = results1.w
>>> results2 = GLMResults(model, params, predy, w)
>>> results2.aic
408.73548964604873
"""
def __init__(self, model, params, mu, w):
self.model = model
self.n = model.n
self.y = model.y.T.flatten()
self.X = model.X
self.k = model.k
self.offset = model.offset
self.family = model.family
self.fit_params = model.fit_params
self.params = params
self.w = w
self.mu = mu.flatten()
self._cache = {}
@cache_readonly
def df_model(self):
return self.model.df_model
@cache_readonly
def df_resid(self):
return self.model.df_resid
@cache_readonly
def normalized_cov_params(self):
return la.inv(spdot(self.w.T, self.w))
@cache_readonly
def resid_response(self):
return (self.y-self.mu)
@cache_readonly
def resid_pearson(self):
return ((self.y-self.mu) /
np.sqrt(self.family.variance(self.mu)))
@cache_readonly
def resid_working(self):
return (self.resid_response / self.family.link.deriv(self.mu))
@cache_readonly
def resid_anscombe(self):
return (self.family.resid_anscombe(self.y, self.mu))
@cache_readonly
def resid_deviance(self):
return (self.family.resid_dev(self.y, self.mu))
@cache_readonly
def pearson_chi2(self):
chisq = (self.y - self.mu)**2 / self.family.variance(self.mu)
chisqsum = np.sum(chisq)
return chisqsum
@cache_readonly
def null(self):
y = np.reshape(self.y, (-1,1))
model = self.model
X = np.ones((len(y), 1))
null_mod = GLM(y, X, family=self.family, offset=self.offset, constant=False)
return null_mod.fit().mu
@cache_readonly
def scale(self):
if isinstance(self.family, (family.Binomial, family.Poisson)):
return 1.
else:
return (((np.power(self.resid_response, 2) /
self.family.variance(self.mu))).sum() /
(self.df_resid))
@cache_readonly
def deviance(self):
return self.family.deviance(self.y, self.mu)
@cache_readonly
def null_deviance(self):
return self.family.deviance(self.y, self.null)
@cache_readonly
def llnull(self):
return self.family.loglike(self.y, self.null, scale=self.scale)
@cache_readonly
def llf(self):
return self.family.loglike(self.y, self.mu, scale=self.scale)
@cache_readonly
def aic(self):
if isinstance(self.family, family.QuasiPoisson):
return np.nan
else:
return -2 * self.llf + 2*(self.df_model+1)
@cache_readonly
def bic(self):
return (self.deviance -
(self.model.n - self.df_model - 1) *
np.log(self.model.n))
@cache_readonly
def D2(self):
return 1 - (self.deviance / self.null_deviance)
@cache_readonly
def adj_D2(self):
return 1.0 - (float(self.n) - 1.0)/(float(self.n) - float(self.k)) * (1.0-self.D2)
@cache_readonly
def pseudoR2(self):
return 1 - (self.llf/self.llnull)
@cache_readonly
def adj_pseudoR2(self):
return 1 - ((self.llf-self.k)/self.llnull)
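

# Hedged usage sketch (synthetic data; assumed, not part of the library above):
# fit a Poisson GLM and read off a few of the GLMResults diagnostics documented
# above. Kept as an unused module-level helper so importing this file stays
# side-effect free.
def _glm_usage_sketch():
    y = np.random.poisson(lam=3.0, size=(100, 1)).astype(float)
    X = np.random.normal(size=(100, 2))
    results = GLM(y, X, family=family.Poisson()).fit()
    # params are the estimated betas (constant first); aic and D2 summarize fit.
    return results.params, results.aic, results.D2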
|
|
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import os
import re
import unittest
from binascii import hexlify, unhexlify
from Cryptodome.Util.py3compat import b, tobytes, bchr, _memoryview
from Cryptodome.Util.strxor import strxor_c
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Cipher import ChaCha20
class ChaCha20Test(unittest.TestCase):
def test_new_positive(self):
cipher = ChaCha20.new(key=b("0")*32, nonce=b"0"*8)
self.assertEqual(cipher.nonce, b"0" * 8)
cipher = ChaCha20.new(key=b("0")*32, nonce=b"0"*12)
self.assertEqual(cipher.nonce, b"0" * 12)
def test_new_negative(self):
new = ChaCha20.new
self.assertRaises(TypeError, new)
self.assertRaises(TypeError, new, nonce=b("0"))
self.assertRaises(ValueError, new, nonce=b("0")*8, key=b("0"))
self.assertRaises(ValueError, new, nonce=b("0"), key=b("0")*32)
def test_default_nonce(self):
cipher1 = ChaCha20.new(key=bchr(1) * 32)
cipher2 = ChaCha20.new(key=bchr(1) * 32)
self.assertEqual(len(cipher1.nonce), 8)
self.assertNotEqual(cipher1.nonce, cipher2.nonce)
def test_either_encrypt_or_decrypt(self):
"""Verify that a cipher cannot be used for both decrypting and encrypting"""
c1 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
c1.encrypt(b("8"))
self.assertRaises(TypeError, c1.decrypt, b("9"))
c2 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
c2.decrypt(b("8"))
self.assertRaises(TypeError, c2.encrypt, b("9"))
def test_round_trip(self):
pt = b("A") * 1024
c1 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
c2 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
ct = c1.encrypt(pt)
self.assertEqual(c2.decrypt(ct), pt)
self.assertEqual(c1.encrypt(b("")), b(""))
self.assertEqual(c2.decrypt(b("")), b(""))
def test_streaming(self):
"""Verify that an arbitrary number of bytes can be encrypted/decrypted"""
from Cryptodome.Hash import SHA1
segments = (1, 3, 5, 7, 11, 17, 23)
total = sum(segments)
pt = b("")
while len(pt) < total:
pt += SHA1.new(pt).digest()
cipher1 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
ct = cipher1.encrypt(pt)
cipher2 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
cipher3 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
idx = 0
for segment in segments:
self.assertEqual(cipher2.decrypt(ct[idx:idx+segment]), pt[idx:idx+segment])
self.assertEqual(cipher3.encrypt(pt[idx:idx+segment]), ct[idx:idx+segment])
idx += segment
def test_seek(self):
cipher1 = ChaCha20.new(key=b("9") * 32, nonce=b("e") * 8)
offset = 64 * 900 + 7
pt = b("1") * 64
cipher1.encrypt(b("0") * offset)
ct1 = cipher1.encrypt(pt)
cipher2 = ChaCha20.new(key=b("9") * 32, nonce=b("e") * 8)
cipher2.seek(offset)
ct2 = cipher2.encrypt(pt)
self.assertEqual(ct1, ct2)
def test_seek_tv(self):
# Test Vector #4, A.1 from
# http://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04
key = bchr(0) + bchr(255) + bchr(0) * 30
nonce = bchr(0) * 8
cipher = ChaCha20.new(key=key, nonce=nonce)
cipher.seek(64 * 2)
expected_key_stream = unhexlify(b(
"72d54dfbf12ec44b362692df94137f32"
"8fea8da73990265ec1bbbea1ae9af0ca"
"13b25aa26cb4a648cb9b9d1be65b2c09"
"24a66c54d545ec1b7374f4872e99f096"
))
ct = cipher.encrypt(bchr(0) * len(expected_key_stream))
self.assertEqual(expected_key_stream, ct)
def test_rfc7539(self):
# from https://tools.ietf.org/html/rfc7539 Annex A.1
# Each item is: key, nonce, block #, plaintext, ciphertext
tvs = [
# Test Vector #1
(
"00"*32,
"00"*12,
0,
"00"*16*4,
"76b8e0ada0f13d90405d6ae55386bd28"
"bdd219b8a08ded1aa836efcc8b770dc7"
"da41597c5157488d7724e03fb8d84a37"
"6a43b8f41518a11cc387b669b2ee6586"
),
# Test Vector #2
(
"00"*31 + "01",
"00"*11 + "02",
1,
"416e79207375626d697373696f6e2074"
"6f20746865204945544620696e74656e"
"6465642062792074686520436f6e7472"
"696275746f7220666f72207075626c69"
"636174696f6e20617320616c6c206f72"
"2070617274206f6620616e2049455446"
"20496e7465726e65742d447261667420"
"6f722052464320616e6420616e792073"
"746174656d656e74206d616465207769"
"7468696e2074686520636f6e74657874"
"206f6620616e20494554462061637469"
"7669747920697320636f6e7369646572"
"656420616e20224945544620436f6e74"
"7269627574696f6e222e205375636820"
"73746174656d656e747320696e636c75"
"6465206f72616c2073746174656d656e"
"747320696e2049455446207365737369"
"6f6e732c2061732077656c6c20617320"
"7772697474656e20616e6420656c6563"
"74726f6e696320636f6d6d756e696361"
"74696f6e73206d61646520617420616e"
"792074696d65206f7220706c6163652c"
"20776869636820617265206164647265"
"7373656420746f",
"a3fbf07df3fa2fde4f376ca23e827370"
"41605d9f4f4f57bd8cff2c1d4b7955ec"
"2a97948bd3722915c8f3d337f7d37005"
"0e9e96d647b7c39f56e031ca5eb6250d"
"4042e02785ececfa4b4bb5e8ead0440e"
"20b6e8db09d881a7c6132f420e527950"
"42bdfa7773d8a9051447b3291ce1411c"
"680465552aa6c405b7764d5e87bea85a"
"d00f8449ed8f72d0d662ab052691ca66"
"424bc86d2df80ea41f43abf937d3259d"
"c4b2d0dfb48a6c9139ddd7f76966e928"
"e635553ba76c5c879d7b35d49eb2e62b"
"0871cdac638939e25e8a1e0ef9d5280f"
"a8ca328b351c3c765989cbcf3daa8b6c"
"cc3aaf9f3979c92b3720fc88dc95ed84"
"a1be059c6499b9fda236e7e818b04b0b"
"c39c1e876b193bfe5569753f88128cc0"
"8aaa9b63d1a16f80ef2554d7189c411f"
"5869ca52c5b83fa36ff216b9c1d30062"
"bebcfd2dc5bce0911934fda79a86f6e6"
"98ced759c3ff9b6477338f3da4f9cd85"
"14ea9982ccafb341b2384dd902f3d1ab"
"7ac61dd29c6f21ba5b862f3730e37cfd"
"c4fd806c22f221"
),
# Test Vector #3
(
"1c9240a5eb55d38af333888604f6b5f0"
"473917c1402b80099dca5cbc207075c0",
"00"*11 + "02",
42,
"2754776173206272696c6c69672c2061"
"6e642074686520736c6974687920746f"
"7665730a446964206779726520616e64"
"2067696d626c6520696e207468652077"
"6162653a0a416c6c206d696d73792077"
"6572652074686520626f726f676f7665"
"732c0a416e6420746865206d6f6d6520"
"7261746873206f757467726162652e",
"62e6347f95ed87a45ffae7426f27a1df"
"5fb69110044c0d73118effa95b01e5cf"
"166d3df2d721caf9b21e5fb14c616871"
"fd84c54f9d65b283196c7fe4f60553eb"
"f39c6402c42234e32a356b3e764312a6"
"1a5532055716ead6962568f87d3f3f77"
"04c6a8d1bcd1bf4d50d6154b6da731b1"
"87b58dfd728afa36757a797ac188d1"
)
]
for tv in tvs:
key = unhexlify(tv[0])
nonce = unhexlify(tv[1])
offset = tv[2] * 64
pt = unhexlify(tv[3])
ct_expect = unhexlify(tv[4])
cipher = ChaCha20.new(key=key, nonce=nonce)
if offset != 0:
cipher.seek(offset)
ct = cipher.encrypt(pt)
self.assertEqual(ct, ct_expect)
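# Hedged usage sketch (not part of the original test suite): the seek() calls
# exercised above position the cipher by keystream byte offset, so seeking to
# 64*n is equivalent to first encrypting 64*n bytes of padding.
def _seek_sketch():
    key, nonce = b"9" * 32, b"e" * 8
    c_padded = ChaCha20.new(key=key, nonce=nonce)
    c_padded.encrypt(b"\x00" * 128)      # consume two 64-byte keystream blocks
    c_seeked = ChaCha20.new(key=key, nonce=nonce)
    c_seeked.seek(128)                   # jump straight to block 2
    assert c_padded.encrypt(b"data") == c_seeked.encrypt(b"data")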
class XChaCha20Test(unittest.TestCase):
# From https://tools.ietf.org/html/draft-arciszewski-xchacha-03
def test_hchacha20(self):
# Section 2.2.1
from Cryptodome.Cipher.ChaCha20 import _HChaCha20
key = b"00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f:10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f"
key = unhexlify(key.replace(b":", b""))
nonce = b"00:00:00:09:00:00:00:4a:00:00:00:00:31:41:59:27"
nonce = unhexlify(nonce.replace(b":", b""))
subkey = _HChaCha20(key, nonce)
expected = b"82413b42 27b27bfe d30e4250 8a877d73 a0f9e4d5 8a74a853 c12ec413 26d3ecdc"
expected = unhexlify(expected.replace(b" ", b""))
self.assertEqual(subkey, expected)
def test_encrypt(self):
# Section A.3.2
pt = b"""
5468652064686f6c65202870726f6e6f756e6365642022646f6c652229206973
20616c736f206b6e6f776e2061732074686520417369617469632077696c6420
646f672c2072656420646f672c20616e642077686973746c696e6720646f672e
2049742069732061626f7574207468652073697a65206f662061204765726d61
6e20736865706865726420627574206c6f6f6b73206d6f7265206c696b652061
206c6f6e672d6c656767656420666f782e205468697320686967686c7920656c
757369766520616e6420736b696c6c6564206a756d70657220697320636c6173
736966696564207769746820776f6c7665732c20636f796f7465732c206a6163
6b616c732c20616e6420666f78657320696e20746865207461786f6e6f6d6963
2066616d696c792043616e696461652e"""
pt = unhexlify(pt.replace(b"\n", b"").replace(b" ", b""))
key = unhexlify(b"808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9f")
iv = unhexlify(b"404142434445464748494a4b4c4d4e4f5051525354555658")
ct = b"""
7d0a2e6b7f7c65a236542630294e063b7ab9b555a5d5149aa21e4ae1e4fbce87
ecc8e08a8b5e350abe622b2ffa617b202cfad72032a3037e76ffdcdc4376ee05
3a190d7e46ca1de04144850381b9cb29f051915386b8a710b8ac4d027b8b050f
7cba5854e028d564e453b8a968824173fc16488b8970cac828f11ae53cabd201
12f87107df24ee6183d2274fe4c8b1485534ef2c5fbc1ec24bfc3663efaa08bc
047d29d25043532db8391a8a3d776bf4372a6955827ccb0cdd4af403a7ce4c63
d595c75a43e045f0cce1f29c8b93bd65afc5974922f214a40b7c402cdb91ae73
c0b63615cdad0480680f16515a7ace9d39236464328a37743ffc28f4ddb324f4
d0f5bbdc270c65b1749a6efff1fbaa09536175ccd29fb9e6057b307320d31683
8a9c71f70b5b5907a66f7ea49aadc409"""
ct = unhexlify(ct.replace(b"\n", b"").replace(b" ", b""))
cipher = ChaCha20.new(key=key, nonce=iv)
cipher.seek(64) # Counter = 1
ct_test = cipher.encrypt(pt)
self.assertEqual(ct, ct_test)
class ByteArrayTest(unittest.TestCase):
"""Verify we can encrypt or decrypt bytearrays"""
def runTest(self):
data = b"0123"
key = b"9" * 32
nonce = b"t" * 8
# Encryption
data_ba = bytearray(data)
key_ba = bytearray(key)
nonce_ba = bytearray(nonce)
cipher1 = ChaCha20.new(key=key, nonce=nonce)
ct = cipher1.encrypt(data)
cipher2 = ChaCha20.new(key=key_ba, nonce=nonce_ba)
key_ba[:1] = b'\xFF'
nonce_ba[:1] = b'\xFF'
ct_test = cipher2.encrypt(data_ba)
self.assertEqual(ct, ct_test)
self.assertEqual(cipher1.nonce, cipher2.nonce)
# Decryption
key_ba = bytearray(key)
nonce_ba = bytearray(nonce)
ct_ba = bytearray(ct)
cipher3 = ChaCha20.new(key=key_ba, nonce=nonce_ba)
key_ba[:1] = b'\xFF'
nonce_ba[:1] = b'\xFF'
pt_test = cipher3.decrypt(ct_ba)
self.assertEqual(data, pt_test)
class MemoryviewTest(unittest.TestCase):
"""Verify we can encrypt or decrypt bytearrays"""
def runTest(self):
data = b"0123"
key = b"9" * 32
nonce = b"t" * 8
# Encryption
data_mv = memoryview(bytearray(data))
key_mv = memoryview(bytearray(key))
nonce_mv = memoryview(bytearray(nonce))
cipher1 = ChaCha20.new(key=key, nonce=nonce)
ct = cipher1.encrypt(data)
cipher2 = ChaCha20.new(key=key_mv, nonce=nonce_mv)
key_mv[:1] = b'\xFF'
nonce_mv[:1] = b'\xFF'
ct_test = cipher2.encrypt(data_mv)
self.assertEqual(ct, ct_test)
self.assertEqual(cipher1.nonce, cipher2.nonce)
# Decryption
key_mv = memoryview(bytearray(key))
nonce_mv = memoryview(bytearray(nonce))
ct_mv = memoryview(bytearray(ct))
cipher3 = ChaCha20.new(key=key_mv, nonce=nonce_mv)
key_mv[:1] = b'\xFF'
nonce_mv[:1] = b'\xFF'
pt_test = cipher3.decrypt(ct_mv)
self.assertEqual(data, pt_test)
class ChaCha20_AGL_NIR(unittest.TestCase):
# From http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
# and http://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04
tv = [
( "00" * 32,
"00" * 8,
"76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc"
"8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11c"
"c387b669b2ee6586"
"9f07e7be5551387a98ba977c732d080d"
"cb0f29a048e3656912c6533e32ee7aed"
"29b721769ce64e43d57133b074d839d5"
"31ed1f28510afb45ace10a1f4b794d6f"
),
( "00" * 31 + "01",
"00" * 8,
"4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952"
"ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea81"
"7e9ad275ae546963"
"3aeb5224ecf849929b9d828db1ced4dd"
"832025e8018b8160b82284f3c949aa5a"
"8eca00bbb4a73bdad192b5c42f73f2fd"
"4e273644c8b36125a64addeb006c13a0"
),
( "00" * 32,
"00" * 7 + "01",
"de9cba7bf3d69ef5e786dc63973f653a0b49e015adbff7134fcb7df1"
"37821031e85a050278a7084527214f73efc7fa5b5277062eb7a0433e"
"445f41e3"
),
( "00" * 32,
"01" + "00" * 7,
"ef3fdfd6c61578fbf5cf35bd3dd33b8009631634d21e42ac33960bd1"
"38e50d32111e4caf237ee53ca8ad6426194a88545ddc497a0b466e7d"
"6bbdb0041b2f586b"
),
( "000102030405060708090a0b0c0d0e0f101112131415161718191a1b"
"1c1d1e1f",
"0001020304050607",
"f798a189f195e66982105ffb640bb7757f579da31602fc93ec01ac56"
"f85ac3c134a4547b733b46413042c9440049176905d3be59ea1c53f1"
"5916155c2be8241a38008b9a26bc35941e2444177c8ade6689de9526"
"4986d95889fb60e84629c9bd9a5acb1cc118be563eb9b3a4a472f82e"
"09a7e778492b562ef7130e88dfe031c79db9d4f7c7a899151b9a4750"
"32b63fc385245fe054e3dd5a97a5f576fe064025d3ce042c566ab2c5"
"07b138db853e3d6959660996546cc9c4a6eafdc777c040d70eaf46f7"
"6dad3979e5c5360c3317166a1c894c94a371876a94df7628fe4eaaf2"
"ccb27d5aaae0ad7ad0f9d4b6ad3b54098746d4524d38407a6deb3ab7"
"8fab78c9"
),
( "00" * 32,
"00" * 7 + "02",
"c2c64d378cd536374ae204b9ef933fcd"
"1a8b2288b3dfa49672ab765b54ee27c7"
"8a970e0e955c14f3a88e741b97c286f7"
"5f8fc299e8148362fa198a39531bed6d"
),
]
def runTest(self):
for (key, nonce, stream) in self.tv:
c = ChaCha20.new(key=unhexlify(b(key)), nonce=unhexlify(b(nonce)))
ct = unhexlify(b(stream))
pt = b("\x00") * len(ct)
self.assertEqual(c.encrypt(pt), ct)
class TestOutput(unittest.TestCase):
def runTest(self):
# Encrypt/Decrypt data and test output parameter
key = b'4' * 32
nonce = b'5' * 8
cipher = ChaCha20.new(key=key, nonce=nonce)
pt = b'5' * 16
ct = cipher.encrypt(pt)
output = bytearray(16)
cipher = ChaCha20.new(key=key, nonce=nonce)
res = cipher.encrypt(pt, output=output)
self.assertEqual(ct, output)
self.assertEqual(res, None)
cipher = ChaCha20.new(key=key, nonce=nonce)
res = cipher.decrypt(ct, output=output)
self.assertEqual(pt, output)
self.assertEqual(res, None)
import sys
if sys.version[:3] != '2.6':
output = memoryview(bytearray(16))
cipher = ChaCha20.new(key=key, nonce=nonce)
cipher.encrypt(pt, output=output)
self.assertEqual(ct, output)
cipher = ChaCha20.new(key=key, nonce=nonce)
cipher.decrypt(ct, output=output)
self.assertEqual(pt, output)
cipher = ChaCha20.new(key=key, nonce=nonce)
self.assertRaises(TypeError, cipher.encrypt, pt, output=b'0'*16)
cipher = ChaCha20.new(key=key, nonce=nonce)
self.assertRaises(TypeError, cipher.decrypt, ct, output=b'0'*16)
shorter_output = bytearray(7)
cipher = ChaCha20.new(key=key, nonce=nonce)
self.assertRaises(ValueError, cipher.encrypt, pt, output=shorter_output)
cipher = ChaCha20.new(key=key, nonce=nonce)
self.assertRaises(ValueError, cipher.decrypt, ct, output=shorter_output)
def get_tests(config={}):
tests = []
tests += list_test_cases(ChaCha20Test)
tests += list_test_cases(XChaCha20Test)
tests.append(ChaCha20_AGL_NIR())
tests.append(ByteArrayTest())
import sys
if sys.version[:3] != "2.6":
tests.append(MemoryviewTest())
tests.append(TestOutput())
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
|
|
"""
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5))'
>>> pnt = OGRGeometry(wkt1)
>>> print(pnt)
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print(mpnt)
MULTIPOINT (-90 30,-90 30)
>>> print(mpnt.srs.name)
WGS 84
>>> print(mpnt.srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform(SpatialReference('NAD27'))
>>> print(mpnt.srs.proj)
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print(mpnt)
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class is to make it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print(gt1 == 3, gt1 == 'Polygon') # Equivalence works w/non-OGRGeomType objects
True True
"""
import sys
from binascii import a2b_hex, b2a_hex
from ctypes import byref, c_char_p, c_double, c_ubyte, c_void_p, string_at
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import (
GDALException, OGRIndexError, SRSException,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
from django.contrib.gis.gdal.srs import CoordTransform, SpatialReference
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.utils.encoding import force_bytes
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
class OGRGeometry(GDALBase):
"Generally encapsulates an OGR geometry."
destructor = capi.destroy_geom
def __init__(self, geom_input, srs=None):
"Initializes Geometry on either WKT or an OGR pointer as input."
str_instance = isinstance(geom_input, str)
# If HEX, unpack input to a binary buffer.
if str_instance and hex_regex.match(geom_input):
geom_input = memoryview(a2b_hex(geom_input.upper().encode()))
str_instance = False
# Constructing the geometry,
if str_instance:
wkt_m = wkt_regex.match(geom_input)
json_m = json_regex.match(geom_input)
if wkt_m:
if wkt_m.group('srid'):
# If there's EWKT, set the SRS w/value of the SRID.
srs = int(wkt_m.group('srid'))
if wkt_m.group('type').upper() == 'LINEARRING':
# OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
# See http://trac.osgeo.org/gdal/ticket/1992.
g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt').encode())))
else:
g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt').encode())), None, byref(c_void_p()))
elif json_m:
g = capi.from_json(geom_input.encode())
else:
# Seeing if the input is a valid short-hand string
# (e.g., 'Point', 'POLYGON').
OGRGeomType(geom_input)
g = capi.create_geom(OGRGeomType(geom_input).num)
elif isinstance(geom_input, memoryview):
# WKB was passed in
g = self._from_wkb(geom_input)
elif isinstance(geom_input, OGRGeomType):
# OGRGeomType was passed in, an empty geometry will be created.
g = capi.create_geom(geom_input.num)
elif isinstance(geom_input, self.ptr_type):
# OGR pointer (c_void_p) was the input.
g = geom_input
else:
raise GDALException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))
# Now checking the Geometry pointer before finishing initialization
# by setting the pointer for the object.
if not g:
raise GDALException('Cannot create OGR Geometry from input: %s' % geom_input)
self.ptr = g
# Assigning the SpatialReference object to the geometry, if valid.
if srs:
self.srs = srs
# Setting the class depending upon the OGR Geometry Type
self.__class__ = GEO_CLASSES[self.geom_type.num]
# Pickle routines
def __getstate__(self):
srs = self.srs
if srs:
srs = srs.wkt
else:
srs = None
return bytes(self.wkb), srs
def __setstate__(self, state):
wkb, srs = state
ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
if not ptr:
raise GDALException('Invalid OGRGeometry loaded from pickled state.')
self.ptr = ptr
self.srs = srs
@classmethod
def _from_wkb(cls, geom_input):
return capi.from_wkb(bytes(geom_input), None, byref(c_void_p()), len(geom_input))
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
return OGRGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
@classmethod
def from_gml(cls, gml_string):
return cls(capi.from_gml(force_bytes(gml_string)))
# ### Geometry set-like operations ###
# g = g1 | g2
def __or__(self, other):
"Returns the union of the two geometries."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
def __eq__(self, other):
"Is this Geometry equal to the other?"
if isinstance(other, OGRGeometry):
return self.equals(other)
else:
return False
def __str__(self):
"WKT is used for the string representation."
return self.wkt
# #### Geometry Properties ####
@property
def dimension(self):
"Returns 0 for points, 1 for lines, and 2 for surfaces."
return capi.get_dims(self.ptr)
def _get_coord_dim(self):
"Returns the coordinate dimension of the Geometry."
return capi.get_coord_dim(self.ptr)
def _set_coord_dim(self, dim):
"Sets the coordinate dimension of this Geometry."
if dim not in (2, 3):
raise ValueError('Geometry dimension must be either 2 or 3')
capi.set_coord_dim(self.ptr, dim)
coord_dim = property(_get_coord_dim, _set_coord_dim)
@property
def geom_count(self):
"The number of elements in this Geometry."
return capi.get_geom_count(self.ptr)
@property
def point_count(self):
"Returns the number of Points in this Geometry."
return capi.get_point_count(self.ptr)
@property
def num_points(self):
"Alias for `point_count` (same name method in GEOS API.)"
return self.point_count
@property
def num_coords(self):
"Alais for `point_count`."
return self.point_count
@property
def geom_type(self):
"Returns the Type for this Geometry."
return OGRGeomType(capi.get_geom_type(self.ptr))
@property
def geom_name(self):
"Returns the Name of this Geometry."
return capi.get_geom_name(self.ptr)
@property
def area(self):
"Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
return capi.get_area(self.ptr)
@property
def envelope(self):
"Returns the envelope for this Geometry."
# TODO: Fix Envelope() for Point geometries.
return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))
@property
def empty(self):
return capi.is_empty(self.ptr)
@property
def extent(self):
"Returns the envelope as a 4-tuple, instead of as an Envelope object."
return self.envelope.tuple
# #### SpatialReference-related Properties ####
# The SRS property
def _get_srs(self):
"Returns the Spatial Reference for this Geometry."
try:
srs_ptr = capi.get_geom_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(srs_ptr))
except SRSException:
return None
def _set_srs(self, srs):
"Sets the SpatialReference for this geometry."
# Do not have to clone the `SpatialReference` object pointer because
# when it is assigned to this `OGRGeometry` its internal OGR
# reference count is incremented, and will likewise be released
# (decremented) when this geometry's destructor is called.
if isinstance(srs, SpatialReference):
srs_ptr = srs.ptr
elif isinstance(srs, (int, str)):
sr = SpatialReference(srs)
srs_ptr = sr.ptr
elif srs is None:
srs_ptr = None
else:
raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
capi.assign_srs(self.ptr, srs_ptr)
srs = property(_get_srs, _set_srs)
# The SRID property
def _get_srid(self):
srs = self.srs
if srs:
return srs.srid
return None
def _set_srid(self, srid):
if isinstance(srid, int) or srid is None:
self.srs = srid
else:
raise TypeError('SRID must be set with an integer.')
srid = property(_get_srid, _set_srid)
# #### Output Methods ####
def _geos_ptr(self):
from django.contrib.gis.geos import GEOSGeometry
return GEOSGeometry._from_wkb(self.wkb)
@property
def geos(self):
"Returns a GEOSGeometry object from this OGRGeometry."
from django.contrib.gis.geos import GEOSGeometry
return GEOSGeometry(self._geos_ptr(), self.srid)
@property
def gml(self):
"Returns the GML representation of the Geometry."
return capi.to_gml(self.ptr)
@property
def hex(self):
"Returns the hexadecimal representation of the WKB (a string)."
return b2a_hex(self.wkb).upper()
@property
def json(self):
"""
Returns the GeoJSON representation of this Geometry.
"""
return capi.to_json(self.ptr)
geojson = json
@property
def kml(self):
"Returns the KML representation of the Geometry."
return capi.to_kml(self.ptr, None)
@property
def wkb_size(self):
"Returns the size of the WKB buffer."
return capi.get_wkbsize(self.ptr)
@property
def wkb(self):
"Returns the WKB representation of the Geometry."
if sys.byteorder == 'little':
byteorder = 1 # wkbNDR (from ogr_core.h)
else:
byteorder = 0 # wkbXDR
sz = self.wkb_size
# Creating the unsigned character buffer, and passing it in by reference.
buf = (c_ubyte * sz)()
capi.to_wkb(self.ptr, byteorder, byref(buf))
# Returning a buffer of the string at the pointer.
return memoryview(string_at(buf, sz))
@property
def wkt(self):
"Returns the WKT representation of the Geometry."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def ewkt(self):
"Returns the EWKT representation of the Geometry."
srs = self.srs
if srs and srs.srid:
return 'SRID=%s;%s' % (srs.srid, self.wkt)
else:
return self.wkt
# #### Geometry Methods ####
def clone(self):
"Clones this OGR Geometry."
return OGRGeometry(capi.clone_geom(self.ptr), self.srs)
def close_rings(self):
"""
If there are any rings within this geometry that have not been
closed, this routine will do so by adding the starting point at the
end.
"""
# Closing the open rings.
capi.geom_close_rings(self.ptr)
def transform(self, coord_trans, clone=False):
"""
Transforms this geometry to a different spatial reference system.
May take a CoordTransform object, a SpatialReference object, string
WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
and the geometry is transformed in-place. However, if the `clone`
keyword is set, then a transformed clone of this geometry will be
returned.
"""
if clone:
klone = self.clone()
klone.transform(coord_trans)
return klone
# Depending on the input type, use the appropriate OGR routine
# to perform the transformation.
if isinstance(coord_trans, CoordTransform):
capi.geom_transform(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, SpatialReference):
capi.geom_transform_to(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, (int, str)):
sr = SpatialReference(coord_trans)
capi.geom_transform_to(self.ptr, sr.ptr)
else:
raise TypeError('Transform only accepts CoordTransform, '
'SpatialReference, string, and integer objects.')
# #### Topology Methods ####
def _topology(self, func, other):
"""A generalized function for topology operations, takes a GDAL function and
the other geometry to perform the operation on."""
if not isinstance(other, OGRGeometry):
raise TypeError('Must use another OGRGeometry object for topology operations!')
# Returning the output of the given function with the other geometry's
# pointer.
return func(self.ptr, other.ptr)
def intersects(self, other):
"Returns True if this geometry intersects with the other."
return self._topology(capi.ogr_intersects, other)
def equals(self, other):
"Returns True if this geometry is equivalent to the other."
return self._topology(capi.ogr_equals, other)
def disjoint(self, other):
"Returns True if this geometry and the other are spatially disjoint."
return self._topology(capi.ogr_disjoint, other)
def touches(self, other):
"Returns True if this geometry touches the other."
return self._topology(capi.ogr_touches, other)
def crosses(self, other):
"Returns True if this geometry crosses the other."
return self._topology(capi.ogr_crosses, other)
def within(self, other):
"Returns True if this geometry is within the other."
return self._topology(capi.ogr_within, other)
def contains(self, other):
"Returns True if this geometry contains the other."
return self._topology(capi.ogr_contains, other)
def overlaps(self, other):
"Returns True if this geometry overlaps the other."
return self._topology(capi.ogr_overlaps, other)
# #### Geometry-generation Methods ####
def _geomgen(self, gen_func, other=None):
"A helper routine for the OGR routines that generate geometries."
if isinstance(other, OGRGeometry):
return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
else:
return OGRGeometry(gen_func(self.ptr), self.srs)
@property
def boundary(self):
"Returns the boundary of this geometry."
return self._geomgen(capi.get_boundary)
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points in
this Geometry.
"""
return self._geomgen(capi.geom_convex_hull)
def difference(self, other):
"""
Returns a new geometry consisting of the region which is the difference
of this geometry and the other.
"""
return self._geomgen(capi.geom_diff, other)
def intersection(self, other):
"""
Returns a new geometry consisting of the region of intersection of this
geometry and the other.
"""
return self._geomgen(capi.geom_intersection, other)
def sym_difference(self, other):
"""
Returns a new geometry which is the symmetric difference of this
geometry and the other.
"""
return self._geomgen(capi.geom_sym_diff, other)
def union(self, other):
"""
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
"""
return self._geomgen(capi.geom_union, other)
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
def _geos_ptr(self):
from django.contrib.gis import geos
return geos.Point._create_empty() if self.empty else super()._geos_ptr()
@classmethod
def _create_empty(cls):
return capi.create_geom(OGRGeomType('point').num)
@property
def x(self):
"Returns the X coordinate for this Point."
return capi.getx(self.ptr, 0)
@property
def y(self):
"Returns the Y coordinate for this Point."
return capi.gety(self.ptr, 0)
@property
def z(self):
"Returns the Z coordinate for this Point."
if self.coord_dim == 3:
return capi.getz(self.ptr, 0)
@property
def tuple(self):
"Returns the tuple of this point."
if self.coord_dim == 2:
return (self.x, self.y)
elif self.coord_dim == 3:
return (self.x, self.y, self.z)
coords = tuple
class LineString(OGRGeometry):
def __getitem__(self, index):
"Returns the Point at the given index."
if index >= 0 and index < self.point_count:
x, y, z = c_double(), c_double(), c_double()
capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
dim = self.coord_dim
if dim == 1:
return (x.value,)
elif dim == 2:
return (x.value, y.value)
elif dim == 3:
return (x.value, y.value, z.value)
else:
raise OGRIndexError('index out of range: %s' % index)
def __iter__(self):
"Iterates over each point in the LineString."
for i in range(self.point_count):
yield self[i]
def __len__(self):
"The length returns the number of points in the LineString."
return self.point_count
@property
def tuple(self):
"Returns the tuple representation of this LineString."
return tuple(self[i] for i in range(len(self)))
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function.
"""
return [func(self.ptr, i) for i in range(len(self))]
@property
def x(self):
"Returns the X coordinates in a list."
return self._listarr(capi.getx)
@property
def y(self):
"Returns the Y coordinates in a list."
return self._listarr(capi.gety)
@property
def z(self):
"Returns the Z coordinates in a list."
if self.coord_dim == 3:
return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString):
pass
class Polygon(OGRGeometry):
def __len__(self):
"The number of interior rings in this Polygon."
return self.geom_count
def __iter__(self):
"Iterates through each ring in the Polygon."
for i in range(self.geom_count):
yield self[i]
def __getitem__(self, index):
"Gets the ring at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
# Polygon Properties
@property
def shell(self):
"Returns the shell of this Polygon."
return self[0] # First ring is the shell
exterior_ring = shell
@property
def tuple(self):
"Returns a tuple of LinearRing coordinate tuples."
return tuple(self[i].tuple for i in range(self.geom_count))
coords = tuple
@property
def point_count(self):
"The number of Points in this Polygon."
# Summing up the number of points in each ring of the Polygon.
return sum(self[i].point_count for i in range(self.geom_count))
@property
def centroid(self):
"Returns the centroid (a Point) of this Polygon."
# The centroid is a Point, create a geometry for this.
p = OGRGeometry(OGRGeomType('Point'))
capi.get_centroid(self.ptr, p.ptr)
return p
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
"The Geometry Collection class."
def __getitem__(self, index):
"Gets the Geometry at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
def __iter__(self):
"Iterates over each Geometry."
for i in range(self.geom_count):
yield self[i]
def __len__(self):
"The number of geometries in this Geometry Collection."
return self.geom_count
def add(self, geom):
"Add the geometry to this Geometry Collection."
if isinstance(geom, OGRGeometry):
if isinstance(geom, self.__class__):
for g in geom:
capi.add_geom(self.ptr, g.ptr)
else:
capi.add_geom(self.ptr, geom.ptr)
elif isinstance(geom, str):
tmp = OGRGeometry(geom)
capi.add_geom(self.ptr, tmp.ptr)
else:
raise GDALException('Must add an OGRGeometry.')
@property
def point_count(self):
"The number of Points in this Geometry Collection."
# Summing up the number of points in each geometry in this collection
return sum(self[i].point_count for i in range(self.geom_count))
@property
def tuple(self):
"Returns a tuple representation of this Geometry Collection."
return tuple(self[i].tuple for i in range(self.geom_count))
coords = tuple
# Multiple Geometry types.
class MultiPoint(GeometryCollection):
pass
class MultiLineString(GeometryCollection):
pass
class MultiPolygon(GeometryCollection):
pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
GEO_CLASSES = {1: Point,
2: LineString,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
101: LinearRing,
1 + OGRGeomType.wkb25bit: Point,
2 + OGRGeomType.wkb25bit: LineString,
3 + OGRGeomType.wkb25bit: Polygon,
4 + OGRGeomType.wkb25bit: MultiPoint,
5 + OGRGeomType.wkb25bit: MultiLineString,
6 + OGRGeomType.wkb25bit: MultiPolygon,
7 + OGRGeomType.wkb25bit: GeometryCollection,
}
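

# Hedged usage sketch (assumes GDAL is available; mirrors the module docstring):
# build a geometry from WKT with a spatial reference, then transform a clone so
# the original coordinates are preserved.
def _ogr_usage_sketch():
    pnt = OGRGeometry('POINT(-90 30)', SpatialReference('WGS84'))
    nad27 = pnt.transform(SpatialReference('NAD27'), clone=True)
    return pnt.wkt, nad27.wkt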
|
|
from django import forms
from django.contrib.admin.util import (flatten_fieldsets, lookup_field,
display_for_field, label_for_field, help_text_for_field)
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.util import flatatt
from django.template.defaultfilters import capfirst
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(label='', required=False, initial=0,
widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, normalize_fieldsets(fieldsets)
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def first_field(self):
try:
fieldset_name, fieldset_options = self.fieldsets[0]
field_name = fieldset_options['fields'][0]
if not isinstance(field_name, basestring):
field_name = field_name[0]
return self.form[field_name]
except (KeyError, IndexError):
pass
try:
return iter(self.form).next()
except StopIteration:
return None
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = u' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = ['jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__"):
self.fields = [field]
else:
self.fields = field
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0),
model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(u'\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n'))
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
def label_tag(self):
classes = []
contents = conditional_escape(force_unicode(self.field.label))
if self.is_checkbox:
classes.append(u'vCheckboxLabel')
else:
contents += u':'
if self.field.field.required:
classes.append(u'required')
if not self.is_first:
classes.append(u'inline')
attrs = classes and {'class': u' '.join(classes)} or {}
return self.field.label_tag(contents=mark_safe(contents), attrs=attrs)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
label = label_for_field(field, form._meta.model, model_admin)
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ != '<lambda>' and field.__name__ or ''
else:
class_name = field
self.field = {
'name': class_name,
'label': label,
'field': field,
'help_text': help_text_for_field(class_name, form._meta.model)
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
contents = capfirst(force_unicode(escape(label))) + u":"
return mark_safe('<label%(attrs)s>%(contents)s</label>' % {
"attrs": flatatt(attrs),
"contents": contents,
})
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
result_repr = smart_unicode(value)
if getattr(attr, "allow_tags", False):
result_repr = mark_safe(result_repr)
else:
if value is None:
result_repr = EMPTY_CHANGELIST_VALUE
elif isinstance(f.rel, ManyToManyRel):
result_repr = ", ".join(map(unicode, value.all()))
else:
result_repr = display_for_field(value, f)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, original, self.readonly_fields,
model_admin=self.opts)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, None, self.readonly_fields,
model_admin=self.opts)
yield InlineAdminForm(self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field:
continue
if field in self.readonly_fields:
yield {
'label': label_for_field(field, self.opts.model, self.opts),
'widget': {
'is_hidden': False
},
'required': False
}
else:
yield self.formset.form.base_fields[field]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
if original is not None:
self.original_content_type_id = ContentType.objects.get_for_model(original).pk
self.show_url = original and hasattr(original, 'get_absolute_url')
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
readonly_fields, model_admin)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name,
self.readonly_fields, model_admin=self.model_admin, **options)
def has_auto_field(self):
if self.form._meta.model._meta.has_auto_field:
return True
# Also search any parents for an auto field.
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def field_count(self):
# tabular.html uses this function for colspan value.
num_of_fields = 0
if self.has_auto_field():
num_of_fields += 1
num_of_fields += len(self.fieldsets[0][1]["fields"])
if self.formset.can_order:
num_of_fields += 1
if self.formset.can_delete:
num_of_fields += 1
return num_of_fields
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields,
model_admin=self.model_admin)
class AdminErrorList(forms.util.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
if form.is_bound:
self.extend(form.errors.values())
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
def normalize_fieldsets(fieldsets):
"""
Make sure the keys in fieldset dictionaries are strings. Returns the
normalized data.
"""
result = []
for name, options in fieldsets:
result.append((name, normalize_dictionary(options)))
return result
def normalize_dictionary(data_dict):
"""
Converts all the keys in "data_dict" to strings. The keys must be
convertible using str().
"""
    for key, value in list(data_dict.items()):
if not isinstance(key, str):
del data_dict[key]
data_dict[str(key)] = value
return data_dict
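# Illustrative example (not part of the original module): normalize_dictionary
# rewrites non-string keys in place and returns the same dict, e.g.
# {1: 'a', 'b': 2} becomes {'1': 'a', 'b': 2}; normalize_fieldsets applies the
# same conversion to every fieldset's options dict.
def _normalize_dictionary_example():
    opts = {1: 'a', 'b': 2}
    assert normalize_dictionary(opts) == {'1': 'a', 'b': 2}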
|
|
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# author: Reza Hosseini
import os
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
## create a directory if it does not exist
def CreateDir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
#### this function creates a latex file skin. i.e. the main document
def LatexSkin(fn, docType='article', figsPath='{figs/}',
              latexInputFn='Input.tex'):
    latex1 = '\\documentclass{' + docType + '}'
    latex2 = ('\n \\usepackage{enumerate,amsmath,graphics,amssymb,graphicx,' +
              'amscd,amsbsy,multirow,float,booktabs,verbatim,xy,' +
              'geometry,import} \n')
    latex3 = '\n \\graphicspath{' + figsPath + '} \n'
    latex4 = ('\n \\begin{document} \n \\input{' + latexInputFn +
              '} \n \\end{document}')
    with open(fn, 'w') as f:
        f.write(latex1)
        f.write(latex2)
        f.write(latex3)
        f.write(latex4)
#### this function appends the given text to a latex file
def WriteLatex(fn, text):
with open(fn, 'a') as f:
f.write('\n' + text + '\n')
### this begins a frame in latex
def BeginFrame(fn, frameTitle=''):
with open(fn, 'a') as f:
f.write('\n' + '\\begin{frame}' + '\n')
f.write('\n' + '\\frametitle{' + frameTitle +'}' + '\n')
### this ends a frame in latex
def EndFrame(fn):
with open(fn, 'a') as f:
f.write('\n' + '\\end{frame}' + '\n')
### this function writes the text in the center for a given latex file
def WriteLatexCenter(fn, text):
with open(fn, 'a') as f:
f.write('\\begin{center} \n ')
f.write(text + '\n')
f.write('\\end{center} \n ')
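# Illustrative sketch (not part of the original module): the frame helpers are
# intended to be used as a begin/write/end triple when the skin is a beamer
# document, one call per slide.
def _OneSlideExample(latexInputFn):
    BeginFrame(latexInputFn, frameTitle='Overview')
    WriteLatexCenter(latexInputFn, 'Automatically generated EDA')
    EndFrame(latexInputFn)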
### this function adds a figure with caption and label to a latex file
def LatexFig(latexFn, figFn, figLabel=None, figCaption=None, scale=str(0.5)):
with open(latexFn, 'a') as f:
f.write('\n \\begin{figure}[H] \n')
f.write('\\centering \n')
f.write('\\includegraphics[scale=' + scale + ']{' + figFn + '} \n')
if figCaption is not None:
f.write('\\caption{' + figCaption + '} \n')
if figLabel is not None:
f.write('\\label{' + figLabel + '} \n')
f.write('\\end{figure} \n')
############################ Creating figures ################
def Pdff(fn, PlotFcn, plotTitle):
    with PdfPages(fn) as pdf:
        plt.figure(figsize=(3, 3))
        PlotFcn()
        plt.title(plotTitle)
        pdf.savefig()  # saves the current figure into a pdf page
        plt.close()
def SaveFig(fn, PlotFcn, plotTitle='', format='png', dpi=100):
PlotFcn()
import matplotlib
if plotTitle != '':
matplotlib.pylab.title(plotTitle)
matplotlib.pylab.savefig(fn, format=format, dpi=dpi)
'''def PlotFcn(): pylab.plot(range(7), [3, 1, 4, 1, 5, 9, 2], 'r-o')
PlotFcn()
figFn = ofigs+'fig1.pdf'
plotTitle = 'maast & kashk'
Pdff(figFn,PlotFcn,plotTitle)
fn = odir + 'skin.tex'
LatexSkin(fn,docType='article')
fn = odir + 'Input.tex'
with open(fn, 'a') as f: f.write('This is an automatically generated EDA')
fn = odir + 'Input.tex'
LatexFig(fn,'fig1.pdf','figLabel','mofo')'''
### save fig and add to input latex file
def SaveFig_addLatex(figCaption, latexFn, ofigs, figFn='', format='png',
                     dpi=100, skipCaption=False, skipLabel=False):
    import matplotlib
    if figFn == '':
        figFn = figCaption.replace(' ', '_')
    figFn = ofigs + figFn + '.png'
    matplotlib.pylab.savefig(figFn, format=format, dpi=dpi)
    # LatexFig does not accept skip flags; map them to None arguments instead.
    LatexFig(latexFn=latexFn, figFn=figFn,
             figLabel=None if skipLabel else figFn,
             figCaption=None if skipCaption else figCaption)
def SaveFig_addSlide(figCaption, latexFn, ofigs, figFn='', format='png',
                     dpi=100, skipCaption=False, skipLabel=False):
    import matplotlib
    if figFn == '':
        figFn = figCaption.replace(' ', '_')
    figFn = ofigs + figFn + '.png'
    matplotlib.pylab.savefig(figFn, format=format, dpi=dpi)
    with open(latexFn, 'a') as f:
        f.write('\n \\begin{frame} \n')
        f.write('\n \\frametitle{ \n')
        f.write(figCaption)
        f.write('}')
    # LatexFig does not accept skip flags; map them to None arguments instead.
    LatexFig(latexFn=latexFn, figFn=figFn,
             figLabel=None if skipLabel else figFn,
             figCaption=None if skipCaption else figCaption)
    with open(latexFn, 'a') as f:
        f.write('\n \\end{frame} \n')
def Excise(filename, start, end):
    """Drop the block from the `start` marker line through the `end` marker line (inclusive)."""
    with open(filename) as infile, open(filename + ".out", "w") as outfile:
        for line in infile:
            if line.strip() == start:
                break
            outfile.write(line)
        for line in infile:
            if line.strip() == end:
                break
        for line in infile:
            outfile.write(line)
    os.remove(filename)
    os.rename(filename + ".out", filename)
def PltCloseStat(stat=True):
if stat:
plt.close()
return None
def SaveFigStat(stat=False, figCaption='test caption'):
if stat:
SaveFig_addSlide(
figCaption=figCaption,
latexFn=latexInputFn, ofigs=ofigs,
figFn='', format='png', dpi=200)
return None
### this function converts a pandas DataFrame into the body of a latex table
def DfLatexTable(df):
    table = ''
    n = len(df)
    colNames = df.columns
    top = ' & '.join(colNames)
    table = top + ' \\\\' + ' \n' + ' '
    table = table + '\\hline' + '\n'
    table = table + '\\hline' + '\n'
    for i in range(n):
        dfRow = df.iloc[i, :]
        # cast to str so numeric cells do not break the join
        tableRow = ' & '.join(dfRow.astype(str))
        table = table + tableRow + '\\\\' + '\n' + ' '
        table = table + '\\hline' + '\n'
    table = table + '\\hline' + '\n'
    return table
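# Illustrative sketch (not part of the original module): DfLatexTable only
# builds the rows, so the caller wraps the result in a tabular environment
# before appending it to the input file. The column names and values here are
# made up.
def _DfLatexTableExample(latexFn):
    import pandas as pd
    df = pd.DataFrame({'metric': ['mean', 'median'], 'value': ['1.2', '0.9']})
    body = DfLatexTable(df)
    WriteLatex(latexFn, '\\begin{tabular}{ll}\n' + body + '\\end{tabular}')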
## creates a latex file which would create a pdf after running
# the pdf has all the photos found in path/figs/
# the latex files will be created in path with the name latexFn
def ConcatPhotos_viaLatex(path, latexFn):
    figsPath = path + "/figs/"
    files = os.listdir(figsPath)
    fn = path + latexFn
    LatexSkin(fn=fn, docType='article', figsPath='{figs/}', latexInputFn='Input.tex')
    # Must match the latexInputFn passed to LatexSkin above.
    latexFn0 = path + "Input.tex"
    for file in files:
        LatexFig(
            latexFn=latexFn0,
            figFn=file,
            figLabel=None, figCaption=None, scale=str(0.5))
"""
path = "/Users/rz13/Dropbox/Reza_Docs/morgage_application2/rent/"
latexFn = "rent_boa_copies.tex"
ConcatPhotos_viaLatex(path=path, latexFn=latexFn)
"""
|
|
import re
from django.db.backends import BaseDatabaseIntrospection, FieldInfo, TableInfo
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
size = get_field_size(key)
if size is not None:
return ('CharField', {'max_length': size})
raise KeyError
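# Illustrative sketch (not part of the original module): fixed-width SQLite
# declarations such as "VARCHAR(30)" carry the size inside the type name, so a
# plain dict lookup cannot map them to a Django field; FlexibleFieldLookupDict
# falls back to get_field_size for that case.
def _flexible_lookup_demo():
    lookup = FlexibleFieldLookupDict()
    assert lookup['integer'] == 'IntegerField'
    assert lookup['VARCHAR(30)'] == ('CharField', {'max_length': 30})
    assert get_field_size('varchar(11)') == 11
    assert get_field_size('text') is None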
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
info['null_ok']) for info in self._table_info(cursor, table_name)]
def column_name_converter(self, name):
"""
SQLite will in some cases, e.g. when returning columns from views and
subselects, return column names in 'alias."column"' format instead of
simply 'column'.
Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
"""
# TODO: remove when SQLite < 3.7.15 is sufficiently old.
# 3.7.13 ships in Debian stable as of 2014-03-21.
if self.connection.Database.sqlite_version_info < (3, 7, 15):
return name.split('.')[-1].strip('"')
else:
return name
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
try:
results = cursor.fetchone()[0].strip()
except TypeError:
# It might be a view, then no results will be returned
return relations
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_index, other_desc in enumerate(other_table_results.split(',')):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
name = other_desc.split(' ', 1)[0].strip('"')
if name == column:
relations[field_index] = (other_index, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_indexes(self, cursor, table_name):
indexes = {}
for info in self._table_info(cursor, table_name):
if info['pk'] != 0:
indexes[info['name']] = {'primary_key': True,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
'unique': unique}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
results = row[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
if m:
return m.groups()[0]
return None
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'size': get_field_size(field[2]),
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for number, index, unique in cursor.fetchall():
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
return constraints
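# Illustrative sketch (not part of the original module) of the parsing idea
# used by get_relations/get_key_columns above: SQLite keeps the original
# CREATE TABLE statement, so foreign keys are recovered by scanning each
# column definition for a REFERENCES clause instead of querying constraint
# metadata.
def _references_regex_demo():
    field_desc = '"author_id" integer REFERENCES "author" ("id")'
    m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
    table, column = [s.strip('"') for s in m.groups()]
    assert (table, column) == ('author', 'id')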
|
|
import os
import sys
import subprocess
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree
from tempfile import mkdtemp
from twisted.trial import unittest
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
out = os.tmpfile()
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd, \
env=self.env, **kwargs)
def proc(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
waited = 0
interval = 0.2
while p.poll() is None:
sleep(interval)
waited += interval
if waited > 5:
p.kill()
assert False, 'Command took too much time to complete'
return p
class StartprojectTest(ProjectTest):
def test_startproject(self):
self.assertEqual(0, self.call('startproject', self.project_name))
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
class CommandTest(ProjectTest):
def setUp(self):
super(CommandTest, self).setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = ['--template=%s' % tplname] if tplname else []
spname = 'test_spider'
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assert_("Created spider %r using template %r in module" % (spname, tplname) in out)
self.assert_(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assert_("Spider %r already exists in module" % spname in out)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
def test_runspider(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import BaseSpider
class MySpider(BaseSpider):
name = 'myspider'
def start_requests(self):
self.log("It Works!")
return []
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("[myspider] DEBUG: It Works!" in log, log)
self.assert_("[myspider] INFO: Spider opened" in log, log)
self.assert_("[myspider] INFO: Closing spider (finished)" in log, log)
self.assert_("[myspider] INFO: Spider closed (finished)" in log, log)
def test_runspider_no_spider_found(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import BaseSpider
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("No spider found in file" in log)
def test_runspider_file_not_found(self):
p = self.proc('runspider', 'some_non_existent_file')
log = p.stderr.read()
self.assert_("File not found: some_non_existent_file" in log)
def test_runspider_unable_to_load(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.txt'))
with open(fname, 'w') as f:
f.write("")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("Unable to load" in log)
class ParseCommandTest(CommandTest):
def setUp(self):
super(ParseCommandTest, self).setUp()
self.spider_name = 'parse_spider'
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.item import Item
class MySpider(BaseSpider):
name = '{0}'
def parse(self, response):
if getattr(self, 'test_arg', None):
self.log('It Works!')
return [Item()]
""".format(self.spider_name))
fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
class MyPipeline(object):
component_name = 'my_pipeline'
def process_item(self, item, spider):
log.msg('It Works!')
return item
""")
fname = abspath(join(self.proj_mod_path, 'settings.py'))
with open(fname, 'a') as f:
f.write("""
ITEM_PIPELINES = ['{0}.pipelines.MyPipeline']
""".format(self.project_name))
def test_spider_arguments(self):
p = self.proc('parse', '--spider', self.spider_name, '-a', 'test_arg=1',
'-c', 'parse', 'http://scrapinghub.com')
log = p.stderr.read()
self.assert_("[parse_spider] DEBUG: It Works!" in log, log)
def test_pipelines(self):
p = self.proc('parse', '--spider', self.spider_name, '--pipelines',
'-c', 'parse', 'http://scrapinghub.com')
log = p.stderr.read()
self.assert_("[scrapy] INFO: It Works!" in log, log)
class BenchCommandTest(CommandTest):
def test_run(self):
p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
log = p.stderr.read()
self.assert_('INFO: Crawled' in log, log)
|
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import os
import mock
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume.drivers import smbfs
class SmbFsTestCase(test.TestCase):
_FAKE_SHARE = '//1.2.3.4/share1'
_FAKE_MNT_BASE = '/mnt'
_FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
_FAKE_TOTAL_SIZE = '2048'
_FAKE_TOTAL_AVAILABLE = '1024'
_FAKE_TOTAL_ALLOCATED = 1024
_FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
'size': 1,
'provider_location': _FAKE_SHARE,
'name': _FAKE_VOLUME_NAME,
'status': 'available'}
_FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash')
_FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
_FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
_FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID,
'volume': _FAKE_VOLUME,
'status': 'available',
'volume_size': 1}
_FAKE_SNAPSHOT_PATH = (
_FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
_FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
_FAKE_OPTIONS_DICT = {'username': 'Administrator',
'password': '12345'}
_FAKE_LISTDIR = [_FAKE_VOLUME_NAME, _FAKE_VOLUME_NAME + '.vhd',
_FAKE_VOLUME_NAME + '.vhdx', 'fake_folder']
_FAKE_SMBFS_CONFIG = mock.MagicMock()
_FAKE_SMBFS_CONFIG.smbfs_oversub_ratio = 2
_FAKE_SMBFS_CONFIG.smbfs_used_ratio = 0.5
_FAKE_SMBFS_CONFIG.smbfs_shares_config = '/fake/config/path'
_FAKE_SMBFS_CONFIG.smbfs_default_volume_format = 'raw'
_FAKE_SMBFS_CONFIG.smbfs_sparsed_volumes = False
def setUp(self):
super(SmbFsTestCase, self).setUp()
smbfs.SmbfsDriver.__init__ = lambda x: None
self._smbfs_driver = smbfs.SmbfsDriver()
self._smbfs_driver._remotefsclient = mock.Mock()
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock()
self._smbfs_driver.base = self._FAKE_MNT_BASE
def test_delete_volume(self):
drv = self._smbfs_driver
fake_vol_info = self._FAKE_VOLUME_PATH + '.info'
drv._ensure_share_mounted = mock.MagicMock()
fake_ensure_mounted = drv._ensure_share_mounted
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._delete = mock.Mock()
drv._local_path_volume_info = mock.Mock(
return_value=fake_vol_info)
with mock.patch('os.path.exists', lambda x: True):
drv.delete_volume(self._FAKE_VOLUME)
fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
drv._delete.assert_any_call(
self._FAKE_VOLUME_PATH)
drv._delete.assert_any_call(fake_vol_info)
def _test_setup(self, config, share_config_exists=True):
fake_exists = mock.Mock(return_value=share_config_exists)
fake_ensure_mounted = mock.MagicMock()
self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
self._smbfs_driver.configuration = config
with mock.patch('os.path.exists', fake_exists):
if not (config.smbfs_shares_config and share_config_exists and
config.smbfs_oversub_ratio > 0 and
0 <= config.smbfs_used_ratio <= 1):
self.assertRaises(exception.SmbfsException,
self._smbfs_driver.do_setup,
None)
else:
self._smbfs_driver.do_setup(None)
self.assertEqual(self._smbfs_driver.shares, {})
fake_ensure_mounted.assert_called_once()
def test_setup_missing_shares_config_option(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_shares_config = None
self._test_setup(fake_config, None)
def test_setup_missing_shares_config_file(self):
self._test_setup(self._FAKE_SMBFS_CONFIG, False)
    def test_setup_invalid_oversub_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_oversub_ratio = -1
self._test_setup(fake_config)
def test_setup_invalid_used_ratio(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_used_ratio = -1
self._test_setup(fake_config)
def _test_create_volume(self, volume_exists=False, volume_format=None):
fake_method = mock.MagicMock()
self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
self._smbfs_driver._set_rw_permissions_for_all = mock.MagicMock()
fake_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
self._smbfs_driver.get_volume_format = mock.MagicMock()
windows_image_format = False
fake_vol_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_volume_format.return_value = volume_format
if volume_format:
if volume_format in ('vhd', 'vhdx'):
windows_image_format = volume_format
if volume_format == 'vhd':
windows_image_format = 'vpc'
method = '_create_windows_image'
fake_vol_path += '.' + volume_format
else:
method = '_create_%s_file' % volume_format
if volume_format == 'sparsed':
self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
True)
else:
method = '_create_regular_file'
setattr(self._smbfs_driver, method, fake_method)
with mock.patch('os.path.exists', new=lambda x: volume_exists):
if volume_exists:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._do_create_volume,
self._FAKE_VOLUME)
return
self._smbfs_driver._do_create_volume(self._FAKE_VOLUME)
if windows_image_format:
fake_method.assert_called_once_with(
fake_vol_path,
self._FAKE_VOLUME['size'],
windows_image_format)
else:
fake_method.assert_called_once_with(
fake_vol_path, self._FAKE_VOLUME['size'])
fake_set_permissions.assert_called_once_with(fake_vol_path)
def test_create_existing_volume(self):
self._test_create_volume(volume_exists=True)
def test_create_vhdx(self):
self._test_create_volume(volume_format='vhdx')
def test_create_qcow2(self):
self._test_create_volume(volume_format='qcow2')
def test_create_sparsed(self):
self._test_create_volume(volume_format='sparsed')
def test_create_regular(self):
self._test_create_volume()
def _test_find_share(self, existing_mounted_shares=True,
eligible_shares=True):
if existing_mounted_shares:
mounted_shares = ('fake_share1', 'fake_share2', 'fake_share3')
else:
mounted_shares = None
self._smbfs_driver._mounted_shares = mounted_shares
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=eligible_shares)
fake_capacity_info = ((2, 1, 5), (2, 1, 4), (2, 1, 1))
self._smbfs_driver._get_capacity_info = mock.Mock(
side_effect=fake_capacity_info)
if not mounted_shares:
self.assertRaises(exception.SmbfsNoSharesMounted,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
elif not eligible_shares:
self.assertRaises(exception.SmbfsNoSuitableShareFound,
self._smbfs_driver._find_share,
self._FAKE_VOLUME['size'])
else:
ret_value = self._smbfs_driver._find_share(
self._FAKE_VOLUME['size'])
# The eligible share with the minimum allocated space
# will be selected
self.assertEqual(ret_value, 'fake_share3')
def test_find_share(self):
self._test_find_share()
def test_find_share_missing_mounted_shares(self):
self._test_find_share(existing_mounted_shares=False)
def test_find_share_missing_eligible_shares(self):
self._test_find_share(eligible_shares=False)
def _test_is_share_eligible(self, capacity_info, volume_size):
self._smbfs_driver._get_capacity_info = mock.Mock(
return_value=[float(x << 30) for x in capacity_info])
self._smbfs_driver.configuration = self._FAKE_SMBFS_CONFIG
return self._smbfs_driver._is_share_eligible(self._FAKE_SHARE,
volume_size)
def test_share_volume_above_used_ratio(self):
fake_capacity_info = (4, 1, 1)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_eligible_share(self):
fake_capacity_info = (4, 4, 0)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, True)
def test_share_volume_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 7)
fake_volume_size = 2
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_share_reserved_above_oversub_ratio(self):
fake_capacity_info = (4, 4, 10)
fake_volume_size = 1
ret_value = self._test_is_share_eligible(fake_capacity_info,
fake_volume_size)
self.assertEqual(ret_value, False)
def test_parse_options(self):
(opt_list,
opt_dict) = self._smbfs_driver.parse_options(
self._FAKE_SHARE_OPTS)
expected_ret = ([], self._FAKE_OPTIONS_DICT)
self.assertEqual(expected_ret, (opt_list, opt_dict))
def test_parse_credentials(self):
fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
expected_flags = '-o username=Administrator,noperm'
flags = self._smbfs_driver.parse_credentials(fake_smb_options)
self.assertEqual(expected_flags, flags)
def test_get_volume_path(self):
self._smbfs_driver.get_volume_format = mock.Mock(
return_value='vhd')
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
expected = self._FAKE_VOLUME_PATH + '.vhd'
ret_val = self._smbfs_driver.local_path(self._FAKE_VOLUME)
self.assertEqual(expected, ret_val)
def test_initialize_connection(self):
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
self._smbfs_driver._get_mount_point_base = mock.Mock(
return_value=self._FAKE_MNT_BASE)
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format='raw'))
fake_data = {'export': self._FAKE_SHARE,
'format': 'raw',
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS}
expected = {
'driver_volume_type': 'smbfs',
'data': fake_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection(
self._FAKE_VOLUME, None)
self.assertEqual(expected, ret_val)
def _test_extend_volume(self, extend_failed=False, image_format='raw'):
drv = self._smbfs_driver
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv._check_extend_volume_support = mock.Mock(
return_value=True)
drv._is_file_size_equal = mock.Mock(
return_value=not extend_failed)
drv._qemu_img_info = mock.Mock(
return_value=mock.Mock(file_format=image_format))
with contextlib.nested(
mock.patch.object(image_utils, 'resize_image'),
mock.patch.object(image_utils, 'convert_image')) as (
fake_resize, fake_convert):
if extend_failed:
self.assertRaises(exception.ExtendVolumeError,
drv._extend_volume,
self._FAKE_VOLUME, mock.sentinel.new_size)
else:
drv._extend_volume(
self._FAKE_VOLUME,
mock.sentinel.new_size)
if image_format in (drv._DISK_FORMAT_VHDX,
drv._DISK_FORMAT_VHD_LEGACY):
fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
fake_tmp_path, 'raw')
fake_resize.assert_called_once_with(
fake_tmp_path, mock.sentinel.new_size)
fake_convert.assert_any_call(fake_tmp_path,
self._FAKE_VOLUME_PATH,
image_format)
else:
fake_resize.assert_called_once_with(
self._FAKE_VOLUME_PATH, mock.sentinel.new_size)
def test_extend_volume(self):
self._test_extend_volume()
def test_extend_volume_failed(self):
self._test_extend_volume(extend_failed=True)
def test_extend_vhd_volume(self):
self._test_extend_volume(image_format='vpc')
def _test_check_extend_support(self, has_snapshots=False,
is_eligible=True):
self._smbfs_driver.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
if has_snapshots:
active_file_path = self._FAKE_SNAPSHOT_PATH
else:
active_file_path = self._FAKE_VOLUME_PATH
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=active_file_path)
self._smbfs_driver._is_share_eligible = mock.Mock(
return_value=is_eligible)
if has_snapshots:
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
elif not is_eligible:
self.assertRaises(exception.ExtendVolumeError,
self._smbfs_driver._check_extend_volume_support,
self._FAKE_VOLUME, 2)
else:
self._smbfs_driver._check_extend_volume_support(
self._FAKE_VOLUME, 2)
self._smbfs_driver._is_share_eligible.assert_called_once_with(
self._FAKE_SHARE, 1)
def test_check_extend_support(self):
self._test_check_extend_support()
def test_check_extend_volume_with_snapshots(self):
self._test_check_extend_support(has_snapshots=True)
    def test_check_extend_volume_ineligible_share(self):
self._test_check_extend_support(is_eligible=False)
def test_create_volume_from_in_use_snapshot(self):
fake_snapshot = {'status': 'in-use'}
self.assertRaises(
exception.InvalidSnapshot,
self._smbfs_driver.create_volume_from_snapshot,
self._FAKE_VOLUME, fake_snapshot)
def test_copy_volume_from_snapshot(self):
drv = self._smbfs_driver
fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
fake_img_info = mock.MagicMock()
fake_img_info.backing_file = self._FAKE_VOLUME_NAME
drv.get_volume_format = mock.Mock(
return_value='raw')
drv._local_path_volume_info = mock.Mock(
return_value=self._FAKE_VOLUME_PATH + '.info')
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv._read_info_file = mock.Mock(
return_value=fake_volume_info)
drv._qemu_img_info = mock.Mock(
return_value=fake_img_info)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH[:-1])
drv._extend_volume = mock.Mock()
drv._set_rw_permissions_for_all = mock.Mock()
with mock.patch.object(image_utils, 'convert_image') as (
fake_convert_image):
drv._copy_volume_from_snapshot(
self._FAKE_SNAPSHOT, self._FAKE_VOLUME,
self._FAKE_VOLUME['size'])
drv._extend_volume.assert_called_once_with(
self._FAKE_VOLUME, self._FAKE_VOLUME['size'])
fake_convert_image.assert_called_once_with(
self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw')
def test_ensure_mounted(self):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())
def _test_copy_image_to_volume(self, unsupported_qemu_version=False,
wrong_size_after_fetch=False):
drv = self._smbfs_driver
vol_size_bytes = self._FAKE_VOLUME['size'] << 30
fake_image_service = mock.MagicMock()
fake_image_service.show.return_value = (
{'id': 'fake_image_id', 'disk_format': 'raw'})
fake_img_info = mock.MagicMock()
if wrong_size_after_fetch:
fake_img_info.virtual_size = 2 * vol_size_bytes
else:
fake_img_info.virtual_size = vol_size_bytes
if unsupported_qemu_version:
qemu_version = [1, 5]
else:
qemu_version = [1, 7]
drv.get_volume_format = mock.Mock(
return_value=drv._DISK_FORMAT_VHDX)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv.get_qemu_version = mock.Mock(
return_value=qemu_version)
drv._do_extend_volume = mock.Mock()
drv.configuration = mock.MagicMock()
drv.configuration.volume_dd_blocksize = (
mock.sentinel.block_size)
exc = None
with contextlib.nested(
mock.patch.object(image_utils,
'fetch_to_volume_format'),
mock.patch.object(image_utils,
'qemu_img_info')) as (
fake_fetch,
fake_qemu_img_info):
if wrong_size_after_fetch:
exc = exception.ImageUnacceptable
elif unsupported_qemu_version:
exc = exception.InvalidVolume
fake_qemu_img_info.return_value = fake_img_info
if exc:
self.assertRaises(
exc, drv.copy_image_to_volume,
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
else:
drv.copy_image_to_volume(
mock.sentinel.context, self._FAKE_VOLUME,
fake_image_service,
mock.sentinel.image_id)
fake_fetch.assert_called_once_with(
mock.sentinel.context, fake_image_service,
mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
drv._DISK_FORMAT_VHDX,
mock.sentinel.block_size)
drv._do_extend_volume.assert_called_once_with(
self._FAKE_VOLUME_PATH, self._FAKE_VOLUME['size'])
def test_copy_image_to_volume(self):
self._test_copy_image_to_volume()
def test_copy_image_to_volume_wrong_size_after_fetch(self):
self._test_copy_image_to_volume(wrong_size_after_fetch=True)
def test_copy_image_to_volume_unsupported_qemu_version(self):
self._test_copy_image_to_volume(unsupported_qemu_version=True)
def test_get_capacity_info(self):
fake_block_size = 4096.0
fake_total_blocks = 1024
fake_avail_blocks = 512
fake_total_allocated = fake_total_blocks * fake_block_size
fake_df = ('%s %s %s' % (fake_block_size, fake_total_blocks,
fake_avail_blocks), None)
fake_du = (str(fake_total_allocated), None)
self._smbfs_driver._get_mount_point_for_share = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver._execute = mock.Mock(
side_effect=(fake_df, fake_du))
ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
expected = (fake_block_size * fake_total_blocks,
fake_block_size * fake_avail_blocks,
fake_total_allocated)
self.assertEqual(expected, ret_val)
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
import six
try:
from webapp2 import RequestHandler
except SyntaxError:
# webapp2 has not been ported to python3, so it will give a syntax
# error if we try. We'll just skip the webapp2 tests in that case.
RequestHandler = object
class Test_get_trace_id_from_flask(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id_from_flask()
@staticmethod
def create_app():
import flask
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'test flask trace' # pragma: NO COVER
return app
def test_no_context_header(self):
app = self.create_app()
with app.test_request_context(
path='/',
headers={}):
trace_id = self._call_fut()
self.assertIsNone(trace_id)
def test_valid_context_header(self):
flask_trace_header = 'X_CLOUD_TRACE_CONTEXT'
expected_trace_id = 'testtraceidflask'
flask_trace_id = expected_trace_id + '/testspanid'
app = self.create_app()
context = app.test_request_context(
path='/',
headers={flask_trace_header: flask_trace_id})
with context:
trace_id = self._call_fut()
self.assertEqual(trace_id, expected_trace_id)
class _GetTraceId(RequestHandler):
def get(self):
from google.cloud.logging.handlers import _helpers
trace_id = _helpers.get_trace_id_from_webapp2()
self.response.content_type = 'application/json'
self.response.out.write(json.dumps(trace_id))
@unittest.skipIf(six.PY3, 'webapp2 is Python 2 only')
class Test_get_trace_id_from_webapp2(unittest.TestCase):
@staticmethod
def create_app():
import webapp2
app = webapp2.WSGIApplication([
('/', _GetTraceId),
])
return app
def test_no_context_header(self):
import webob
req = webob.BaseRequest.blank('/')
response = req.get_response(self.create_app())
trace_id = json.loads(response.body)
self.assertEqual(None, trace_id)
def test_valid_context_header(self):
import webob
webapp2_trace_header = 'X-Cloud-Trace-Context'
expected_trace_id = 'testtraceidwebapp2'
webapp2_trace_id = expected_trace_id + '/testspanid'
req = webob.BaseRequest.blank(
'/',
headers={webapp2_trace_header: webapp2_trace_id})
response = req.get_response(self.create_app())
trace_id = json.loads(response.body)
self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id_from_django(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id_from_django()
def setUp(self):
from django.conf import settings
from django.test.utils import setup_test_environment
if not settings.configured:
settings.configure()
setup_test_environment()
def tearDown(self):
from django.test.utils import teardown_test_environment
from google.cloud.logging.handlers.middleware import request
teardown_test_environment()
request._thread_locals.__dict__.clear()
def test_no_context_header(self):
from django.test import RequestFactory
from google.cloud.logging.handlers.middleware import request
django_request = RequestFactory().get('/')
middleware = request.RequestMiddleware()
middleware.process_request(django_request)
trace_id = self._call_fut()
self.assertIsNone(trace_id)
def test_valid_context_header(self):
from django.test import RequestFactory
from google.cloud.logging.handlers.middleware import request
django_trace_header = 'HTTP_X_CLOUD_TRACE_CONTEXT'
expected_trace_id = 'testtraceiddjango'
django_trace_id = expected_trace_id + '/testspanid'
django_request = RequestFactory().get(
'/',
**{django_trace_header: django_trace_id})
middleware = request.RequestMiddleware()
middleware.process_request(django_request)
trace_id = self._call_fut()
self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id(unittest.TestCase):
@staticmethod
def _call_fut():
from google.cloud.logging.handlers import _helpers
return _helpers.get_trace_id()
def _helper(self, django_return, flask_return):
django_patch = mock.patch(
'google.cloud.logging.handlers._helpers.get_trace_id_from_django',
return_value=django_return)
flask_patch = mock.patch(
'google.cloud.logging.handlers._helpers.get_trace_id_from_flask',
return_value=flask_return)
with django_patch as django_mock:
with flask_patch as flask_mock:
trace_id = self._call_fut()
return django_mock, flask_mock, trace_id
def test_from_django(self):
django_mock, flask_mock, trace_id = self._helper(
'test-django-trace-id', None)
self.assertEqual(trace_id, django_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_not_called()
def test_from_flask(self):
django_mock, flask_mock, trace_id = self._helper(
None, 'test-flask-trace-id')
self.assertEqual(trace_id, flask_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_called_once_with()
def test_from_django_and_flask(self):
django_mock, flask_mock, trace_id = self._helper(
'test-django-trace-id', 'test-flask-trace-id')
# Django wins.
self.assertEqual(trace_id, django_mock.return_value)
django_mock.assert_called_once_with()
flask_mock.assert_not_called()
def test_missing(self):
django_mock, flask_mock, trace_id = self._helper(None, None)
self.assertIsNone(trace_id)
django_mock.assert_called_once_with()
flask_mock.assert_called_once_with()
|
|
from __future__ import absolute_import
import urwid
import urwid.util
import os
from netlib.http import CONTENT_MISSING
import netlib.utils
from .. import utils
from ..models import decoded
from . import signals
try:
    import pyperclip
except ImportError:
    pyperclip = False
VIEW_FLOW_REQUEST = 0
VIEW_FLOW_RESPONSE = 1
METHOD_OPTIONS = [
("get", "g"),
("post", "p"),
("put", "u"),
("head", "h"),
("trace", "t"),
("delete", "d"),
("options", "o"),
("edit raw", "e"),
]
def is_keypress(k):
    """
    Is this input event a keypress?
    """
    return isinstance(k, basestring)
def highlight_key(str, key, textattr="text", keyattr="key"):
l = []
parts = str.split(key, 1)
if parts[0]:
l.append((textattr, parts[0]))
l.append((keyattr, key))
if parts[1]:
l.append((textattr, parts[1]))
return l
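# Illustrative sketch (not part of the original module): highlight_key splits a
# label around its shortcut character and returns urwid text markup, one tuple
# per attribute run.
def _highlight_key_demo():
    assert highlight_key("delete", "d") == [("key", "d"), ("text", "elete")]
    assert highlight_key("post", "o") == [("text", "p"), ("key", "o"), ("text", "st")]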
KEY_MAX = 30
def format_keyvals(lst, key="key", val="text", indent=0):
"""
Format a list of (key, value) tuples.
If key is None, it's treated specially:
- We assume a sub-value, and add an extra indent.
- The value is treated as a pre-formatted list of directives.
"""
ret = []
if lst:
maxk = min(max(len(i[0]) for i in lst if i and i[0]), KEY_MAX)
for i, kv in enumerate(lst):
if kv is None:
ret.append(urwid.Text(""))
else:
if isinstance(kv[1], urwid.Widget):
v = kv[1]
elif kv[1] is None:
v = urwid.Text("")
else:
v = urwid.Text([(val, kv[1])])
ret.append(
urwid.Columns(
[
("fixed", indent, urwid.Text("")),
(
"fixed",
maxk,
urwid.Text([(key, kv[0] or "")])
),
v
],
dividechars = 2
)
)
return ret
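# Illustrative sketch (not part of the original module): format_keyvals turns
# (key, value) pairs into aligned two-column urwid rows, and a None entry
# becomes a blank spacer line.
def _format_keyvals_demo():
    rows = format_keyvals([("Host", "example.com"), None, ("Content-Length", "42")])
    assert len(rows) == 3  # one widget per input entry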
def shortcuts(k):
if k == " ":
k = "page down"
elif k == "ctrl f":
k = "page down"
elif k == "ctrl b":
k = "page up"
elif k == "j":
k = "down"
elif k == "k":
k = "up"
return k
def fcol(s, attr):
s = unicode(s)
return (
"fixed",
len(s),
urwid.Text(
[
(attr, s)
]
)
)
if urwid.util.detected_encoding:
SYMBOL_REPLAY = u"\u21ba"
SYMBOL_RETURN = u"\u2190"
SYMBOL_MARK = u"\u25cf"
else:
SYMBOL_REPLAY = u"[r]"
SYMBOL_RETURN = u"<-"
SYMBOL_MARK = "[m]"
def raw_format_flow(f, focus, extended, padding):
f = dict(f)
pile = []
req = []
if extended:
req.append(
fcol(
utils.format_timestamp(f["req_timestamp"]),
"highlight"
)
)
else:
req.append(fcol(">>" if focus else " ", "focus"))
if f["marked"]:
req.append(fcol(SYMBOL_MARK, "mark"))
if f["req_is_replay"]:
req.append(fcol(SYMBOL_REPLAY, "replay"))
req.append(fcol(f["req_method"], "method"))
preamble = sum(i[1] for i in req) + len(req) - 1
if f["intercepted"] and not f["acked"]:
uc = "intercept"
elif f["resp_code"] or f["err_msg"]:
uc = "text"
else:
uc = "title"
req.append(
urwid.Text([(uc, f["req_url"])])
)
pile.append(urwid.Columns(req, dividechars=1))
resp = []
resp.append(
("fixed", preamble, urwid.Text(""))
)
if f["resp_code"]:
codes = {
2: "code_200",
3: "code_300",
4: "code_400",
5: "code_500",
}
ccol = codes.get(f["resp_code"] / 100, "code_other")
resp.append(fcol(SYMBOL_RETURN, ccol))
if f["resp_is_replay"]:
resp.append(fcol(SYMBOL_REPLAY, "replay"))
resp.append(fcol(f["resp_code"], ccol))
if f["intercepted"] and f["resp_code"] and not f["acked"]:
rc = "intercept"
else:
rc = "text"
if f["resp_ctype"]:
resp.append(fcol(f["resp_ctype"], rc))
resp.append(fcol(f["resp_clen"], rc))
resp.append(fcol(f["roundtrip"], rc))
elif f["err_msg"]:
resp.append(fcol(SYMBOL_RETURN, "error"))
resp.append(
urwid.Text([
(
"error",
f["err_msg"]
)
])
)
pile.append(urwid.Columns(resp, dividechars=1))
return urwid.Pile(pile)
# Save file to disk
def save_data(path, data, master, state):
if not path:
return
try:
with file(path, "wb") as f:
f.write(data)
except IOError as v:
signals.status_message.send(message=v.strerror)
def ask_save_overwrite(path, data, master, state):
    if not path:
        return
    path = os.path.expanduser(path)
    if os.path.exists(path):
        def save_overwrite(k):
            if k == "y":
                save_data(path, data, master, state)
        signals.status_prompt_onekey.send(
            prompt = "'" + path + "' already exists. Overwrite?",
            keys = (
                ("yes", "y"),
                ("no", "n"),
            ),
            callback = save_overwrite
        )
    else:
        save_data(path, data, master, state)
def ask_save_path(prompt, data, master, state):
    signals.status_prompt_path.send(
        prompt = prompt,
        callback = ask_save_overwrite,
        args = (data, master, state)
    )
def copy_flow_format_data(part, scope, flow):
if part == "u":
data = flow.request.url
else:
data = ""
if scope in ("q", "a"):
if flow.request.content is None or flow.request.content == CONTENT_MISSING:
return None, "Request content is missing"
with decoded(flow.request):
if part == "h":
data += flow.client_conn.protocol.assemble(flow.request)
elif part == "c":
data += flow.request.content
else:
raise ValueError("Unknown part: {}".format(part))
if scope == "a" and flow.request.content and flow.response:
# Add padding between request and response
data += "\r\n" * 2
if scope in ("s", "a") and flow.response:
if flow.response.content is None or flow.response.content == CONTENT_MISSING:
return None, "Response content is missing"
with decoded(flow.response):
if part == "h":
data += flow.client_conn.protocol.assemble(flow.response)
elif part == "c":
data += flow.response.content
else:
raise ValueError("Unknown part: {}".format(part))
return data, False
def copy_flow(part, scope, flow, master, state):
"""
part: _c_ontent, _h_eaders+content, _u_rl
scope: _a_ll, re_q_uest, re_s_ponse
"""
data, err = copy_flow_format_data(part, scope, flow)
if err:
signals.status_message.send(message=err)
return
if not data:
if scope == "q":
signals.status_message.send(message="No request content to copy.")
elif scope == "s":
signals.status_message.send(message="No response content to copy.")
else:
signals.status_message.send(message="No contents to copy.")
return
    # pyperclip calls encode('utf-8') on the data to be copied without checking.
    # If the data is already encoded that way, a UnicodeDecodeError is thrown.
toclip = ""
try:
toclip = data.decode('utf-8')
except (UnicodeDecodeError):
toclip = data
try:
pyperclip.copy(toclip)
except (RuntimeError, UnicodeDecodeError, AttributeError):
def save(k):
if k == "y":
ask_save_path("Save data", data, master, state)
signals.status_prompt_onekey.send(
prompt = "Cannot copy data to clipboard. Save as file?",
keys = (
("yes", "y"),
("no", "n"),
),
callback = save
)
def ask_copy_part(scope, flow, master, state):
choices = [
("content", "c"),
("headers+content", "h")
]
if scope != "s":
choices.append(("url", "u"))
signals.status_prompt_onekey.send(
prompt = "Copy",
keys = choices,
callback = copy_flow,
args = (scope, flow, master, state)
)
def ask_save_body(part, master, state, flow):
"""
Save either the request or the response body to disk. part can either be
"q" (request), "s" (response) or None (ask user if necessary).
"""
request_has_content = flow.request and flow.request.content
response_has_content = flow.response and flow.response.content
if part is None:
# We first need to determine whether we want to save the request or the
# response content.
if request_has_content and response_has_content:
signals.status_prompt_onekey.send(
prompt = "Save",
keys = (
("request", "q"),
("response", "s"),
),
callback = ask_save_body,
args = (master, state, flow)
)
elif response_has_content:
ask_save_body("s", master, state, flow)
else:
ask_save_body("q", master, state, flow)
elif part == "q" and request_has_content:
ask_save_path(
"Save request content",
flow.request.get_decoded_content(),
master,
state
)
elif part == "s" and response_has_content:
ask_save_path(
"Save response content",
flow.response.get_decoded_content(),
master,
state
)
else:
signals.status_message.send(message="No content to save.")
flowcache = utils.LRUCache(800)
def format_flow(f, focus, extended=False, hostheader=False, padding=2,
marked=False):
d = dict(
intercepted = f.intercepted,
acked = f.reply.acked,
req_timestamp = f.request.timestamp_start,
req_is_replay = f.request.is_replay,
req_method = f.request.method,
req_url = f.request.pretty_url if hostheader else f.request.url,
err_msg = f.error.msg if f.error else None,
resp_code = f.response.status_code if f.response else None,
marked = marked,
)
if f.response:
if f.response.content:
contentdesc = netlib.utils.pretty_size(len(f.response.content))
elif f.response.content == CONTENT_MISSING:
contentdesc = "[content missing]"
else:
contentdesc = "[no content]"
duration = 0
if f.response.timestamp_end and f.request.timestamp_start:
duration = f.response.timestamp_end - f.request.timestamp_start
roundtrip = utils.pretty_duration(duration)
d.update(dict(
resp_code = f.response.status_code,
resp_is_replay = f.response.is_replay,
resp_clen = contentdesc,
roundtrip = roundtrip,
))
t = f.response.headers.get("content-type")
if t:
d["resp_ctype"] = t.split(";")[0]
else:
d["resp_ctype"] = ""
return flowcache.get(
raw_format_flow,
tuple(sorted(d.items())), focus, extended, padding
)
|
|
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from fields import PositiveSmallIntegerRangeField
import django.utils.timezone
CARD_KIND_WEAPON = 0
CARD_KIND_POTION = 1
CARD_KIND_MONSTER = 2
CARD_KIND_SCRAP = 3
CARD_KIND_TREASURE = 4
CARD_KINDS = (
(CARD_KIND_WEAPON, 'Weapon'),
(CARD_KIND_POTION, 'Potion'),
(CARD_KIND_MONSTER, 'Monster'),
(CARD_KIND_SCRAP, 'Scrap'),
(CARD_KIND_TREASURE, 'Treasure'),
)
class Player(models.Model):
user = models.OneToOneField(User)
active_session = models.OneToOneField('Session', null=True, blank=True)
statistics = models.OneToOneField('Statistics')
def __unicode__(self):
if self.user.first_name and self.user.last_name:
return u'%s %s' % (self.user.first_name, self.user.last_name)
else:
return u'%s' % (self.user.first_name)
@receiver(post_save, sender=User)
def create_player(sender, instance, created, **kwargs):
"""
Create a matching profile whenever a user object is created.
"""
if created:
try:
stats = Statistics()
stats.save()
except:
stats = None
profile, new = Player.objects.get_or_create(
user=instance,
statistics=stats)
class Session(models.Model):
belongs_to_player = models.ForeignKey('Player')
# Player's health attribute. Assumes only positive values are allowed, since 0 or less is a loss.
health = models.PositiveSmallIntegerField(default=20)
# To keep track of how many cards have been moved from the current room, since the last skip was activated.
# Defaults to negative value so that we can allow skipping on the very first turn.
amount_of_cards_moved_since_last_skip = models.IntegerField(default=-1)
room_stack = models.ForeignKey('Stack', related_name='room_stack')
equipment_stack = models.ForeignKey('Stack', related_name='equipment_stack')
you_stack = models.ForeignKey('Stack', related_name='you_stack')
treasure_stack = models.ForeignKey('Stack', related_name='treasure_stack')
forge_stack = models.ForeignKey('Stack', related_name='forge_stack')
discard_stack = models.ForeignKey('Stack', related_name='discard_stack')
time_started = models.DateTimeField(default=django.utils.timezone.now)
score = models.IntegerField(default=0)
score_multiplier = models.IntegerField(default=0)
def is_lost(self):
return self.health == 0
def __unicode__(self):
return u'%s with Health: %s' % (self.belongs_to_player, self.health)
class CardDetail(models.Model):
kind = models.PositiveSmallIntegerField(choices=CARD_KINDS)
name = models.CharField(max_length=32)
description = models.CharField(max_length=180, blank=True)
flavor = models.CharField(max_length=100, blank=True)
value = PositiveSmallIntegerRangeField(min_value=2, max_value=14, null=True, blank=True)
def __unicode__(self):
if self.value is None:
return u'(%s)' % (self.name)
else:
return u'%s (%s)' % (self.value, self.name)
class Card(models.Model):
belongs_to_session = models.ForeignKey('Session')
stack = models.ForeignKey('Stack', null=True)
order_in_stack = models.IntegerField(default=-1)
# Separating the properties into its own table allows us to have several cards of
# the same kind, but without duplicating data.
details = models.ForeignKey('CardDetail')
# Any card has a chance to be special...
is_special = models.BooleanField(default=False)
    def can_be_moved(self, to_stack):
        if self.stack:
            # Card is already in a stack.
            if not self.stack.is_editable:
                # The current stack does not allow manipulation.
                return False
            if self.stack == to_stack:
                # Moving a card onto the stack it is already in is a no-op.
                return False
        return True
def __unicode__(self):
if self.is_special:
return u'%s*' % (self.details)
else:
return u'%s' % (self.details)
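# Illustrative sketch (not part of the original models): because the shared
# properties live in CardDetail, several Card rows in one session can point at
# a single detail record instead of duplicating name/kind/value per card.
def _shared_detail_example(session):
    detail = CardDetail.objects.create(kind=CARD_KIND_MONSTER, name='Goblin', value=3)
    return [Card.objects.create(belongs_to_session=session, details=detail)
            for _ in range(3)]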
def get_first_element(iterable, default=None):
if iterable:
for item in iterable:
return item
return default
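# Quick illustration of the helper above (added comment, not original code):
#   get_first_element([7, 8, 9])          -> 7
#   get_first_element([], default=None)   -> None (empty iterables fall through to the default)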
class Stack(models.Model):
is_editable = models.BooleanField(default=True)
def belongs_to_session(self, session):
if session:
if (session.room_stack == self or
session.equipment_stack == self or
session.you_stack == self or
session.treasure_stack == self or
session.discard_stack == self or
session.forge_stack == self):
return True
return False
def bottom(self):
return get_first_element(Card.objects.filter(stack=self).order_by('order_in_stack')[:1])
def top(self):
return get_first_element(Card.objects.filter(stack=self).order_by('-order_in_stack')[:1])
def all_cards(self):
return Card.objects.filter(stack=self).order_by('order_in_stack')
def count(self):
return len(self.all_cards())
def is_empty(self):
return self.count() == 0
def push(self, card):
if card.stack == self:
return False
try:
card.stack = self
card.order_in_stack = self.count()
card.save()
except Exception:
return False
return True
def push_many(self, cards):
stacked_cards = []
for card in cards:
if self.push(card):
stacked_cards.append(card)
return stacked_cards
def pop_specific(self, card):
cards = self.all_cards()
if card in cards:
try:
card.stack = None
card.order_in_stack = -1
card.save()
except Exception:
return False
else:
return False
return True
def pop(self):
try:
# Propagate pop_specific's result so popping an empty stack reports failure
return self.pop_specific(self.top())
except Exception:
return False
def __unicode__(self):
return u'%s' % (self.id)
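# Illustrative sketch of the intended stack workflow (added comment, not executed at
# import time; assumes an existing Session `session` and a Card `card` that currently
# sits in session.room_stack):
#   if card.can_be_moved(session.discard_stack):
#       session.room_stack.pop_specific(card)     # detach from its current stack
#       session.discard_stack.push(card)          # re-attach; order_in_stack becomes the new count
#   top_card = session.discard_stack.top()        # card with the highest order_in_stack
#   bottom_card = session.discard_stack.bottom()  # card with the lowest order_in_stack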
class Statistics(models.Model):
cards_drawn = models.IntegerField(default=0)
monsters_slain = models.IntegerField(default=0)
def __unicode__(self):
return u'%s' % (self.id)
|
|
"""
#;+
#; NAME:
#; lls_literature
#; Ordered by publication date
#; Version 1.0
#;
#; PURPOSE:
#; Module for loading up literature data on Lyman Limit Systems
#; 29-Jun-2015 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import os, copy, sys, imp, glob
import numpy as np
import urllib2
from astropy import units as u
from astropy.io import ascii
from linetools.lists.linelist import LineList
from linetools.spectralline import AbsLine
from xastropy.atomic import ionization as xai
from xastropy.igm.abs_sys.lls_utils import LLSSystem
from xastropy.igm.abs_sys.ionclms import IonClms
from xastropy.igm.abs_sys import ionclms as xiai
from xastropy.obs import radec as xor
from xastropy.xutils import xdebug as xdb
xa_path = imp.find_module('xastropy')[1]
#class LLSSystem(AbslineSystem):
#class LLS_Survey(Absline_Survey):
def zonak2004():
'''Zonak, S. et al. 2004, ApJ, 606, 196
PG1634+706
HST+Keck spectra
MgII, SiIV, SiIII from Table 2. Summing Subsystems A (Model 2) and B
Errors estimated by JXP (not reported)
SiIII in A may be a model
SiIV in B may be a model
Total NHI from LL. Taken from Fig 3 caption.
Error estimated by JXP
Not all EWs in Table 1 included
Adopting their M/H
'''
# Setup
radec = xor.stod1('J163428.9897+703132.422') # SIMBAD
lls = LLSSystem(name='PG1634+706_z1.041', RA=radec[0], Dec=radec[1], zem=1.337,
zabs=1.0414, vlim=[-200., 30.]*u.km/u.s, NHI=17.23, MH=-1.4,
sigNHI=np.array([0.15,0.15]))
# SubSystems
lls.mk_subsys(2)
# Abundances
adict = dict(MgII={'clm': log_sum([11.45,11.90,12.02,11.68]), 'sig_clm': 0.05, 'flg_clm': 1},
SiIII={'clm': log_sum([12.5,12.5,12.8,12.7]), 'sig_clm': 0.25, 'flg_clm': 1},
SiIV={'clm': log_sum([10.9,10.8,11.2,11.1]), 'sig_clm': 0.15, 'flg_clm': 1} )
lls.subsys['A']._ionclms = IonClms(idict=adict)
bdict = dict(SiIII={'clm': log_sum([11.8,12.8,12.4]), 'sig_clm': 0.15, 'flg_clm': 1},
SiIV={'clm': log_sum([11.2,12.2,11.8]), 'sig_clm': 0.15, 'flg_clm': 1} )
lls.subsys['B']._ionclms = IonClms(idict=bdict)
# Total
lls._ionclms = lls.subsys['A']._ionclms.sum(lls.subsys['B']._ionclms)
lls.Refs.append('Zon04')
# Return
return lls
def jenkins2005():
'''Jenkins, E. et al. 2005, ApJ, 623, 767
PHL 1811
HST/STIS, FUSE
Metals parsed from Table 1
OI taken from text
Had to input error on columns by hand (JXP)
Total NHI from Lyman series. see Fig 3
M/H from O/H
'''
# Grab ASCII file from ApJ
tab_fil = xa_path+"/data/LLS/jenkins2005.tb1.ascii"
chk_fil = glob.glob(tab_fil)
if len(chk_fil) > 0:
tab_fil = chk_fil[0]
else:
url = 'http://iopscience.iop.org/0004-637X/623/2/767/fulltext/61520.tb1.txt'
print('LLSSurvey: Grabbing table file from {:s}'.format(url))
f = urllib2.urlopen(url)
with open(tab_fil, "wb") as code:
code.write(f.read())
# Setup
radec = xor.stod1('J215501.5152-092224.688') # SIMBAD
lls = LLSSystem(name='PHL1811_z0.081', RA=radec[0], Dec=radec[1], zem=0.192,
zabs=0.080923, vlim=[-100., 100.]*u.km/u.s, NHI=17.98, MH=-0.19,
sigNHI=np.array([0.05,0.05]))
# AbsLines
ism = LineList('ISM')
Nsig = {'C IV': 0.4, 'N II': 0.4, 'Si II': 0.05, 'Si IV': 0.25,
'S II': 0.2, 'Fe II': 0.12, 'H I': 0.05, 'S III': 0.06}
# Parse Table
with open(tab_fil,'r') as f:
flines = f.readlines()
ion_dict = {}
for iline in flines:
iline = iline.strip()
if (len(iline) == 0):
continue
# Split on tabs
isplit = iline.split('\t')
# Offset?
ioff = 0
if isplit[0][0] in ['1','2']:
ioff = -1
# Catch bad lines
if (isplit[1+ioff][0:6] in ['1442.0','1443.7','1120.9']): # Skip goofy CII line and CII*
continue
if len(isplit[2+ioff]) == 0:
continue
# Ion
if (len(isplit[0].strip()) > 0) & (isplit[0][0] not in ['1','2']):
ionc = isplit[0].strip()
try:
Zion = xai.name_ion(ionc)
except KeyError:
xdb.set_trace()
# Generate the Line
try:
newline = AbsLine(float(isplit[2+ioff])*u.AA,linelist=ism, closest=True)
except ValueError:
xdb.set_trace()
newline.attrib['z'] = lls.zabs
# Spectrum
newline.analy['datafile'] = 'STIS' if 'S' in isplit[1] else 'FUSE'
# EW
try:
EWvals = isplit[4+ioff].split(' ')
except IndexError:
xdb.set_trace()
newline.attrib['EW'] = float(EWvals[0])*u.AA/1e3
newline.attrib['EWsig'] = float(EWvals[2])*u.AA/1e3
newline.attrib['flgEW'] = 1
if len(isplit) < (5+ioff+1):
continue
# Colm?
#xdb.set_trace()
if (len(isplit[5+ioff].strip()) > 0) & (isplit[5+ioff].strip() != '\\ldots'):
if isplit[5+ioff][0] == '\\':
ipos = isplit[5+ioff].find(' ')
newline.attrib['N'] = float(isplit[5+ioff][ipos+1:])
newline.attrib['flagN'] = 2
elif isplit[5+ioff][0] == '<':
ipos = 0
newline.attrib['N'] = float(isplit[5+ioff][ipos+1:])
newline.attrib['flagN'] = 3
elif isplit[5+ioff][0] == '1':
try:
newline.attrib['N'] = float(isplit[5+ioff][0:5])
except ValueError:
xdb.set_trace()
newline.attrib['flagN'] = 1
try:
newline.attrib['Nsig'] = Nsig[ionc]
except KeyError:
print('No error for {:s}'.format(ionc))
else:
raise ValueError('Bad character')
# ion_dict
ion_dict[ionc] = dict(clm=newline.attrib['N'], sig_clm=newline.attrib['Nsig'],
flg_clm=newline.attrib['flagN'], Z=Zion[0], ion=Zion[1])
# Append
lls.lines.append(newline)
# Fix NI, OI
ion_dict['O I']['clm'] = 14.47
ion_dict['O I']['sig_clm'] = 0.05
ion_dict['N I']['flg_clm'] = 3
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Jen05')
# Return
return lls
def tripp2005():
'''Tripp, T. et al. 2005, ApJ, 619, 714
PG 1216+069 (LLS in Virgo)
HST/STIS, FUSE
Metal columns parsed from Tables 2 and 3
Total NHI from damping wings
M/H from O/H
'''
# Grab ASCII files from ApJ
tab_fils = [xa_path+"/data/LLS/tripp2005.tb3.ascii", xa_path+"/data/LLS/tripp2005.tb2.ascii"]
urls = ['http://iopscience.iop.org/0004-637X/619/2/714/fulltext/60797.tb3.txt',
'http://iopscience.iop.org/0004-637X/619/2/714/fulltext/60797.tb2.txt']
for jj,tab_fil in enumerate(tab_fils):
chk_fil = glob.glob(tab_fil)
if len(chk_fil) > 0:
tab_fil = chk_fil[0]
else:
url = urls[jj]
print('LLSSurvey: Grabbing table file from {:s}'.format(url))
f = urllib2.urlopen(url)
with open(tab_fil, "wb") as code:
code.write(f.read())
# Setup
radec = xor.stod1('J121920.9320+063838.476') # SIMBAD
lls = LLSSystem(name='PG1216+069_z0.006', RA=radec[0], Dec=radec[1], zem=0.3313,
zabs=0.00632, vlim=[-100., 100.]*u.km/u.s, NHI=19.32, MH=-1.6,
sigNHI=np.array([0.03,0.03]))
#lls.mk_subsys(2)
# Columns
# Start with Table 3 (VPFIT)
with open(tab_fils[0],'r') as f:
flines3 = f.readlines()
ion_dict = {}
for iline in flines3:
if (len(iline.strip()) == 0):
continue
isplit = iline.split('\t')
# Ion
flg = 2
if (len(isplit[0].strip()) > 0):# & (isplit[0][0] not in ['1','2']):
ipos = isplit[0].find('1')
ionc = isplit[0][0:ipos-1].strip()
try:
Zion = xai.name_ion(ionc)
except KeyError:
xdb.set_trace()
flg = 1
# Column
csplit = isplit[3].split(' ')
clm = float(csplit[0])
sig = float(csplit[2])
if flg == 1:
ion_dict[ionc] = dict(clm=clm, sig_clm=sig, flg_clm=1, Z=Zion[0],ion=Zion[1])
else: # Add it in
tmp_dict = dict(clm=clm, sig_clm=sig, flg_clm=1, Z=Zion[0],ion=Zion[1])
logN, siglogN = xiai.sum_logN(ion_dict[ionc], tmp_dict)
ion_dict[ionc]['clm'] = logN
ion_dict[ionc]['sig_clm'] = siglogN
ions = ion_dict.keys()
# Now Table 2 for the extras
with open(tab_fils[1],'r') as f:
flines2 = f.readlines()
# Trim the first 10 lines
flines2 = flines2[10:]
# Loop
for iline in flines2:
isplit = iline.split('\t')
#
ionc = isplit[0].strip()
if (len(ionc) == 0) or (ionc in ions):
continue
#
Zion = xai.name_ion(ionc)
ion_dict[ionc] = dict(Z=Zion[0], ion=Zion[1], sig_clm=0.)
if isplit[4][0] == '<':
ion_dict[ionc]['clm'] = float(isplit[4][1:])
ion_dict[ionc]['flg_clm'] = 3
else:
raise ValueError('Should not get here')
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Tri05')
return lls
def peroux06a():
'''Peroux, C. et al. 2006a, MNRAS, 372, 369
SDSS J0134+0051
One of her sample
Metal columns taken by JXP from Table 2 (no online data)
Total NHI from damping wings
'''
# Setup
radec = xor.stod1('J013405.75+005109.4') # SDSS Name
lls = LLSSystem(name='SDSSJ0134+0051_z0.842', RA=radec[0], Dec=radec[1], zem=1.522,
zabs=0.842, vlim=[-150., 150.]*u.km/u.s, NHI=19.93, sigNHI=np.array([0.15,0.15]))
# Table 2
ion_dict = {}
N = np.sum(np.array([5.56,12.6,13.7,23.5,61.4,39.8,6,9.14])*1e10)
sig = np.sqrt(np.sum((np.array([2.32,3.1,3.68,4.13,8.02,6.65,3.37,2.82])*1e10)**2))
ion_dict['Mg I'] = dict(clm=np.log10(N), sig_clm=sig/N/np.log(10),flg_clm=1,Z=12,ion=1)
ion_dict['Mg II'] = dict(clm=np.log10(5e13), sig_clm=0.,flg_clm=2,Z=12,ion=2)
N = np.sum(np.array([8.17,4.28,32.1,125,710,301,893,600,263,65.7])*1e11)
sig = np.sqrt(np.sum((np.array([2.63,1.40,2.37,8.6,53.2,28.4,73.5,61.7,14.0,2.95])*1e11)**2))
ion_dict['Fe II'] = dict(clm=np.log10(N), sig_clm=sig/N/np.log(10),flg_clm=1,Z=26,ion=2)
sig = np.sqrt(np.sum((np.array([3.72,1.84,2.36,3.83])*1e11)**2))
ion_dict['Zn II'] = dict(clm=np.log10(2*sig), sig_clm=0.,flg_clm=3,Z=30,ion=2)
sig = np.sqrt(np.sum((np.array([19.4,9.79])*1e11)**2))
ion_dict['Cr II'] = dict(clm=np.log10(2*sig), sig_clm=0.,flg_clm=3,Z=24,ion=2)
# Not including MnII. It appears as a detection but is also given as a limit.
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Prx06a')
return lls
def peroux06b():
'''Peroux, C. et al. 2006b, A&A, 450, 53
SDSS J1323-0021
Metal rich
Metal columns copied by JXP from Table 1
Total NHI from damping wings
'''
# Setup
radec = xor.stod1('J132323.78-002155.2') # SDSS Name
lls = LLSSystem(name='SDSSJ1323-0021_z0.716', RA=radec[0], Dec=radec[1], zem=1.390,
zabs=0.716, vlim=[-200., 200.]*u.km/u.s, NHI=20.21, sigNHI=np.array([0.20,0.20]))
# Parse table file
tab_fil = xa_path+"/data/LLS/peroux06b.tb1.ascii"
with open(tab_fil,'r') as f:
flines = f.readlines()
ion_dict = {}
for iline in flines:
isplit = iline.split('\t')
if len(isplit[0]) == 0:
# Grab ions and init
ions = isplit[3:10]
for ion in ions:
Zion = xai.name_ion(ion)
ion_dict[ion] = dict(clm=0., sig_clm=0.,flg_clm=1,Z=Zion[0],ion=Zion[1])
continue
# Column or sigma?
if isplit[0][0] == 'N': # Column
for kk,iis in enumerate(isplit[3:10]):
ion = ions[kk]
if iis[0] == '>':
ion_dict[ion]['flg_clm'] = 2
ion_dict[ion]['clm'] += float(iis[1:])
elif iis[0] == '<':
pass
elif iis[0] == '.':
pass
else:
ion_dict[ion]['clm'] += float(iis)
else: # Sigma
for kk,iis in enumerate(isplit[3:10]):
ion = ions[kk]
if iis[0] == '.':
pass
else:
ion_dict[ion]['sig_clm'] += float(iis)**2
# Convert to log
for ion in ions:
N = ion_dict[ion]['clm']
sig = np.sqrt(ion_dict[ion]['sig_clm'])
#
ion_dict[ion]['clm'] = np.log10(N)
if ion_dict[ion]['flg_clm'] == 2:
ion_dict[ion]['sig_clm'] = 0.
else:
ion_dict[ion]['sig_clm'] = sig/N/np.log(10)
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Prx06b')
return lls
def meiring06():
'''Meiring et al. 2006, MNRAS, 370, 43
Q1107+0003
Taken from Table 4 by JXP
NHI from RTN06 (damping wings)
RA/DEC from STIS header
'''
# Setup
lls = LLSSystem(name='SDSSJ1107+0003_z0.954', RA=166.90273*u.deg,
Dec=0.05795000*u.deg, zem=1.726,
zabs=0.9542, vlim=[-300., 300.]*u.km/u.s, NHI=20.26, sigNHI=np.array([0.14,0.09]))
# Meiring06, Table 4
ion_dict = {}
ion_dict['Zn II'] = dict(clm=12.08, sig_clm=0.,flg_clm=3,Z=30,ion=2)
ion_dict['Ti II'] = dict(clm=13.01, sig_clm=0.,flg_clm=3,Z=22,ion=2)
ion_dict['Cr II'] = dict(clm=12.76, sig_clm=0.,flg_clm=3,Z=24,ion=2)
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Mei06')
return lls
def meiring07():
'''Meiring et al. 2007, MNRAS, 376, 557
SLLS with Magellan
Abundances from Table 11 from astro-ph (LaTeX) by JXP [AODM]
RA/DEC from Table 1
'''
all_lls = []
# Table 1
tab_fil = xa_path+"/data/LLS/meiring07.tb1.ascii"
with open(tab_fil,'r') as f:
flines1 = f.readlines()
# Grab RA/DEC
qso_dict = {}
for iline in flines1:
if iline[0:2] in ['QS','\h','$\\', 'J2']:
continue
# Parse
isplit = iline.split('&')
#xdb.set_trace()
radec = xor.stod1((isplit[2],isplit[3]))
# zem
if isplit[0].strip() != 'Q0826-2230':
zem = float(isplit[5].strip())
else:
zem = 0.911
# Save
qso_dict[isplit[0].strip()] = dict(RA=radec[0],Dec=radec[1],zem=zem)
# Abundances (AODM)
# Table 11
tab_fil = xa_path+"/data/LLS/meiring07.tb11.ascii"
with open(tab_fil,'r') as f:
flines11 = f.readlines()
#
ion_dict = {}
for iline in flines11:
if iline[0:2] in ['\h',' ']:
continue
# Parse
isplit = iline.split('&')
# Ions
if iline[0:2] == 'QS':
ioncs = []
Zions = []
for iis in isplit[3:-1]: # Skipping HI
# Parse
is2 = iis.split('\\')
ip2 = is2[2].find('}')
ionc = is2[1][2:].strip()+' '+is2[2][0:ip2].strip()
# Zion
Zion = xai.name_ion(ionc)
# Append
ioncs.append(ionc)
Zions.append(Zion)
continue
if iline[0] == 'Q':
# QSO
qso = isplit[0].strip()
# zabs and name
zabs = float(isplit[1].strip())
qso_dict[qso]['name']=qso+'z_{:.3f}'.format(zabs)
qso_dict[qso]['zabs']=zabs
# NHI
is2 = isplit[2].strip()
qso_dict[qso]['NHI'] = float(is2[0:5])
#if qso_dict[qso]['NHI'] >= 20.3:
# print('Uh oh. DLA')
qso_dict[qso]['sigNHI'] = np.array([float(is2[10:])]*2)
# Generate LLS
lls = LLSSystem(**qso_dict[qso])
continue
else:
# AODM Columns
ion_dict = {}
for kk,iis in enumerate(isplit[3:-1]):
is2 = iis.strip()
if is2[0:3] == '$>$':
ion_dict[ioncs[kk]] = dict(sig_clm=0.,flg_clm=2,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[3:])
elif is2[0:3] == '$<$':
ion_dict[ioncs[kk]] = dict(sig_clm=0.,flg_clm=3,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[3:])
elif len(is2) == 0:
pass
else:
ion_dict[ioncs[kk]] = dict(flg_clm=1,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[0:5])
ion_dict[ioncs[kk]]['sig_clm'] = float(is2[10:])
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Mei07')
all_lls.append(lls)
# Return SLLS only
fin_slls = [ills for ills in all_lls if ills.NHI < 20.3]
return fin_slls
def meiring08():
'''Meiring et al. 2008, MNRAS, 384, 1015
SLLS with Magellan
Abundances from Table 3 from astro-ph (LaTeX) by JXP [AODM]
RA/DEC from Table 1
Threw out Q1436-0051B given NHI < 18.8 [i.e. unknown]
'''
all_lls = []
# Table 1
tab_fil = xa_path+"/data/LLS/meiring08.tb1.ascii"
with open(tab_fil,'r') as f:
flines1 = f.readlines()
# Grab RA/DEC
qso_dict = {}
for iline in flines1:
if iline[0:2] in ['QS','\h','$\\', 'J2', ' ']:
continue
# Parse
isplit = iline.split('&')
#xdb.set_trace()
radec = xor.stod1((isplit[3],isplit[4]))
# zem
zem = float(isplit[5].strip())
# Save
qso_dict[isplit[0].strip()] = dict(RA=radec[0],Dec=radec[1],zem=zem)
# Abundances (AODM)
# Table 3
tab_fil = xa_path+"/data/LLS/meiring08.tb3.ascii"
with open(tab_fil,'r') as f:
flines3 = f.readlines()
#
ion_dict = {}
for iline in flines3:
if iline[0:2] in ['\h',' ']:
continue
# Parse
isplit = iline.split('&')
# Ions
if iline[0:3] == ' QS':
ioncs = []
Zions = []
for iis in isplit[3:-1]: # Skipping HI
# Parse
#is2 = iis.split('\\')
#ip2 = is2[2].find('}')
ionc = iis.strip()
# Zion
Zion = xai.name_ion(ionc)
# Append
ioncs.append(ionc)
Zions.append(Zion)
continue
if iline[0:2] == ' Q':
# QSO
qso = isplit[0].strip()
if qso[-1] in ['A','B']:
qso = qso[0:-1]
# zabs and name
zabs = float(isplit[1].strip())
qso_dict[qso]['name']=qso+'z_{:.3f}'.format(zabs)
qso_dict[qso]['zabs']=zabs
# NHI
is2 = isplit[2].strip()
if is2[0] == '$':
qso_dict[qso]['NHI'] = 99.99 # THROW OUT Q1436-0051B
qso_dict[qso]['sigNHI'] = np.array([0.,0.])
else:
qso_dict[qso]['NHI'] = float(is2[0:5])
qso_dict[qso]['sigNHI'] = np.array([float(is2[10:])]*2)
#if qso_dict[qso]['NHI'] >= 20.3:
# print('Uh oh. DLA')
# Generate LLS
lls = LLSSystem(**qso_dict[qso])
continue
else:
# AODM Columns
ion_dict = {}
for kk,iis in enumerate(isplit[3:-1]):
is2 = iis.strip()
if is2[0:3] == '$>$':
ion_dict[ioncs[kk]] = dict(sig_clm=0.,flg_clm=2,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[3:])
elif is2[0:3] == '$<$':
ion_dict[ioncs[kk]] = dict(sig_clm=0.,flg_clm=3,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[3:])
elif len(is2) == 0:
pass
else:
ion_dict[ioncs[kk]] = dict(flg_clm=1,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[0:5])
ion_dict[ioncs[kk]]['sig_clm'] = float(is2[10:])
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Mei08')
all_lls.append(lls)
# Return SLLS only
fin_slls = [ills for ills in all_lls if ills.NHI < 20.3]
return fin_slls
def nestor08():
'''Nestor, D. et al. 2008, MNRAS, 390, 1670-1682
Q2149+212
Taken from Table 1 by JXP
NHI from RTN06 (damping wings)
RA/DEC from STIS header
'''
# Setup
lls = LLSSystem(name='SDSSJ2151+2130_z1.002', RA=327.94096*u.deg,
Dec=21.503750*u.deg, zem=1.534,
zabs=1.0023, vlim=[-300., 300.]*u.km/u.s, NHI=19.30, sigNHI=np.array([0.10,0.10]))
# Nestor08, Table 1
ion_dict = {}
ion_dict['Zn II'] = dict(clm=12.13, sig_clm=0.,flg_clm=3,Z=30,ion=2)
ion_dict['Cr II'] = dict(clm=12.59, sig_clm=0.,flg_clm=3,Z=24,ion=2)
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Nes08')
return lls
def meiring09():
'''Meiring et al. 2009, MNRAS, 393, 1513
SLLS with Magellan
Abundances from Table 3 from astro-ph (LaTeX) by JXP [AODM]
RA/DEC from Table 1
'''
all_lls = []
# Table 1
tab_fil = xa_path+"/data/LLS/meiring09.tb1.ascii"
with open(tab_fil,'r') as f:
flines1 = f.readlines()
# Grab RA/DEC
qso_dict = {}
for iline in flines1:
if iline[0:3] in [' QS','\hl','$\\c', ' J2', ' ']:
continue
# Parse
isplit = iline.split('&')
#xdb.set_trace()
if '$' in isplit[3].strip():
isplit[3] = '-'+(isplit[3].strip())[3:]
radec = xor.stod1((isplit[2],isplit[3]))
# zem
zem = float(isplit[5].strip())
# Save
qso_dict[isplit[0].strip()] = dict(RA=radec[0],Dec=radec[1],zem=zem)
# Abundances (AODM)
# Table 3
tab_fil = xa_path+"/data/LLS/meiring09.tb3.ascii"
with open(tab_fil,'r') as f:
flines3 = f.readlines()
#
ion_dict = {}
for iline in flines3:
if iline[0:2] in ['\h',' ']:
continue
# Parse
isplit = iline.split('&')
# Ions
if iline[0:2] == 'QS':
ioncs = []
Zions = []
for iis in isplit[3:-1]: # Skipping HI
# Parse
#is2 = iis.split('\\')
#ip2 = is2[2].find('}')
ionc = iis.strip()
# Zion
Zion = xai.name_ion(ionc)
# Append
ioncs.append(ionc)
Zions.append(Zion)
continue
if iline[0] == 'Q':
# QSO
qso = isplit[0].strip()
if qso[-1] in ['A','B','C']:
qso = qso[0:-1]
# zabs and name
zabs = float(isplit[1].strip())
qso_dict[qso]['name']=qso+'z_{:.3f}'.format(zabs)
qso_dict[qso]['zabs']=zabs
# NHI
is2 = isplit[2].strip()
if is2[0] == '$':
qso_dict[qso]['NHI'] = 99.99 # NHI not well constrained; flag system to be thrown out
qso_dict[qso]['sigNHI'] = np.array([0.,0.])
else:
qso_dict[qso]['NHI'] = float(is2[0:5])
qso_dict[qso]['sigNHI'] = np.array([float(is2[10:])]*2)
#if qso_dict[qso]['NHI'] >= 20.3:
# print('Uh oh. DLA')
# Generate LLS
lls = LLSSystem(**qso_dict[qso])
continue
else:
# AODM Columns
ion_dict = {}
for kk,iis in enumerate(isplit[3:-1]):
is2 = iis.strip()
if is2[0:3] == '$>$':
ion_dict[ioncs[kk]] = dict(sig_clm=0.,flg_clm=2,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[3:])
elif is2[0:3] == '$<$':
ion_dict[ioncs[kk]] = dict(sig_clm=0.,flg_clm=3,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[3:])
elif len(is2) == 0:
pass
else:
ion_dict[ioncs[kk]] = dict(flg_clm=1,Z=Zions[kk][0],ion=Zions[kk][1])
ion_dict[ioncs[kk]]['clm'] = float(is2[0:5])
ion_dict[ioncs[kk]]['sig_clm'] = float(is2[10:])
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Mei09')
all_lls.append(lls)
# Return SLLS only
fin_slls = [ills for ills in all_lls if ills.NHI < 20.3]
return fin_slls
def dessauges09():
'''Dessauges-Zavadsky et al. 2009, MNRAS, 396, L96
SLLS with UVES
Zn,Fe abundances from Table 1 from astro-ph (LaTeX) by JXP [AODM]
Taken from the Zn/H and Fe/H assuming *no* ionization corrections
RA/DEC from the 'other' name
'''
# Solar abundances
eZn = 4.63
eFe = 7.45
sol = [eFe,eZn]
#
all_lls = []
# Table 1
tab_fil = xa_path+"/data/LLS/dessauges09.tb1.ascii"
with open(tab_fil,'r') as f:
flines1 = f.readlines()
# Trim the first few lines
flines1 = flines1[3:]
ion_dict = {}
for iline in flines1:
# Parse
isplit = iline.split('&')
# QSO
if iline[0:2] == 'QS':
# QSO, RA/DEC, zem
qso = isplit[0][4:].strip()
radec = xor.stod1(isplit[1].strip().replace('$',''))
zem = float(isplit[3].strip())
# NHI, zabs
zabs = float(isplit[4].strip())
is2 = isplit[6].strip()
NHI = float(is2[1:6])
sigNHI = np.array([float(is2[10:14])]*2)
# name
name = qso+'z_{:.3f}'.format(zabs)
lls = LLSSystem(name=name, RA=radec[0], Dec=radec[1],
zem=zem, zabs=zabs, NHI=NHI, sigNHI=sigNHI)
# AODM Columns
ion_dict = {}
for kk,ion in enumerate(['Fe II','Zn II']):
Zion = xai.name_ion(ion)
is2 = isplit[7+kk].strip()
if is2[0:2] == '$>':
ion_dict[ion] = dict(sig_clm=0.,flg_clm=2,Z=Zion[0],ion=Zion[1])
ion_dict[ion]['clm'] = float(is2[2:7]) + NHI - 12 + sol[kk]
elif is2[0:2] == '$<':
ion_dict[ion] = dict(sig_clm=0.,flg_clm=3,Z=Zion[0],ion=Zion[1])
ion_dict[ion]['clm'] = float(is2[2:7]) + NHI - 12 + sol[kk]
elif is2[0:2] == '..':
pass
else:
ion_dict[ion] = dict(flg_clm=1,Z=Zion[0],ion=Zion[1])
ion_dict[ion]['clm'] = float(is2[1:6]) + NHI - 12 + sol[kk]
ion_dict[ion]['sig_clm'] = float(is2[10:14])
#xdb.set_trace()
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('DZ09')
all_lls.append(lls)
# Return SLLS only
fin_slls = [ills for ills in all_lls if ills.NHI < 20.3]
return fin_slls
def tumlinson11():
'''Tumlinson, J. et al. 2011, ApJ, 733, 111
J1009+0713
HST/COS
Metal columns parsed from Table 1
NHI from LL+Lyman series (uncertain)
'''
# Grab ASCII file from ApJ
tab_fil = xa_path+"/data/LLS/tumlinson11.tb1.ascii"
url = 'http://iopscience.iop.org/0004-637X/733/2/111/suppdata/apj388927t1_ascii.txt'
chk_fil = glob.glob(tab_fil)
if len(chk_fil) > 0:
tab_fil = chk_fil[0]
else:
print('LLSSurvey: Grabbing table file from {:s}'.format(url))
f = urllib2.urlopen(url)
with open(tab_fil, "wb") as code:
code.write(f.read())
# Setup
radec = xor.stod1('J100902.06+071343.8') # From paper
lls = LLSSystem(name='J1009+0713_z0.356', RA=radec[0], Dec=radec[1], zem=0.456,
zabs=0.3558, vlim=[-200., 250.]*u.km/u.s, NHI=18.4,
sigNHI=np.array([0.41,0.41]))
#lls.mk_subsys(2)
# Columns
# Start with Table 3 (VPFIT)
with open(tab_fil,'r') as f:
flines1 = f.readlines()
# Trim
flines1 = flines1[18:]
#
ion_dict = {}
line_dict = dict(OI='1302',OVI='1038',MgII='2803^b',SiII='1190',
CaII='3934',FeII='2586')
ion = None
for iline in flines1:
isplit = iline.split('\t')
if ion=='FeIII': # Last line
break
# Ion
is2 = isplit[0].split(' ')
ion = is2[0]+is2[1]
try:
gdl = line_dict[ion]
except KeyError:
pass
#print('Taking {:s}'.format(isplit[0]))
else:
if is2[2] != gdl:
continue
Zion = xai.name_ion(ion)
ion_dict[ion] = dict(clm=0., sig_clm=0., flg_clm=0, Z=Zion[0],ion=Zion[1])
# Combine components [could replace with SubSystems some day]
for iis in isplit[1:-1]:
# Upper limit
if (iis.strip()[0] == '<') & (ion_dict[ion]['flg_clm']==0):
ion_dict[ion]['flg_clm']=3
ion_dict[ion]['clm']=float(iis[1:])
elif (iis.strip()[0] == '>'): # Saturated
ion_dict[ion]['flg_clm']=2
ion_dict[ion]['clm']=log_sum([ion_dict[ion]['clm'],float(iis[1:5])])
elif iis.strip()[0] in ['.','<']:
pass
else:
if ion_dict[ion]['flg_clm']==2: # Add to saturated
ion_dict[ion]['clm']=log_sum([ion_dict[ion]['clm'],float(iis[0:4])])
else:
ion_dict[ion]['flg_clm']=1
obj = dict(clm=float(iis[0:4]),sig_clm=float(iis[-4:]))
# Add
N,sig = xiai.sum_logN(ion_dict[ion],obj)
ion_dict[ion]['clm']=N
ion_dict[ion]['sig_clm']=sig
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Tum11')
return lls
def kacprzak12():
'''Kacprzak, G. et al. 2012, MNRAS, 427, 3029-3043
TON 153
Taken from Table 1 by JXP
NHI from Churchill+2007
RA/DEC from Simbad
'''
# Setup
radec = xor.stod1('J131956.2209+272808.271')
lls = LLSSystem(name='TON153_z1.002', RA=radec[0], Dec=radec[1], zem=0.6610,
zabs=1.0023, vlim=[-250., 200.]*u.km/u.s, NHI=18.30, sigNHI=np.array([0.30,0.30]))
# Table 1 (total)
ion_dict = {}
ion_dict['Mg II'] = dict(clm=13.11, sig_clm=0.07,flg_clm=1,Z=12,ion=2)
ion_dict['Mg I'] = dict(clm=11.54, sig_clm=0.06,flg_clm=1,Z=12,ion=1)
ion_dict['Si I'] = dict(clm=11.8, sig_clm=0.00,flg_clm=3,Z=14,ion=1)
ion_dict['Si II'] = dict(clm=13.16, sig_clm=0.11,flg_clm=1,Z=14,ion=2)
ion_dict['Si IV'] = dict(clm=12.4, sig_clm=0.0,flg_clm=3,Z=14,ion=4)
ion_dict['C II'] = dict(clm=13.39, sig_clm=0.0,flg_clm=2,Z=6,ion=2)
ion_dict['C III'] = dict(clm=14.20, sig_clm=0.05,flg_clm=1,Z=6,ion=3)
ion_dict['C IV'] = dict(clm=14.41, sig_clm=0.05,flg_clm=1,Z=6,ion=4)
ion_dict['O VI'] = dict(clm=14.49, sig_clm=0.05,flg_clm=1,Z=8,ion=6)
# Finish
lls._ionclms = IonClms(idict=ion_dict)
lls.Refs.append('Kcz12')
return lls
def battisti12():
'''Battisti, A. et al. 2012, ApJ, 744, 93
HST/COS
QSO info from Table 1
Metal columns parsed from Table 3
NHI from Lya
'''
all_lls = []
# Grab ASCII files from ApJ
tab_fils = [xa_path+"/data/LLS/battisti12.tb1.ascii", xa_path+"/data/LLS/battisti12.tb3.ascii"]
urls = ['http://iopscience.iop.org/0004-637X/744/2/93/suppdata/apj413924t1_ascii.txt',
'http://iopscience.iop.org/0004-637X/744/2/93/suppdata/apj413924t3_ascii.txt']
for jj,tab_fil in enumerate(tab_fils):
chk_fil = glob.glob(tab_fil)
if len(chk_fil) > 0:
tab_fil = chk_fil[0]
else:
url = urls[jj]
print('LLSSurvey: Grabbing table file from {:s}'.format(url))
f = urllib2.urlopen(url)
with open(tab_fil, "wb") as code:
code.write(f.read())
# QSO info
with open(tab_fils[0],'r') as f:
flines1 = f.readlines()
# Grab RA/DEC
all_idict = []
for iline in flines1:
if iline[0:2] != 'SD':
continue
# Parse
isplit = iline.split('\t')
name = isplit[0].split(' ')[1]
radec = xor.stod1(name)
zem = float(isplit[1].strip())
zabs = float(isplit[2].strip())
NHI = float(isplit[3].strip()[0:4])
sigNHI = np.array([float(isplit[3].strip()[11:])]*2)
# Save
lls = LLSSystem(name=name,RA=radec[0],Dec=radec[1],zem=zem,
zabs=zabs,NHI=NHI,sigNHI=sigNHI)
#
all_lls.append(lls)
all_idict.append({})
# Abundances
with open(tab_fils[1],'r') as f:
flines3 = f.readlines()
flines3 = flines3[5:]
ion = None
for iline in flines3:
if ion == 'Ni II':
break
isplit = iline.split('\t')
if isplit[0] == 'C II*': # Skipping CII*
continue
# ion
ipos = -1
while (isplit[0][ipos] not in ['I','V']):
ipos -= 1
ion = isplit[0][0:ipos+1+len(isplit[0])]
Zion = xai.name_ion(ion)
# Loop on systems
for kk,iis in enumerate(isplit[1:-1]):
if iis.strip()[0] == '.':
continue
all_idict[kk][ion] = dict(Z=Zion[0], ion=Zion[1],sig_clm=0.)
if iis[0] == '>':
all_idict[kk][ion]['flg_clm'] = 2
all_idict[kk][ion]['clm'] = float(iis[1:6])
elif iis[0] == '<':
all_idict[kk][ion]['flg_clm'] = 3
all_idict[kk][ion]['clm'] = float(iis[1:])
else:
all_idict[kk][ion]['flg_clm'] = 1
all_idict[kk][ion]['clm'] = float(iis[0:5])
all_idict[kk][ion]['sig_clm'] = float(iis[-4:])
# Return SLLS only
for kk,lls in enumerate(all_lls):
try:
lls._ionclms = IonClms(idict=all_idict[kk])
except ValueError:
xdb.set_trace()
lls.Refs.append('Bat12')
fin_slls = [ills for ills in all_lls if ills.NHI < 20.3]
return fin_slls
#####
def log_sum(logN):
'''Sum up logN values and return the log of the sum
'''
Nsum = np.sum(10.**np.array(logN))
return np.log10(Nsum)
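# Worked example (added comment; simple arithmetic check, not taken from the paper data):
# combining two equal column densities of logN = 14.0 gives
#   log_sum([14.0, 14.0]) = log10(10**14.0 + 10**14.0) = 14.0 + log10(2) ~ 14.301
# This is how the per-component columns above (e.g. in zonak2004) are combined into a
# single total column density.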
######
if __name__ == '__main__':
flg_test = 0
flg_test += 2**0 # Zonak2004, Jenkins2005
# Test ions
if (flg_test % 2**1) >= 2**0:
#lls = battisti12()
#lls = kacprzak12()
#lls = tumlinson11()
#lls = dessauges09()
#lls = meiring09()
#lls = nestor08()
#lls = meiring08()
#lls = meiring07()
#lls = meiring06()
lls = peroux06b()
#lls = peroux06a()
#lls = tripp2005()
#lls = jenkins2005()
#lls = zonak2004()
print(lls)
#xdb.set_trace()
# Plot the LLS
|
|
from importlib import import_module
import itertools
import os
import re
from django.conf import global_settings, settings
from django.contrib.sites.models import Site, RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils._os import upath
from django.test import TestCase, override_settings
from django.test.utils import patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view
@override_settings(
LANGUAGES=(
('en', 'English'),
),
LANGUAGE_CODE='en',
TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
USE_TZ=False,
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the following test cases.
"""
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.tests.urls'
def login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertTrue(SESSION_KEY in self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertTrue(SESSION_KEY not in self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
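# Illustrative use of the helper above (hedged example, not an actual test case):
# with parse_qs=True the comparison ignores query-string ordering, so
#   self.assertURLEqual('/login/?next=/x/&foo=1', '/login/?foo=1&next=/x/', parse_qs=True)
# passes, while the same call with parse_qs=False would fail because the raw
# 'query' components differ as strings.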
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
urls = 'django.contrib.auth.urls'
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optional multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertTrue('<html>' not in message.get_payload(0).get_payload())
self.assertTrue('<html>' in message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf rejects a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/done/')
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/reset/done/')
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# #16919 -- The ``password_reset_confirm`` view should pass the user
# object to the ``SetPasswordForm``, even on GET requests.
# For this test, we render ``{{ form.user }}`` in the template
# ``registration/password_reset_confirm.html`` so that we can test this.
username = User.objects.get(email='staffmember@example.com').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
fixtures = ['custom_user.json']
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if Site._meta.installed:
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
'Login form is not an AuthenticationForm')
def test_security_check(self, password='password'):
login_url = reverse('login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response.url,
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'HTTPS:///',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response.url,
"%s should be allowed" % good_url)
def test_login_form_contains_request(self):
# 15198
self.client.post('/custom_requestauth_login/', {
'username': 'testclient',
'password': 'password',
}, follow=True)
# the custom authentication form used by this login asserts
# that a request is passed to the form successfully.
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["CSRF_COOKIE_USED"] = True
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertTrue(SESSION_KEY not in self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertTrue('site' in response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response.url,
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'HTTPS:///',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response.url,
"%s should be allowed" % good_url)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Check that language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session['_language'] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session['_language'], 'pl')
@skipIfCustomUser
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class ChangelistTests(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls_admin'
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=1)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
self.get_user_data(self.admin)
)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
|
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import atexit
import cStringIO as StringIO
from collections import namedtuple
from functools import partial
import glob
try:
import grp
except ImportError:
# The module only exists on Unix platforms
grp = None
import logging
import os
try:
import pwd
except ImportError:
# Same as above (exists on Unix platforms only)
pwd = None
import re
import stat
import subprocess
import sys
import tarfile
import tempfile
from time import strftime
import traceback
# 3p
import requests
# DD imports
from checks.check_status import CollectorStatus, DogstatsdStatus, ForwarderStatus
from config import (
get_confd_path,
get_config,
get_config_path,
get_logging_config,
get_ssl_certificate,
get_url_endpoint,
)
from jmxfetch import JMXFetch
from utils.hostname import get_hostname
from utils.jmx import jmx_command, JMXFiles
from utils.platform import Platform
from utils.configcheck import configcheck, sd_configcheck
# Globals
log = logging.getLogger(__name__)
class Flare(object):
"""
Compress all important logs and configuration files for debugging,
and then send them to Datadog (which transfers them to Support)
"""
DATADOG_SUPPORT_URL = '/support/flare'
CredentialPattern = namedtuple('CredentialPattern', ['pattern', 'replacement', 'label'])
CHECK_CREDENTIALS = [
CredentialPattern(
re.compile('( *(\w|_)*pass(word)?:).+'),
r'\1 ********',
'password'
),
CredentialPattern(
re.compile('(.*\ [A-Za-z0-9]+)\:\/\/([A-Za-z0-9_]+)\:(.+)\@'),
r'\1://\2:********@',
'password in a uri'
),
]
MAIN_CREDENTIALS = [
CredentialPattern(
re.compile('^api_key:( *\w+(\w{5}) ?,?)+$'),
lambda matchobj: 'api_key: ' + ', '.join(map(
lambda key: '*' * 26 + key[-5:],
map(lambda x: x.strip(),
matchobj.string.split(':')[1].split(',')
)
)),
'api_key'
),
CredentialPattern(
re.compile('^(proxy_user|proxy_password): *.+'),
r'\1: ********',
'proxy credentials'
),
]
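# Illustrative examples (hypothetical config lines, not taken from any real
# agent install) of what these scrubbing patterns do before a file is added
# to the flare:
#   CHECK_CREDENTIALS:
#     "  db_password: hunter2"           ->  "  db_password: ********"
#     "  server: mongodb://bob:pw@host"  ->  "  server: mongodb://bob:********@host"
#   MAIN_CREDENTIALS:
#     "api_key: abcdef0123456789abcdef0123456789"
#         ->  "api_key: " + "*" * 26 + "56789"   (only the last 5 chars kept)
#     "proxy_password: s3cret"           ->  "proxy_password: ********"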
COMMENT_REGEX = re.compile('^ *#.*')
COMPRESSED_FILE = 'datadog-agent-{0}.tar.bz2'
# We limit to 10MB arbitrarily
MAX_UPLOAD_SIZE = 10485000
TIMEOUT = 60
def __init__(self, cmdline=False, case_id=None):
self._case_id = case_id
self._cmdline = cmdline
self._init_tarfile()
self._init_permissions_file()
self._save_logs_path()
self._config = get_config()
self._api_key = self._config.get('api_key')
self._url = "{0}{1}".format(
get_url_endpoint(self._config.get('dd_url'), endpoint_type='flare'),
self.DATADOG_SUPPORT_URL
)
self._hostname = get_hostname(self._config)
self._prefix = "datadog-{0}".format(self._hostname)
# On Unix systems, check that the user is root (to call supervisorctl & status)
# Otherwise emit a warning, and ask for confirmation
@staticmethod
def check_user_rights():
if Platform.is_linux() and not os.geteuid() == 0:
log.warning("You are not root, some information won't be collected")
choice = raw_input('Are you sure you want to continue [y/N]? ')
if choice.strip().lower() not in ['yes', 'y']:
print 'Aborting'
sys.exit(1)
else:
log.warn('Your user has to have at least read access'
' to the logs and conf files of the agent')
# Collect all conf and logs files and compress them
def collect(self):
if not self._api_key:
raise Exception('No api_key found')
log.info("Collecting logs and configuration files:")
with self._open_tarfile():
self._collect()
log.info("Saving all files to {0}".format(self.tar_path))
# Actual collection. The tar file must be open
def _collect(self):
self._add_logs_tar()
self._add_conf_tar()
log.info(" * datadog-agent configcheck output")
self._add_command_output_tar('configcheck.log', configcheck)
log.info(" * service discovery configcheck output")
self._add_command_output_tar('sd_configcheck.log', sd_configcheck, agentConfig=self._config)
log.info(" * datadog-agent status output")
self._add_command_output_tar('status.log', self._supervisor_status)
log.info(" * datadog-agent info output")
self._add_command_output_tar('info.log', self._info_all)
self._add_jmxinfo_tar()
log.info(" * pip freeze")
self._add_command_output_tar('freeze.log', self._pip_freeze,
command_desc="pip freeze --no-cache-dir")
log.info(" * log permissions on collected files")
self._permissions_file.close()
self._add_file_tar(self._permissions_file.name, 'permissions.log',
log_permissions=False)
# Set the proxy settings, if they exist
def set_proxy(self, options):
proxy_settings = self._config.get('proxy_settings')
if proxy_settings is None:
return
userpass = ''
if proxy_settings.get('user'):
userpass = "%s:%s@" % (proxy_settings.get('user'),
proxy_settings.get('password'),)
url = "http://%s%s:%s" % (userpass, proxy_settings.get('host'),
proxy_settings.get('port'),)
options['proxies'] = {
"https": url
}
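# For illustration (hypothetical values): with proxy_settings of
# {'user': 'jane', 'password': 's3cret', 'host': 'proxy.local', 'port': 3128},
# set_proxy() sets options['proxies'] to
# {'https': 'http://jane:s3cret@proxy.local:3128'}.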
# Set whether to ignore invalid ssl certs or not
def set_ssl_validation(self, options):
if self._config.get('skip_ssl_validation', False):
options['verify'] = False
elif Platform.is_windows():
options['verify'] = get_ssl_certificate('windows', 'datadog-cert.pem')
# Upload the tar file
def upload(self, email=None):
self._check_size()
if self._cmdline:
self._ask_for_confirmation()
if not email:
email = self._ask_for_email()
log.info("Uploading {0} to Datadog Support".format(self.tar_path))
url = self._url
if self._case_id:
url = '{0}/{1}'.format(self._url, str(self._case_id))
url = "{0}?api_key={1}".format(url, self._api_key)
with open(self.tar_path, 'rb') as flare_file:
requests_options = {
'data': {
'case_id': self._case_id,
'hostname': self._hostname,
'email': email
},
'files': {'flare_file': flare_file},
'timeout': self.TIMEOUT
}
self.set_proxy(requests_options)
self.set_ssl_validation(requests_options)
self._resp = requests.post(url, **requests_options)
self._analyse_result()
return self._case_id
# Start by preparing the tar file which will contain everything
def _init_tarfile(self):
# Default temp path
self.tar_path = os.path.join(
tempfile.gettempdir(),
self.COMPRESSED_FILE.format(strftime("%Y-%m-%d-%H-%M-%S"))
)
if os.path.exists(self.tar_path):
os.remove(self.tar_path)
# Open the tar file (context manager) and return it
def _open_tarfile(self):
self._tar = tarfile.open(self.tar_path, 'w:bz2')
return self._tar
# Create a file to log permissions on collected files and write header line
def _init_permissions_file(self):
self._permissions_file = tempfile.NamedTemporaryFile(mode='w', prefix='dd', delete=False)
if Platform.is_unix():
self._permissions_file_format = "{0:50} | {1:5} | {2:10} | {3:10}\n"
header = self._permissions_file_format.format("File path", "mode", "owner", "group")
self._permissions_file.write(header)
self._permissions_file.write('-'*len(header) + "\n")
else:
self._permissions_file.write("Not implemented: file permissions are only logged on Unix platforms")
# Save logs file paths
def _save_logs_path(self):
prefix = ''
if Platform.is_windows():
prefix = 'windows_'
config = get_logging_config()
self._collector_log = config.get('{0}collector_log_file'.format(prefix))
self._forwarder_log = config.get('{0}forwarder_log_file'.format(prefix))
self._dogstatsd_log = config.get('{0}dogstatsd_log_file'.format(prefix))
self._jmxfetch_log = config.get('jmxfetch_log_file')
self._gometro_log = config.get('go-metro_log_file')
# Add logs to the tarfile
def _add_logs_tar(self):
self._add_log_file_tar(self._collector_log)
self._add_log_file_tar(self._forwarder_log)
self._add_log_file_tar(self._dogstatsd_log)
self._add_log_file_tar(self._jmxfetch_log)
self._add_log_file_tar(self._gometro_log)
self._add_log_file_tar(
"{0}/*supervisord.log".format(os.path.dirname(self._collector_log))
)
def _add_log_file_tar(self, file_path):
for f in glob.glob('{0}*'.format(file_path)):
if self._can_read(f):
self._add_file_tar(
f,
os.path.join('log', os.path.basename(f))
)
# Collect all conf
def _add_conf_tar(self):
conf_path = get_config_path()
if self._can_read(conf_path, output=False):
self._add_clean_conf(
conf_path,
'etc',
self.MAIN_CREDENTIALS
)
if not Platform.is_windows():
supervisor_path = os.path.join(
os.path.dirname(get_config_path()),
'supervisor.conf'
)
if self._can_read(supervisor_path, output=False):
self._add_clean_conf(
supervisor_path,
'etc'
)
for file_path in glob.glob(os.path.join(get_confd_path(), '*.yaml')) +\
glob.glob(os.path.join(get_confd_path(), '*.yaml.default')):
if self._can_read(file_path, output=False):
self._add_clean_conf(
file_path,
os.path.join('etc', 'confd'),
self.CHECK_CREDENTIALS
)
# Collect JMXFetch-specific info and save to jmxinfo directory if jmx config
# files are present and valid
def _add_jmxinfo_tar(self):
_, _, should_run_jmx = self._capture_output(self._should_run_jmx)
if should_run_jmx:
# status files (before listing beans because executing jmxfetch overwrites status files)
for file_name, file_path in [
(JMXFiles._STATUS_FILE, JMXFiles.get_status_file_path()),
(JMXFiles._PYTHON_STATUS_FILE, JMXFiles.get_python_status_file_path())
]:
if self._can_read(file_path, warn=False):
self._add_file_tar(
file_path,
os.path.join('jmxinfo', file_name)
)
# beans lists
for command in ['list_matching_attributes', 'list_everything']:
log.info(" * datadog-agent jmx {0} output".format(command))
self._add_command_output_tar(
os.path.join('jmxinfo', '{0}.log'.format(command)),
partial(self._jmx_command_call, command)
)
# java version
log.info(" * java -version output")
_, _, java_bin_path = self._capture_output(
lambda: JMXFetch.get_configuration(get_confd_path())[2] or 'java')
self._add_command_output_tar(
os.path.join('jmxinfo', 'java_version.log'),
lambda: self._java_version(java_bin_path),
command_desc="{0} -version".format(java_bin_path)
)
# Add a file to the tar and append the file's rights to the permissions log (on Unix)
# If original_file_path is passed, the file_path will be added to the tar but the original file's
# permissions are logged
def _add_file_tar(self, file_path, target_path, log_permissions=True, original_file_path=None):
target_full_path = os.path.join(self._prefix, target_path)
if log_permissions and Platform.is_unix():
stat_file_path = original_file_path or file_path
file_stat = os.stat(stat_file_path)
# The file mode is returned in binary format, convert it to a more readable octal string
mode = oct(stat.S_IMODE(file_stat.st_mode))
try:
uname = pwd.getpwuid(file_stat.st_uid).pw_name
except KeyError:
uname = str(file_stat.st_uid)
try:
gname = grp.getgrgid(file_stat.st_gid).gr_name
except KeyError:
gname = str(file_stat.st_gid)
self._permissions_file.write(self._permissions_file_format.format(stat_file_path, mode, uname, gname))
self._tar.add(file_path, target_full_path)
# Returns whether JMXFetch should run or not
def _should_run_jmx(self):
jmx_process = JMXFetch(get_confd_path(), self._config)
jmx_process.configure(clean_status_file=False)
return jmx_process.should_run()
# Check if the file is readable (and log it)
@classmethod
def _can_read(cls, f, output=True, warn=True):
if os.access(f, os.R_OK):
if output:
log.info(" * {0}".format(f))
return True
else:
if warn:
log.warn(" * not readable - {0}".format(f))
return False
def _add_clean_conf(self, file_path, target_dir, credential_patterns=None):
basename = os.path.basename(file_path)
temp_path, log_message = self._strip_credentials(file_path, credential_patterns)
log.info(' * {0}{1}'.format(file_path, log_message))
self._add_file_tar(
temp_path,
os.path.join(target_dir, basename),
original_file_path=file_path
)
# Return path to a temp file without comments on which the credential patterns have been applied
def _strip_credentials(self, file_path, credential_patterns=None):
if not credential_patterns:
credential_patterns = []
credentials_found = set()
fh, temp_path = tempfile.mkstemp(prefix='dd')
atexit.register(os.remove, temp_path)
with os.fdopen(fh, 'w') as temp_file:
with open(file_path, 'r') as orig_file:
for line in orig_file.readlines():
if not self.COMMENT_REGEX.match(line):
clean_line, credential_found = self._clean_credentials(line, credential_patterns)
temp_file.write(clean_line)
if credential_found:
credentials_found.add(credential_found)
credentials_log = ''
if len(credentials_found) > 1:
credentials_log = ' - this file contains credentials ({0}) which'\
' have been removed in the collected version'\
.format(', '.join(credentials_found))
elif len(credentials_found) == 1:
credentials_log = ' - this file contains a credential ({0}) which'\
' has been removed in the collected version'\
.format(credentials_found.pop())
return temp_path, credentials_log
# Remove credentials from a given line
def _clean_credentials(self, line, credential_patterns):
credential_found = None
for credential_pattern in credential_patterns:
if credential_pattern.pattern.match(line):
line = re.sub(credential_pattern.pattern, credential_pattern.replacement, line)
credential_found = credential_pattern.label
# only one pattern should match per line
break
return line, credential_found
# Add output of the command to the tarfile
def _add_command_output_tar(self, name, command, command_desc=None, **kwargs):
out, err, _ = self._capture_output(command, print_exc_to_stderr=False, **kwargs)
fh, temp_path = tempfile.mkstemp(prefix='dd')
with os.fdopen(fh, 'w') as temp_file:
if command_desc:
temp_file.write(">>>> CMD <<<<\n")
temp_file.write(command_desc)
temp_file.write("\n")
temp_file.write(">>>> STDOUT <<<<\n")
temp_file.write(out.getvalue())
out.close()
temp_file.write(">>>> STDERR <<<<\n")
temp_file.write(err.getvalue())
err.close()
self._add_file_tar(temp_path, name, log_permissions=False)
os.remove(temp_path)
# Capture the output of a command (from both std streams and loggers) and the
# value returned by the command
def _capture_output(self, command, print_exc_to_stderr=True, **kwargs):
backup_out, backup_err = sys.stdout, sys.stderr
out, err = StringIO.StringIO(), StringIO.StringIO()
backup_handlers = logging.root.handlers[:]
logging.root.handlers = [logging.StreamHandler(out)]
sys.stdout, sys.stderr = out, err
return_value = None
try:
return_value = command(**kwargs)
except Exception:
# Print the exception to either stderr or `err`
traceback.print_exc(file=backup_err if print_exc_to_stderr else err)
finally:
# Stop capturing in a `finally` block to reset std streams' and loggers'
# behaviors no matter what
sys.stdout, sys.stderr = backup_out, backup_err
logging.root.handlers = backup_handlers
return out, err, return_value
# Print supervisor status (prints a short notice on Windows, where it is not implemented)
def _supervisor_status(self):
if Platform.is_windows():
print 'Windows - status not implemented'
else:
agent_exec = self._get_path_agent_exec()
print '{0} status'.format(agent_exec)
self._print_output_command([agent_exec, 'status'])
supervisor_exec = self._get_path_supervisor_exec()
print '{0} status'.format(supervisor_exec)
self._print_output_command([supervisor_exec,
'-c', self._get_path_supervisor_conf(),
'status'])
# Find the agent exec (package or source)
def _get_path_agent_exec(self):
if Platform.is_mac():
agent_exec = '/opt/datadog-agent/bin/datadog-agent'
else:
agent_exec = '/etc/init.d/datadog-agent'
if not os.path.isfile(agent_exec):
agent_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../bin/agent'
)
return agent_exec
# Find the supervisor exec (package or source)
def _get_path_supervisor_exec(self):
supervisor_exec = '/opt/datadog-agent/bin/supervisorctl'
if not os.path.isfile(supervisor_exec):
supervisor_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../venv/bin/supervisorctl'
)
return supervisor_exec
# Find the supervisor conf (package or source)
def _get_path_supervisor_conf(self):
if Platform.is_mac():
supervisor_conf = '/opt/datadog-agent/etc/supervisor.conf'
else:
supervisor_conf = '/etc/dd-agent/supervisor.conf'
if not os.path.isfile(supervisor_conf):
supervisor_conf = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../agent/supervisor.conf'
)
return supervisor_conf
# Print output of command
def _print_output_command(self, command):
try:
status = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
status = 'Unable to get command output, exit code {0}, output:\n'\
'{1}'.format(str(e.returncode), e.output)
print status
# Print info of all agent components
def _info_all(self):
CollectorStatus.print_latest_status(verbose=True)
DogstatsdStatus.print_latest_status(verbose=True)
ForwarderStatus.print_latest_status(verbose=True)
# Call jmx_command with std streams redirection
def _jmx_command_call(self, command):
try:
jmx_command([command], self._config, redirect_std_streams=True)
except Exception as e:
print "Unable to call jmx command {0}: {1}".format(command, e)
# Print java version
def _java_version(self, java_bin_path):
try:
self._print_output_command([java_bin_path, '-version'])
except OSError:
print 'Unable to execute java bin with command: {0}'.format(java_bin_path)
# Run a pip freeze
def _pip_freeze(self):
try:
import pip
pip.main(['freeze', '--no-cache-dir'])
except ImportError:
print 'Unable to import pip'
# Check if the file is not too big before upload
def _check_size(self):
if os.path.getsize(self.tar_path) > self.MAX_UPLOAD_SIZE:
log.info("{0} won't be uploaded, its size is too important.\n"
"You can send it directly to support by email.")
sys.exit(1)
# Function to ask for confirmation before upload
def _ask_for_confirmation(self):
print '{0} is going to be uploaded to Datadog.'.format(self.tar_path)
choice = raw_input('Do you want to continue [Y/n]? ')
if choice.strip().lower() not in ['yes', 'y', '']:
print 'Aborting (you can still use {0})'.format(self.tar_path)
sys.exit(1)
# Ask for email if needed
def _ask_for_email(self):
# We ask every time now, as the email also serves as an identifier to
# check that the existing case, if any, is the right one
return raw_input('Please enter your email: ').lower()
# Print output (success/error) of the request
def _analyse_result(self):
# First catch our custom explicit 400
if self._resp.status_code == 400:
raise Exception('Your request is incorrect: {0}'.format(self._resp.json()['error']))
# Then raise potential 500 and 404
self._resp.raise_for_status()
try:
self._case_id = self._resp.json()['case_id']
# Failed parsing
except ValueError:
raise Exception('An unknown error has occurred - '
'Please contact support by email')
# Finally, correct
log.info("Your logs were successfully uploaded. For future reference,"
" your internal case id is {0}".format(self._case_id))
|
|
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""""A task where players play a soccer game."""
from dm_control import composer
from dm_control.locomotion.soccer import initializers
from dm_control.locomotion.soccer import observables as observables_lib
from dm_control.locomotion.soccer import soccer_ball
from dm_env import specs
import numpy as np
_THROW_IN_BALL_Z = 0.5
def _disable_geom_contacts(entities):
for entity in entities:
mjcf_model = entity.mjcf_model
for geom in mjcf_model.find_all("geom"):
geom.set_attributes(contype=0)
class Task(composer.Task):
"""A task where two teams of walkers play soccer."""
def __init__(
self,
players,
arena,
ball=None,
initializer=None,
observables=None,
disable_walker_contacts=False,
nconmax_per_player=200,
njmax_per_player=200,
control_timestep=0.025,
tracking_cameras=(),
):
"""Construct an instance of soccer.Task.
This task implements the high-level game logic of multi-agent MuJoCo soccer.
Args:
players: a sequence of `soccer.Player` instances, representing
participants to the game from both teams.
arena: an instance of `soccer.Pitch`, implementing the physical geoms and
the sensors associated with the pitch.
ball: optional instance of `soccer.SoccerBall`, implementing the physical
geoms and sensors associated with the soccer ball. If None, defaults to
using `soccer_ball.SoccerBall()`.
initializer: optional instance of `soccer.Initializer` that initializes
the task at the start of each episode. If None, defaults to
`initializers.UniformInitializer()`.
observables: optional instance of `soccer.ObservablesAdder` that adds
observables for each player. If None, defaults to
`observables.CoreObservablesAdder()`.
disable_walker_contacts: if `True`, disable physical contacts between
players.
nconmax_per_player: allocated maximum number of contacts per player. It
may be necessary to increase this value if you encounter errors due to
`mjWARN_CONTACTFULL`.
njmax_per_player: allocated maximum number of scalar constraints per
player. It may be necessary to increase this value if you encounter
errors due to `mjWARN_CNSTRFULL`.
control_timestep: control timestep of the agent.
tracking_cameras: a sequence of `camera.MultiplayerTrackingCamera`
instances to track the players and ball.
"""
self.arena = arena
self.players = players
self._initializer = initializer or initializers.UniformInitializer()
self._observables = observables or observables_lib.CoreObservablesAdder()
if disable_walker_contacts:
_disable_geom_contacts([p.walker for p in self.players])
# Create ball and attach ball to arena.
self.ball = ball or soccer_ball.SoccerBall()
self.arena.add_free_entity(self.ball)
self.arena.register_ball(self.ball)
# Register soccer ball contact tracking for players.
for player in self.players:
player.walker.create_root_joints(self.arena.attach(player.walker))
self.ball.register_player(player)
# Add per-walker observables.
self._observables(self, player)
self._tracking_cameras = tracking_cameras
self.set_timesteps(
physics_timestep=0.005, control_timestep=control_timestep)
self.root_entity.mjcf_model.size.nconmax = nconmax_per_player * len(players)
self.root_entity.mjcf_model.size.njmax = njmax_per_player * len(players)
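# For example, a 2-vs-2 game with the default per-player budgets gives
# nconmax = njmax = 200 * 4 = 800.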
@property
def observables(self):
observables = []
for player in self.players:
observables.append(
player.walker.observables.as_dict(fully_qualified=False))
return observables
def _throw_in(self, physics, random_state, ball):
x, y, _ = physics.bind(ball.geom).xpos
shrink_x, shrink_y = random_state.uniform([0.7, 0.7], [0.9, 0.9])
ball.set_pose(physics, [x * shrink_x, y * shrink_y, _THROW_IN_BALL_Z])
ball.set_velocity(
physics, velocity=np.zeros(3), angular_velocity=np.zeros(3))
ball.initialize_entity_trackers()
def _tracked_entity_positions(self, physics):
"""Return a list of the positions of the ball and all players."""
ball_pos, unused_ball_quat = self.ball.get_pose(physics)
entity_positions = [ball_pos]
for player in self.players:
walker_pos, unused_walker_quat = player.walker.get_pose(physics)
entity_positions.append(walker_pos)
return entity_positions
def after_compile(self, physics, random_state):
super().after_compile(physics, random_state)
for camera in self._tracking_cameras:
camera.after_compile(physics)
def after_step(self, physics, random_state):
super().after_step(physics, random_state)
for camera in self._tracking_cameras:
camera.after_step(self._tracked_entity_positions(physics))
def initialize_episode_mjcf(self, random_state):
self.arena.initialize_episode_mjcf(random_state)
def initialize_episode(self, physics, random_state):
self.arena.initialize_episode(physics, random_state)
for player in self.players:
player.walker.reinitialize_pose(physics, random_state)
self._initializer(self, physics, random_state)
for camera in self._tracking_cameras:
camera.initialize_episode(self._tracked_entity_positions(physics))
@property
def root_entity(self):
return self.arena
def get_reward(self, physics):
"""Returns a list of per-player rewards.
Each player will receive a reward of:
+1 if their team scored a goal
-1 if their team conceded a goal
0 if no goals were scored on this timestep.
Note: the observations also contain various environment statistics that may
be used to derive per-player rewards (as done in
http://arxiv.org/abs/1902.07151).
Args:
physics: An instance of `Physics`.
Returns:
A list of 0-dimensional numpy arrays, one per player.
"""
scoring_team = self.arena.detected_goal()
if not scoring_team:
return [np.zeros((), dtype=np.float32) for _ in self.players]
rewards = []
for p in self.players:
if p.team == scoring_team:
rewards.append(np.ones((), dtype=np.float32))
else:
rewards.append(-np.ones((), dtype=np.float32))
return rewards
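# For illustration: in a hypothetical 2-vs-2 game, a goal by the first two
# players' team yields rewards equivalent to [1.0, 1.0, -1.0, -1.0]; steps
# without a goal yield four zeros.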
def get_reward_spec(self):
return [
specs.Array(name="reward", shape=(), dtype=np.float32)
for _ in self.players
]
def get_discount(self, physics):
if self.arena.detected_goal():
return np.zeros((), np.float32)
return np.ones((), np.float32)
def get_discount_spec(self):
return specs.Array(name="discount", shape=(), dtype=np.float32)
def should_terminate_episode(self, physics):
"""Returns True if a goal was scored by either team."""
return self.arena.detected_goal() is not None
def before_step(self, physics, actions, random_state):
for player, action in zip(self.players, actions):
player.walker.apply_action(physics, action, random_state)
if self.arena.detected_off_court():
self._throw_in(physics, random_state, self.ball)
def action_spec(self, physics):
"""Return multi-agent action_spec."""
return [player.walker.action_spec for player in self.players]
class MultiturnTask(Task):
"""Continuous game play through scoring events until timeout."""
def __init__(self,
players,
arena,
ball=None,
initializer=None,
observables=None,
disable_walker_contacts=False,
nconmax_per_player=200,
njmax_per_player=200,
control_timestep=0.025,
tracking_cameras=()):
"""See base class."""
super().__init__(
players,
arena,
ball=ball,
initializer=initializer,
observables=observables,
disable_walker_contacts=disable_walker_contacts,
nconmax_per_player=nconmax_per_player,
njmax_per_player=njmax_per_player,
control_timestep=control_timestep,
tracking_cameras=tracking_cameras)
# If `True`, reset ball entity trackers before the next step.
self._should_reset = False
def should_terminate_episode(self, physics):
return False
def get_discount(self, physics):
return np.ones((), np.float32)
def before_step(self, physics, actions, random_state):
super(MultiturnTask, self).before_step(physics, actions, random_state)
if self._should_reset:
self.ball.initialize_entity_trackers()
self._should_reset = False
def after_step(self, physics, random_state):
super(MultiturnTask, self).after_step(physics, random_state)
if self.arena.detected_goal():
self._initializer(self, physics, random_state)
self._should_reset = True
|
|
#!/usr/bin/env python
# Build or install Shapely distributions
#
# This script has two different uses.
#
# 1) Installing from a source distribution, whether via
#
# ``python setup.py install``
#
# after downloading a source distribution, or
#
# ``pip install shapely``
#
# on a platform for which pip cannot find a wheel. This will most
# often be the case for Linux, since the project is not yet
# publishing Linux wheels. This will never be the case on Windows and
# rarely the case on OS X; both are wheels-first platforms.
#
# 2) Building distributions (source or wheel) from a repository. This
# includes using Cython to generate C source for the speedups and
# vectorize modules from Shapely's .pyx files.
#
# On import, Shapely loads a GEOS shared library. GEOS is a run time
# requirement. Additionally, the speedups and vectorized C extension
# modules need GEOS headers and libraries to be built. Shapely versions
# >=1.3 require GEOS >= 3.3.
#
# For the first use case (see 1, above), we aim to treat GEOS as if it
# were a Python requirement listed in ``install_requires``. That is, in
# an environment with Shapely 1.2.x and GEOS 3.2, the command ``pip
# install shapely >=1.3 --no-use-wheel`` (whether wheels are explicitly
# opted against or are not published for the platform) should fail with
# a warning and advice to upgrade GEOS to >=3.3.
#
# In case 1, the environment's GEOS version is determined by executing
# the geos-config script. If the GEOS version returned by that script is
# incompatible with the Shapely source distribution or no geos-config
# script can be found, this setup script will fail.
#
# For the second use case (see 2, distribution building, above), we
# allow the requirements to be loosened. If this script finds that the
# environment variable NO_GEOS_CHECK is set, geos-config will not be
# executed and no attempt will be made to enforce requirements as in the
# first case.
#
# For both cases, a geos-config not in the environment's $PATH may be
# used by setting the environment variable GEOS_CONFIG to the path to
# a geos-config script.
#
# NB: within this setup script, software versions are evaluated according
# to https://www.python.org/dev/peps/pep-0440/.
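# For example (hypothetical paths), a source build against a GEOS installed
# outside the default search path might be invoked as:
#
#   GEOS_CONFIG=/opt/geos/bin/geos-config python setup.py build_ext --inplace
#
# or, bypassing geos-config entirely:
#
#   NO_GEOS_CONFIG=1 CFLAGS="-I/opt/geos/include" \
#       LDFLAGS="-L/opt/geos/lib -lgeos_c" python setup.py build_ext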
import errno
import glob
import itertools as it
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
try:
# If possible, use setuptools
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as distutils_build_ext
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from distutils.command.build_ext import build_ext as distutils_build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
from _vendor.packaging.version import Version
# Get geos_version from GEOS dynamic library, which depends on
# GEOS_LIBRARY_PATH and/or GEOS_CONFIG environment variables
from shapely._buildcfg import geos_version_string, geos_version, \
geos_config, get_geos_config
logging.basicConfig()
log = logging.getLogger(__file__)
# python -W all setup.py ...
if 'all' in sys.warnoptions:
log.level = logging.DEBUG
class GEOSConfig(object):
"""Interface to config options from the `geos-config` utility
"""
def __init__(self, cmd):
self.cmd = cmd
def get(self, option):
try:
stdout, stderr = subprocess.Popen(
[self.cmd, option],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except OSError as ex:
# e.g., [Errno 2] No such file or directory
raise OSError("Could not find geos-config script")
if stderr and not stdout:
raise ValueError(stderr.strip())
if sys.version_info[0] >= 3:
result = stdout.decode('ascii').strip()
else:
result = stdout.strip()
log.debug('%s %s: %r', self.cmd, option, result)
return result
def version(self):
match = re.match(r'(\d+)\.(\d+)\.(\d+)', self.get('--version').strip())
return tuple(map(int, match.groups()))
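# For instance, if `geos-config --version` prints "3.8.1" (a hypothetical
# value), version() returns (3, 8, 1), which can be compared directly against
# minimums such as (3, 3).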
# Get the version from the shapely module.
shapely_version = None
with open('shapely/__init__.py', 'r') as fp:
for line in fp:
if line.startswith("__version__"):
shapely_version = Version(
line.split("=")[1].strip().strip("\"'"))
break
if not shapely_version:
raise ValueError("Could not determine Shapely's version")
# Allow GEOS_CONFIG to be bypassed in favor of CFLAGS and LDFLAGS
# vars set by build environment.
if os.environ.get('NO_GEOS_CONFIG'):
geos_config = None
else:
geos_config = GEOSConfig(os.environ.get('GEOS_CONFIG', 'geos-config'))
# Fail installation if the GEOS shared library does not meet the minimum
# version. We ship it with Shapely for Windows, so no need to check on
# that platform.
geos_version = None
if geos_config and not (os.environ.get('NO_GEOS_CHECK') or sys.platform == 'win32'):
try:
log.info(
"Shapely >= 1.3 requires GEOS >= 3.3. "
"Checking for GEOS version...")
geos_version = geos_config.version()
log.info("Found GEOS version: %s", geos_version)
if (set(sys.argv).intersection(['install', 'build', 'build_ext']) and
shapely_version >= Version("1.3") and geos_version < (3, 3)):
log.critical(
"Shapely >= 1.3 requires GEOS >= 3.3. "
"Install GEOS 3.3+ and reinstall Shapely.")
sys.exit(1)
except OSError as exc:
log.warn(
"Failed to determine system's GEOS version: %s. "
"Installation continuing. GEOS version will be "
"checked on import of shapely.", exc)
# Handle UTF-8 encoding of certain text files.
open_kwds = {}
if sys.version_info >= (3,):
open_kwds['encoding'] = 'utf-8'
with open('VERSION.txt', 'w', **open_kwds) as fp:
fp.write(str(shapely_version))
with open('README.rst', 'r', **open_kwds) as fp:
readme = fp.read()
with open('CREDITS.txt', 'r', **open_kwds) as fp:
credits = fp.read()
with open('CHANGES.txt', 'r', **open_kwds) as fp:
changes = fp.read()
long_description = readme + '\n\n' + credits + '\n\n' + changes
extra_reqs = {
'test': ['pytest', 'pytest-cov'],
'vectorized': ['numpy']}
extra_reqs['all'] = list(it.chain.from_iterable(extra_reqs.values()))
# Make a dict of setup arguments. Some items will be updated as
# the script progresses.
setup_args = dict(
name = 'Shapely',
version = str(shapely_version),
requires = ['Python (>=2.6)', 'libgeos_c (>=3.3)'],
description = 'Geometric objects, predicates, and operations',
license = 'BSD',
keywords = 'geometry topology gis',
author = 'Sean Gillies',
author_email = 'sean.gillies@gmail.com',
maintainer = 'Sean Gillies',
maintainer_email = 'sean.gillies@gmail.com',
url = 'https://github.com/Toblerity/Shapely',
long_description = long_description,
packages = [
'shapely',
'shapely.geometry',
'shapely.algorithms',
'shapely.examples',
'shapely.speedups',
'shapely.vectorized',
],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: GIS',
],
cmdclass = {},
extras_require = extra_reqs,
package_data={
'shapely': ['shapely/_geos.pxi']},
include_package_data=True
)
# Add DLLs for Windows.
if sys.platform == 'win32':
try:
os.mkdir('shapely/DLLs')
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if '(AMD64)' in sys.version:
for dll in glob.glob('DLLs_AMD64_VC9/*.dll'):
shutil.copy(dll, 'shapely/DLLs')
elif sys.version_info[0:2] == (2, 5):
for dll in glob.glob('DLLs_x86_VC7/*.dll'):
shutil.copy(dll, 'shapely/DLLs')
else:
for dll in glob.glob('DLLs_x86_VC9/*.dll'):
shutil.copy(dll, 'shapely/DLLs')
setup_args['package_data']['shapely'].append('shapely/DLLs/*.dll')
# Prepare build opts and args for the speedups extension module.
include_dirs = []
library_dirs = []
libraries = []
extra_link_args = []
# If NO_GEOS_CONFIG is set in the environment, geos-config will not
# be called and CFLAGS and LDFLAGS environment variables must be set
# instead like
#
# CFLAGS="-I/usr/local/include" LDFLAGS="-L/usr/local/lib -lgeos_c"
#
# Or, equivalently:
#
# CFLAGS="$(geos-config --cflags)" LDFLAGS="$(geos-config --clibs)"
if geos_version and geos_config:
# Collect other options from GEOS configuration.
for item in geos_config.get('--cflags').split():
if item.startswith("-I"):
include_dirs.extend(item[2:].split(":"))
for item in geos_config.get('--clibs').split():
if item.startswith("-L"):
library_dirs.extend(item[2:].split(":"))
elif item.startswith("-l"):
libraries.append(item[2:])
else:
# e.g. -framework GEOS
extra_link_args.append(item)
# Optional compilation of speedups
# setuptools stuff from Bob Ippolito's simplejson project
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError,
IOError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
pass
def construct_build_ext(build_ext):
class WrappedBuildExt(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError as x:
raise BuildFailed(x)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors as x:
raise BuildFailed(x)
return WrappedBuildExt
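# construct_build_ext(distutils_build_ext) returns a build_ext subclass whose
# compilation failures surface as BuildFailed; the try/except around setup()
# below catches that and falls back to a plain-Python install.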
if (hasattr(platform, 'python_implementation')
and platform.python_implementation() == 'PyPy'):
# python_implementation is only available since 2.6
ext_modules = []
libraries = []
if os.path.exists("MANIFEST.in"):
pyx_file = "shapely/speedups/_speedups.pyx"
c_file = "shapely/speedups/_speedups.c"
force_cython = False
if 'sdist' in sys.argv:
force_cython = True
try:
if (force_cython or not os.path.exists(c_file)
or os.path.getmtime(pyx_file) > os.path.getmtime(c_file)):
log.info("Updating C extension with Cython.")
subprocess.check_call(["cython", "shapely/speedups/_speedups.pyx"])
except (subprocess.CalledProcessError, OSError):
log.warn("Could not (re)create C extension with Cython.")
if force_cython:
raise
if not os.path.exists(c_file):
log.warn("speedup extension not found")
ext_modules = [
Extension("shapely.speedups._speedups", ["shapely/speedups/_speedups.c"],
include_dirs=include_dirs, library_dirs=library_dirs,
libraries=libraries, extra_link_args=extra_link_args)]
cmd_classes = setup_args.setdefault('cmdclass', {})
try:
import numpy
from Cython.Distutils import build_ext as cython_build_ext
from distutils.extension import Extension as DistutilsExtension
if 'build_ext' in setup_args['cmdclass']:
raise ValueError('We need to put the Cython build_ext in '
'cmd_classes, but it is already defined.')
setup_args['cmdclass']['build_ext'] = cython_build_ext
include_dirs.append(numpy.get_include())
libraries.append(numpy.get_include())
ext_modules.append(DistutilsExtension(
"shapely.vectorized._vectorized",
sources=["shapely/vectorized/_vectorized.pyx"],
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
extra_link_args=extra_link_args,))
except ImportError:
log.info("Numpy or Cython not available, shapely.vectorized submodule "
"not being built.")
try:
# try building with speedups
existing_build_ext = setup_args['cmdclass'].\
get('build_ext', distutils_build_ext)
setup_args['cmdclass']['build_ext'] = \
construct_build_ext(existing_build_ext)
setup(ext_modules=ext_modules, **setup_args)
except BuildFailed as ex:
BUILD_EXT_WARNING = "The C extension could not be compiled, " \
"speedups are not enabled."
log.warn(ex)
log.warn(BUILD_EXT_WARNING)
log.warn("Failure information, if any, is above.")
log.warn("I'm retrying the build without the C extension now.")
# Remove any previously defined build_ext command class.
if 'build_ext' in setup_args['cmdclass']:
del setup_args['cmdclass']['build_ext']
if 'build_ext' in cmd_classes:
del cmd_classes['build_ext']
setup(**setup_args)
log.warn(BUILD_EXT_WARNING)
log.info("Plain-Python installation succeeded.")
|
|
from collections import namedtuple
import os
import errno
import threading
import time
import uuid
import struct
from nose import SkipTest
from nose.tools import eq_
from nose.tools import raises
import mock
from kazoo.exceptions import ConnectionLoss
from kazoo.protocol.serialization import (
Connect,
int_struct,
write_string,
)
from kazoo.protocol.states import KazooState
from kazoo.protocol.connection import _CONNECTION_DROP
from kazoo.testing import KazooTestCase
from kazoo.tests.util import wait
class Delete(namedtuple('Delete', 'path version')):
type = 2
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(int_struct.pack(self.version))
return b
@classmethod
def deserialize(self, bytes, offset):
raise ValueError("oh my")
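# This minimal Delete request serializes normally but always raises on
# deserialize, letting the tests below exercise the client's error handling
# when a response cannot be decoded.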
class TestConnectionHandler(KazooTestCase):
def test_bad_deserialization(self):
async_object = self.client.handler.async_result()
self.client._queue.append((Delete(self.client.chroot, -1), async_object))
os.write(self.client._connection._write_pipe, b'\0')
@raises(ValueError)
def testit():
async_object.get()
testit()
def test_with_bad_sessionid(self):
ev = threading.Event()
def expired(state):
if state == KazooState.CONNECTED:
ev.set()
password = os.urandom(16)
client = self._get_client(client_id=(82838284824, password))
client.add_listener(expired)
client.start()
try:
ev.wait(15)
eq_(ev.is_set(), True)
finally:
client.stop()
def test_connection_read_timeout(self):
client = self.client
ev = threading.Event()
path = "/" + uuid.uuid4().hex
handler = client.handler
_select = handler.select
_socket = client._connection._socket
def delayed_select(*args, **kwargs):
result = _select(*args, **kwargs)
if len(args[0]) == 1 and _socket in args[0]:
# for any socket read, simulate a timeout
return [], [], []
return result
def back(state):
if state == KazooState.CONNECTED:
ev.set()
client.add_listener(back)
client.create(path, b"1")
try:
handler.select = delayed_select
self.assertRaises(ConnectionLoss, client.get, path)
finally:
handler.select = _select
# the client reconnects automatically
ev.wait(5)
eq_(ev.is_set(), True)
eq_(client.get(path)[0], b"1")
def test_connection_write_timeout(self):
client = self.client
ev = threading.Event()
path = "/" + uuid.uuid4().hex
handler = client.handler
_select = handler.select
_socket = client._connection._socket
def delayed_select(*args, **kwargs):
result = _select(*args, **kwargs)
if _socket in args[1]:
# for any socket write, simulate a timeout
return [], [], []
return result
def back(state):
if state == KazooState.CONNECTED:
ev.set()
client.add_listener(back)
try:
handler.select = delayed_select
self.assertRaises(ConnectionLoss, client.create, path)
finally:
handler.select = _select
# the client reconnects automatically
ev.wait(5)
eq_(ev.is_set(), True)
eq_(client.exists(path), None)
def test_connection_deserialize_fail(self):
client = self.client
ev = threading.Event()
path = "/" + uuid.uuid4().hex
handler = client.handler
_select = handler.select
_socket = client._connection._socket
def delayed_select(*args, **kwargs):
result = _select(*args, **kwargs)
if _socket in args[1]:
# for any socket write, simulate a timeout
return [], [], []
return result
def back(state):
if state == KazooState.CONNECTED:
ev.set()
client.add_listener(back)
deserialize_ev = threading.Event()
def bad_deserialize(bytes, offset):
deserialize_ev.set()
raise struct.error()
# force the connection to die but, on reconnect, cause the
# server response to be non-deserializable. ensure that the client
# continues to retry. This partially reproduces a rare bug seen
# in production.
with mock.patch.object(Connect, 'deserialize') as mock_deserialize:
mock_deserialize.side_effect = bad_deserialize
try:
handler.select = delayed_select
self.assertRaises(ConnectionLoss, client.create, path)
finally:
handler.select = _select
# the client reconnects automatically but the first attempt will
# hit a deserialize failure. wait for that.
deserialize_ev.wait(5)
eq_(deserialize_ev.is_set(), True)
# this time should succeed
ev.wait(5)
eq_(ev.is_set(), True)
eq_(client.exists(path), None)
def test_connection_close(self):
self.assertRaises(Exception, self.client.close)
self.client.stop()
self.client.close()
# should be able to restart
self.client.start()
def test_connection_pipe(self):
client = self.client
read_pipe = client._connection._read_pipe
write_pipe = client._connection._write_pipe
assert read_pipe is not None
assert write_pipe is not None
# stop client and pipe should not yet be closed
client.stop()
assert read_pipe is not None
assert write_pipe is not None
os.fstat(read_pipe)
os.fstat(write_pipe)
# close client, and pipes should be
client.close()
try:
os.fstat(read_pipe)
except OSError as e:
if not e.errno == errno.EBADF:
raise
else:
self.fail("Expected read_pipe to be closed")
try:
os.fstat(write_pipe)
except OSError as e:
if not e.errno == errno.EBADF:
raise
else:
self.fail("Expected write_pipe to be closed")
# start client back up. should get a new, valid pipe
client.start()
read_pipe = client._connection._read_pipe
write_pipe = client._connection._write_pipe
assert read_pipe is not None
assert write_pipe is not None
os.fstat(read_pipe)
os.fstat(write_pipe)
def test_dirty_pipe(self):
client = self.client
read_pipe = client._connection._read_pipe
write_pipe = client._connection._write_pipe
# add a stray byte to the pipe and ensure that doesn't
# blow up client. simulates case where some error leaves
# a byte in the pipe which doesn't correspond to the
# request queue.
os.write(write_pipe, b'\0')
# eventually this byte should disappear from pipe
wait(lambda: client.handler.select([read_pipe], [], [], 0)[0] == [])
class TestConnectionDrop(KazooTestCase):
def test_connection_dropped(self):
ev = threading.Event()
def back(state):
if state == KazooState.CONNECTED:
ev.set()
# create a node with a large value and stop the ZK node
path = "/" + uuid.uuid4().hex
self.client.create(path)
self.client.add_listener(back)
result = self.client.set_async(path, b'a' * 1000 * 1024)
self.client._call(_CONNECTION_DROP, None)
self.assertRaises(ConnectionLoss, result.get)
# we have a working connection to a new node
ev.wait(30)
eq_(ev.is_set(), True)
class TestReadOnlyMode(KazooTestCase):
def setUp(self):
self.setup_zookeeper(read_only=True)
ver = self.client.server_version()
if ver[1] < 4:
raise SkipTest("Must use zookeeper 3.4 or above")
def tearDown(self):
self.client.stop()
def test_read_only(self):
from kazoo.exceptions import NotReadOnlyCallError
from kazoo.protocol.states import KeeperState
client = self.client
states = []
ev = threading.Event()
@client.add_listener
def listen(state):
states.append(state)
if client.client_state == KeeperState.CONNECTED_RO:
ev.set()
try:
self.cluster[1].stop()
self.cluster[2].stop()
ev.wait(6)
eq_(ev.is_set(), True)
eq_(client.client_state, KeeperState.CONNECTED_RO)
# Test read only command
eq_(client.get_children('/'), [])
# Test error with write command
@raises(NotReadOnlyCallError)
def testit():
client.create('/fred')
testit()
# Wait for a ping
time.sleep(15)
finally:
client.remove_listener(listen)
self.cluster[1].run()
self.cluster[2].run()
|
|
"""Create a local Vitess database for testing."""
import glob
import logging
import os
import random
import re
from vttest import environment
from vttest import vt_processes
class LocalDatabase(object):
"""Set up a local Vitess database."""
def __init__(self,
topology,
schema_dir,
mysql_only,
init_data_options,
web_dir=None):
"""Initializes an object of this class.
Args:
topology: a vttest.VTTestTopology object describing the topology.
schema_dir: see the documentation for the corresponding command line
flag in run_local_database.py
mysql_only: see the documentation for the corresponding command line
flag in run_local_database.py
init_data_options: an object of type InitDataOptions containing
options configuring populating the database with initial random data.
If the value is 'None' then the database will not be initialized
with random data.
web_dir: see the documentation for the corresponding command line
flag in run_local_database.py
"""
self.topology = topology
self.schema_dir = schema_dir
self.mysql_only = mysql_only
self.init_data_options = init_data_options
self.web_dir = web_dir
def setup(self):
"""Create a MySQL instance and all Vitess processes."""
mysql_port = environment.get_port('mysql')
self.directory = environment.get_test_directory()
self.mysql_db = environment.mysql_db_class(self.directory, mysql_port)
self.mysql_db.setup()
self.create_databases()
self.load_schema()
if self.init_data_options is not None:
self.rng = random.Random(self.init_data_options.rng_seed)
self.populate_with_random_data()
if self.mysql_only:
return
vt_processes.start_vt_processes(self.directory, self.topology,
self.mysql_db, self.schema_dir,
web_dir=self.web_dir)
def teardown(self):
"""Kill all Vitess processes and wait for them to end.
MySQLTestDB's wrapper script will take care of mysqld.
"""
if not self.mysql_only:
self.kill()
self.wait()
self.mysql_db.teardown()
environment.cleanup_test_directory(self.directory)
def kill(self):
"""Kill all Vitess processes."""
vt_processes.kill_vt_processes()
def wait(self):
"""Wait for all Vitess processes to end."""
vt_processes.wait_vt_processes()
def vtgate_addr(self):
"""Get the host:port for vtgate."""
if environment.get_protocol() == 'grpc':
return vt_processes.vtcombo_process.grpc_addr()
return vt_processes.vtcombo_process.addr()
def config(self):
"""Returns a dict with enough information to be able to connect."""
if self.mysql_only:
return self.mysql_db.config()
result = {
'port': vt_processes.vtcombo_process.port,
}
if environment.get_protocol() == 'grpc':
result['grpc_port'] = vt_processes.vtcombo_process.grpc_port
return result
def mysql_execute(self, queries, db_name=''):
"""Execute queries directly on MySQL.
The queries will be executed in a single transaction.
Args:
queries: A list of strings. The SQL statements to execute.
db_name: The database name to use.
Returns:
The results of the last query as a list of row tuples.
"""
conn = self.mysql_db.connect(db_name)
cursor = conn.cursor()
for query in queries:
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
# Commit all of the queries.
conn.commit()
conn.close()
return result
def create_databases(self):
"""Create a database for each shard."""
cmds = []
for kpb in self.topology.keyspaces:
if kpb.served_from:
# redirected keyspaces have no underlying database
continue
for spb in kpb.shards:
db_name = spb.db_name_override
if not db_name:
db_name = 'vt_%s_%s' % (kpb.name, spb.name)
cmds.append('create database `%s`' % db_name)
logging.info('Creating databases')
self.mysql_execute(cmds)
def load_schema(self):
"""Load schema SQL from data files."""
if not self.schema_dir:
return
if not os.path.isdir(self.schema_dir):
raise Exception('schema_dir "%s" is not a directory.' % self.schema_dir)
for kpb in self.topology.keyspaces:
if kpb.served_from:
# redirected keyspaces have no underlying database
continue
keyspace = kpb.name
keyspace_dir = os.path.join(self.schema_dir, keyspace)
if not os.path.isdir(keyspace_dir):
raise Exception(
'No subdirectory found in schema dir %s for keyspace %s. '
'For keyspaces without an initial schema, create the '
'directory %s and leave a README file to explain why the '
'directory exists. '
'Alternatively, disable loading schemas by setting --schema_dir '
'to "".' %
(self.schema_dir, keyspace, keyspace_dir))
for filepath in glob.glob(os.path.join(keyspace_dir, '*.sql')):
logging.info('Loading schema for keyspace %s from file %s',
keyspace, filepath)
cmds = self.get_sql_commands_from_file(filepath, keyspace_dir)
# Run the cmds on each shard in the keyspace.
for spb in kpb.shards:
db_name = spb.db_name_override
if not db_name:
db_name = 'vt_%s_%s' % (kpb.name, spb.name)
self.mysql_execute(cmds, db_name=db_name)
def populate_with_random_data(self):
"""Populates all shards with randomly generated data."""
for kpb in self.topology.keyspaces:
if kpb.served_from:
# redirected keyspaces have no underlying database
continue
for spb in kpb.shards:
db_name = spb.db_name_override
if not db_name:
db_name = 'vt_%s_%s' % (kpb.name, spb.name)
self.populate_shard_with_random_data(db_name)
def populate_shard_with_random_data(self, db_name):
"""Populates the given database with randomly generated data.
Every table in the database is populated.
Args:
db_name: The shard database name (string).
"""
tables = self.mysql_execute(['SHOW TABLES'], db_name)
for table in tables:
self.populate_table_with_random_data(db_name, table[0])
# The number of rows inserted in a single INSERT statement.
batch_insert_size = 1000
def populate_table_with_random_data(self, db_name, table_name):
"""Populates the given table with randomly generated data.
Queries the database for the table schema and then populates
the columns with randomly generated data.
Args:
db_name: The shard database name (string).
table_name: The name of the table to populate (string).
"""
field_infos = self.mysql_execute(['DESCRIBE %s' % table_name], db_name)
num_rows = self.rng.randint(self.init_data_options.min_table_shard_size,
self.init_data_options.max_table_shard_size)
rows = []
for _ in xrange(num_rows):
row = []
for field_info in field_infos:
field_type = field_info[1]
field_allow_nulls = (field_info[2] == 'YES')
row.append(
self.generate_random_field(
table_name, field_type, field_allow_nulls))
rows.append(row)
# Insert 'rows' into the database in batches of size
# self.batch_insert_size
field_names = [field_info[0] for field_info in field_infos]
for index in xrange(0, len(rows), self.batch_insert_size):
self.batch_insert(db_name,
table_name,
field_names,
rows[index:index + self.batch_insert_size])
def batch_insert(self, db_name, table_name, field_names, rows):
"""Inserts the rows in 'rows' into 'table_name' of database 'db_name'.
Args:
db_name: The name of the database containing the table.
table_name: The name of the table to populate.
field_names: The list of the field names in the table.
rows: A list of tuples with each tuple containing
the string representations of the fields.
The order of the representation must match the order of the field
names listed in 'field_names'.
"""
field_names_string = ','.join(field_names)
values_string = ','.join(['(' + ','.join(row) +')' for row in rows])
# We use "INSERT IGNORE" to ignore duplicate key errors.
insert_query = ('INSERT IGNORE INTO %s (%s) VALUES %s' %
(table_name, field_names_string, values_string))
logging.info('Executing in database %s: %s', db_name, insert_query)
self.mysql_execute([insert_query], db_name)
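# Illustrative example (hypothetical table): batch_insert('vt_ks_0', 'users',
# ['id', 'msg'], [["1", "'a'"], ["2", "'b'"]]) runs
#   INSERT IGNORE INTO users (id,msg) VALUES (1,'a'),(2,'b')
# against the vt_ks_0 database.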
def generate_random_field(self, table_name, field_type, field_allows_nulls):
"""Generates a random field string representation.
By 'string representation' we mean a string that is suitable to be a part
of an 'INSERT INTO' SQL statement.
Args:
table_name: The name of the table that will contain the generated field
value. Only used for a descriptive exception message in case of
an error.
field_type: The field_type as given by a "DESCRIBE <table>" SQL statement.
field_allows_nulls: Should be 'true' if this field allows NULLS.
Returns:
The random field.
Raises:
Exception: If 'field_type' is not supported.
"""
value = None
if field_type.startswith('tinyint'):
value = self.random_integer(field_type, 1)
elif field_type.startswith('smallint'):
value = self.random_integer(field_type, 2)
elif field_type.startswith('mediumint'):
value = self.random_integer(field_type, 3)
elif field_type.startswith('int'):
value = self.random_integer(field_type, 4)
elif field_type.startswith('bigint'):
value = self.random_integer(field_type, 8)
elif field_type.startswith('decimal'):
value = self.random_decimal(field_type)
else:
raise Exception('Populating random data in field type: %s is not yet '
'supported. (table: %s)' % (field_type, table_name))
if (field_allows_nulls and
self.true_with_probability(self.init_data_options.null_probability)):
return 'NULL'
return value
def true_with_probability(self, true_probability):
"""Returns a pseudo-random boolean.
Args:
true_probability: The probability to use for returning 'true'.
Returns:
True with probability 'true_probability', False otherwise.
"""
return self.rng.uniform(0, 1) < true_probability
def random_integer(self, field_type, num_bytes):
num_bits = 8*num_bytes
if field_type.endswith('unsigned'):
return '%d' % (self.rng.randint(0, 2**num_bits-1))
return '%d' % (self.rng.randint(-2**(num_bits-1), 2**(num_bits-1)-1))
decimal_regexp = re.compile(r'decimal\((\d+),(\d+)\)')
def random_decimal(self, field_type):
match = self.decimal_regexp.match(field_type)
if match is None:
raise Exception("Can't parse 'decimal' field type: %s" % field_type)
num_digits_right = int(match.group(2))
num_digits_left = int(match.group(1))-num_digits_right
boundary = 10**num_digits_left-1
rand = self.rng.uniform(-boundary, boundary)
return '%.*f' % (num_digits_right, rand)
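# Worked example: for a decimal(5,2) field, num_digits_right is 2,
# num_digits_left is 3, boundary is 999, and the result is a string such as
# '123.45' (hypothetical), i.e. uniform over (-999, 999) with 2 decimals.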
def get_sql_commands_from_file(self, filename, source_root=None):
"""Given a file, extract an array of commands from the file.
Automatically strips out three types of MySQL comment syntax:
'--' at beginning of line: line removed
'-- ': remove everything from here to line's end (note space after dashes)
'#': remove everything from here to line's end
MySQL's handling of C-style /* ... */ comments is weird, so we
leave them alone for now. See the MySQL manual 6.1.6 "Comment Syntax"
for all the weird complications.
Args:
filename: the SQL source file to use.
source_root: if specified, 'source FILENAME' lines in the SQL file will
source the specified filename relative to source_root.
Returns:
A list of SQL commands.
"""
fd = open(filename)
lines = fd.readlines()
inside_single_quotes = 0
inside_double_quotes = 0
commands = []
cmd = ''
for line in lines:
# Strip newline and other trailing whitespace
line = line.rstrip()
if (not inside_single_quotes and not inside_double_quotes and
line.startswith('--')):
# Line starts with '--', skip line
continue
i = 0
next_i = 0
# Iterate through line, looking for special delimiters
while 1:
i = next_i
if i >= len(line):
break
# By default, move to next character after this one
next_i = i + 1
if line[i] == '\\':
# Next character is literal, skip this and the next character
next_i = i + 2
elif line[i] == "'":
if not inside_double_quotes:
inside_single_quotes = not inside_single_quotes
elif line[i] == '"':
if not inside_single_quotes:
inside_double_quotes = not inside_double_quotes
elif not inside_single_quotes and not inside_double_quotes:
if line[i] == '#' or line[i:i+3] == '-- ':
# Found unquoted "#" or "-- ", ignore rest of line
line = line[:i]
break
if line[i] == ';':
# Unquoted semicolon marks end of command
cmd += line[:i]
commands.append(cmd)
cmd = ''
# Chop off everything before and including the semicolon
line = line[i+1:]
# Start over at beginning of line
next_i = 0
# Reached end of line
if line and not line.isspace():
if source_root and not cmd and line.startswith('source '):
commands.extend(self.get_sql_commands_from_file(
os.path.join(source_root, line[7:]),
source_root=source_root))
else:
cmd += line
cmd += '\n'
# Accept last command even if it doesn't end in semicolon
cmd = cmd.strip()
if cmd:
commands.append(cmd)
return commands
def __enter__(self):
self.setup()
return self
def __exit__(self, exc_type, exc_info, tb):
self.teardown()
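# The __enter__/__exit__ pair above makes the object usable as a context
# manager: setup() runs on entry and teardown() runs on exit, even if the
# body of the 'with' block raises. Sketch of intended use (the enclosing
# class name is not visible in this excerpt, so the names below are
# hypothetical):
#
#   with SqlTestHelper(...) as helper:
#     for cmd in helper.get_sql_commands_from_file('schema.sql'):
#       cursor.execute(cmd)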
|
|
"""
A simple vtkTkRenderWidget for Tkinter.
Created by David Gobbi, April 1999
    May ??, 1999 - Modifications performed by Heather Drury,
to rewrite _pan to match method in TkInteractor.tcl
May 11, 1999 - Major rewrite by David Gobbi to make the
interactor bindings identical to the TkInteractor.tcl
bindings.
July 14, 1999 - Added modification by Ken Martin for VTK 2.4, to
use vtk widgets instead of Togl.
Aug 29, 1999 - Renamed file to vtkRenderWidget.py
Nov 14, 1999 - Added support for keyword 'rw'
Mar 23, 2000 - Extensive but backwards compatible changes,
improved documentation
"""
"""
A few important notes:
This class is meant to be used as a base-class widget for
doing VTK rendering in Python.
In VTK (and C++) there is a very important distinction between
public ivars (attributes in pythonspeak), protected ivars, and
private ivars. When you write a python class that you want
to 'look and feel' like a VTK class, you should follow these rules.
1) Attributes should never be public. Attributes should always be
either protected (prefixed with a single underscore) or private
(prefixed with a double underscore). You can provide access to
attributes through public Set/Get methods (same as VTK).
2) Use a single underscore to denote a protected attribute, e.g.
self._RenderWindow is protected (can be accessed from this
class or a derived class).
3) Use a double underscore to denote a private attribute, e.g.
self.__InExpose cannot be accessed outside of this class.
All attributes should be 'declared' in the __init__() function
i.e. set to some initial value. Don't forget that 'None' means
'NULL' - the python/vtk wrappers guarantee their equivalence.
"""
import Tkinter
import math, os, sys
import vtk
from vtkLoadPythonTkWidgets import vtkLoadPythonTkWidgets
class vtkTkRenderWidget(Tkinter.Widget):
"""
A vtkTkRenderWidget for Python.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to generate a
stereo-capable window.
Create with the keyword focus_on_enter=1 to enable
focus-follows-mouse. The default is for a click-to-focus mode.
"""
def __init__(self, master, cnf={}, **kw):
"""
Constructor.
Keyword arguments:
rw -- Use passed render window instead of creating a new one.
stereo -- If True, generate a stereo-capable window.
Defaults to False.
focus_on_enter -- If True, use a focus-follows-mouse mode.
            Defaults to False, in which case the widget uses a
            click-to-focus mode.
"""
# load the necessary extensions into tk
vtkLoadPythonTkWidgets(master.tk)
try: # check to see if a render window was specified
renderWindow = kw['rw']
except KeyError:
renderWindow = vtk.vtkRenderWindow()
try: # was a stereo rendering context requested?
if kw['stereo']:
renderWindow.StereoCapableWindowOn()
del kw['stereo']
except KeyError:
pass
# check if focus should follow mouse
if kw.get('focus_on_enter'):
self._FocusOnEnter = 1
del kw['focus_on_enter']
else:
self._FocusOnEnter = 0
kw['rw'] = renderWindow.GetAddressAsString("vtkRenderWindow")
Tkinter.Widget.__init__(self, master, 'vtkTkRenderWidget', cnf, kw)
self._CurrentRenderer = None
self._CurrentCamera = None
self._CurrentZoom = 1.0
self._CurrentLight = None
self._ViewportCenterX = 0
self._ViewportCenterY = 0
self._Picker = vtk.vtkCellPicker()
self._PickedAssembly = None
self._PickedProperty = vtk.vtkProperty()
self._PickedProperty.SetColor(1,0,0)
self._PrePickedProperty = None
self._OldFocus = None
# used by the LOD actors
self._DesiredUpdateRate = 15
self._StillUpdateRate = 0.0001
# these record the previous mouse position
self._LastX = 0
self._LastY = 0
# private attributes
self.__InExpose = 0
# create the Tk bindings
self.BindTkRenderWidget()
def __getattr__(self,attr):
# because the tk part of vtkTkRenderWidget must have
# the only remaining reference to the RenderWindow when
# it is destroyed, we can't actually store the RenderWindow
# as an attribute but instead have to get it from the tk-side
if attr == '_RenderWindow':
return self.GetRenderWindow()
raise AttributeError, self.__class__.__name__ + \
" has no attribute named " + attr
def BindTkRenderWidget(self):
"""
Bind some default actions.
"""
self.bind("<ButtonPress>",
lambda e,s=self: s.StartMotion(e.x,e.y))
self.bind("<ButtonRelease>",
lambda e,s=self: s.EndMotion(e.x,e.y))
self.bind("<B1-Motion>",
lambda e,s=self: s.Rotate(e.x,e.y))
self.bind("<B2-Motion>",
lambda e,s=self: s.Pan(e.x,e.y))
self.bind("<B3-Motion>",
lambda e,s=self: s.Zoom(e.x,e.y))
self.bind("<Shift-B1-Motion>",
lambda e,s=self: s.Pan(e.x,e.y))
self.bind("<KeyPress-r>",
lambda e,s=self: s.Reset(e.x,e.y))
self.bind("<KeyPress-u>",
lambda e,s=self: s.deiconify())
self.bind("<KeyPress-w>",
lambda e,s=self: s.Wireframe())
self.bind("<KeyPress-s>",
lambda e,s=self: s.Surface())
self.bind("<KeyPress-p>",
lambda e,s=self: s.PickActor(e.x,e.y))
if self._FocusOnEnter:
self.bind("<Enter>",
lambda e,s=self: s.Enter(e.x,e.y))
self.bind("<Leave>",
lambda e,s=self: s.Leave(e.x,e.y))
else:
self.bind("<ButtonPress>",
lambda e,s=self: s.Enter(e.x,e.y))
self.bind("<Expose>",
lambda e,s=self: s.Expose())
def GetZoomFactor(self):
return self._CurrentZoom
def SetDesiredUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
self._DesiredUpdateRate = rate
def GetDesiredUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
return self._DesiredUpdateRate
def SetStillUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
self._StillUpdateRate = rate
def GetStillUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
return self._StillUpdateRate
def GetRenderWindow(self):
addr = self.tk.call(self._w, 'GetRenderWindow')[5:]
return vtk.vtkRenderWindow('_%s_vtkRenderWindow_p' % addr)
def GetPicker(self):
return self._Picker
def Expose(self):
if (not self.__InExpose):
self.__InExpose = 1
if (not self._RenderWindow.IsA('vtkCocoaRenderWindow')):
self.update()
self._RenderWindow.Render()
self.__InExpose = 0
def Render(self):
if (self._CurrentLight):
light = self._CurrentLight
light.SetPosition(self._CurrentCamera.GetPosition())
light.SetFocalPoint(self._CurrentCamera.GetFocalPoint())
self._RenderWindow.Render()
def UpdateRenderer(self,x,y):
"""
UpdateRenderer will identify the renderer under the mouse and set
up _CurrentRenderer, _CurrentCamera, and _CurrentLight.
"""
windowX = self.winfo_width()
windowY = self.winfo_height()
renderers = self._RenderWindow.GetRenderers()
numRenderers = renderers.GetNumberOfItems()
self._CurrentRenderer = None
renderers.InitTraversal()
for i in range(0,numRenderers):
renderer = renderers.GetNextItem()
vx,vy = (0,0)
if (windowX > 1):
vx = float(x)/(windowX-1)
if (windowY > 1):
vy = (windowY-float(y)-1)/(windowY-1)
(vpxmin,vpymin,vpxmax,vpymax) = renderer.GetViewport()
if (vx >= vpxmin and vx <= vpxmax and
vy >= vpymin and vy <= vpymax):
self._CurrentRenderer = renderer
self._ViewportCenterX = float(windowX)*(vpxmax-vpxmin)/2.0\
+vpxmin
self._ViewportCenterY = float(windowY)*(vpymax-vpymin)/2.0\
+vpymin
self._CurrentCamera = self._CurrentRenderer.GetActiveCamera()
lights = self._CurrentRenderer.GetLights()
lights.InitTraversal()
self._CurrentLight = lights.GetNextItem()
break
self._LastX = x
self._LastY = y
def GetCurrentRenderer(self):
return self._CurrentRenderer
def Enter(self,x,y):
self._OldFocus=self.focus_get()
self.focus()
self.StartMotion(x, y)
def Leave(self,x,y):
if (self._OldFocus != None):
self._OldFocus.focus()
def StartMotion(self,x,y):
self.GetRenderWindow().SetDesiredUpdateRate(self._DesiredUpdateRate)
self.UpdateRenderer(x,y)
def EndMotion(self,x,y):
self.GetRenderWindow().SetDesiredUpdateRate(self._StillUpdateRate)
if self._CurrentRenderer:
self.Render()
def Rotate(self,x,y):
if self._CurrentRenderer:
self._CurrentCamera.Azimuth(self._LastX - x)
self._CurrentCamera.Elevation(y - self._LastY)
self._CurrentCamera.OrthogonalizeViewUp()
self._LastX = x
self._LastY = y
self._CurrentRenderer.ResetCameraClippingRange()
self.Render()
def Pan(self,x,y):
if self._CurrentRenderer:
renderer = self._CurrentRenderer
camera = self._CurrentCamera
(pPoint0,pPoint1,pPoint2) = camera.GetPosition()
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
if (camera.GetParallelProjection()):
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetFocalPoint(fx,fy,fz)
renderer.SetWorldPoint(pPoint0,pPoint1,pPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetPosition(fx,fy,fz)
else:
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
# Specify a point location in world coordinates
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
# Convert world point coordinates to display coordinates
dPoint = renderer.GetDisplayPoint()
focalDepth = dPoint[2]
aPoint0 = self._ViewportCenterX + (x - self._LastX)
aPoint1 = self._ViewportCenterY - (y - self._LastY)
renderer.SetDisplayPoint(aPoint0,aPoint1,focalDepth)
renderer.DisplayToWorld()
(rPoint0,rPoint1,rPoint2,rPoint3) = renderer.GetWorldPoint()
if (rPoint3 != 0.0):
rPoint0 = rPoint0/rPoint3
rPoint1 = rPoint1/rPoint3
rPoint2 = rPoint2/rPoint3
camera.SetFocalPoint((fPoint0 - rPoint0) + fPoint0,
(fPoint1 - rPoint1) + fPoint1,
(fPoint2 - rPoint2) + fPoint2)
camera.SetPosition((fPoint0 - rPoint0) + pPoint0,
(fPoint1 - rPoint1) + pPoint1,
(fPoint2 - rPoint2) + pPoint2)
self._LastX = x
self._LastY = y
self.Render()
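    # Note on the Zoom handler below: the factor grows exponentially with
    # vertical mouse travel, zoomFactor = 1.02 ** (0.5 * (lastY - y)), so
    # dragging up zooms in and dragging down zooms out. Parallel projections
    # shrink or grow the parallel scale; perspective cameras dolly toward or
    # away from the focal point instead.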
def Zoom(self,x,y):
if self._CurrentRenderer:
renderer = self._CurrentRenderer
camera = self._CurrentCamera
zoomFactor = math.pow(1.02,(0.5*(self._LastY - y)))
self._CurrentZoom = self._CurrentZoom * zoomFactor
if camera.GetParallelProjection():
parallelScale = camera.GetParallelScale()/zoomFactor
camera.SetParallelScale(parallelScale)
else:
camera.Dolly(zoomFactor)
renderer.ResetCameraClippingRange()
self._LastX = x
self._LastY = y
self.Render()
def Reset(self,x,y):
if self._CurrentRenderer:
self._CurrentRenderer.ResetCamera()
self.Render()
def Wireframe(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToWireframe()
self.Render()
def Surface(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToSurface()
self.Render()
def PickActor(self,x,y):
if self._CurrentRenderer:
renderer = self._CurrentRenderer
picker = self._Picker
windowY = self.winfo_height()
picker.Pick(x,(windowY - y - 1),0.0,renderer)
assembly = picker.GetAssembly()
if (self._PickedAssembly != None and
self._PrePickedProperty != None):
self._PickedAssembly.SetProperty(self._PrePickedProperty)
# release hold of the property
self._PrePickedProperty.UnRegister(self._PrePickedProperty)
self._PrePickedProperty = None
if (assembly != None):
self._PickedAssembly = assembly
self._PrePickedProperty = self._PickedAssembly.GetProperty()
# hold onto the property
self._PrePickedProperty.Register(self._PrePickedProperty)
self._PickedAssembly.SetProperty(self._PickedProperty)
self.Render()
#----------------------------------------------------------------------------
def vtkRenderWidgetConeExample():
"""Like it says, just a simple example
"""
# create root window
root = Tkinter.Tk()
# create vtkTkRenderWidget
pane = vtkTkRenderWidget(root,width=300,height=300)
ren = vtk.vtkRenderer()
pane.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# pack the pane into the tk root
pane.pack()
# start the tk mainloop
root.mainloop()
if __name__ == "__main__":
vtkRenderWidgetConeExample()
|
|
#!/usr/bin/env python
"""Tests for approval_checks module."""
from unittest import mock
from absl import app
from grr_response_core.lib import rdfvalue
from grr_response_server import access_control
from grr_response_server import data_store
from grr_response_server.authorization import client_approval_auth
from grr_response_server.gui import approval_checks
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import acl_test_lib
from grr.test_lib import test_lib
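# The tests below exercise approval_checks.CheckApprovalRequest. As asserted
# here: a request with no grants or only one grant is rejected, an expired
# request is rejected, and hunt/cron-job approvals additionally need a grant
# from an admin user. When the client approval authorization manager is
# active, CheckApproversForLabel is consulted once per client label.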
def _CreateApprovalRequest(approval_type,
subject_id,
expiration_time=None,
grants=None):
expiration_time = expiration_time or (
rdfvalue.RDFDatetime.Now() + rdfvalue.Duration.From(1, rdfvalue.HOURS))
return rdf_objects.ApprovalRequest(
approval_type=approval_type,
approval_id="1234",
subject_id=subject_id,
requestor_username=u"requestor",
reason="reason",
timestamp=rdfvalue.RDFDatetime.Now(),
expiration_time=expiration_time,
grants=grants)
class CheckClientApprovalRequestTest(acl_test_lib.AclTestMixin,
test_lib.GRRBaseTest):
def _CreateRequest(self, expiration_time=None, grants=None):
expiration_time = expiration_time or (
rdfvalue.RDFDatetime.Now() + rdfvalue.Duration.From(1, rdfvalue.HOURS))
return _CreateApprovalRequest(
rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CLIENT,
self.client_id,
expiration_time=expiration_time,
grants=grants)
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
def testRaisesWhenNoGrants(self):
approval_request = self._CreateRequest(grants=[])
with self.assertRaisesRegex(
access_control.UnauthorizedAccess,
"Need at least 2 additional approvers for access"):
approval_checks.CheckApprovalRequest(approval_request)
def testRaisesWhenJustOneGrant(self):
approval_request = self._CreateRequest(
grants=[rdf_objects.ApprovalGrant(grantor_username=u"grantor")])
with self.assertRaisesRegex(
access_control.UnauthorizedAccess,
"Need at least 1 additional approver for access"):
approval_checks.CheckApprovalRequest(approval_request)
def testRaisesIfApprovalExpired(self):
approval_request = self._CreateRequest(
expiration_time=rdfvalue.RDFDatetime.Now() -
rdfvalue.Duration.From(1, rdfvalue.MINUTES),
grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
with self.assertRaisesRegex(access_control.UnauthorizedAccess,
"Approval request is expired"):
approval_checks.CheckApprovalRequest(approval_request)
def testReturnsIfApprovalIsNotExpiredAndHasTwoGrants(self):
approval_request = self._CreateRequest(grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
approval_checks.CheckApprovalRequest(approval_request)
@mock.patch(client_approval_auth.__name__ + ".CLIENT_APPROVAL_AUTH_MGR")
def testWhenAuthMgrActiveReturnsIfClientHasNoLabels(self, mock_mgr):
approval_request = self._CreateRequest(grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
# Make sure approval manager is active.
mock_mgr.IsActive.return_value = True
approval_checks.CheckApprovalRequest(approval_request)
@mock.patch(client_approval_auth.__name__ + ".CLIENT_APPROVAL_AUTH_MGR")
def testWhenAuthMgrActiveChecksApproversForEachClientLabel(self, mock_mgr):
data_store.REL_DB.AddClientLabels(self.client_id, u"GRR", [u"foo", u"bar"])
approval_request = self._CreateRequest(grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
# Make sure approval manager is active.
mock_mgr.IsActive.return_value = True
approval_checks.CheckApprovalRequest(approval_request)
self.assertLen(mock_mgr.CheckApproversForLabel.mock_calls, 2)
args = mock_mgr.CheckApproversForLabel.mock_calls[0][1]
self.assertEqual(args, (rdfvalue.RDFURN(
self.client_id), u"requestor", set(["grantor1", "grantor2"]), u"bar"))
args = mock_mgr.CheckApproversForLabel.mock_calls[1][1]
self.assertEqual(args, (rdfvalue.RDFURN(
self.client_id), u"requestor", set(["grantor1", "grantor2"]), u"foo"))
@mock.patch(client_approval_auth.__name__ + ".CLIENT_APPROVAL_AUTH_MGR")
def testWhenAuthMgrActiveRaisesIfAuthMgrRaises(self, mock_mgr):
data_store.REL_DB.AddClientLabels(self.client_id, u"GRR", [u"foo"])
approval_request = self._CreateRequest(grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
# Make sure approval manager is active.
mock_mgr.IsActive.return_value = True
# CheckApproversForLabel should raise.
error = access_control.UnauthorizedAccess("some error")
mock_mgr.CheckApproversForLabel.side_effect = error
with self.assertRaisesRegex(access_control.UnauthorizedAccess,
"some error"):
approval_checks.CheckApprovalRequest(approval_request)
class CheckHuntAndCronJobApprovalRequestTestMixin(acl_test_lib.AclTestMixin):
APPROVAL_TYPE = None
def _CreateRequest(self, expiration_time=None, grants=None):
if not self.APPROVAL_TYPE:
raise ValueError("APPROVAL_TYPE has to be set.")
return _CreateApprovalRequest(
self.APPROVAL_TYPE,
"123456",
expiration_time=expiration_time,
grants=grants)
def setUp(self):
super().setUp()
self.CreateUser(u"grantor1")
self.CreateUser(u"grantor2")
def testRaisesWhenNoGrants(self):
approval_request = self._CreateRequest(grants=[])
with self.assertRaisesRegex(
access_control.UnauthorizedAccess,
"Need at least 2 additional approvers for access"):
approval_checks.CheckApprovalRequest(approval_request)
def testRaisesWhenJustOneGrant(self):
approval_request = self._CreateRequest(
grants=[rdf_objects.ApprovalGrant(grantor_username=u"grantor1")])
with self.assertRaisesRegex(
access_control.UnauthorizedAccess,
"Need at least 1 additional approver for access"):
approval_checks.CheckApprovalRequest(approval_request)
def testRaisesWhenNoGrantsFromAdmins(self):
approval_request = self._CreateRequest(grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
with self.assertRaisesRegex(access_control.UnauthorizedAccess,
"Need at least 1 admin approver for access"):
approval_checks.CheckApprovalRequest(approval_request)
def testRaisesIfApprovalExpired(self):
# Make sure that approval is otherwise valid.
self.CreateAdminUser(u"grantor2")
approval_request = self._CreateRequest(
expiration_time=rdfvalue.RDFDatetime.Now() -
rdfvalue.Duration.From(1, rdfvalue.MINUTES),
grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
with self.assertRaisesRegex(access_control.UnauthorizedAccess,
"Approval request is expired"):
approval_checks.CheckApprovalRequest(approval_request)
def testReturnsIfApprovalIsNotExpiredAndHasTwoGrantsIncludingAdmin(self):
self.CreateAdminUser(u"grantor2")
approval_request = self._CreateRequest(grants=[
rdf_objects.ApprovalGrant(grantor_username=u"grantor1"),
rdf_objects.ApprovalGrant(grantor_username=u"grantor2")
])
approval_checks.CheckApprovalRequest(approval_request)
class CheckHuntApprovalRequestTest(CheckHuntAndCronJobApprovalRequestTestMixin,
test_lib.GRRBaseTest):
APPROVAL_TYPE = rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_HUNT
class CheckCronJobApprovalRequestTest(
CheckHuntAndCronJobApprovalRequestTestMixin, test_lib.GRRBaseTest):
APPROVAL_TYPE = (
rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CRON_JOB)
if __name__ == "__main__":
app.run(test_lib.main)
|